shader_recompiler: Reorganize data share operations and implement GDS bit (#3222)

* shader_recompiler: Reorganize data share operations and implement GDS bit

* Review comments
Authored by TheTurtle on 2025-07-10 13:38:50 +03:00, committed by GitHub
parent dc6ef99dc7
commit 27cbd6647f
15 changed files with 525 additions and 291 deletions

View File

@@ -3,7 +3,6 @@
 #include "shader_recompiler/frontend/translate/translate.h"
 #include "shader_recompiler/ir/reg.h"
-#include "shader_recompiler/profile.h"
 #include "shader_recompiler/runtime_info.h"
 
 namespace Shader::Gcn {
@@ -12,29 +11,29 @@ void Translator::EmitDataShare(const GcnInst& inst) {
     switch (inst.opcode) {
         // DS
    case Opcode::DS_ADD_U32:
-        return DS_ADD_U32(inst, false);
+        return DS_OP(inst, AtomicOp::Add, false);
    case Opcode::DS_ADD_U64:
-        return DS_ADD_U64(inst, false);
+        return DS_OP<IR::U64>(inst, AtomicOp::Add, false);
    case Opcode::DS_SUB_U32:
-        return DS_SUB_U32(inst, false);
+        return DS_OP(inst, AtomicOp::Sub, false);
    case Opcode::DS_INC_U32:
-        return DS_INC_U32(inst, false);
+        return DS_OP(inst, AtomicOp::Inc, false);
    case Opcode::DS_DEC_U32:
-        return DS_DEC_U32(inst, false);
+        return DS_OP(inst, AtomicOp::Dec, false);
    case Opcode::DS_MIN_I32:
-        return DS_MIN_U32(inst, true, false);
+        return DS_OP(inst, AtomicOp::Smin, false);
    case Opcode::DS_MAX_I32:
-        return DS_MAX_U32(inst, true, false);
+        return DS_OP(inst, AtomicOp::Smax, false);
    case Opcode::DS_MIN_U32:
-        return DS_MIN_U32(inst, false, false);
+        return DS_OP(inst, AtomicOp::Umin, false);
    case Opcode::DS_MAX_U32:
-        return DS_MAX_U32(inst, false, false);
+        return DS_OP(inst, AtomicOp::Umax, false);
    case Opcode::DS_AND_B32:
-        return DS_AND_B32(inst, false);
+        return DS_OP(inst, AtomicOp::And, false);
    case Opcode::DS_OR_B32:
-        return DS_OR_B32(inst, false);
+        return DS_OP(inst, AtomicOp::Or, false);
    case Opcode::DS_XOR_B32:
-        return DS_XOR_B32(inst, false);
+        return DS_OP(inst, AtomicOp::Xor, false);
    case Opcode::DS_WRITE_B32:
        return DS_WRITE(32, false, false, false, inst);
    case Opcode::DS_WRITE2_B32:
@@ -42,19 +41,19 @@ void Translator::EmitDataShare(const GcnInst& inst) {
    case Opcode::DS_WRITE2ST64_B32:
        return DS_WRITE(32, false, true, true, inst);
    case Opcode::DS_ADD_RTN_U32:
-        return DS_ADD_U32(inst, true);
+        return DS_OP(inst, AtomicOp::Add, true);
    case Opcode::DS_SUB_RTN_U32:
-        return DS_SUB_U32(inst, true);
+        return DS_OP(inst, AtomicOp::Sub, true);
    case Opcode::DS_MIN_RTN_U32:
-        return DS_MIN_U32(inst, false, true);
+        return DS_OP(inst, AtomicOp::Umin, true);
    case Opcode::DS_MAX_RTN_U32:
-        return DS_MAX_U32(inst, false, true);
+        return DS_OP(inst, AtomicOp::Umax, true);
    case Opcode::DS_AND_RTN_B32:
-        return DS_AND_B32(inst, true);
+        return DS_OP(inst, AtomicOp::And, true);
    case Opcode::DS_OR_RTN_B32:
-        return DS_OR_B32(inst, true);
+        return DS_OP(inst, AtomicOp::Or, true);
    case Opcode::DS_XOR_RTN_B32:
-        return DS_XOR_B32(inst, true);
+        return DS_OP(inst, AtomicOp::Xor, true);
    case Opcode::DS_SWIZZLE_B32:
        return DS_SWIZZLE_B32(inst);
    case Opcode::DS_READ_B32:
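The whole hunk above is one mechanical substitution: a dozen near-identical per-opcode handlers collapse into a single handler parameterized by an AtomicOp enum, an element type, and the rtn flag. A minimal standalone sketch of that consolidation pattern; `ds_op` and this `AtomicOp` are hypothetical stand-ins, not the repo's types:

```cpp
#include <cstdint>

// Stand-in for the translator's AtomicOp; only the ops sketched below.
enum class AtomicOp { Add, Sub, And, Or, Xor };

// One templated handler replaces per-opcode functions; opcode dispatch
// shrinks to choosing the enum value, the width T, and the rtn flag.
template <typename T = std::uint32_t>
T ds_op(T& mem, T data, AtomicOp op, bool rtn) {
    const T original = mem; // a real translator emits an atomic RMW here
    switch (op) {
    case AtomicOp::Add: mem = original + data; break;
    case AtomicOp::Sub: mem = original - data; break;
    case AtomicOp::And: mem = original & data; break;
    case AtomicOp::Or:  mem = original | data; break;
    case AtomicOp::Xor: mem = original ^ data; break;
    }
    return rtn ? original : T{}; // *_RTN_* opcodes want the old value back
}
```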
@@ -117,92 +116,63 @@ void Translator::V_WRITELANE_B32(const GcnInst& inst) {
 
 // DS
 
-void Translator::DS_ADD_U32(const GcnInst& inst, bool rtn) {
+template <typename T>
+void Translator::DS_OP(const GcnInst& inst, AtomicOp op, bool rtn) {
+    const bool is_gds = inst.control.ds.gds;
     const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
+    const T data = [&] {
+        if (op == AtomicOp::Inc || op == AtomicOp::Dec) {
+            return T{};
+        }
+        if constexpr (std::is_same_v<T, IR::U32>) {
+            return GetSrc(inst.src[1]);
+        } else {
+            return GetSrc64(inst.src[1]);
+        }
+    }();
     const IR::U32 offset =
         ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
     const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicIAdd(addr_offset, data);
+    const T original_val = [&] -> T {
+        switch (op) {
+        case AtomicOp::Add:
+            return ir.SharedAtomicIAdd(addr_offset, data, is_gds);
+        case AtomicOp::Umin:
+            return ir.SharedAtomicIMin(addr_offset, data, false, is_gds);
+        case AtomicOp::Smin:
+            return ir.SharedAtomicIMin(addr_offset, data, true, is_gds);
+        case AtomicOp::Umax:
+            return ir.SharedAtomicIMax(addr_offset, data, false, is_gds);
+        case AtomicOp::Smax:
+            return ir.SharedAtomicIMax(addr_offset, data, true, is_gds);
+        case AtomicOp::And:
+            return ir.SharedAtomicAnd(addr_offset, data, is_gds);
+        case AtomicOp::Or:
+            return ir.SharedAtomicOr(addr_offset, data, is_gds);
+        case AtomicOp::Xor:
+            return ir.SharedAtomicXor(addr_offset, data, is_gds);
+        case AtomicOp::Sub:
+            return ir.SharedAtomicISub(addr_offset, data, is_gds);
+        case AtomicOp::Inc:
+            return ir.SharedAtomicInc<T>(addr_offset, is_gds);
+        case AtomicOp::Dec:
+            return ir.SharedAtomicDec<T>(addr_offset, is_gds);
+        default:
+            UNREACHABLE();
+        }
+    }();
     if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_ADD_U64(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U64 data{GetSrc64(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicIAdd(addr_offset, data);
-    if (rtn) {
-        SetDst64(inst.dst[0], IR::U64{original_val});
-    }
-}
-
-void Translator::DS_MIN_U32(const GcnInst& inst, bool is_signed, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicIMin(addr_offset, data, is_signed);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_MAX_U32(const GcnInst& inst, bool is_signed, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicIMax(addr_offset, data, is_signed);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_AND_B32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicAnd(addr_offset, data);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_OR_B32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicOr(addr_offset, data);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_XOR_B32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicXor(addr_offset, data);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
+        if constexpr (std::is_same_v<T, IR::U32>) {
+            SetDst(inst.dst[0], original_val);
+        } else {
+            SetDst64(inst.dst[0], original_val);
+        }
     }
 }
 
 void Translator::DS_WRITE(int bit_size, bool is_signed, bool is_pair, bool stride64,
                           const GcnInst& inst) {
+    const bool is_gds = inst.control.ds.gds;
     const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
     const IR::VectorReg data0{inst.src[1].code};
     const IR::VectorReg data1{inst.src[2].code};
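DS instructions encode their displacement as two 8-bit immediates; for the single-address forms, DS_OP above folds them into one 16-bit byte offset via `ir.Imm32((u32(offset1) << 8u) + u32(offset0))`. A quick self-check of that encoding (the helper name is made up for illustration):

```cpp
#include <cassert>
#include <cstdint>

// offset1 supplies the high byte, offset0 the low byte, of a 16-bit
// byte displacement added to the DS address operand.
std::uint32_t ds_imm_offset(std::uint8_t offset1, std::uint8_t offset0) {
    return (std::uint32_t(offset1) << 8u) + std::uint32_t(offset0);
}

int main() {
    assert(ds_imm_offset(0x01, 0x20) == 0x120); // 288 bytes past the base
    assert(ds_imm_offset(0x00, 0x00) == 0);     // no displacement
    return 0;
}
```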
@@ -220,33 +190,85 @@ void Translator::DS_WRITE(int bit_size, bool is_signed, bool is_pair, bool strid
             ir.WriteShared(64,
                            ir.PackUint2x32(ir.CompositeConstruct(ir.GetVectorReg(data0),
                                                                  ir.GetVectorReg(data0 + 1))),
-                           addr0);
+                           addr0, is_gds);
         } else if (bit_size == 32) {
-            ir.WriteShared(32, ir.GetVectorReg(data0), addr0);
+            ir.WriteShared(32, ir.GetVectorReg(data0), addr0, is_gds);
         } else if (bit_size == 16) {
-            ir.WriteShared(16, ir.UConvert(16, ir.GetVectorReg(data0)), addr0);
+            ir.WriteShared(16, ir.UConvert(16, ir.GetVectorReg(data0)), addr0, is_gds);
         }
         const IR::U32 addr1 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset1 * adj)));
         if (bit_size == 64) {
             ir.WriteShared(64,
                            ir.PackUint2x32(ir.CompositeConstruct(ir.GetVectorReg(data1),
                                                                  ir.GetVectorReg(data1 + 1))),
-                           addr1);
+                           addr1, is_gds);
         } else if (bit_size == 32) {
-            ir.WriteShared(32, ir.GetVectorReg(data1), addr1);
+            ir.WriteShared(32, ir.GetVectorReg(data1), addr1, is_gds);
         } else if (bit_size == 16) {
-            ir.WriteShared(16, ir.UConvert(16, ir.GetVectorReg(data1)), addr1);
+            ir.WriteShared(16, ir.UConvert(16, ir.GetVectorReg(data1)), addr1, is_gds);
         }
     } else {
         const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(offset));
         if (bit_size == 64) {
             const IR::Value data =
                 ir.CompositeConstruct(ir.GetVectorReg(data0), ir.GetVectorReg(data0 + 1));
-            ir.WriteShared(bit_size, ir.PackUint2x32(data), addr0);
+            ir.WriteShared(bit_size, ir.PackUint2x32(data), addr0, is_gds);
         } else if (bit_size == 32) {
-            ir.WriteShared(bit_size, ir.GetVectorReg(data0), addr0);
+            ir.WriteShared(bit_size, ir.GetVectorReg(data0), addr0, is_gds);
         } else if (bit_size == 16) {
-            ir.WriteShared(bit_size, ir.UConvert(16, ir.GetVectorReg(data0)), addr0);
+            ir.WriteShared(bit_size, ir.UConvert(16, ir.GetVectorReg(data0)), addr0, is_gds);
         }
     }
 }
 
+void Translator::DS_READ(int bit_size, bool is_signed, bool is_pair, bool stride64,
+                         const GcnInst& inst) {
+    const bool is_gds = inst.control.ds.gds;
+    const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
+    IR::VectorReg dst_reg{inst.dst[0].code};
+    const u32 offset = (inst.control.ds.offset1 << 8u) + inst.control.ds.offset0;
+    if (info.stage == Stage::Fragment) {
+        ASSERT_MSG(!is_pair && bit_size == 32 && offset % 256 == 0,
+                   "Unexpected shared memory offset alignment: {}", offset);
+        ir.SetVectorReg(dst_reg, ir.GetVectorReg(GetScratchVgpr(offset)));
+        return;
+    }
+    if (is_pair) {
+        // Pair loads are either 32 or 64-bit
+        const u32 adj = (bit_size == 32 ? 4 : 8) * (stride64 ? 64 : 1);
+        const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset0 * adj)));
+        const IR::Value data0 = ir.LoadShared(bit_size, is_signed, addr0, is_gds);
+        if (bit_size == 64) {
+            const auto vector = ir.UnpackUint2x32(IR::U64{data0});
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 0)});
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 1)});
+        } else if (bit_size == 32) {
+            ir.SetVectorReg(dst_reg++, IR::U32{data0});
+        } else if (bit_size == 16) {
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data0})});
+        }
+        const IR::U32 addr1 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset1 * adj)));
+        const IR::Value data1 = ir.LoadShared(bit_size, is_signed, addr1, is_gds);
+        if (bit_size == 64) {
+            const auto vector = ir.UnpackUint2x32(IR::U64{data1});
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 0)});
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 1)});
+        } else if (bit_size == 32) {
+            ir.SetVectorReg(dst_reg++, IR::U32{data1});
+        } else if (bit_size == 16) {
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data1})});
+        }
+    } else {
+        const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(offset));
+        const IR::Value data = ir.LoadShared(bit_size, is_signed, addr0, is_gds);
+        if (bit_size == 64) {
+            const auto vector = ir.UnpackUint2x32(IR::U64{data});
+            ir.SetVectorReg(dst_reg, IR::U32{ir.CompositeExtract(vector, 0)});
+            ir.SetVectorReg(dst_reg + 1, IR::U32{ir.CompositeExtract(vector, 1)});
+        } else if (bit_size == 32) {
+            ir.SetVectorReg(dst_reg, IR::U32{data});
+        } else if (bit_size == 16) {
+            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data})});
+        }
+    }
+}
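In the pair forms handled above, offset0/offset1 are element indices rather than byte offsets: `adj` scales them by the element size (4 or 8 bytes), and the ST64 variants stride a further 64 elements. A worked sketch of that address math (the helper name is illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Byte address of one element of a DS_READ2/DS_WRITE2 style pair access.
std::uint32_t pair_elem_addr(std::uint32_t base, std::uint32_t offset,
                             int bit_size, bool stride64) {
    const std::uint32_t adj = (bit_size == 32 ? 4u : 8u) * (stride64 ? 64u : 1u);
    return base + offset * adj;
}

int main() {
    // DS_READ2_B32 with offset0=0, offset1=1: dwords 4 bytes apart.
    std::printf("%u %u\n", (unsigned)pair_elem_addr(0, 0, 32, false),
                (unsigned)pair_elem_addr(0, 1, 32, false)); // 0 4
    // DS_READ2ST64_B32: the same offsets land 256 bytes apart (64 * 4).
    std::printf("%u %u\n", (unsigned)pair_elem_addr(0, 0, 32, true),
                (unsigned)pair_elem_addr(0, 1, 32, true)); // 0 256
    return 0;
}
```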
@@ -263,91 +285,6 @@ void Translator::DS_SWIZZLE_B32(const GcnInst& inst) {
     SetDst(inst.dst[0], ir.QuadShuffle(src, index));
 }
 
-void Translator::DS_INC_U32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicInc(addr_offset);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_DEC_U32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicDec(addr_offset);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_SUB_U32(const GcnInst& inst, bool rtn) {
-    const IR::U32 addr{GetSrc(inst.src[0])};
-    const IR::U32 data{GetSrc(inst.src[1])};
-    const IR::U32 offset =
-        ir.Imm32((u32(inst.control.ds.offset1) << 8u) + u32(inst.control.ds.offset0));
-    const IR::U32 addr_offset = ir.IAdd(addr, offset);
-    const IR::Value original_val = ir.SharedAtomicISub(addr_offset, data);
-    if (rtn) {
-        SetDst(inst.dst[0], IR::U32{original_val});
-    }
-}
-
-void Translator::DS_READ(int bit_size, bool is_signed, bool is_pair, bool stride64,
-                         const GcnInst& inst) {
-    const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
-    IR::VectorReg dst_reg{inst.dst[0].code};
-    const u32 offset = (inst.control.ds.offset1 << 8u) + inst.control.ds.offset0;
-    if (info.stage == Stage::Fragment) {
-        ASSERT_MSG(!is_pair && bit_size == 32 && offset % 256 == 0,
-                   "Unexpected shared memory offset alignment: {}", offset);
-        ir.SetVectorReg(dst_reg, ir.GetVectorReg(GetScratchVgpr(offset)));
-        return;
-    }
-    if (is_pair) {
-        // Pair loads are either 32 or 64-bit
-        const u32 adj = (bit_size == 32 ? 4 : 8) * (stride64 ? 64 : 1);
-        const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset0 * adj)));
-        const IR::Value data0 = ir.LoadShared(bit_size, is_signed, addr0);
-        if (bit_size == 64) {
-            const auto vector = ir.UnpackUint2x32(IR::U64{data0});
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 0)});
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 1)});
-        } else if (bit_size == 32) {
-            ir.SetVectorReg(dst_reg++, IR::U32{data0});
-        } else if (bit_size == 16) {
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data0})});
-        }
-        const IR::U32 addr1 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset1 * adj)));
-        const IR::Value data1 = ir.LoadShared(bit_size, is_signed, addr1);
-        if (bit_size == 64) {
-            const auto vector = ir.UnpackUint2x32(IR::U64{data1});
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 0)});
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.CompositeExtract(vector, 1)});
-        } else if (bit_size == 32) {
-            ir.SetVectorReg(dst_reg++, IR::U32{data1});
-        } else if (bit_size == 16) {
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data1})});
-        }
-    } else {
-        const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(offset));
-        const IR::Value data = ir.LoadShared(bit_size, is_signed, addr0);
-        if (bit_size == 64) {
-            const auto vector = ir.UnpackUint2x32(IR::U64{data});
-            ir.SetVectorReg(dst_reg, IR::U32{ir.CompositeExtract(vector, 0)});
-            ir.SetVectorReg(dst_reg + 1, IR::U32{ir.CompositeExtract(vector, 1)});
-        } else if (bit_size == 32) {
-            ir.SetVectorReg(dst_reg, IR::U32{data});
-        } else if (bit_size == 16) {
-            ir.SetVectorReg(dst_reg++, IR::U32{ir.UConvert(32, IR::U16{data})});
-        }
-    }
-}
-
 void Translator::DS_APPEND(const GcnInst& inst) {
     const u32 inst_offset = (u32(inst.control.ds.offset1) << 8u) + inst.control.ds.offset0;
     const IR::U32 gds_offset = ir.IAdd(ir.GetM0(), ir.Imm32(inst_offset));
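The `gds` control bit this file now threads everywhere selects between LDS (the per-workgroup local data share) and GDS (a small on-chip data share visible to every wave); DS_APPEND already addresses GDS via M0 plus the instruction offset, as the context lines show. The design choice is to carry the bit on the existing shared-memory IR ops rather than mint a parallel set of GDS opcodes. Reduced to its core, with all names hypothetical:

```cpp
#include <cstdint>
#include <vector>

enum class MemPool { Lds, Gds };

// Each shared-memory IR op records which pool it targets; lowering can
// then map GDS accesses differently without a second set of opcodes.
struct SharedLoadOp {
    std::uint32_t addr;
    MemPool pool; // captured from inst.control.ds.gds at translate time
};

std::vector<SharedLoadOp> program;

void emit_load_shared(std::uint32_t addr, bool is_gds) {
    program.push_back({addr, is_gds ? MemPool::Gds : MemPool::Lds});
}
```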

View File

@@ -270,21 +270,13 @@ public:
     // Data share
     // DS
-    void DS_ADD_U32(const GcnInst& inst, bool rtn);
-    void DS_ADD_U64(const GcnInst& inst, bool rtn);
-    void DS_MIN_U32(const GcnInst& inst, bool is_signed, bool rtn);
-    void DS_MAX_U32(const GcnInst& inst, bool is_signed, bool rtn);
+    template <typename T = IR::U32>
+    void DS_OP(const GcnInst& inst, AtomicOp op, bool rtn);
     void DS_WRITE(int bit_size, bool is_signed, bool is_pair, bool stride64, const GcnInst& inst);
-    void DS_SWIZZLE_B32(const GcnInst& inst);
-    void DS_AND_B32(const GcnInst& inst, bool rtn);
-    void DS_OR_B32(const GcnInst& inst, bool rtn);
-    void DS_XOR_B32(const GcnInst& inst, bool rtn);
     void DS_READ(int bit_size, bool is_signed, bool is_pair, bool stride64, const GcnInst& inst);
+    void DS_SWIZZLE_B32(const GcnInst& inst);
     void DS_APPEND(const GcnInst& inst);
     void DS_CONSUME(const GcnInst& inst);
-    void DS_SUB_U32(const GcnInst& inst, bool rtn);
-    void DS_INC_U32(const GcnInst& inst, bool rtn);
-    void DS_DEC_U32(const GcnInst& inst, bool rtn);
 
     // Buffer Memory
     // MUBUF / MTBUF
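The `= IR::U32` default on the template parameter is what keeps the 32-bit call sites in EmitDataShare terse: only the 64-bit opcode needs to spell out `DS_OP<IR::U64>`. A minimal illustration with stand-in types:

```cpp
#include <cstdint>

struct U32 { std::uint32_t value; };
struct U64 { std::uint64_t value; };

template <typename T = U32>
void ds_op(int op, bool rtn) {
    // T cannot be deduced from the arguments, so it falls back to the
    // default U32 unless the caller names a type explicitly.
}

int main() {
    ds_op(0, false);      // the common 32-bit case stays short
    ds_op<U64>(0, false); // DS_ADD_U64-style call opts into 64-bit
    return 0;
}
```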

View File

@@ -565,7 +565,8 @@ void Translator::V_MBCNT_U32_B32(bool is_low, const GcnInst& inst) {
     }
     // v_mbcnt_hi_u32_b32 vX, exec_hi, 0/vZ
     if ((inst.src[0].field == OperandField::ExecHi ||
-         inst.src[0].field == OperandField::VccHi) &&
+         inst.src[0].field == OperandField::VccHi ||
+         inst.src[0].field == OperandField::ScalarGPR) &&
         (inst.src[1].field == OperandField::ConstZero ||
          inst.src[1].field == OperandField::VectorGPR)) {
         return SetDst(inst.dst[0], GetSrc(inst.src[1]));
@@ -579,7 +580,8 @@ void Translator::V_MBCNT_U32_B32(bool is_low, const GcnInst& inst) {
     }
     // v_mbcnt_lo_u32_b32 vY, exec_lo, vX
     // used combined with above for append buffer indexing.
-    if (inst.src[0].field == OperandField::ExecLo || inst.src[0].field == OperandField::VccLo) {
+    if (inst.src[0].field == OperandField::ExecLo || inst.src[0].field == OperandField::VccLo ||
+        inst.src[0].field == OperandField::ScalarGPR) {
         return SetDst(inst.dst[0], GetSrc(inst.src[1]));
     }
     UNREACHABLE();
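Context for the V_MBCNT change: in the GCN ISA, v_mbcnt_lo_u32_b32 computes src1 + popcount(src0 & ((1 << lane) - 1)) in each lane, and the hi variant continues the count across the upper 32 lanes; chained over the full exec mask, the pair yields each thread's index among the active lanes, which is why the translator can forward src1 here and let the surrounding pattern supply the lane index. The two hunks only widen the accepted src0 operands to include a scalar GPR holding a copy of exec. A reference sketch of the lo-half semantics (my reading of the ISA, not code from this commit):

```cpp
#include <bit>
#include <cstdint>

// Per-lane v_mbcnt_lo_u32_b32: number of set mask bits strictly below
// this lane, added to the base operand. Valid for lane in [0, 31].
std::uint32_t mbcnt_lo(std::uint32_t mask, std::uint32_t base, unsigned lane) {
    const std::uint32_t below = mask & ((1u << lane) - 1u);
    return base + static_cast<std::uint32_t>(std::popcount(below));
}
```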