Mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2025-12-12 14:48:52 +00:00)
Allow vector and scalar offset in buffer address arg to LoadBuffer/StoreBuffer (#3439)
* Allow vector and scalar offset in buffer address arg to LoadBuffer/StoreBuffer
* remove is_ring check
* fix atomics and update pattern matching for tess factor stores
* remove old asserts about soffset
* small fixes
* copyright
* Handle sgpr initialization for 2 special hull shader values, including tess factor buffer offset
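In short, the address operand of the LoadBuffer/StoreBuffer IR instructions now carries three separate components (index, vector offset, scalar offset) instead of a single pre-combined value. As a rough, self-contained sketch of the addressing that the passes below implement (plain C++ with made-up names, not the emulator's IR types), the components combine like this for a plain, non-swizzled buffer:

#include <cstdint>

// Illustrative model only: mirrors how the passes below combine the three address
// components (index, voffset, soffset) of a non-swizzled buffer instruction.
// The struct, field, and parameter names are invented for this sketch.
struct BufferAddress {
    std::uint32_t index;   // vgpr index, used when index_enable is set
    std::uint32_t voffset; // vgpr offset, used when voffset_enable is set
    std::uint32_t soffset; // scalar (SGPR or constant) offset, now kept separate
};

std::uint32_t LinearByteOffset(const BufferAddress& addr, std::uint32_t stride,
                               std::uint32_t inst_offset, bool index_enable,
                               bool voffset_enable) {
    const std::uint32_t index = index_enable ? addr.index : 0u;
    const std::uint32_t offset =
        inst_offset + addr.soffset + (voffset_enable ? addr.voffset : 0u);
    return index * stride + offset;
}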
@@ -153,7 +153,11 @@ std::string NameOf(Attribute attribute) {
     case Attribute::TessellationEvaluationPointV:
         return "TessellationEvaluationPointV";
     case Attribute::PackedHullInvocationInfo:
         return "PackedHullInvocationInfo";
+    case Attribute::OffChipLdsBase:
+        return "OffChipLdsBase";
+    case Attribute::TessFactorsBufferBase:
+        return "TessFactorsBufferBase";
     default:
         break;
     }
@@ -85,6 +85,8 @@ enum class Attribute : u64 {
     TessellationEvaluationPointU = 88,
     TessellationEvaluationPointV = 89,
     PackedHullInvocationInfo = 90, // contains patch id within the VGT and invocation ID
+    OffChipLdsBase = 91,
+    TessFactorsBufferBase = 92,
     Max,
 };
src/shader_recompiler/ir/operand_helper.h (new file, 48 lines)
@@ -0,0 +1,48 @@
// SPDX-FileCopyrightText: Copyright 2025 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

// Some helpers to get operand indices of instructions by name, to make things a bit safer.
// Just a start; not yet widely used.

#include "shader_recompiler/ir/value.h"

namespace Shader::IR {

// Namespaces of constants are used here; enums would be a better choice, but they would
// annoyingly need casting to size_t every time they are used as indices.

namespace LoadBufferArgs {
static const size_t Handle = 0;
static const size_t Address = 1;
}; // namespace LoadBufferArgs

namespace StoreBufferArgs {
static const size_t Handle = 0;
static const size_t Address = 1;
static const size_t Data = 2;
}; // namespace StoreBufferArgs

static_assert(LoadBufferArgs::Handle == StoreBufferArgs::Handle);
static_assert(LoadBufferArgs::Address == StoreBufferArgs::Address);

// Get certain components of the buffer address argument used by the Load/StoreBuffer variants.
// The components are kept separate as a u32x3 and only combined after sharp tracking.
static inline IR::U32 GetBufferAddressComponent(const Inst* buffer_inst, u32 comp) {
    Inst* address = buffer_inst->Arg(1).InstRecursive();
    ASSERT(address->GetOpcode() == IR::Opcode::CompositeConstructU32x3);
    return IR::U32{address->Arg(comp).Resolve()};
}

static inline U32 GetBufferIndexArg(const Inst* buffer_inst) {
    return GetBufferAddressComponent(buffer_inst, 0);
}

static inline U32 GetBufferVOffsetArg(const Inst* buffer_inst) {
    return GetBufferAddressComponent(buffer_inst, 1);
}

static inline U32 GetBufferSOffsetArg(const Inst* buffer_inst) {
    return GetBufferAddressComponent(buffer_inst, 2);
}

} // namespace Shader::IR
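For illustration, a hypothetical pass fragment using these helpers could look like the sketch below. It is not part of the commit: the pass name, the enclosing Optimization namespace, and the loop are made up for the example, and it assumes the frontend has already packed the address operand into a CompositeConstructU32x3 as the header above expects.

// Hypothetical usage sketch: read the three address components of every LoadBufferU32
// in a block through the helpers instead of poking at raw operand indices.
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/opcodes.h"
#include "shader_recompiler/ir/operand_helper.h"

namespace Shader::Optimization {

void InspectBufferLoads(IR::Block& block) {
    for (IR::Inst& inst : block) {
        if (inst.GetOpcode() != IR::Opcode::LoadBufferU32) {
            continue;
        }
        // Components stay separate until sharp tracking combines them into one offset.
        const IR::U32 index = IR::GetBufferIndexArg(&inst);
        const IR::U32 voffset = IR::GetBufferVOffsetArg(&inst);
        const IR::U32 soffset = IR::GetBufferSOffsetArg(&inst);
        (void)index;
        (void)voffset;
        (void)soffset;
    }
}

} // namespace Shader::Optimization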
@@ -7,6 +7,7 @@
 #include "shader_recompiler/ir/breadth_first_search.h"
 #include "shader_recompiler/ir/ir_emitter.h"
 #include "shader_recompiler/ir/opcodes.h"
+#include "shader_recompiler/ir/operand_helper.h"
 #include "shader_recompiler/ir/passes/ir_passes.h"
 #include "shader_recompiler/ir/pattern_matching.h"
 #include "shader_recompiler/ir/program.h"
@@ -373,11 +374,27 @@ void HullShaderTransform(IR::Program& program, RuntimeInfo& runtime_info) {
         case IR::Opcode::StoreBufferU32x2:
         case IR::Opcode::StoreBufferU32x3:
         case IR::Opcode::StoreBufferU32x4: {
-            const auto info = inst.Flags<IR::BufferInstInfo>();
-            if (!info.globally_coherent) {
+            IR::Value soffset = IR::GetBufferSOffsetArg(&inst);
+            if (!M_GETATTRIBUTEU32(MatchAttribute(IR::Attribute::TessFactorsBufferBase),
+                                   MatchIgnore())
+                     .Match(soffset)) {
                 break;
             }
+
+            const auto info = inst.Flags<IR::BufferInstInfo>();
             IR::IREmitter ir{*block, IR::Block::InstructionList::s_iterator_to(inst)};
+
+            IR::Value voffset;
+            bool success =
+                M_COMPOSITECONSTRUCTU32X3(MatchU32(0), MatchImm(voffset), MatchIgnore())
+                    .Match(inst.Arg(IR::StoreBufferArgs::Address));
+            ASSERT_MSG(success, "unhandled pattern in tess factor store");
+
+            const u32 gcn_factor_idx = (info.inst_offset.Value() + voffset.U32()) >> 2;
+            const IR::Value data = inst.Arg(IR::StoreBufferArgs::Data);
+
+            const u32 num_dwords = u32(opcode) - u32(IR::Opcode::StoreBufferU32) + 1;
+
             const auto GetValue = [&](IR::Value data) -> IR::F32 {
                 if (auto* inst = data.TryInstRecursive();
                     inst && inst->GetOpcode() == IR::Opcode::BitCastU32F32) {
@@ -385,12 +402,7 @@ void HullShaderTransform(IR::Program& program, RuntimeInfo& runtime_info) {
                 }
                 return ir.BitCast<IR::F32, IR::U32>(IR::U32{data});
             };
-            const u32 num_dwords = u32(opcode) - u32(IR::Opcode::StoreBufferU32) + 1;
-            IR::U32 index = IR::U32{inst.Arg(1)};
-            ASSERT(index.IsImmediate());
-            const u32 gcn_factor_idx = (info.inst_offset.Value() + index.U32()) >> 2;
-
-            const IR::Value data = inst.Arg(2);
+
             auto get_factor_attr = [&](u32 gcn_factor_idx) -> IR::Patch {
                 // The hull outputs tess factors in different formats depending on the shader.
                 // For triangle domains, it seems to pack the entries into 4 consecutive floats,
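The tess-factor store path above divides the combined byte offset by four to get a dword index into the GCN tess-factor layout. A tiny self-contained arithmetic check of that step follows; the input values are made-up examples, not taken from a real shader.

#include <cstdint>

// Dword index used by the tess factor store above: byte offset -> dword index.
constexpr std::uint32_t GcnFactorIndex(std::uint32_t inst_offset, std::uint32_t voffset) {
    return (inst_offset + voffset) >> 2;
}

// e.g. an immediate instruction offset of 8 bytes plus a matched vgpr offset of 4 bytes
// lands on the fourth tess factor dword (index 3).
static_assert(GcnFactorIndex(8, 4) == 3);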
@@ -6,6 +6,7 @@
 #include "shader_recompiler/ir/basic_block.h"
 #include "shader_recompiler/ir/breadth_first_search.h"
 #include "shader_recompiler/ir/ir_emitter.h"
+#include "shader_recompiler/ir/operand_helper.h"
 #include "shader_recompiler/ir/program.h"
 #include "shader_recompiler/ir/reinterpret.h"
 #include "video_core/amdgpu/resource.h"
@@ -740,22 +741,25 @@ IR::U32 CalculateBufferAddress(IR::IREmitter& ir, const IR::Inst& inst, const In
                                  : buffer.GetDataFmt();
     const u32 shift = BufferAddressShift(inst, data_format);
     const u32 mask = (1 << shift) - 1;
+    const IR::U32 soffset = IR::GetBufferSOffsetArg(&inst);

     // If address calculation is of the form "index * const_stride + offset" with offset constant
     // and both const_stride and offset are divisible with the element size, apply shift directly.
-    if (inst_info.index_enable && !inst_info.offset_enable && !buffer.swizzle_enable &&
-        !buffer.add_tid_enable && (stride & mask) == 0 && (inst_offset & mask) == 0) {
-        // buffer_offset = index * (const_stride >> shift) + (inst_offset >> shift)
-        const IR::U32 index = IR::U32{inst.Arg(1)};
-        return ir.IAdd(ir.IMul(index, ir.Imm32(stride >> shift)), ir.Imm32(inst_offset >> shift));
+    if (inst_info.index_enable && !inst_info.voffset_enable && soffset.IsImmediate() &&
+        !buffer.swizzle_enable && !buffer.add_tid_enable && (stride & mask) == 0) {
+        const u32 total_offset = soffset.U32() + inst_offset;
+        if ((total_offset & mask) == 0) {
+            // buffer_offset = index * (const_stride >> shift) + (offset >> shift)
+            const IR::U32 index = IR::GetBufferIndexArg(&inst);
+            return ir.IAdd(ir.IMul(index, ir.Imm32(stride >> shift)),
+                           ir.Imm32(total_offset >> shift));
+        }
     }

     // index = (inst_idxen ? vgpr_index : 0) + (const_add_tid_enable ? thread_id[5:0] : 0)
     IR::U32 index = ir.Imm32(0U);
     if (inst_info.index_enable) {
-        const IR::U32 vgpr_index{inst_info.offset_enable
-                                     ? IR::U32{ir.CompositeExtract(inst.Arg(1), 0)}
-                                     : IR::U32{inst.Arg(1)}};
+        const IR::U32 vgpr_index = IR::GetBufferIndexArg(&inst);
         index = ir.IAdd(index, vgpr_index);
     }
     if (buffer.add_tid_enable) {
@@ -766,11 +770,10 @@ IR::U32 CalculateBufferAddress(IR::IREmitter& ir, const IR::Inst& inst, const In
     }
     // offset = (inst_offen ? vgpr_offset : 0) + inst_offset
     IR::U32 offset = ir.Imm32(inst_offset);
-    if (inst_info.offset_enable) {
-        const IR::U32 vgpr_offset = inst_info.index_enable
-                                        ? IR::U32{ir.CompositeExtract(inst.Arg(1), 1)}
-                                        : IR::U32{inst.Arg(1)};
-        offset = ir.IAdd(offset, vgpr_offset);
+    offset = ir.IAdd(offset, soffset);
+    if (inst_info.voffset_enable) {
+        const IR::U32 voffset = IR::GetBufferVOffsetArg(&inst);
+        offset = ir.IAdd(offset, voffset);
     }
     const IR::U32 const_stride = ir.Imm32(stride);
     IR::U32 buffer_offset;
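The fast path above folds the division by the element size into both terms of the address. A self-contained check of why that folding is valid follows; it assumes a non-swizzled buffer and stride and combined constant offset that are multiples of the element size (1 << shift), and all names are invented for the sketch.

#include <cstdint>

// Byte-offset form: index * stride + (inst_offset + soffset).
constexpr std::uint32_t ByteOffset(std::uint32_t index, std::uint32_t stride,
                                   std::uint32_t total_offset) {
    return index * stride + total_offset;
}

// Folded element-offset form used by the fast path: divide each term separately.
constexpr std::uint32_t FoldedElementOffset(std::uint32_t index, std::uint32_t stride,
                                            std::uint32_t total_offset, std::uint32_t shift) {
    return index * (stride >> shift) + (total_offset >> shift);
}

// Example: 16-byte elements (shift = 4), stride 32, combined constant offset 48.
static_assert(ByteOffset(5, 32, 48) >> 4 == FoldedElementOffset(5, 32, 48, 4));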
@@ -815,7 +818,8 @@ void PatchBufferArgs(IR::Block& block, IR::Inst& inst, Info& info) {
     }

     IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
-    inst.SetArg(1, CalculateBufferAddress(ir, inst, info, buffer, buffer.stride));
+    inst.SetArg(IR::LoadBufferArgs::Address,
+                CalculateBufferAddress(ir, inst, info, buffer, buffer.stride));
 }

 IR::Value FixCubeCoords(IR::IREmitter& ir, const AmdGpu::Image& image, const IR::Value& x,
@@ -4,6 +4,7 @@
 #include "common/assert.h"
 #include "shader_recompiler/ir/ir_emitter.h"
 #include "shader_recompiler/ir/opcodes.h"
+#include "shader_recompiler/ir/operand_helper.h"
 #include "shader_recompiler/ir/position.h"
 #include "shader_recompiler/ir/program.h"
 #include "shader_recompiler/ir/reg.h"
@@ -113,10 +114,12 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
                 break;
             }

-            const auto shl_inst = inst.Arg(1).TryInstRecursive();
-            const auto vertex_id = shl_inst->Arg(0).Resolve().U32() >> 2;
-            const auto offset = inst.Arg(1).TryInstRecursive()->Arg(1);
-            const auto bucket = offset.Resolve().U32() / 256u;
+            const auto vertex_id = (info.index_enable ? IR::GetBufferIndexArg(&inst)
+                                                      : IR::GetBufferVOffsetArg(&inst))
+                                       .U32() >>
+                                   2;
+            const auto soffset = IR::GetBufferSOffsetArg(&inst);
+            const auto bucket = soffset.Resolve().U32() / 256u;
             const auto attrib = bucket < 4 ? IR::Attribute::Position0
                                            : IR::Attribute::Param0 + (bucket / 4 - 1);
             const auto comp = bucket % 4;
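The ring-export path above turns the scalar offset into an output attribute slot: every 256 bytes of soffset selects a bucket, the first four buckets map to Position0, later groups of four map to Param0, Param1, and so on, and the bucket's low two bits pick the component. A small self-contained model of that mapping follows; the struct and values are invented examples and do not mirror IR::Attribute's actual numbering.

#include <cstdint>

// Illustrative model of the bucket -> attribute mapping used above.
struct ExportSlot {
    bool is_position;    // true -> Position0, false -> ParamN
    std::uint32_t param; // N for ParamN (unused when is_position is true)
    std::uint32_t comp;  // x/y/z/w component within the attribute
};

constexpr ExportSlot MapRingOffset(std::uint32_t soffset) {
    const std::uint32_t bucket = soffset / 256u;
    if (bucket < 4) {
        return {true, 0, bucket % 4};
    }
    return {false, bucket / 4 - 1, bucket % 4};
}

// soffset 0x300 -> bucket 3 -> Position0.w; soffset 0x500 -> bucket 5 -> Param0.y
static_assert(MapRingOffset(0x300).is_position && MapRingOffset(0x300).comp == 3);
static_assert(!MapRingOffset(0x500).is_position && MapRingOffset(0x500).param == 0 &&
              MapRingOffset(0x500).comp == 1);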
@@ -121,6 +121,8 @@ inline auto MakeInstPattern(Args&&... args) {
     MakeInstPattern<IR::Opcode::SetTcsGenericAttribute>(__VA_ARGS__)
 #define M_COMPOSITECONSTRUCTU32X2(...)                                                             \
     MakeInstPattern<IR::Opcode::CompositeConstructU32x2>(__VA_ARGS__)
+#define M_COMPOSITECONSTRUCTU32X3(...)                                                             \
+    MakeInstPattern<IR::Opcode::CompositeConstructU32x3>(__VA_ARGS__)
 #define M_COMPOSITECONSTRUCTU32X4(...)                                                             \
     MakeInstPattern<IR::Opcode::CompositeConstructU32x4>(__VA_ARGS__)
@@ -51,7 +51,7 @@ union TextureInstInfo {
 union BufferInstInfo {
     u32 raw;
     BitField<0, 1, u32> index_enable;
-    BitField<1, 1, u32> offset_enable;
+    BitField<1, 1, u32> voffset_enable;
     BitField<2, 12, u32> inst_offset;
     BitField<14, 1, u32> system_coherent;
     BitField<15, 1, u32> globally_coherent;
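The renamed voffset_enable keeps the same bit position the old offset_enable occupied. A plain-integer reading of the flag layout implied by the BitField declarations above follows; it is only an illustration, the emulator itself uses its BitField template rather than these hand-written masks.

#include <cstdint>

constexpr std::uint32_t IndexEnable(std::uint32_t raw) {
    return (raw >> 0) & 0x1; // BitField<0, 1>
}
constexpr std::uint32_t VoffsetEnable(std::uint32_t raw) {
    return (raw >> 1) & 0x1; // BitField<1, 1>, same bit the old offset_enable used
}
constexpr std::uint32_t InstOffset(std::uint32_t raw) {
    return (raw >> 2) & 0xFFF; // BitField<2, 12>
}

static_assert(VoffsetEnable(0b10) == 1);
static_assert(InstOffset(16u << 2) == 16);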