From 399a725343db2c8b07de94ec95789025bc91dd5c Mon Sep 17 00:00:00 2001
From: TheTurtle
Date: Mon, 14 Jul 2025 00:32:02 +0300
Subject: [PATCH] shader_recompiler: Replace buffer pulling with attribute
 divisor for instance step rates (#3238)

* shader_recompiler: Replace buffer pulling with attribute divisor for instance step rates

* flatten_extended_userdata: Remove special step rate buffer handling

* Review comments

* spirv_emit_context: Name all instance rate attribs properly

* spirv: Merge ReadConstBuffer again

template function only has 1 user now

* attribute: Add missing attributes

* translate: Reimplement step rate instance id

* Resolve validation warnings

* shader_recompiler: Separate vertex inputs from LS stage, cleanup tess
---
 .../spirv/emit_spirv_context_get_set.cpp      | 128 +++++++-----------
 .../backend/spirv/emit_spirv_instructions.h   |   2 +-
 .../backend/spirv/spirv_emit_context.cpp      |  74 ++++------
 .../backend/spirv/spirv_emit_context.h        |   1 -
 src/shader_recompiler/frontend/fetch_shader.h |  12 --
 .../frontend/translate/translate.cpp          |  63 +++++----
 src/shader_recompiler/info.h                  |  14 +-
 src/shader_recompiler/ir/attribute.cpp        |  22 ++-
 src/shader_recompiler/ir/attribute.h          |   2 -
 src/shader_recompiler/ir/ir_emitter.cpp       |   4 +-
 src/shader_recompiler/ir/ir_emitter.h         |   3 +-
 .../passes/flatten_extended_userdata_pass.cpp |  24 +---
 .../ir/passes/ring_access_elimination.cpp     |  11 +-
 src/shader_recompiler/ir/passes/srt.h         |  13 +-
 src/shader_recompiler/runtime_info.h          |   6 +-
 src/shader_recompiler/specialization.h        |  20 ++-
 src/video_core/buffer_cache/buffer_cache.cpp  |   5 +-
 .../renderer_vulkan/vk_graphics_pipeline.cpp  |  46 +++++--
 .../renderer_vulkan/vk_graphics_pipeline.h    |   4 +-
 .../renderer_vulkan/vk_instance.cpp           |   4 +
 .../renderer_vulkan/vk_pipeline_cache.cpp     |  19 +--
 .../renderer_vulkan/vk_rasterizer.cpp         |   5 +-
 22 files changed, 208 insertions(+), 274 deletions(-)

diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index f3a8c518c..40f8d307c 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -52,7 +52,7 @@ Id VsOutputAttrPointer(EmitContext& ctx, VsOutput output) {
 Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
     if (IR::IsParam(attr)) {
         const u32 attr_index{u32(attr) - u32(IR::Attribute::Param0)};
-        if (ctx.stage == Stage::Local && ctx.runtime_info.ls_info.links_with_tcs) {
+        if (ctx.stage == Stage::Local) {
             const auto component_ptr = ctx.TypePointer(spv::StorageClass::Output, ctx.F32[1]);
             return ctx.OpAccessChain(component_ptr, ctx.output_attr_array, ctx.ConstU32(attr_index),
                                      ctx.ConstU32(element));
@@ -94,13 +94,9 @@
 
 std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr) {
     if (IR::IsParam(attr)) {
-        if (ctx.stage == Stage::Local && ctx.runtime_info.ls_info.links_with_tcs) {
-            return {ctx.F32[1], false};
-        } else {
-            const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
-            const auto& info{ctx.output_params.at(index)};
-            return {info.component_type, info.is_integer};
-        }
+        const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
+        const auto& info{ctx.output_params.at(index)};
+        return {info.component_type, info.is_integer};
     }
     if (IR::IsMrt(attr)) {
         const u32 index{u32(attr) - u32(IR::Attribute::RenderTarget0)};
@@ -120,6 +116,9 @@ std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr
     }
 } // Anonymous namespace
 
+using PointerType = EmitContext::PointerType;
+using PointerSize = EmitContext::PointerSize;
+
 Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
     const u32 index = ctx.binding.user_data + ctx.info.ud_mask.Index(reg);
     const u32 half = PushData::UdRegsIndex + (index >> 2);
@@ -131,41 +130,6 @@ Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
     return ud_reg;
 }
 
-void EmitGetThreadBitScalarReg(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetThreadBitScalarReg(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetScalarRegister(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetScalarRegister(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetVectorRegister(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetVectorRegister(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetGotoVariable(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetGotoVariable(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-using PointerType = EmitContext::PointerType;
-using PointerSize = EmitContext::PointerSize;
-
 Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
     const u32 flatbuf_off_dw = inst->Flags();
     if (!Config::directMemoryAccess()) {
@@ -180,39 +144,27 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
     }
 }
 
-template <PointerType type>
-Id ReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
+Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
     const auto& buffer = ctx.buffers[handle];
     if (const Id offset = buffer.Offset(PointerSize::B32); Sirit::ValidId(offset)) {
         index = ctx.OpIAdd(ctx.U32[1], index, offset);
     }
-    const auto [id, pointer_type] = buffer.Alias(type);
-    const auto value_type = type == PointerType::U32 ? ctx.U32[1] : ctx.F32[1];
+    const auto [id, pointer_type] = buffer.Alias(PointerType::U32);
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
-    const Id result{ctx.OpLoad(value_type, ptr)};
+    const Id result{ctx.OpLoad(ctx.U32[1], ptr)};
     if (const Id size = buffer.Size(PointerSize::B32); Sirit::ValidId(size)) {
         const Id in_bounds = ctx.OpULessThan(ctx.U1[1], index, size);
-        return ctx.OpSelect(value_type, in_bounds, result, ctx.u32_zero_value);
+        return ctx.OpSelect(ctx.U32[1], in_bounds, result, ctx.u32_zero_value);
     }
     return result;
 }
 
-Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
-    return ReadConstBuffer<PointerType::U32>(ctx, handle, index);
-}
-
-Id EmitReadStepRate(EmitContext& ctx, int rate_idx) {
-    const auto index{rate_idx == 0 ? PushData::Step0Index : PushData::Step1Index};
-    return ctx.OpLoad(
-        ctx.U32[1], ctx.OpAccessChain(ctx.TypePointer(spv::StorageClass::PushConstant, ctx.U32[1]),
-                                      ctx.push_data_block, ctx.ConstU32(index)));
-}
-
-static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
+static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index) {
     if (IR::IsPosition(attr)) {
         ASSERT(attr == IR::Attribute::Position0);
         const auto position_arr_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[4]);
-        const auto pointer{ctx.OpAccessChain(position_arr_ptr, ctx.gl_in, index, ctx.ConstU32(0u))};
+        const auto pointer{
+            ctx.OpAccessChain(position_arr_ptr, ctx.gl_in, ctx.ConstU32(index), ctx.ConstU32(0u))};
         const auto position_comp_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[1]);
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(position_comp_ptr, pointer, ctx.ConstU32(comp)));
@@ -222,7 +174,7 @@ static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32
         const u32 param_id{u32(attr) - u32(IR::Attribute::Param0)};
         const auto param = ctx.input_params.at(param_id).id;
         const auto param_arr_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[4]);
-        const auto pointer{ctx.OpAccessChain(param_arr_ptr, param, index)};
+        const auto pointer{ctx.OpAccessChain(param_arr_ptr, param, ctx.ConstU32(index))};
         const auto position_comp_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[1]);
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(position_comp_ptr, pointer, ctx.ConstU32(comp)));
@@ -230,7 +182,7 @@ static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32
     UNREACHABLE();
 }
 
-Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index) {
     if (ctx.info.l_stage == LogicalStage::Geometry) {
         return EmitGetAttributeForGeometry(ctx, attr, comp, index);
     } else if (ctx.info.l_stage == LogicalStage::TessellationControl ||
@@ -248,18 +200,6 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
     if (IR::IsParam(attr)) {
         const u32 param_index{u32(attr) - u32(IR::Attribute::Param0)};
         const auto& param{ctx.input_params.at(param_index)};
-        if (param.buffer_handle >= 0) {
-            const auto step_rate = EmitReadStepRate(ctx, param.id.value);
-            const auto offset = ctx.OpIAdd(
-                ctx.U32[1],
-                ctx.OpIMul(
-                    ctx.U32[1],
-                    ctx.OpUDiv(ctx.U32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id), step_rate),
-                    ctx.ConstU32(param.num_components)),
-                ctx.ConstU32(comp));
-            return ReadConstBuffer<PointerType::F32>(ctx, param.buffer_handle, offset);
-        }
-
         Id result;
         if (param.is_loaded) {
             // Attribute is either default or manually interpolated. The id points to an already
@@ -305,10 +245,6 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
         return ctx.OpLoad(ctx.U32[1], ctx.vertex_index);
     case IR::Attribute::InstanceId:
         return ctx.OpLoad(ctx.U32[1], ctx.instance_id);
-    case IR::Attribute::InstanceId0:
-        return EmitReadStepRate(ctx, 0);
-    case IR::Attribute::InstanceId1:
-        return EmitReadStepRate(ctx, 1);
    case IR::Attribute::WorkgroupIndex:
         return ctx.workgroup_index_id;
     case IR::Attribute::WorkgroupId:
@@ -640,4 +576,36 @@ void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id a
     UNREACHABLE_MSG("SPIR-V instruction");
 }
 
+void EmitGetThreadBitScalarReg(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetThreadBitScalarReg(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetScalarRegister(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetScalarRegister(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetVectorRegister(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetVectorRegister(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetGotoVariable(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetGotoVariable(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 74c94754d..37d5d84c9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -108,7 +108,7 @@ Id EmitBufferAtomicXor32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addres
 Id EmitBufferAtomicSwap32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicCmpSwap32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value,
                              Id cmp_value);
-Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index);
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index);
 Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 comp);
 Id EmitGetTessGenericAttribute(EmitContext& ctx, Id vertex_index, Id attr_index, Id comp_index);
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
index 6a731d05c..e16bba755 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
@@ -377,35 +377,13 @@ void EmitContext::DefineInputs() {
             ASSERT(attrib.semantic < IR::NumParams);
             const auto sharp = attrib.GetSharp(info);
             const Id type{GetAttributeType(*this, sharp.GetNumberFmt())[4]};
-            if (attrib.UsesStepRates()) {
-                const u32 rate_idx =
-                    attrib.GetStepRate() == Gcn::VertexAttribute::InstanceIdType::OverStepRate0 ? 0
-                                                                                                : 1;
-                const u32 num_components = AmdGpu::NumComponents(sharp.GetDataFmt());
-                const auto buffer =
-                    std::ranges::find_if(info.buffers, [&attrib](const auto& buffer) {
-                        return buffer.instance_attrib == attrib.semantic;
-                    });
-                // Note that we pass index rather than Id
-                input_params[attrib.semantic] = SpirvAttribute{
-                    .id = {rate_idx},
-                    .pointer_type = input_u32,
-                    .component_type = U32[1],
-                    .num_components = std::min(attrib.num_elements, num_components),
-                    .is_integer = true,
-                    .is_loaded = false,
-                    .buffer_handle = int(buffer - info.buffers.begin()),
-                };
+            Id id{DefineInput(type, attrib.semantic)};
+            if (attrib.GetStepRate() != Gcn::VertexAttribute::InstanceIdType::None) {
+                Name(id, fmt::format("vs_instance_attr{}", attrib.semantic));
             } else {
-                Id id{DefineInput(type, attrib.semantic)};
-                if (attrib.GetStepRate() == Gcn::VertexAttribute::InstanceIdType::Plain) {
-                    Name(id, fmt::format("vs_instance_attr{}", attrib.semantic));
-                } else {
-                    Name(id, fmt::format("vs_in_attr{}", attrib.semantic));
-                }
-                input_params[attrib.semantic] =
-                    GetAttributeInfo(sharp.GetNumberFmt(), id, 4, false);
+                Name(id, fmt::format("vs_in_attr{}", attrib.semantic));
             }
+            input_params[attrib.semantic] = GetAttributeInfo(sharp.GetNumberFmt(), id, 4, false);
         }
         break;
     }
@@ -573,7 +551,7 @@ void EmitContext::DefineOutputs() {
         cull_distances =
             DefineVariable(type, spv::BuiltIn::CullDistance, spv::StorageClass::Output);
     }
-    if (stage == Shader::Stage::Local && runtime_info.ls_info.links_with_tcs) {
+    if (stage == Stage::Local) {
         const u32 num_attrs = Common::AlignUp(runtime_info.ls_info.ls_stride, 16) >> 4;
         if (num_attrs > 0) {
             const Id type{TypeArray(F32[4], ConstU32(num_attrs))};
@@ -700,12 +678,10 @@ void EmitContext::DefineOutputs() {
 
 void EmitContext::DefinePushDataBlock() {
     // Create push constants block for instance steps rates
-    const Id struct_type{Name(TypeStruct(U32[1], U32[1], F32[1], F32[1], F32[1], F32[1], U32[4],
-                                         U32[4], U32[4], U32[4], U32[4], U32[4], U32[2]),
+    const Id struct_type{Name(TypeStruct(F32[1], F32[1], F32[1], F32[1], U32[4], U32[4], U32[4],
+                                         U32[4], U32[4], U32[4], U32[2]),
                               "AuxData")};
     Decorate(struct_type, spv::Decoration::Block);
-    MemberName(struct_type, PushData::Step0Index, "sr0");
-    MemberName(struct_type, PushData::Step1Index, "sr1");
     MemberName(struct_type, PushData::XOffsetIndex, "xoffset");
     MemberName(struct_type, PushData::YOffsetIndex, "yoffset");
     MemberName(struct_type, PushData::XScaleIndex, "xscale");
@@ -717,19 +693,17 @@ void EmitContext::DefinePushDataBlock() {
     MemberName(struct_type, PushData::BufOffsetIndex + 0, "buf_offsets0");
     MemberName(struct_type, PushData::BufOffsetIndex + 1, "buf_offsets1");
     MemberName(struct_type, PushData::BufOffsetIndex + 2, "buf_offsets2");
-    MemberDecorate(struct_type, PushData::Step0Index, spv::Decoration::Offset, 0U);
-    MemberDecorate(struct_type, PushData::Step1Index, spv::Decoration::Offset, 4U);
-    MemberDecorate(struct_type, PushData::XOffsetIndex, spv::Decoration::Offset, 8U);
-    MemberDecorate(struct_type, PushData::YOffsetIndex, spv::Decoration::Offset, 12U);
-    MemberDecorate(struct_type, PushData::XScaleIndex, spv::Decoration::Offset, 16U);
-    MemberDecorate(struct_type, PushData::YScaleIndex, spv::Decoration::Offset, 20U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 0, spv::Decoration::Offset, 24U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 1, spv::Decoration::Offset, 40U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 2, spv::Decoration::Offset, 56U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 3, spv::Decoration::Offset, 72U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 0, spv::Decoration::Offset, 88U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 1, spv::Decoration::Offset, 104U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 2, spv::Decoration::Offset, 120U);
+    MemberDecorate(struct_type, PushData::XOffsetIndex, spv::Decoration::Offset, 0U);
+    MemberDecorate(struct_type, PushData::YOffsetIndex, spv::Decoration::Offset, 4U);
+    MemberDecorate(struct_type, PushData::XScaleIndex, spv::Decoration::Offset, 8U);
+    MemberDecorate(struct_type, PushData::YScaleIndex, spv::Decoration::Offset, 12U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 0, spv::Decoration::Offset, 16U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 1, spv::Decoration::Offset, 32U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 2, spv::Decoration::Offset, 48U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 3, spv::Decoration::Offset, 64U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 0, spv::Decoration::Offset, 80U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 1, spv::Decoration::Offset, 96U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 2, spv::Decoration::Offset, 112U);
     push_data_block = DefineVar(struct_type, spv::StorageClass::PushConstant);
     Name(push_data_block, "push_data");
     interfaces.push_back(push_data_block);
@@ -763,19 +737,19 @@ EmitContext::BufferSpv EmitContext::DefineBuffer(bool is_storage, bool is_writte
         Decorate(id, spv::Decoration::NonWritable);
     }
     switch (buffer_type) {
-    case Shader::BufferType::GdsBuffer:
+    case BufferType::GdsBuffer:
         Name(id, "gds_buffer");
         break;
-    case Shader::BufferType::Flatbuf:
+    case BufferType::Flatbuf:
         Name(id, "srt_flatbuf");
         break;
-    case Shader::BufferType::BdaPagetable:
+    case BufferType::BdaPagetable:
         Name(id, "bda_pagetable");
         break;
-    case Shader::BufferType::FaultBuffer:
+    case BufferType::FaultBuffer:
         Name(id, "fault_buffer");
         break;
-    case Shader::BufferType::SharedMemory:
+    case BufferType::SharedMemory:
         Name(id, "ssbo_shmem");
         break;
     default:
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
index 28e9099d8..186925706 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
@@ -361,7 +361,6 @@ public:
         u32 num_components;
         bool is_integer{};
         bool is_loaded{};
-        s32 buffer_handle{-1};
     };
     Id input_attr_array;
     Id output_attr_array;
diff --git a/src/shader_recompiler/frontend/fetch_shader.h b/src/shader_recompiler/frontend/fetch_shader.h
index 837caafa0..e77925232 100644
--- a/src/shader_recompiler/frontend/fetch_shader.h
+++ b/src/shader_recompiler/frontend/fetch_shader.h
@@ -3,7 +3,6 @@
 
 #pragma once
 
-#include <algorithm>
 #include "common/types.h"
 #include "shader_recompiler/info.h"
 
@@ -29,11 +28,6 @@ struct VertexAttribute {
         return static_cast<InstanceIdType>(instance_data);
     }
 
-    [[nodiscard]] bool UsesStepRates() const {
-        const auto step_rate = GetStepRate();
-        return step_rate == OverStepRate0 || step_rate == OverStepRate1;
-    }
-
     [[nodiscard]] constexpr AmdGpu::Buffer GetSharp(const Shader::Info& info) const noexcept {
         return info.ReadUdReg<AmdGpu::Buffer>(sgpr_base, dword_offset);
     }
@@ -52,12 +46,6 @@ struct FetchShaderData {
     s8 vertex_offset_sgpr = -1;   ///< SGPR of vertex offset from VADDR
     s8 instance_offset_sgpr = -1; ///< SGPR of instance offset from VADDR
 
-    [[nodiscard]] bool UsesStepRates() const {
-        return std::ranges::find_if(attributes, [](const VertexAttribute& attribute) {
-                   return attribute.UsesStepRates();
-               }) != attributes.end();
-    }
-
     bool operator==(const FetchShaderData& other) const {
         return attributes == other.attributes && vertex_offset_sgpr == other.vertex_offset_sgpr &&
                instance_offset_sgpr == other.instance_offset_sgpr;
diff --git a/src/shader_recompiler/frontend/translate/translate.cpp b/src/shader_recompiler/frontend/translate/translate.cpp
index 5853f3e72..310ac9156 100644
--- a/src/shader_recompiler/frontend/translate/translate.cpp
+++ b/src/shader_recompiler/frontend/translate/translate.cpp
@@ -90,17 +90,40 @@ void Translator::EmitPrologue(IR::Block* first_block) {
     case LogicalStage::Vertex:
         // v0: vertex ID, always present
         ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::VertexId));
-        // v1: instance ID, step rate 0
-        if (runtime_info.num_input_vgprs > 0) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId0));
-        }
-        // v2: instance ID, step rate 1
-        if (runtime_info.num_input_vgprs > 1) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId1));
-        }
-        // v3: instance ID, plain
-        if (runtime_info.num_input_vgprs > 2) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+        if (info.stage == Stage::Local) {
+            // v1: rel patch ID
+            if (runtime_info.num_input_vgprs > 0) {
+                ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+            }
+            // v2: instance ID
+            if (runtime_info.num_input_vgprs > 1) {
+                ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+            }
+        } else {
+            // v1: instance ID, step rate 0
+            if (runtime_info.num_input_vgprs > 0) {
+                if (runtime_info.vs_info.step_rate_0 != 0) {
+                    ir.SetVectorReg(dst_vreg++,
+                                    ir.IDiv(ir.GetAttributeU32(IR::Attribute::InstanceId),
+                                            ir.Imm32(runtime_info.vs_info.step_rate_0)));
+                } else {
+                    ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+                }
+            }
+            // v2: instance ID, step rate 1
+            if (runtime_info.num_input_vgprs > 1) {
+                if (runtime_info.vs_info.step_rate_1 != 0) {
+                    ir.SetVectorReg(dst_vreg++,
+                                    ir.IDiv(ir.GetAttributeU32(IR::Attribute::InstanceId),
+                                            ir.Imm32(runtime_info.vs_info.step_rate_1)));
+                } else {
+                    ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+                }
+            }
+            // v3: instance ID, plain
+            if (runtime_info.num_input_vgprs > 2) {
+                ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+            }
         }
         break;
     case LogicalStage::Fragment:
@@ -183,10 +206,8 @@ void Translator::EmitPrologue(IR::Block* first_block) {
             switch (runtime_info.gs_info.out_primitive[0]) {
             case AmdGpu::GsOutputPrimitiveType::TriangleStrip:
                 ir.SetVectorReg(IR::VectorReg::V3, ir.Imm32(2u)); // vertex 2
-                [[fallthrough]];
             case AmdGpu::GsOutputPrimitiveType::LineStrip:
                 ir.SetVectorReg(IR::VectorReg::V1, ir.Imm32(1u)); // vertex 1
-                [[fallthrough]];
             default:
                 ir.SetVectorReg(IR::VectorReg::V0, ir.Imm32(0u)); // vertex 0
                 break;
@@ -481,11 +502,11 @@ void Translator::SetDst64(const InstOperand& operand, const IR::U64F64& value_ra
 }
 
 void Translator::EmitFetch(const GcnInst& inst) {
-    // Read the pointer to the fetch shader assembly.
     const auto code_sgpr_base = inst.src[0].code;
+
+    // The fetch shader must be inlined to access as regular buffers, so that
+    // bounds checks can be emitted to emulate robust buffer access.
     if (!profile.supports_robust_buffer_access) {
-        // The fetch shader must be inlined to access as regular buffers, so that
-        // bounds checks can be emitted to emulate robust buffer access.
         const auto* code = GetFetchShaderCode(info, code_sgpr_base);
         GcnCodeSlice slice(code, code + std::numeric_limits<u32>::max());
         GcnDecodeContext decoder;
@@ -535,16 +556,6 @@ void Translator::EmitFetch(const GcnInst& inst) {
         for (u32 i = 0; i < 4; i++) {
             ir.SetVectorReg(dst_reg++, IR::F32{ir.CompositeExtract(swizzled, i)});
         }
-
-        // In case of programmable step rates we need to fallback to instance data pulling in
-        // shader, so VBs should be bound as regular data buffers
-        if (attrib.UsesStepRates()) {
-            info.buffers.push_back({
-                .sharp_idx = info.srt_info.ReserveSharp(attrib.sgpr_base, attrib.dword_offset, 4),
-                .used_types = IR::Type::F32,
-                .instance_attrib = attrib.semantic,
-            });
-        }
     }
 }
 
diff --git a/src/shader_recompiler/info.h b/src/shader_recompiler/info.h
index 9703643e8..6e12c6816 100644
--- a/src/shader_recompiler/info.h
+++ b/src/shader_recompiler/info.h
@@ -113,17 +113,13 @@ struct FMaskResource {
 using FMaskResourceList = boost::container::small_vector;
 
 struct PushData {
-    static constexpr u32 Step0Index = 0;
-    static constexpr u32 Step1Index = 1;
-    static constexpr u32 XOffsetIndex = 2;
-    static constexpr u32 YOffsetIndex = 3;
-    static constexpr u32 XScaleIndex = 4;
-    static constexpr u32 YScaleIndex = 5;
-    static constexpr u32 UdRegsIndex = 6;
+    static constexpr u32 XOffsetIndex = 0;
+    static constexpr u32 YOffsetIndex = 1;
+    static constexpr u32 XScaleIndex = 2;
+    static constexpr u32 YScaleIndex = 3;
+    static constexpr u32 UdRegsIndex = 4;
     static constexpr u32 BufOffsetIndex = UdRegsIndex + NumUserDataRegs / 4;
 
-    u32 step0;
-    u32 step1;
     float xoffset;
     float yoffset;
     float xscale;
diff --git a/src/shader_recompiler/ir/attribute.cpp b/src/shader_recompiler/ir/attribute.cpp
index 6a267e21b..b2f11d141 100644
--- a/src/shader_recompiler/ir/attribute.cpp
+++ b/src/shader_recompiler/ir/attribute.cpp
@@ -100,22 +100,36 @@ std::string NameOf(Attribute attribute) {
         return "Param30";
     case Attribute::Param31:
         return "Param31";
+    case Attribute::ClipDistance:
+        return "ClipDistance";
+    case Attribute::CullDistance:
+        return "CullDistance";
+    case Attribute::RenderTargetId:
+        return "RenderTargetId";
+    case Attribute::ViewportId:
+        return "ViewportId";
     case Attribute::VertexId:
         return "VertexId";
-    case Attribute::InstanceId:
-        return "InstanceId";
     case Attribute::PrimitiveId:
         return "PrimitiveId";
-    case Attribute::FragCoord:
-        return "FragCoord";
+    case Attribute::InstanceId:
+        return "InstanceId";
     case Attribute::IsFrontFace:
         return "IsFrontFace";
+    case Attribute::SampleIndex:
+        return "SampleIndex";
+    case Attribute::GlobalInvocationId:
+        return "GlobalInvocationId";
     case Attribute::WorkgroupId:
         return "WorkgroupId";
+    case Attribute::WorkgroupIndex:
+        return "WorkgroupIndex";
     case Attribute::LocalInvocationId:
         return "LocalInvocationId";
     case Attribute::LocalInvocationIndex:
         return "LocalInvocationIndex";
+    case Attribute::FragCoord:
+        return "FragCoord";
     case Attribute::InvocationId:
         return "InvocationId";
     case Attribute::PatchVertices:
diff --git a/src/shader_recompiler/ir/attribute.h b/src/shader_recompiler/ir/attribute.h
index 68472f052..b6b1c8b59 100644
--- a/src/shader_recompiler/ir/attribute.h
+++ b/src/shader_recompiler/ir/attribute.h
@@ -73,8 +73,6 @@ enum class Attribute : u64 {
     LocalInvocationId = 76,
     LocalInvocationIndex = 77,
     FragCoord = 78,
-    InstanceId0 = 79, // step rate 0
-    InstanceId1 = 80, // step rate 1
     InvocationId = 81, // TCS id in output patch and instanced geometry shader id
     PatchVertices = 82,
     TessellationEvaluationPointU = 83,
diff --git a/src/shader_recompiler/ir/ir_emitter.cpp b/src/shader_recompiler/ir/ir_emitter.cpp
index 4997145d7..6ca86b2c0 100644
--- a/src/shader_recompiler/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/ir/ir_emitter.cpp
@@ -255,8 +255,8 @@ void IREmitter::SetM0(const U32& value) {
     Inst(Opcode::SetM0, value);
 }
 
-F32 IREmitter::GetAttribute(IR::Attribute attribute, u32 comp, IR::Value index) {
-    return Inst<F32>(Opcode::GetAttribute, attribute, Imm32(comp), index);
+F32 IREmitter::GetAttribute(IR::Attribute attribute, u32 comp, u32 index) {
+    return Inst<F32>(Opcode::GetAttribute, attribute, Imm32(comp), Imm32(index));
 }
 
 U32 IREmitter::GetAttributeU32(IR::Attribute attribute, u32 comp) {
diff --git a/src/shader_recompiler/ir/ir_emitter.h b/src/shader_recompiler/ir/ir_emitter.h
index 6055df565..a105b042d 100644
--- a/src/shader_recompiler/ir/ir_emitter.h
+++ b/src/shader_recompiler/ir/ir_emitter.h
@@ -81,8 +81,7 @@ public:
 
     [[nodiscard]] U1 Condition(IR::Condition cond);
 
-    [[nodiscard]] F32 GetAttribute(Attribute attribute, u32 comp = 0,
-                                   IR::Value index = IR::Value(u32(0u)));
+    [[nodiscard]] F32 GetAttribute(Attribute attribute, u32 comp = 0, u32 index = 0);
     [[nodiscard]] U32 GetAttributeU32(Attribute attribute, u32 comp = 0);
     void SetAttribute(Attribute attribute, const F32& value, u32 comp = 0);
diff --git a/src/shader_recompiler/ir/passes/flatten_extended_userdata_pass.cpp b/src/shader_recompiler/ir/passes/flatten_extended_userdata_pass.cpp
index 7253e18c1..e0c99655d 100644
--- a/src/shader_recompiler/ir/passes/flatten_extended_userdata_pass.cpp
+++ b/src/shader_recompiler/ir/passes/flatten_extended_userdata_pass.cpp
@@ -191,7 +191,7 @@ static void VisitPointer(u32 off_dw, IR::Inst* subtree, PassInfo& pass_info,
 static void GenerateSrtProgram(Info& info, PassInfo& pass_info) {
     Xbyak::CodeGenerator& c = g_srt_codegen;
 
-    if (info.srt_info.srt_reservations.empty() && pass_info.srt_roots.empty()) {
+    if (pass_info.srt_roots.empty()) {
         return;
     }
 
@@ -205,29 +205,7 @@ static void GenerateSrtProgram(Info& info, PassInfo& pass_info) {
     }
 
     info.srt_info.walker_func = c.getCurr();
 
     pass_info.dst_off_dw = NumUserDataRegs;
-
-    // Special case for V# step rate buffers in fetch shader
-    for (const auto [sgpr_base, dword_offset, num_dwords] : info.srt_info.srt_reservations) {
-        // get pointer to V#
-        if (sgpr_base != IR::NumScalarRegs) {
-            PushPtr(c, sgpr_base);
-        }
-        u32 src_off = dword_offset << 2;
-
-        for (auto j = 0; j < num_dwords; j++) {
-            c.mov(r11d, ptr[rdi + src_off]);
-            c.mov(ptr[rsi + (pass_info.dst_off_dw << 2)], r11d);
-
-            src_off += 4;
-            ++pass_info.dst_off_dw;
-        }
-        if (sgpr_base != IR::NumScalarRegs) {
-            PopPtr(c);
-        }
-    }
-    ASSERT(pass_info.dst_off_dw == info.srt_info.flattened_bufsize_dw);
 
     for (const auto& [sgpr_base, root] : pass_info.srt_roots) {
diff --git a/src/shader_recompiler/ir/passes/ring_access_elimination.cpp b/src/shader_recompiler/ir/passes/ring_access_elimination.cpp
index b292b41b9..e1e5d762c 100644
--- a/src/shader_recompiler/ir/passes/ring_access_elimination.cpp
+++ b/src/shader_recompiler/ir/passes/ring_access_elimination.cpp
@@ -33,12 +33,9 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
             bool is_composite = opcode == IR::Opcode::WriteSharedU64;
             u32 num_components = opcode == IR::Opcode::WriteSharedU32 ? 1 : 2;
 
-            u32 offset = 0;
-            const auto* addr = inst.Arg(0).InstRecursive();
-            if (addr->GetOpcode() == IR::Opcode::IAdd32) {
-                ASSERT(addr->Arg(1).IsImmediate());
-                offset = addr->Arg(1).U32();
-            }
+            ASSERT(inst.Arg(0).IsImmediate());
+
+            u32 offset = inst.Arg(0).U32();
             IR::Value data = is_composite ? ir.UnpackUint2x32(IR::U64{inst.Arg(1).Resolve()})
                                           : inst.Arg(1).Resolve();
             for (s32 i = 0; i < num_components; i++) {
@@ -116,7 +113,7 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
             }
 
             const auto shl_inst = inst.Arg(1).TryInstRecursive();
-            const auto vertex_id = ir.Imm32(shl_inst->Arg(0).Resolve().U32() >> 2);
+            const auto vertex_id = shl_inst->Arg(0).Resolve().U32() >> 2;
             const auto offset = inst.Arg(1).TryInstRecursive()->Arg(1);
             const auto bucket = offset.Resolve().U32() / 256u;
             const auto attrib = bucket < 4 ? IR::Attribute::Position0
diff --git a/src/shader_recompiler/ir/passes/srt.h b/src/shader_recompiler/ir/passes/srt.h
index 0ddc15ea6..4dce38674 100644
--- a/src/shader_recompiler/ir/passes/srt.h
+++ b/src/shader_recompiler/ir/passes/srt.h
@@ -20,18 +20,7 @@ struct PersistentSrtInfo {
     };
 
     PFN_SrtWalker walker_func{};
-    boost::container::small_vector srt_reservations;
     u32 flattened_bufsize_dw = 16; // NumUserDataRegs
-
-    // Special case for fetch shaders because we don't generate IR to read from step rate buffers,
-    // so we won't see usage with GetUserData/ReadConst.
-    // Reserve space in the flattened buffer for a sharp ahead of time
-    u32 ReserveSharp(u32 sgpr_base, u32 dword_offset, u32 num_dwords) {
-        u32 rv = flattened_bufsize_dw;
-        srt_reservations.emplace_back(sgpr_base, dword_offset, num_dwords);
-        flattened_bufsize_dw += num_dwords;
-        return rv;
-    }
 };
 
-} // namespace Shader
\ No newline at end of file
+} // namespace Shader
diff --git a/src/shader_recompiler/runtime_info.h b/src/shader_recompiler/runtime_info.h
index 5a0408e2c..6cede44a8 100644
--- a/src/shader_recompiler/runtime_info.h
+++ b/src/shader_recompiler/runtime_info.h
@@ -42,7 +42,6 @@ constexpr u32 MaxStageTypes = static_cast<u32>(LogicalStage::NumLogicalStages);
 
 struct LocalRuntimeInfo {
     u32 ls_stride;
-    bool links_with_tcs;
 
     auto operator<=>(const LocalRuntimeInfo&) const noexcept = default;
 };
@@ -85,6 +84,8 @@ struct VertexRuntimeInfo {
     std::array outputs;
     bool emulate_depth_negative_one_to_one{};
     bool clip_disable{};
+    u32 step_rate_0;
+    u32 step_rate_1;
     // Domain
     AmdGpu::TessellationType tess_type;
     AmdGpu::TessellationTopology tess_topology;
@@ -96,7 +97,8 @@ struct VertexRuntimeInfo {
                clip_disable == other.clip_disable && tess_type == other.tess_type &&
                tess_topology == other.tess_topology &&
                tess_partitioning == other.tess_partitioning &&
-               hs_output_cp_stride == other.hs_output_cp_stride;
+               hs_output_cp_stride == other.hs_output_cp_stride &&
+               step_rate_0 == other.step_rate_0 && step_rate_1 == other.step_rate_1;
     }
 
     void InitFromTessConstants(Shader::TessellationDataConstantBuffer& tess_constants) {
diff --git a/src/shader_recompiler/specialization.h b/src/shader_recompiler/specialization.h
index e40309aaf..d3e671c58 100644
--- a/src/shader_recompiler/specialization.h
+++ b/src/shader_recompiler/specialization.h
@@ -13,7 +13,7 @@
 namespace Shader {
 
 struct VsAttribSpecialization {
-    s32 num_components{};
+    u32 divisor{};
     AmdGpu::NumberClass num_class{};
     AmdGpu::CompMapping dst_select{};
@@ -74,13 +74,13 @@ struct SamplerSpecialization {
  * after the first compilation of a module.
  */
 struct StageSpecialization {
-    static constexpr size_t MaxStageResources = 64;
+    static constexpr size_t MaxStageResources = 128;
 
     const Shader::Info* info;
     RuntimeInfo runtime_info;
+    std::bitset<MaxStageResources> bitset{};
     std::optional fetch_shader_data{};
     boost::container::small_vector vs_attribs;
-    std::bitset<MaxStageResources> bitset{};
     boost::container::small_vector buffers;
     boost::container::small_vector images;
     boost::container::small_vector fmasks;
@@ -94,10 +94,16 @@ struct StageSpecialization {
         if (info_.stage == Stage::Vertex && fetch_shader_data) {
             // Specialize shader on VS input number types to follow spec.
             ForEachSharp(vs_attribs, fetch_shader_data->attributes,
-                         [&profile_](auto& spec, const auto& desc, AmdGpu::Buffer sharp) {
-                             spec.num_components = desc.UsesStepRates()
-                                                       ? AmdGpu::NumComponents(sharp.GetDataFmt())
-                                                       : 0;
+                         [&profile_, this](auto& spec, const auto& desc, AmdGpu::Buffer sharp) {
+                             using InstanceIdType = Shader::Gcn::VertexAttribute::InstanceIdType;
+                             if (const auto step_rate = desc.GetStepRate();
+                                 step_rate != InstanceIdType::None) {
+                                 spec.divisor = step_rate == InstanceIdType::OverStepRate0
+                                                    ? runtime_info.vs_info.step_rate_0
+                                                    : (step_rate == InstanceIdType::OverStepRate1
+                                                           ? runtime_info.vs_info.step_rate_1
+                                                           : 1);
+                             }
                              spec.num_class = profile_.support_legacy_vertex_attributes
                                                   ? AmdGpu::NumberClass{}
                                                   : AmdGpu::GetNumberClass(sharp.GetNumberFmt());
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index 28444ac60..42e3c61a5 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -198,10 +198,13 @@ void BufferCache::DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 si
 }
 
 void BufferCache::BindVertexBuffers(const Vulkan::GraphicsPipeline& pipeline) {
+    const auto& regs = liverpool->regs;
     Vulkan::VertexInputs attributes;
     Vulkan::VertexInputs bindings;
+    Vulkan::VertexInputs divisors;
     Vulkan::VertexInputs guest_buffers;
-    pipeline.GetVertexInputs(attributes, bindings, guest_buffers);
+    pipeline.GetVertexInputs(attributes, bindings, divisors, guest_buffers,
+                             regs.vgt_instance_step_rate_0, regs.vgt_instance_step_rate_1);
 
     if (instance.IsVertexInputDynamicState()) {
         // Update current vertex inputs.
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 7c020a012..3596bb041 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -72,12 +72,21 @@ GraphicsPipeline::GraphicsPipeline(
 
     VertexInputs vertex_attributes;
     VertexInputs vertex_bindings;
+    VertexInputs divisors;
     VertexInputs guest_buffers;
     if (!instance.IsVertexInputDynamicState()) {
-        GetVertexInputs(vertex_attributes, vertex_bindings, guest_buffers);
+        const auto& vs_info = runtime_infos[u32(Shader::LogicalStage::Vertex)].vs_info;
+        GetVertexInputs(vertex_attributes, vertex_bindings, divisors, guest_buffers,
+                        vs_info.step_rate_0, vs_info.step_rate_1);
     }
 
+    const vk::PipelineVertexInputDivisorStateCreateInfo divisor_state = {
+        .vertexBindingDivisorCount = static_cast<u32>(divisors.size()),
+        .pVertexBindingDivisors = divisors.data(),
+    };
+
     const vk::PipelineVertexInputStateCreateInfo vertex_input_info = {
+        .pNext = divisors.empty() ? nullptr : &divisor_state,
         .vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size()),
         .pVertexBindingDescriptions = vertex_bindings.data(),
         .vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size()),
@@ -304,19 +313,17 @@ GraphicsPipeline::GraphicsPipeline(
 GraphicsPipeline::~GraphicsPipeline() = default;
 
 template <typename Attribute, typename Binding>
-void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,
-                                       VertexInputs<Binding>& bindings,
-                                       VertexInputs<AmdGpu::Buffer>& guest_buffers) const {
+void GraphicsPipeline::GetVertexInputs(
+    VertexInputs<Attribute>& attributes, VertexInputs<Binding>& bindings,
+    VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT>& divisors,
+    VertexInputs<AmdGpu::Buffer>& guest_buffers, u32 step_rate_0, u32 step_rate_1) const {
+    using InstanceIdType = Shader::Gcn::VertexAttribute::InstanceIdType;
     if (!fetch_shader || fetch_shader->attributes.empty()) {
         return;
     }
     const auto& vs_info = GetStage(Shader::LogicalStage::Vertex);
     for (const auto& attrib : fetch_shader->attributes) {
-        if (attrib.UsesStepRates()) {
-            // Skip attribute binding as the data will be pulled by shader.
-            continue;
-        }
-
+        const auto step_rate = attrib.GetStepRate();
         const auto& buffer = attrib.GetSharp(vs_info);
         attributes.push_back(Attribute{
             .location = attrib.semantic,
@@ -327,12 +334,19 @@ void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,
         bindings.push_back(Binding{
             .binding = attrib.semantic,
             .stride = buffer.GetStride(),
-            .inputRate = attrib.GetStepRate() == Shader::Gcn::VertexAttribute::InstanceIdType::None
-                             ? vk::VertexInputRate::eVertex
-                             : vk::VertexInputRate::eInstance,
+            .inputRate = step_rate == InstanceIdType::None ? vk::VertexInputRate::eVertex
+                                                           : vk::VertexInputRate::eInstance,
         });
+        const u32 divisor = step_rate == InstanceIdType::OverStepRate0
+                                ? step_rate_0
+                                : (step_rate == InstanceIdType::OverStepRate1 ? step_rate_1 : 1);
         if constexpr (std::is_same_v<Binding, vk::VertexInputBindingDescription2EXT>) {
-            bindings.back().divisor = 1;
+            bindings.back().divisor = divisor;
+        } else if (step_rate != InstanceIdType::None) {
+            divisors.push_back(vk::VertexInputBindingDivisorDescriptionEXT{
+                .binding = attrib.semantic,
+                .divisor = divisor,
+            });
         }
         guest_buffers.emplace_back(buffer);
     }
@@ -342,11 +356,13 @@ void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,
 template void GraphicsPipeline::GetVertexInputs(
     VertexInputs& attributes, VertexInputs& bindings,
-    VertexInputs& guest_buffers) const;
+    VertexInputs& divisors,
+    VertexInputs& guest_buffers, u32 step_rate_0, u32 step_rate_1) const;
 template void GraphicsPipeline::GetVertexInputs(
     VertexInputs& attributes, VertexInputs& bindings,
-    VertexInputs& guest_buffers) const;
+    VertexInputs& divisors,
+    VertexInputs& guest_buffers, u32 step_rate_0, u32 step_rate_1) const;
 
 void GraphicsPipeline::BuildDescSetLayout() {
     boost::container::small_vector bindings;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 59230ae46..ab67a52b4 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -81,7 +81,9 @@ public:
 
     /// Gets the attributes and bindings for vertex inputs.
     template <typename Attribute, typename Binding>
     void GetVertexInputs(VertexInputs& attributes, VertexInputs& bindings,
-                         VertexInputs& guest_buffers) const;
+                         VertexInputs& divisors,
+                         VertexInputs& guest_buffers, u32 step_rate_0,
+                         u32 step_rate_1) const;
 
 private:
     void BuildDescSetLayout();
diff --git a/src/video_core/renderer_vulkan/vk_instance.cpp b/src/video_core/renderer_vulkan/vk_instance.cpp
index 237fa202d..85fc993a9 100644
--- a/src/video_core/renderer_vulkan/vk_instance.cpp
+++ b/src/video_core/renderer_vulkan/vk_instance.cpp
@@ -248,6 +248,7 @@ bool Instance::CreateDevice() {
     // Required
     ASSERT(add_extension(VK_KHR_SWAPCHAIN_EXTENSION_NAME));
     ASSERT(add_extension(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME));
+    ASSERT(add_extension(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME));
 
     // Optional
     depth_range_unrestricted = add_extension(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
@@ -436,6 +437,9 @@ bool Instance::CreateDevice() {
         vk::PhysicalDeviceLegacyVertexAttributesFeaturesEXT{
             .legacyVertexAttributes = true,
         },
+        vk::PhysicalDeviceVertexAttributeDivisorFeatures{
+            .vertexAttributeInstanceRateDivisor = true,
+        },
         vk::PhysicalDeviceShaderAtomicFloat2FeaturesEXT{
             .shaderBufferFloat32AtomicMinMax =
                 shader_atomic_float2_features.shaderBufferFloat32AtomicMinMax,
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 7dd468f9a..8d12b74f3 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -94,15 +94,10 @@ const Shader::RuntimeInfo& PipelineCache::BuildRuntimeInfo(Stage stage, LogicalS
     switch (stage) {
     case Stage::Local: {
         BuildCommon(regs.ls_program);
-        if (regs.stage_enable.IsStageEnabled(static_cast<u32>(Stage::Hull))) {
-            info.ls_info.links_with_tcs = true;
-            Shader::TessellationDataConstantBuffer tess_constants;
-            const auto* pgm = regs.ProgramForStage(static_cast<u32>(Stage::Hull));
-            const auto params = Liverpool::GetParams(*pgm);
-            const auto& hull_info = program_cache.at(params.hash)->info;
-            hull_info.ReadTessConstantBuffer(tess_constants);
-            info.ls_info.ls_stride = tess_constants.ls_stride;
-        }
+        Shader::TessellationDataConstantBuffer tess_constants;
+        const auto* hull_info = infos[u32(Shader::LogicalStage::TessellationControl)];
+        hull_info->ReadTessConstantBuffer(tess_constants);
+        info.ls_info.ls_stride = tess_constants.ls_stride;
         break;
     }
     case Stage::Hull: {
@@ -122,6 +117,8 @@ const Shader::RuntimeInfo& PipelineCache::BuildRuntimeInfo(Stage stage, LogicalS
     case Stage::Vertex: {
         BuildCommon(regs.vs_program);
         GatherVertexOutputs(info.vs_info, regs.vs_output_control);
+        info.vs_info.step_rate_0 = regs.vgt_instance_step_rate_0;
+        info.vs_info.step_rate_1 = regs.vgt_instance_step_rate_1;
         info.vs_info.emulate_depth_negative_one_to_one =
             !instance.IsDepthClipControlSupported() &&
             regs.clipper_control.clip_space == Liverpool::ClipSpace::MinusWToW;
@@ -460,10 +457,6 @@ bool PipelineCache::RefreshGraphicsKey() {
     // Stride will still be handled outside the pipeline using dynamic state.
     u32 vertex_binding = 0;
     for (const auto& attrib : fetch_shader->attributes) {
-        if (attrib.UsesStepRates()) {
-            // Skip attribute binding as the data will be pulled by shader.
-            continue;
-        }
         const auto& buffer = attrib.GetSharp(*vs_info);
         ASSERT(vertex_binding < MaxVertexBufferCount);
         key.vertex_buffer_formats[vertex_binding++] =
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 5d0a14ce3..2a645f338 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -20,12 +20,9 @@
 namespace Vulkan {
 
 static Shader::PushData MakeUserData(const AmdGpu::Liverpool::Regs& regs) {
-    Shader::PushData push_data{};
-    push_data.step0 = regs.vgt_instance_step_rate_0;
-    push_data.step1 = regs.vgt_instance_step_rate_1;
-
     // TODO(roamic): Add support for multiple viewports and geometry shaders when ViewportIndex
     // is encountered and implemented in the recompiler.
+    Shader::PushData push_data{};
     push_data.xoffset = regs.viewport_control.xoffset_enable ? regs.viewports[0].xoffset : 0.f;
     push_data.xscale = regs.viewport_control.xscale_enable ? regs.viewports[0].xscale : 1.f;
     push_data.yoffset = regs.viewport_control.yoffset_enable ? regs.viewports[0].yoffset : 0.f;
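
Editor's note, not part of the patch: the change above replaces the old "buffer pulling" path, where the recompiled vertex shader indexed per-instance data out of a plain data buffer itself, with the fixed-function instance divisor from VK_EXT_vertex_attribute_divisor. A binding with inputRate = eInstance and divisor = N advances to the next element once every N instances, which is the same instance_id / step_rate indexing the removed SPIR-V code computed by hand. The sketch below only illustrates that mapping; it is a hedged example, and GcnAttrib / MakeVertexBinding are invented names rather than symbols from this codebase (the real logic is GraphicsPipeline::GetVertexInputs above). It assumes Vulkan-Hpp built with designated initializers enabled (VULKAN_HPP_NO_STRUCT_CONSTRUCTORS), as the patch itself relies on.

    #include <cstdint>
    #include <vector>
    #include <vulkan/vulkan.hpp>

    using u32 = std::uint32_t;

    // Mirrors Gcn::VertexAttribute::InstanceIdType from the patch.
    enum class InstanceIdType { None, Plain, OverStepRate0, OverStepRate1 };

    struct GcnAttrib {          // hypothetical stand-in for a fetch shader attribute
        u32 semantic;           // used as both location and binding index
        u32 stride;             // taken from the V# buffer sharp in the real code
        InstanceIdType step_rate;
    };

    // Builds one binding description plus, for step-rate attributes, a divisor
    // entry. A divisor of step_rate_N makes the GPU advance the per-instance
    // data once every step_rate_N instances, replacing the shader-side division.
    vk::VertexInputBindingDescription MakeVertexBinding(
        const GcnAttrib& attrib, u32 step_rate_0, u32 step_rate_1,
        std::vector<vk::VertexInputBindingDivisorDescriptionEXT>& divisors) {
        if (attrib.step_rate == InstanceIdType::OverStepRate0) {
            divisors.push_back({.binding = attrib.semantic, .divisor = step_rate_0});
        } else if (attrib.step_rate == InstanceIdType::OverStepRate1) {
            divisors.push_back({.binding = attrib.semantic, .divisor = step_rate_1});
        }
        return {
            .binding = attrib.semantic,
            .stride = attrib.stride,
            .inputRate = attrib.step_rate == InstanceIdType::None
                             ? vk::VertexInputRate::eVertex
                             : vk::VertexInputRate::eInstance,
        };
    }

The divisors collected this way are what the new PipelineVertexInputDivisorStateCreateInfo chains into PipelineVertexInputStateCreateInfo via pNext. The translator prologue keeps a guarded instance_id / step_rate division (emitting 0 when the step-rate register is 0) only for shaders that read the step-rate VGPRs directly.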