Merge branch 'shadps4-emu:main' into volume-slider

UltraDaCat 2025-07-14 15:08:08 +02:00 committed by GitHub
commit d4520a9392
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
22 changed files with 208 additions and 274 deletions
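Most of the merged changes replace fetch-shader "instance data pulling" for step-rate vertex attributes with Vulkan instance-rate divisors (VK_EXT_vertex_attribute_divisor). A minimal sketch of the per-attribute divisor selection used throughout the diff below; the enum mirrors Gcn::VertexAttribute::InstanceIdType (enumerator order assumed) and the helper name is illustrative:

#include <cstdint>

// OverStepRate0/1 correspond to the vgt_instance_step_rate_0/1 registers; any
// other attribute keeps the default divisor of 1 (advance once per instance).
enum class InstanceIdType { None, Plain, OverStepRate0, OverStepRate1 };

constexpr uint32_t SelectDivisor(InstanceIdType step_rate, uint32_t step_rate_0,
                                 uint32_t step_rate_1) {
    return step_rate == InstanceIdType::OverStepRate0
               ? step_rate_0
               : (step_rate == InstanceIdType::OverStepRate1 ? step_rate_1 : 1);
}

static_assert(SelectDivisor(InstanceIdType::OverStepRate0, 4, 2) == 4);
static_assert(SelectDivisor(InstanceIdType::Plain, 4, 2) == 1);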

View File

@@ -52,7 +52,7 @@ Id VsOutputAttrPointer(EmitContext& ctx, VsOutput output) {
 Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
     if (IR::IsParam(attr)) {
         const u32 attr_index{u32(attr) - u32(IR::Attribute::Param0)};
-        if (ctx.stage == Stage::Local && ctx.runtime_info.ls_info.links_with_tcs) {
+        if (ctx.stage == Stage::Local) {
             const auto component_ptr = ctx.TypePointer(spv::StorageClass::Output, ctx.F32[1]);
             return ctx.OpAccessChain(component_ptr, ctx.output_attr_array, ctx.ConstU32(attr_index),
                                      ctx.ConstU32(element));
@@ -94,13 +94,9 @@ Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
 std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr) {
     if (IR::IsParam(attr)) {
-        if (ctx.stage == Stage::Local && ctx.runtime_info.ls_info.links_with_tcs) {
-            return {ctx.F32[1], false};
-        } else {
-            const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
-            const auto& info{ctx.output_params.at(index)};
-            return {info.component_type, info.is_integer};
-        }
+        const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
+        const auto& info{ctx.output_params.at(index)};
+        return {info.component_type, info.is_integer};
     }
     if (IR::IsMrt(attr)) {
         const u32 index{u32(attr) - u32(IR::Attribute::RenderTarget0)};
@@ -120,6 +116,9 @@ std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr
 }
 } // Anonymous namespace

+using PointerType = EmitContext::PointerType;
+using PointerSize = EmitContext::PointerSize;
+
 Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
     const u32 index = ctx.binding.user_data + ctx.info.ud_mask.Index(reg);
     const u32 half = PushData::UdRegsIndex + (index >> 2);
@@ -131,41 +130,6 @@ Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
     return ud_reg;
 }

-void EmitGetThreadBitScalarReg(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetThreadBitScalarReg(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetScalarRegister(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetScalarRegister(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetVectorRegister(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetVectorRegister(EmitContext& ctx) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitSetGotoVariable(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-void EmitGetGotoVariable(EmitContext&) {
-    UNREACHABLE_MSG("Unreachable instruction");
-}
-
-using PointerType = EmitContext::PointerType;
-using PointerSize = EmitContext::PointerSize;
-
 Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
     const u32 flatbuf_off_dw = inst->Flags<u32>();
     if (!Config::directMemoryAccess()) {
@@ -180,39 +144,27 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
     }
 }

-template <PointerType type>
-Id ReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
+Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
     const auto& buffer = ctx.buffers[handle];
     if (const Id offset = buffer.Offset(PointerSize::B32); Sirit::ValidId(offset)) {
         index = ctx.OpIAdd(ctx.U32[1], index, offset);
     }
-    const auto [id, pointer_type] = buffer.Alias(type);
-    const auto value_type = type == PointerType::U32 ? ctx.U32[1] : ctx.F32[1];
+    const auto [id, pointer_type] = buffer.Alias(PointerType::U32);
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
-    const Id result{ctx.OpLoad(value_type, ptr)};
+    const Id result{ctx.OpLoad(ctx.U32[1], ptr)};
     if (const Id size = buffer.Size(PointerSize::B32); Sirit::ValidId(size)) {
         const Id in_bounds = ctx.OpULessThan(ctx.U1[1], index, size);
-        return ctx.OpSelect(value_type, in_bounds, result, ctx.u32_zero_value);
+        return ctx.OpSelect(ctx.U32[1], in_bounds, result, ctx.u32_zero_value);
     }
     return result;
 }

-Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
-    return ReadConstBuffer<PointerType::U32>(ctx, handle, index);
-}
-
-Id EmitReadStepRate(EmitContext& ctx, int rate_idx) {
-    const auto index{rate_idx == 0 ? PushData::Step0Index : PushData::Step1Index};
-    return ctx.OpLoad(
-        ctx.U32[1], ctx.OpAccessChain(ctx.TypePointer(spv::StorageClass::PushConstant, ctx.U32[1]),
-                                      ctx.push_data_block, ctx.ConstU32(index)));
-}
-
-static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
+static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index) {
     if (IR::IsPosition(attr)) {
         ASSERT(attr == IR::Attribute::Position0);
         const auto position_arr_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[4]);
-        const auto pointer{ctx.OpAccessChain(position_arr_ptr, ctx.gl_in, index, ctx.ConstU32(0u))};
+        const auto pointer{
+            ctx.OpAccessChain(position_arr_ptr, ctx.gl_in, ctx.ConstU32(index), ctx.ConstU32(0u))};
         const auto position_comp_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[1]);
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(position_comp_ptr, pointer, ctx.ConstU32(comp)));
@@ -222,7 +174,7 @@ static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32
         const u32 param_id{u32(attr) - u32(IR::Attribute::Param0)};
         const auto param = ctx.input_params.at(param_id).id;
         const auto param_arr_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[4]);
-        const auto pointer{ctx.OpAccessChain(param_arr_ptr, param, index)};
+        const auto pointer{ctx.OpAccessChain(param_arr_ptr, param, ctx.ConstU32(index))};
         const auto position_comp_ptr = ctx.TypePointer(spv::StorageClass::Input, ctx.F32[1]);
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(position_comp_ptr, pointer, ctx.ConstU32(comp)));
@@ -230,7 +182,7 @@ static Id EmitGetAttributeForGeometry(EmitContext& ctx, IR::Attribute attr, u32
     UNREACHABLE();
 }

-Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index) {
     if (ctx.info.l_stage == LogicalStage::Geometry) {
         return EmitGetAttributeForGeometry(ctx, attr, comp, index);
     } else if (ctx.info.l_stage == LogicalStage::TessellationControl ||
@@ -248,18 +200,6 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
     if (IR::IsParam(attr)) {
         const u32 param_index{u32(attr) - u32(IR::Attribute::Param0)};
         const auto& param{ctx.input_params.at(param_index)};
-        if (param.buffer_handle >= 0) {
-            const auto step_rate = EmitReadStepRate(ctx, param.id.value);
-            const auto offset = ctx.OpIAdd(
-                ctx.U32[1],
-                ctx.OpIMul(
-                    ctx.U32[1],
-                    ctx.OpUDiv(ctx.U32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id), step_rate),
-                    ctx.ConstU32(param.num_components)),
-                ctx.ConstU32(comp));
-            return ReadConstBuffer<PointerType::F32>(ctx, param.buffer_handle, offset);
-        }
         Id result;
         if (param.is_loaded) {
             // Attribute is either default or manually interpolated. The id points to an already
@@ -305,10 +245,6 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
         return ctx.OpLoad(ctx.U32[1], ctx.vertex_index);
     case IR::Attribute::InstanceId:
         return ctx.OpLoad(ctx.U32[1], ctx.instance_id);
-    case IR::Attribute::InstanceId0:
-        return EmitReadStepRate(ctx, 0);
-    case IR::Attribute::InstanceId1:
-        return EmitReadStepRate(ctx, 1);
     case IR::Attribute::WorkgroupIndex:
         return ctx.workgroup_index_id;
     case IR::Attribute::WorkgroupId:
@@ -640,4 +576,36 @@ void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id a
     UNREACHABLE_MSG("SPIR-V instruction");
 }

+void EmitGetThreadBitScalarReg(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetThreadBitScalarReg(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetScalarRegister(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetScalarRegister(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetVectorRegister(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetVectorRegister(EmitContext& ctx) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitSetGotoVariable(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
+void EmitGetGotoVariable(EmitContext&) {
+    UNREACHABLE_MSG("Unreachable instruction");
+}
+
 } // namespace Shader::Backend::SPIRV
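For reference, the index arithmetic that the deleted pulling path emitted in SPIR-V (OpUDiv, OpIMul, OpIAdd feeding ReadConstBuffer<PointerType::F32>) reduces to the expression below. A hypothetical CPU-side equivalent, useful for checking the divisor-based replacement against the old behavior:

#include <cstdint>

// Old shader-side math: dword offset of the pulled attribute component.
constexpr uint32_t PulledBufferOffset(uint32_t instance_id, uint32_t step_rate,
                                      uint32_t num_components, uint32_t comp) {
    return (instance_id / step_rate) * num_components + comp;
}

// Instance 5 at step rate 2 reads element 2 of a 4-component attribute.
static_assert(PulledBufferOffset(5, 2, 4, 1) == 2 * 4 + 1);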

View File

@@ -108,7 +108,7 @@ Id EmitBufferAtomicXor32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addres
 Id EmitBufferAtomicSwap32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicCmpSwap32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value,
                              Id cmp_value);
-Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index);
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, u32 index);
 Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 comp);
 Id EmitGetTessGenericAttribute(EmitContext& ctx, Id vertex_index, Id attr_index, Id comp_index);

View File

@@ -377,35 +377,13 @@ void EmitContext::DefineInputs() {
             ASSERT(attrib.semantic < IR::NumParams);
             const auto sharp = attrib.GetSharp(info);
             const Id type{GetAttributeType(*this, sharp.GetNumberFmt())[4]};
-            if (attrib.UsesStepRates()) {
-                const u32 rate_idx =
-                    attrib.GetStepRate() == Gcn::VertexAttribute::InstanceIdType::OverStepRate0 ? 0
-                                                                                                : 1;
-                const u32 num_components = AmdGpu::NumComponents(sharp.GetDataFmt());
-                const auto buffer =
-                    std::ranges::find_if(info.buffers, [&attrib](const auto& buffer) {
-                        return buffer.instance_attrib == attrib.semantic;
-                    });
-                // Note that we pass index rather than Id
-                input_params[attrib.semantic] = SpirvAttribute{
-                    .id = {rate_idx},
-                    .pointer_type = input_u32,
-                    .component_type = U32[1],
-                    .num_components = std::min<u16>(attrib.num_elements, num_components),
-                    .is_integer = true,
-                    .is_loaded = false,
-                    .buffer_handle = int(buffer - info.buffers.begin()),
-                };
+            Id id{DefineInput(type, attrib.semantic)};
+            if (attrib.GetStepRate() != Gcn::VertexAttribute::InstanceIdType::None) {
+                Name(id, fmt::format("vs_instance_attr{}", attrib.semantic));
             } else {
-                Id id{DefineInput(type, attrib.semantic)};
-                if (attrib.GetStepRate() == Gcn::VertexAttribute::InstanceIdType::Plain) {
-                    Name(id, fmt::format("vs_instance_attr{}", attrib.semantic));
-                } else {
-                    Name(id, fmt::format("vs_in_attr{}", attrib.semantic));
-                }
-                input_params[attrib.semantic] =
-                    GetAttributeInfo(sharp.GetNumberFmt(), id, 4, false);
+                Name(id, fmt::format("vs_in_attr{}", attrib.semantic));
             }
+            input_params[attrib.semantic] = GetAttributeInfo(sharp.GetNumberFmt(), id, 4, false);
         }
         break;
     }
@@ -573,7 +551,7 @@ void EmitContext::DefineOutputs() {
         cull_distances =
             DefineVariable(type, spv::BuiltIn::CullDistance, spv::StorageClass::Output);
     }
-    if (stage == Shader::Stage::Local && runtime_info.ls_info.links_with_tcs) {
+    if (stage == Stage::Local) {
        const u32 num_attrs = Common::AlignUp(runtime_info.ls_info.ls_stride, 16) >> 4;
        if (num_attrs > 0) {
            const Id type{TypeArray(F32[4], ConstU32(num_attrs))};
@@ -700,12 +678,10 @@ void EmitContext::DefineOutputs() {
 void EmitContext::DefinePushDataBlock() {
     // Create push constants block for instance steps rates
-    const Id struct_type{Name(TypeStruct(U32[1], U32[1], F32[1], F32[1], F32[1], F32[1], U32[4],
-                                         U32[4], U32[4], U32[4], U32[4], U32[4], U32[2]),
-                              "AuxData")};
+    const Id struct_type{Name(TypeStruct(F32[1], F32[1], F32[1], F32[1], U32[4], U32[4], U32[4],
+                                         U32[4], U32[4], U32[4], U32[2]),
+                              "AuxData")};
     Decorate(struct_type, spv::Decoration::Block);
-    MemberName(struct_type, PushData::Step0Index, "sr0");
-    MemberName(struct_type, PushData::Step1Index, "sr1");
     MemberName(struct_type, PushData::XOffsetIndex, "xoffset");
     MemberName(struct_type, PushData::YOffsetIndex, "yoffset");
     MemberName(struct_type, PushData::XScaleIndex, "xscale");
@@ -717,19 +693,17 @@ void EmitContext::DefinePushDataBlock() {
     MemberName(struct_type, PushData::BufOffsetIndex + 0, "buf_offsets0");
     MemberName(struct_type, PushData::BufOffsetIndex + 1, "buf_offsets1");
     MemberName(struct_type, PushData::BufOffsetIndex + 2, "buf_offsets2");
-    MemberDecorate(struct_type, PushData::Step0Index, spv::Decoration::Offset, 0U);
-    MemberDecorate(struct_type, PushData::Step1Index, spv::Decoration::Offset, 4U);
-    MemberDecorate(struct_type, PushData::XOffsetIndex, spv::Decoration::Offset, 8U);
-    MemberDecorate(struct_type, PushData::YOffsetIndex, spv::Decoration::Offset, 12U);
-    MemberDecorate(struct_type, PushData::XScaleIndex, spv::Decoration::Offset, 16U);
-    MemberDecorate(struct_type, PushData::YScaleIndex, spv::Decoration::Offset, 20U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 0, spv::Decoration::Offset, 24U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 1, spv::Decoration::Offset, 40U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 2, spv::Decoration::Offset, 56U);
-    MemberDecorate(struct_type, PushData::UdRegsIndex + 3, spv::Decoration::Offset, 72U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 0, spv::Decoration::Offset, 88U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 1, spv::Decoration::Offset, 104U);
-    MemberDecorate(struct_type, PushData::BufOffsetIndex + 2, spv::Decoration::Offset, 120U);
+    MemberDecorate(struct_type, PushData::XOffsetIndex, spv::Decoration::Offset, 0U);
+    MemberDecorate(struct_type, PushData::YOffsetIndex, spv::Decoration::Offset, 4U);
+    MemberDecorate(struct_type, PushData::XScaleIndex, spv::Decoration::Offset, 8U);
+    MemberDecorate(struct_type, PushData::YScaleIndex, spv::Decoration::Offset, 12U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 0, spv::Decoration::Offset, 16U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 1, spv::Decoration::Offset, 32U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 2, spv::Decoration::Offset, 48U);
+    MemberDecorate(struct_type, PushData::UdRegsIndex + 3, spv::Decoration::Offset, 64U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 0, spv::Decoration::Offset, 80U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 1, spv::Decoration::Offset, 96U);
+    MemberDecorate(struct_type, PushData::BufOffsetIndex + 2, spv::Decoration::Offset, 112U);
     push_data_block = DefineVar(struct_type, spv::StorageClass::PushConstant);
     Name(push_data_block, "push_data");
     interfaces.push_back(push_data_block);
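Removing sr0/sr1 shifts every remaining member of the push-constant block down by 8 bytes. A host-side sketch of the resulting layout, with member names following the MemberName calls and offsets matching the MemberDecorate values above (NumUserDataRegs is 16, so UdRegsIndex 4 covers four uvec4 members):

#include <cstddef>
#include <cstdint>

struct AuxData {
    float xoffset;            // offset 0
    float yoffset;            // offset 4
    float xscale;             // offset 8
    float yscale;             // offset 12
    uint32_t ud_regs[4][4];   // four uvec4s at offsets 16/32/48/64
    uint32_t buf_offsets0[4]; // offset 80
    uint32_t buf_offsets1[4]; // offset 96
    uint32_t buf_offsets2[2]; // offset 112 (a uvec2, per the U32[2] member)
};

static_assert(offsetof(AuxData, ud_regs) == 16);
static_assert(offsetof(AuxData, buf_offsets2) == 112);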
@@ -763,19 +737,19 @@ EmitContext::BufferSpv EmitContext::DefineBuffer(bool is_storage, bool is_writte
         Decorate(id, spv::Decoration::NonWritable);
     }
     switch (buffer_type) {
-    case Shader::BufferType::GdsBuffer:
+    case BufferType::GdsBuffer:
         Name(id, "gds_buffer");
         break;
-    case Shader::BufferType::Flatbuf:
+    case BufferType::Flatbuf:
         Name(id, "srt_flatbuf");
         break;
-    case Shader::BufferType::BdaPagetable:
+    case BufferType::BdaPagetable:
         Name(id, "bda_pagetable");
         break;
-    case Shader::BufferType::FaultBuffer:
+    case BufferType::FaultBuffer:
         Name(id, "fault_buffer");
         break;
-    case Shader::BufferType::SharedMemory:
+    case BufferType::SharedMemory:
         Name(id, "ssbo_shmem");
         break;
     default:

View File

@@ -361,7 +361,6 @@ public:
         u32 num_components;
         bool is_integer{};
         bool is_loaded{};
-        s32 buffer_handle{-1};
     };

     Id input_attr_array;
     Id output_attr_array;

View File

@@ -3,7 +3,6 @@
 #pragma once

-#include <ranges>
 #include <vector>

 #include "common/types.h"
 #include "shader_recompiler/info.h"
@@ -29,11 +28,6 @@ struct VertexAttribute {
         return static_cast<InstanceIdType>(instance_data);
     }

-    [[nodiscard]] bool UsesStepRates() const {
-        const auto step_rate = GetStepRate();
-        return step_rate == OverStepRate0 || step_rate == OverStepRate1;
-    }
-
     [[nodiscard]] constexpr AmdGpu::Buffer GetSharp(const Shader::Info& info) const noexcept {
         return info.ReadUdReg<AmdGpu::Buffer>(sgpr_base, dword_offset);
     }
@@ -52,12 +46,6 @@ struct FetchShaderData {
     s8 vertex_offset_sgpr = -1;   ///< SGPR of vertex offset from VADDR
     s8 instance_offset_sgpr = -1; ///< SGPR of instance offset from VADDR

-    [[nodiscard]] bool UsesStepRates() const {
-        return std::ranges::find_if(attributes, [](const VertexAttribute& attribute) {
-                   return attribute.UsesStepRates();
-               }) != attributes.end();
-    }
-
     bool operator==(const FetchShaderData& other) const {
         return attributes == other.attributes && vertex_offset_sgpr == other.vertex_offset_sgpr &&
                instance_offset_sgpr == other.instance_offset_sgpr;

View File

@@ -90,17 +90,40 @@ void Translator::EmitPrologue(IR::Block* first_block) {
     case LogicalStage::Vertex:
         // v0: vertex ID, always present
         ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::VertexId));
-        // v1: instance ID, step rate 0
-        if (runtime_info.num_input_vgprs > 0) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId0));
-        }
-        // v2: instance ID, step rate 1
-        if (runtime_info.num_input_vgprs > 1) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId1));
-        }
-        // v3: instance ID, plain
-        if (runtime_info.num_input_vgprs > 2) {
-            ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+        if (info.stage == Stage::Local) {
+            // v1: rel patch ID
+            if (runtime_info.num_input_vgprs > 0) {
+                ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+            }
+            // v2: instance ID
+            if (runtime_info.num_input_vgprs > 1) {
+                ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+            }
+        } else {
+            // v1: instance ID, step rate 0
+            if (runtime_info.num_input_vgprs > 0) {
+                if (runtime_info.vs_info.step_rate_0 != 0) {
+                    ir.SetVectorReg(dst_vreg++,
+                                    ir.IDiv(ir.GetAttributeU32(IR::Attribute::InstanceId),
+                                            ir.Imm32(runtime_info.vs_info.step_rate_0)));
+                } else {
+                    ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+                }
+            }
+            // v2: instance ID, step rate 1
+            if (runtime_info.num_input_vgprs > 1) {
+                if (runtime_info.vs_info.step_rate_1 != 0) {
+                    ir.SetVectorReg(dst_vreg++,
+                                    ir.IDiv(ir.GetAttributeU32(IR::Attribute::InstanceId),
+                                            ir.Imm32(runtime_info.vs_info.step_rate_1)));
+                } else {
+                    ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
+                }
+            }
+            // v3: instance ID, plain
+            if (runtime_info.num_input_vgprs > 2) {
+                ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::InstanceId));
+            }
         }
         break;
     case LogicalStage::Fragment:
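A worked example of the reworked prologue for the non-Local path, under assumed register values: with vs_info.step_rate_0 == 4, v1 now advances once every four instances, and a step rate of 0 falls back to seeding zero rather than emitting a division by zero:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t step_rate_0 = 4; // assumed vgt_instance_step_rate_0 value
    for (uint32_t instance_id = 0; instance_id < 8; ++instance_id) {
        const uint32_t v1 = step_rate_0 != 0 ? instance_id / step_rate_0 : 0;
        std::printf("instance %u -> v1 = %u\n", instance_id, v1); // 0 0 0 0 1 1 1 1
    }
    return 0;
}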
@@ -183,10 +206,8 @@ void Translator::EmitPrologue(IR::Block* first_block) {
         switch (runtime_info.gs_info.out_primitive[0]) {
         case AmdGpu::GsOutputPrimitiveType::TriangleStrip:
             ir.SetVectorReg(IR::VectorReg::V3, ir.Imm32(2u)); // vertex 2
-            [[fallthrough]];
         case AmdGpu::GsOutputPrimitiveType::LineStrip:
             ir.SetVectorReg(IR::VectorReg::V1, ir.Imm32(1u)); // vertex 1
-            [[fallthrough]];
         default:
             ir.SetVectorReg(IR::VectorReg::V0, ir.Imm32(0u)); // vertex 0
             break;
@@ -481,11 +502,11 @@ void Translator::SetDst64(const InstOperand& operand, const IR::U64F64& value_ra
 }

 void Translator::EmitFetch(const GcnInst& inst) {
-    // Read the pointer to the fetch shader assembly.
     const auto code_sgpr_base = inst.src[0].code;
+
+    // The fetch shader must be inlined to access as regular buffers, so that
+    // bounds checks can be emitted to emulate robust buffer access.
     if (!profile.supports_robust_buffer_access) {
-        // The fetch shader must be inlined to access as regular buffers, so that
-        // bounds checks can be emitted to emulate robust buffer access.
         const auto* code = GetFetchShaderCode(info, code_sgpr_base);
         GcnCodeSlice slice(code, code + std::numeric_limits<u32>::max());
         GcnDecodeContext decoder;
@@ -535,16 +556,6 @@ void Translator::EmitFetch(const GcnInst& inst) {
         for (u32 i = 0; i < 4; i++) {
             ir.SetVectorReg(dst_reg++, IR::F32{ir.CompositeExtract(swizzled, i)});
         }
-
-        // In case of programmable step rates we need to fallback to instance data pulling in
-        // shader, so VBs should be bound as regular data buffers
-        if (attrib.UsesStepRates()) {
-            info.buffers.push_back({
-                .sharp_idx = info.srt_info.ReserveSharp(attrib.sgpr_base, attrib.dword_offset, 4),
-                .used_types = IR::Type::F32,
-                .instance_attrib = attrib.semantic,
-            });
-        }
     }
 }

View File

@@ -113,17 +113,13 @@ struct FMaskResource {
 using FMaskResourceList = boost::container::small_vector<FMaskResource, NumFMasks>;

 struct PushData {
-    static constexpr u32 Step0Index = 0;
-    static constexpr u32 Step1Index = 1;
-    static constexpr u32 XOffsetIndex = 2;
-    static constexpr u32 YOffsetIndex = 3;
-    static constexpr u32 XScaleIndex = 4;
-    static constexpr u32 YScaleIndex = 5;
-    static constexpr u32 UdRegsIndex = 6;
+    static constexpr u32 XOffsetIndex = 0;
+    static constexpr u32 YOffsetIndex = 1;
+    static constexpr u32 XScaleIndex = 2;
+    static constexpr u32 YScaleIndex = 3;
+    static constexpr u32 UdRegsIndex = 4;
     static constexpr u32 BufOffsetIndex = UdRegsIndex + NumUserDataRegs / 4;

-    u32 step0;
-    u32 step1;
     float xoffset;
     float yoffset;
     float xscale;

View File

@@ -100,22 +100,36 @@ std::string NameOf(Attribute attribute) {
         return "Param30";
     case Attribute::Param31:
         return "Param31";
+    case Attribute::ClipDistance:
+        return "ClipDistanace";
+    case Attribute::CullDistance:
+        return "CullDistance";
+    case Attribute::RenderTargetId:
+        return "RenderTargetId";
+    case Attribute::ViewportId:
+        return "ViewportId";
     case Attribute::VertexId:
         return "VertexId";
-    case Attribute::InstanceId:
-        return "InstanceId";
     case Attribute::PrimitiveId:
         return "PrimitiveId";
-    case Attribute::FragCoord:
-        return "FragCoord";
+    case Attribute::InstanceId:
+        return "InstanceId";
     case Attribute::IsFrontFace:
         return "IsFrontFace";
+    case Attribute::SampleIndex:
+        return "SampleIndex";
+    case Attribute::GlobalInvocationId:
+        return "GlobalInvocationId";
     case Attribute::WorkgroupId:
         return "WorkgroupId";
+    case Attribute::WorkgroupIndex:
+        return "WorkgroupIndex";
     case Attribute::LocalInvocationId:
         return "LocalInvocationId";
     case Attribute::LocalInvocationIndex:
         return "LocalInvocationIndex";
+    case Attribute::FragCoord:
+        return "FragCoord";
     case Attribute::InvocationId:
         return "InvocationId";
     case Attribute::PatchVertices:

View File

@@ -73,8 +73,6 @@ enum class Attribute : u64 {
     LocalInvocationId = 76,
     LocalInvocationIndex = 77,
     FragCoord = 78,
-    InstanceId0 = 79, // step rate 0
-    InstanceId1 = 80, // step rate 1
     InvocationId = 81, // TCS id in output patch and instanced geometry shader id
     PatchVertices = 82,
     TessellationEvaluationPointU = 83,

View File

@@ -255,8 +255,8 @@ void IREmitter::SetM0(const U32& value) {
     Inst(Opcode::SetM0, value);
 }

-F32 IREmitter::GetAttribute(IR::Attribute attribute, u32 comp, IR::Value index) {
-    return Inst<F32>(Opcode::GetAttribute, attribute, Imm32(comp), index);
+F32 IREmitter::GetAttribute(IR::Attribute attribute, u32 comp, u32 index) {
+    return Inst<F32>(Opcode::GetAttribute, attribute, Imm32(comp), Imm32(index));
 }

 U32 IREmitter::GetAttributeU32(IR::Attribute attribute, u32 comp) {

View File

@@ -81,8 +81,7 @@ public:
     [[nodiscard]] U1 Condition(IR::Condition cond);

-    [[nodiscard]] F32 GetAttribute(Attribute attribute, u32 comp = 0,
-                                   IR::Value index = IR::Value(u32(0u)));
+    [[nodiscard]] F32 GetAttribute(Attribute attribute, u32 comp = 0, u32 index = 0);
     [[nodiscard]] U32 GetAttributeU32(Attribute attribute, u32 comp = 0);
     void SetAttribute(Attribute attribute, const F32& value, u32 comp = 0);

View File

@@ -191,7 +191,7 @@ static void VisitPointer(u32 off_dw, IR::Inst* subtree, PassInfo& pass_info,
 static void GenerateSrtProgram(Info& info, PassInfo& pass_info) {
     Xbyak::CodeGenerator& c = g_srt_codegen;

-    if (info.srt_info.srt_reservations.empty() && pass_info.srt_roots.empty()) {
+    if (pass_info.srt_roots.empty()) {
         return;
     }
@@ -205,29 +205,7 @@ static void GenerateSrtProgram(Info& info, PassInfo& pass_info) {
     }

     info.srt_info.walker_func = c.getCurr<PFN_SrtWalker>();

     pass_info.dst_off_dw = NumUserDataRegs;

-    // Special case for V# step rate buffers in fetch shader
-    for (const auto [sgpr_base, dword_offset, num_dwords] : info.srt_info.srt_reservations) {
-        // get pointer to V#
-        if (sgpr_base != IR::NumScalarRegs) {
-            PushPtr(c, sgpr_base);
-        }
-        u32 src_off = dword_offset << 2;
-        for (auto j = 0; j < num_dwords; j++) {
-            c.mov(r11d, ptr[rdi + src_off]);
-            c.mov(ptr[rsi + (pass_info.dst_off_dw << 2)], r11d);
-            src_off += 4;
-            ++pass_info.dst_off_dw;
-        }
-        if (sgpr_base != IR::NumScalarRegs) {
-            PopPtr(c);
-        }
-    }
-
     ASSERT(pass_info.dst_off_dw == info.srt_info.flattened_bufsize_dw);

     for (const auto& [sgpr_base, root] : pass_info.srt_roots) {

View File

@@ -33,12 +33,9 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
             bool is_composite = opcode == IR::Opcode::WriteSharedU64;
             u32 num_components = opcode == IR::Opcode::WriteSharedU32 ? 1 : 2;

-            u32 offset = 0;
-            const auto* addr = inst.Arg(0).InstRecursive();
-            if (addr->GetOpcode() == IR::Opcode::IAdd32) {
-                ASSERT(addr->Arg(1).IsImmediate());
-                offset = addr->Arg(1).U32();
-            }
+            ASSERT(inst.Arg(0).IsImmediate());
+
+            u32 offset = inst.Arg(0).U32();
             IR::Value data = is_composite ? ir.UnpackUint2x32(IR::U64{inst.Arg(1).Resolve()})
                                           : inst.Arg(1).Resolve();
             for (s32 i = 0; i < num_components; i++) {
@@ -116,7 +113,7 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
             }

             const auto shl_inst = inst.Arg(1).TryInstRecursive();
-            const auto vertex_id = ir.Imm32(shl_inst->Arg(0).Resolve().U32() >> 2);
+            const auto vertex_id = shl_inst->Arg(0).Resolve().U32() >> 2;
             const auto offset = inst.Arg(1).TryInstRecursive()->Arg(1);
             const auto bucket = offset.Resolve().U32() / 256u;
             const auto attrib = bucket < 4 ? IR::Attribute::Position0

View File

@@ -20,18 +20,7 @@ struct PersistentSrtInfo {
     };

     PFN_SrtWalker walker_func{};
-    boost::container::small_vector<SrtSharpReservation, 2> srt_reservations;
     u32 flattened_bufsize_dw = 16; // NumUserDataRegs
-
-    // Special case for fetch shaders because we don't generate IR to read from step rate buffers,
-    // so we won't see usage with GetUserData/ReadConst.
-    // Reserve space in the flattened buffer for a sharp ahead of time
-    u32 ReserveSharp(u32 sgpr_base, u32 dword_offset, u32 num_dwords) {
-        u32 rv = flattened_bufsize_dw;
-        srt_reservations.emplace_back(sgpr_base, dword_offset, num_dwords);
-        flattened_bufsize_dw += num_dwords;
-        return rv;
-    }
 };

 } // namespace Shader

View File

@@ -42,7 +42,6 @@ constexpr u32 MaxStageTypes = static_cast<u32>(LogicalStage::NumLogicalStages);

 struct LocalRuntimeInfo {
     u32 ls_stride;
-    bool links_with_tcs;

     auto operator<=>(const LocalRuntimeInfo&) const noexcept = default;
 };
@@ -85,6 +84,8 @@ struct VertexRuntimeInfo {
     std::array<VsOutputMap, 3> outputs;
     bool emulate_depth_negative_one_to_one{};
     bool clip_disable{};
+    u32 step_rate_0;
+    u32 step_rate_1;
     // Domain
     AmdGpu::TessellationType tess_type;
     AmdGpu::TessellationTopology tess_topology;
@@ -96,7 +97,8 @@ struct VertexRuntimeInfo {
                clip_disable == other.clip_disable && tess_type == other.tess_type &&
                tess_topology == other.tess_topology &&
                tess_partitioning == other.tess_partitioning &&
-               hs_output_cp_stride == other.hs_output_cp_stride;
+               hs_output_cp_stride == other.hs_output_cp_stride &&
+               step_rate_0 == other.step_rate_0 && step_rate_1 == other.step_rate_1;
     }

     void InitFromTessConstants(Shader::TessellationDataConstantBuffer& tess_constants) {

View File

@@ -13,7 +13,7 @@
 namespace Shader {

 struct VsAttribSpecialization {
-    s32 num_components{};
+    u32 divisor{};
     AmdGpu::NumberClass num_class{};
     AmdGpu::CompMapping dst_select{};
@@ -74,13 +74,13 @@ struct SamplerSpecialization {
 * after the first compilation of a module.
 */
 struct StageSpecialization {
-    static constexpr size_t MaxStageResources = 64;
+    static constexpr size_t MaxStageResources = 128;

     const Shader::Info* info;
     RuntimeInfo runtime_info;
-    std::bitset<MaxStageResources> bitset{};
     std::optional<Gcn::FetchShaderData> fetch_shader_data{};
     boost::container::small_vector<VsAttribSpecialization, 32> vs_attribs;
+    std::bitset<MaxStageResources> bitset{};
     boost::container::small_vector<BufferSpecialization, 16> buffers;
     boost::container::small_vector<ImageSpecialization, 16> images;
     boost::container::small_vector<FMaskSpecialization, 8> fmasks;
@@ -94,10 +94,16 @@ struct StageSpecialization {
         if (info_.stage == Stage::Vertex && fetch_shader_data) {
             // Specialize shader on VS input number types to follow spec.
             ForEachSharp(vs_attribs, fetch_shader_data->attributes,
-                         [&profile_](auto& spec, const auto& desc, AmdGpu::Buffer sharp) {
-                             spec.num_components = desc.UsesStepRates()
-                                                       ? AmdGpu::NumComponents(sharp.GetDataFmt())
-                                                       : 0;
+                         [&profile_, this](auto& spec, const auto& desc, AmdGpu::Buffer sharp) {
+                             using InstanceIdType = Shader::Gcn::VertexAttribute::InstanceIdType;
+                             if (const auto step_rate = desc.GetStepRate();
+                                 step_rate != InstanceIdType::None) {
+                                 spec.divisor = step_rate == InstanceIdType::OverStepRate0
+                                                    ? runtime_info.vs_info.step_rate_0
+                                                    : (step_rate == InstanceIdType::OverStepRate1
+                                                           ? runtime_info.vs_info.step_rate_1
+                                                           : 1);
+                             }
                              spec.num_class = profile_.support_legacy_vertex_attributes
                                                   ? AmdGpu::NumberClass{}
                                                   : AmdGpu::GetNumberClass(sharp.GetNumberFmt());

View File

@@ -198,10 +198,13 @@ void BufferCache::DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 si
 }

 void BufferCache::BindVertexBuffers(const Vulkan::GraphicsPipeline& pipeline) {
+    const auto& regs = liverpool->regs;
     Vulkan::VertexInputs<vk::VertexInputAttributeDescription2EXT> attributes;
     Vulkan::VertexInputs<vk::VertexInputBindingDescription2EXT> bindings;
+    Vulkan::VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT> divisors;
     Vulkan::VertexInputs<AmdGpu::Buffer> guest_buffers;
-    pipeline.GetVertexInputs(attributes, bindings, guest_buffers);
+    pipeline.GetVertexInputs(attributes, bindings, divisors, guest_buffers,
+                             regs.vgt_instance_step_rate_0, regs.vgt_instance_step_rate_1);

     if (instance.IsVertexInputDynamicState()) {
         // Update current vertex inputs.

View File

@@ -72,12 +72,21 @@ GraphicsPipeline::GraphicsPipeline(

     VertexInputs<vk::VertexInputAttributeDescription> vertex_attributes;
     VertexInputs<vk::VertexInputBindingDescription> vertex_bindings;
+    VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT> divisors;
     VertexInputs<AmdGpu::Buffer> guest_buffers;
     if (!instance.IsVertexInputDynamicState()) {
-        GetVertexInputs(vertex_attributes, vertex_bindings, guest_buffers);
+        const auto& vs_info = runtime_infos[u32(Shader::LogicalStage::Vertex)].vs_info;
+        GetVertexInputs(vertex_attributes, vertex_bindings, divisors, guest_buffers,
+                        vs_info.step_rate_0, vs_info.step_rate_1);
     }

+    const vk::PipelineVertexInputDivisorStateCreateInfo divisor_state = {
+        .vertexBindingDivisorCount = static_cast<u32>(divisors.size()),
+        .pVertexBindingDivisors = divisors.data(),
+    };
+
     const vk::PipelineVertexInputStateCreateInfo vertex_input_info = {
+        .pNext = divisors.empty() ? nullptr : &divisor_state,
         .vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size()),
         .pVertexBindingDescriptions = vertex_bindings.data(),
         .vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size()),
@@ -304,19 +313,17 @@ GraphicsPipeline::GraphicsPipeline(

 GraphicsPipeline::~GraphicsPipeline() = default;

 template <typename Attribute, typename Binding>
-void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,
-                                       VertexInputs<Binding>& bindings,
-                                       VertexInputs<AmdGpu::Buffer>& guest_buffers) const {
+void GraphicsPipeline::GetVertexInputs(
+    VertexInputs<Attribute>& attributes, VertexInputs<Binding>& bindings,
+    VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT>& divisors,
+    VertexInputs<AmdGpu::Buffer>& guest_buffers, u32 step_rate_0, u32 step_rate_1) const {
+    using InstanceIdType = Shader::Gcn::VertexAttribute::InstanceIdType;
     if (!fetch_shader || fetch_shader->attributes.empty()) {
         return;
     }

     const auto& vs_info = GetStage(Shader::LogicalStage::Vertex);
     for (const auto& attrib : fetch_shader->attributes) {
-        if (attrib.UsesStepRates()) {
-            // Skip attribute binding as the data will be pulled by shader.
-            continue;
-        }
+        const auto step_rate = attrib.GetStepRate();
         const auto& buffer = attrib.GetSharp(vs_info);
         attributes.push_back(Attribute{
             .location = attrib.semantic,
@@ -327,12 +334,19 @@ void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,
         bindings.push_back(Binding{
             .binding = attrib.semantic,
             .stride = buffer.GetStride(),
-            .inputRate = attrib.GetStepRate() == Shader::Gcn::VertexAttribute::InstanceIdType::None
-                             ? vk::VertexInputRate::eVertex
-                             : vk::VertexInputRate::eInstance,
+            .inputRate = step_rate == InstanceIdType::None ? vk::VertexInputRate::eVertex
+                                                           : vk::VertexInputRate::eInstance,
         });
+        const u32 divisor = step_rate == InstanceIdType::OverStepRate0
+                                ? step_rate_0
+                                : (step_rate == InstanceIdType::OverStepRate1 ? step_rate_1 : 1);
         if constexpr (std::is_same_v<Binding, vk::VertexInputBindingDescription2EXT>) {
-            bindings.back().divisor = 1;
+            bindings.back().divisor = divisor;
+        } else if (step_rate != InstanceIdType::None) {
+            divisors.push_back(vk::VertexInputBindingDivisorDescriptionEXT{
+                .binding = attrib.semantic,
+                .divisor = divisor,
+            });
         }
         guest_buffers.emplace_back(buffer);
     }
@@ -342,11 +356,13 @@ void GraphicsPipeline::GetVertexInputs(VertexInputs<Attribute>& attributes,

 template void GraphicsPipeline::GetVertexInputs(
     VertexInputs<vk::VertexInputAttributeDescription>& attributes,
     VertexInputs<vk::VertexInputBindingDescription>& bindings,
-    VertexInputs<AmdGpu::Buffer>& guest_buffers) const;
+    VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT>& divisors,
+    VertexInputs<AmdGpu::Buffer>& guest_buffers, u32 step_rate_0, u32 step_rate_1) const;
 template void GraphicsPipeline::GetVertexInputs(
     VertexInputs<vk::VertexInputAttributeDescription2EXT>& attributes,
     VertexInputs<vk::VertexInputBindingDescription2EXT>& bindings,
-    VertexInputs<AmdGpu::Buffer>& guest_buffers) const;
+    VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT>& divisors,
+    VertexInputs<AmdGpu::Buffer>& guest_buffers, u32 step_rate_0, u32 step_rate_1) const;

 void GraphicsPipeline::BuildDescSetLayout() {
     boost::container::small_vector<vk::DescriptorSetLayoutBinding, 32> bindings;
View File

@@ -81,7 +81,9 @@ public:
     /// Gets the attributes and bindings for vertex inputs.
     template <typename Attribute, typename Binding>
     void GetVertexInputs(VertexInputs<Attribute>& attributes, VertexInputs<Binding>& bindings,
-                         VertexInputs<AmdGpu::Buffer>& guest_buffers) const;
+                         VertexInputs<vk::VertexInputBindingDivisorDescriptionEXT>& divisors,
+                         VertexInputs<AmdGpu::Buffer>& guest_buffers, u32 step_rate_0,
+                         u32 step_rate_1) const;

 private:
     void BuildDescSetLayout();

View File

@@ -248,6 +248,7 @@ bool Instance::CreateDevice() {
     // Required
     ASSERT(add_extension(VK_KHR_SWAPCHAIN_EXTENSION_NAME));
     ASSERT(add_extension(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME));
+    ASSERT(add_extension(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME));

     // Optional
     depth_range_unrestricted = add_extension(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
@@ -436,6 +437,9 @@ bool Instance::CreateDevice() {
         vk::PhysicalDeviceLegacyVertexAttributesFeaturesEXT{
             .legacyVertexAttributes = true,
         },
+        vk::PhysicalDeviceVertexAttributeDivisorFeatures{
+            .vertexAttributeInstanceRateDivisor = true,
+        },
         vk::PhysicalDeviceShaderAtomicFloat2FeaturesEXT{
             .shaderBufferFloat32AtomicMinMax =
                 shader_atomic_float2_features.shaderBufferFloat32AtomicMinMax,

View File

@@ -94,15 +94,10 @@ const Shader::RuntimeInfo& PipelineCache::BuildRuntimeInfo(Stage stage, LogicalS
     switch (stage) {
     case Stage::Local: {
         BuildCommon(regs.ls_program);
-        if (regs.stage_enable.IsStageEnabled(static_cast<u32>(Stage::Hull))) {
-            info.ls_info.links_with_tcs = true;
-            Shader::TessellationDataConstantBuffer tess_constants;
-            const auto* pgm = regs.ProgramForStage(static_cast<u32>(Stage::Hull));
-            const auto params = Liverpool::GetParams(*pgm);
-            const auto& hull_info = program_cache.at(params.hash)->info;
-            hull_info.ReadTessConstantBuffer(tess_constants);
-            info.ls_info.ls_stride = tess_constants.ls_stride;
-        }
+        Shader::TessellationDataConstantBuffer tess_constants;
+        const auto* hull_info = infos[u32(Shader::LogicalStage::TessellationControl)];
+        hull_info->ReadTessConstantBuffer(tess_constants);
+        info.ls_info.ls_stride = tess_constants.ls_stride;
         break;
     }
     case Stage::Hull: {
@@ -122,6 +117,8 @@ const Shader::RuntimeInfo& PipelineCache::BuildRuntimeInfo(Stage stage, LogicalS
     case Stage::Vertex: {
         BuildCommon(regs.vs_program);
         GatherVertexOutputs(info.vs_info, regs.vs_output_control);
+        info.vs_info.step_rate_0 = regs.vgt_instance_step_rate_0;
+        info.vs_info.step_rate_1 = regs.vgt_instance_step_rate_1;
         info.vs_info.emulate_depth_negative_one_to_one =
             !instance.IsDepthClipControlSupported() &&
             regs.clipper_control.clip_space == Liverpool::ClipSpace::MinusWToW;
@@ -460,10 +457,6 @@ bool PipelineCache::RefreshGraphicsKey() {
     // Stride will still be handled outside the pipeline using dynamic state.
     u32 vertex_binding = 0;
     for (const auto& attrib : fetch_shader->attributes) {
-        if (attrib.UsesStepRates()) {
-            // Skip attribute binding as the data will be pulled by shader.
-            continue;
-        }
        const auto& buffer = attrib.GetSharp(*vs_info);
        ASSERT(vertex_binding < MaxVertexBufferCount);
        key.vertex_buffer_formats[vertex_binding++] =

View File

@@ -20,12 +20,9 @@
 namespace Vulkan {

 static Shader::PushData MakeUserData(const AmdGpu::Liverpool::Regs& regs) {
-    Shader::PushData push_data{};
-    push_data.step0 = regs.vgt_instance_step_rate_0;
-    push_data.step1 = regs.vgt_instance_step_rate_1;
-
     // TODO(roamic): Add support for multiple viewports and geometry shaders when ViewportIndex
     // is encountered and implemented in the recompiler.
+    Shader::PushData push_data{};
     push_data.xoffset = regs.viewport_control.xoffset_enable ? regs.viewports[0].xoffset : 0.f;
     push_data.xscale = regs.viewport_control.xscale_enable ? regs.viewports[0].xscale : 1.f;
     push_data.yoffset = regs.viewport_control.yoffset_enable ? regs.viewports[0].yoffset : 0.f;