emit_spirv_context: Add infrastructure for buffer aliases
* Splits out the buffer creation function so it can be reused when defining multiple type aliases
parent 1a6d0c8d0a
commit 027c198d74
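The pattern this diff introduces is that a buffer binding no longer carries a single id/pointer_type pair; it carries an array of per-type views ("aliases") indexed by a BufferAlias enum, so the same binding can later be accessed as U8, U16, U32, or F32 without redefining it. The following is an illustrative, self-contained sketch of that indexing pattern only; the stand-in types are simplified and are not the emulator's real Id, BufferSpv, or BufferDefinition definitions.

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Stand-in for the SPIR-V Id type used by the real emitter (Sirit's Id).
    using FakeId = std::uint32_t;

    enum class BufferAlias : std::uint32_t { U8, U16, U32, F32, NumAlias };

    // One typed view of a bound buffer: the variable id plus its element pointer type.
    struct BufferSpv {
        FakeId id{};
        FakeId pointer_type{};
    };

    // A binding owns one BufferSpv per alias; call sites pick the view they need.
    struct BufferDefinition {
        std::uint32_t binding{};
        std::array<BufferSpv, static_cast<std::size_t>(BufferAlias::NumAlias)> aliases{};

        BufferSpv& operator[](BufferAlias alias) {
            return aliases[static_cast<std::size_t>(alias)];
        }
        const BufferSpv& operator[](BufferAlias alias) const {
            return aliases[static_cast<std::size_t>(alias)];
        }
    };

    int main() {
        BufferDefinition buffer{};
        buffer[BufferAlias::U32] = {5, 6};                         // register the U32 view
        const auto [id, pointer_type] = buffer[BufferAlias::U32];  // call sites destructure it
        return (id == 5 && pointer_type == 6) ? 0 : 1;
    }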
@@ -23,10 +23,11 @@ Id SharedAtomicU32(EmitContext& ctx, Id offset, Id value,
 
 Id BufferAtomicU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value,
                    Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
-    auto& buffer = ctx.buffers[handle];
+    const auto& buffer = ctx.buffers[handle];
     address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
     const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
-    const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index);
+    const auto [id, pointer_type] = buffer[EmitContext::BufferAlias::U32];
+    const Id ptr = ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index);
     const auto [scope, semantics]{AtomicArgs(ctx)};
     return (ctx.*atomic_func)(ctx.U32[1], ptr, scope, semantics, value);
 }
@@ -165,17 +166,17 @@ Id EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id co
 }
 
 Id EmitDataAppend(EmitContext& ctx, u32 gds_addr, u32 binding) {
-    auto& buffer = ctx.buffers[binding];
-    const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value,
-                                     ctx.ConstU32(gds_addr));
+    const auto& buffer = ctx.buffers[binding];
+    const auto [id, pointer_type] = buffer[EmitContext::BufferAlias::U32];
+    const Id ptr = ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, ctx.ConstU32(gds_addr));
     const auto [scope, semantics]{AtomicArgs(ctx)};
     return ctx.OpAtomicIIncrement(ctx.U32[1], ptr, scope, semantics);
 }
 
 Id EmitDataConsume(EmitContext& ctx, u32 gds_addr, u32 binding) {
-    auto& buffer = ctx.buffers[binding];
-    const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value,
-                                     ctx.ConstU32(gds_addr));
+    const auto& buffer = ctx.buffers[binding];
+    const auto [id, pointer_type] = buffer[EmitContext::BufferAlias::U32];
+    const Id ptr = ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, ctx.ConstU32(gds_addr));
     const auto [scope, semantics]{AtomicArgs(ctx)};
     return ctx.OpAtomicIDecrement(ctx.U32[1], ptr, scope, semantics);
 }
@@ -160,21 +160,23 @@ void EmitGetGotoVariable(EmitContext&) {
     UNREACHABLE_MSG("Unreachable instruction");
 }
 
+using BufferAlias = EmitContext::BufferAlias;
+
 Id EmitReadConst(EmitContext& ctx, IR::Inst* inst) {
-    u32 flatbuf_off_dw = inst->Flags<u32>();
-    ASSERT(ctx.srt_flatbuf.binding >= 0);
-    ASSERT(flatbuf_off_dw > 0);
-    Id index = ctx.ConstU32(flatbuf_off_dw);
-    auto& buffer = ctx.srt_flatbuf;
-    const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
+    const u32 flatbuf_off_dw = inst->Flags<u32>();
+    ASSERT(ctx.srt_flatbuf.binding >= 0 && flatbuf_off_dw > 0);
+    const auto& buffer = ctx.srt_flatbuf;
+    const auto [id, pointer_type] = buffer[BufferAlias::U32];
+    const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, ctx.ConstU32(flatbuf_off_dw))};
     return ctx.OpLoad(ctx.U32[1], ptr);
 }
 
 Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
-    auto& buffer = ctx.buffers[handle];
+    const auto& buffer = ctx.buffers[handle];
     index = ctx.OpIAdd(ctx.U32[1], index, buffer.offset_dwords);
-    const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
-    return ctx.OpLoad(buffer.data_types->Get(1), ptr);
+    const auto [id, pointer_type] = buffer[BufferAlias::U32];
+    const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
+    return ctx.OpLoad(ctx.U32[1], ptr);
 }
 
 Id EmitReadStepRate(EmitContext& ctx, int rate_idx) {
@@ -398,21 +400,22 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
 
 template <u32 N>
 static Id EmitLoadBufferU32xN(EmitContext& ctx, u32 handle, Id address) {
-    auto& buffer = ctx.buffers[handle];
+    const auto& buffer = ctx.buffers[handle];
     address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
+    const auto [id, pointer_type] = buffer[BufferAlias::U32];
     const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
     if constexpr (N == 1) {
-        const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
-        return ctx.OpLoad(buffer.data_types->Get(1), ptr);
+        const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
+        return ctx.OpLoad(ctx.U32[1], ptr);
    } else {
         boost::container::static_vector<Id, N> ids;
         for (u32 i = 0; i < N; i++) {
             const Id index_i = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(i));
             const Id ptr{
-                ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index_i)};
-            ids.push_back(ctx.OpLoad(buffer.data_types->Get(1), ptr));
+                ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index_i)};
+            ids.push_back(ctx.OpLoad(ctx.U32[1], ptr));
         }
-        return ctx.OpCompositeConstruct(buffer.data_types->Get(N), ids);
+        return ctx.OpCompositeConstruct(ctx.U32[N], ids);
     }
 }
 
@@ -470,16 +473,17 @@ template <u32 N>
 static void EmitStoreBufferU32xN(EmitContext& ctx, u32 handle, Id address, Id value) {
     auto& buffer = ctx.buffers[handle];
     address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
+    const auto [id, pointer_type] = buffer[BufferAlias::U32];
     const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
     if constexpr (N == 1) {
-        const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
+        const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
         ctx.OpStore(ptr, value);
     } else {
         for (u32 i = 0; i < N; i++) {
             const Id index_i = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(i));
             const Id ptr =
-                ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index_i);
-            ctx.OpStore(ptr, ctx.OpCompositeExtract(buffer.data_types->Get(1), value, i));
+                ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index_i);
+            ctx.OpStore(ptr, ctx.OpCompositeExtract(ctx.U32[1], value, i));
         }
     }
 }
@@ -588,78 +588,62 @@ void EmitContext::DefinePushDataBlock() {
     interfaces.push_back(push_data_block);
 }
 
-void EmitContext::DefineBuffers() {
-    boost::container::small_vector<Id, 8> type_ids;
-    const auto define_struct = [&](Id record_array_type, bool is_instance_data,
-                                   std::optional<std::string_view> explicit_name = {}) {
-        const Id struct_type{TypeStruct(record_array_type)};
-        if (std::ranges::find(type_ids, record_array_type.value, &Id::value) != type_ids.end()) {
-            return struct_type;
-        }
-        Decorate(record_array_type, spv::Decoration::ArrayStride, 4);
-        auto name = is_instance_data ? fmt::format("{}_instance_data_f32", stage)
-                                     : fmt::format("{}_cbuf_block_f32", stage);
-        name = explicit_name.value_or(name);
-        Name(struct_type, name);
+EmitContext::BufferSpv EmitContext::DefineBuffer(bool is_storage, bool is_written, u32 elem_shift,
+                                                 BufferType buffer_type, Id data_type) {
+    // Define array type.
+    const Id max_num_items = ConstU32(u32(profile.max_ubo_size) >> elem_shift);
+    const Id record_array_type{is_storage ? TypeRuntimeArray(data_type)
+                                          : TypeArray(data_type, max_num_items)};
+    // Define block struct type. Don't perform decorations twice on the same Id.
+    const Id struct_type{TypeStruct(record_array_type)};
+    if (std::ranges::find(buf_type_ids, record_array_type.value, &Id::value) == buf_type_ids.end()) {
+        Decorate(record_array_type, spv::Decoration::ArrayStride, 1 << elem_shift);
         Decorate(struct_type, spv::Decoration::Block);
         MemberName(struct_type, 0, "data");
         MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
-        type_ids.push_back(record_array_type);
-        return struct_type;
-    };
-
-    if (info.has_readconst) {
-        const Id data_type = U32[1];
-        const auto storage_class = spv::StorageClass::Uniform;
-        const Id pointer_type = TypePointer(storage_class, data_type);
-        const Id record_array_type{
-            TypeArray(U32[1], ConstU32(static_cast<u32>(info.flattened_ud_buf.size())))};
-
-        const Id struct_type{define_struct(record_array_type, false, "srt_flatbuf_ty")};
-
-        const Id struct_pointer_type{TypePointer(storage_class, struct_type)};
-        const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
-        Decorate(id, spv::Decoration::Binding, binding.unified++);
-        Decorate(id, spv::Decoration::DescriptorSet, 0U);
+        buf_type_ids.push_back(record_array_type);
+    }
+    // Define buffer binding interface.
+    const auto storage_class =
+        is_storage ? spv::StorageClass::StorageBuffer : spv::StorageClass::Uniform;
+    const Id struct_pointer_type{TypePointer(storage_class, struct_type)};
+    const Id pointer_type = TypePointer(storage_class, data_type);
+    const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
+    Decorate(id, spv::Decoration::Binding, binding.unified);
+    Decorate(id, spv::Decoration::DescriptorSet, 0U);
+    if (is_storage && !is_written) {
+        Decorate(id, spv::Decoration::NonWritable);
+    }
+    switch (buffer_type) {
+    case Shader::BufferType::GdsBuffer:
+        Name(id, "gds_buffer");
+        break;
+    case Shader::BufferType::ReadConstUbo:
         Name(id, "srt_flatbuf_ubo");
+        break;
+    case Shader::BufferType::SharedMemory:
+        Name(id, "ssbo_shmem");
+        break;
+    default:
+        Name(id, fmt::format("{}_{}", is_storage ? "ssbo" : "ubo", binding.buffer));
+    }
+    interfaces.push_back(id);
+    return {id, pointer_type};
+};
 
-        srt_flatbuf = {
-            .id = id,
-            .binding = binding.buffer++,
-            .pointer_type = pointer_type,
-        };
-        interfaces.push_back(id);
+void EmitContext::DefineBuffers() {
+    if (info.has_readconst) {
+        srt_flatbuf[BufferAlias::U32] = DefineBuffer(false, false, 2, BufferType::ReadConstUbo, U32[1]);
+        srt_flatbuf.binding = binding.buffer++;
+        ++binding.unified;
     }
 
     for (const auto& desc : info.buffers) {
-        const auto sharp = desc.GetSharp(info);
-        const bool is_storage = desc.IsStorage(sharp, profile);
-        const u32 array_size = profile.max_ubo_size >> 2;
-        const auto* data_types = True(desc.used_types & IR::Type::F32) ? &F32 : &U32;
-        const Id data_type = (*data_types)[1];
-        const Id record_array_type{is_storage ? TypeRuntimeArray(data_type)
-                                              : TypeArray(data_type, ConstU32(array_size))};
-        const Id struct_type{define_struct(record_array_type, desc.is_instance_data)};
-
-        const auto storage_class =
-            is_storage ? spv::StorageClass::StorageBuffer : spv::StorageClass::Uniform;
-        const Id struct_pointer_type{TypePointer(storage_class, struct_type)};
-        const Id pointer_type = TypePointer(storage_class, data_type);
-        const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
-        Decorate(id, spv::Decoration::Binding, binding.unified++);
-        Decorate(id, spv::Decoration::DescriptorSet, 0U);
-        if (is_storage && !desc.is_written) {
-            Decorate(id, spv::Decoration::NonWritable);
-        }
-        Name(id, fmt::format("{}_{}", is_storage ? "ssbo" : "cbuf", desc.sharp_idx));
-
-        buffers.push_back({
-            .id = id,
-            .binding = binding.buffer++,
-            .data_types = data_types,
-            .pointer_type = pointer_type,
-        });
-        interfaces.push_back(id);
+        const auto buf_sharp = desc.GetSharp(info);
+        const bool is_storage = desc.IsStorage(buf_sharp, profile);
+        auto& spv_buffer = buffers.emplace_back(binding.buffer++, desc.buffer_type);
+        spv_buffer[BufferAlias::U32] = DefineBuffer(is_storage, desc.is_written, 2, desc.buffer_type, U32[1]);
+        ++binding.unified;
     }
 }
 
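The payoff of the split is visible in the new DefineBuffers above: it now only decides which bindings exist, while DefineBuffer owns the array/struct typing, decoration, and naming boilerplate for one typed view. A hypothetical follow-up (not part of this commit) could register a second view of an existing binding simply by calling the same helper again with another element type, along the lines of:

    // Hypothetical illustration only; the names mirror this commit but the call does not appear in it.
    spv_buffer[BufferAlias::F32] = DefineBuffer(is_storage, desc.is_written, 2, desc.buffer_type, F32[1]);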
@@ -227,16 +227,33 @@ public:
         bool is_storage = false;
     };
 
-    struct BufferDefinition {
+    enum class BufferAlias : u32 {
+        U8,
+        U16,
+        U32,
+        F32,
+        NumAlias,
+    };
+
+    struct BufferSpv {
         Id id;
-        Id offset;
-        Id offset_dwords;
-        u32 binding;
-        const VectorIds* data_types;
         Id pointer_type;
     };
 
+    struct BufferDefinition {
+        u32 binding;
+        BufferType buffer_type;
+        Id offset;
+        Id offset_dwords;
+        std::array<BufferSpv, u32(BufferAlias::NumAlias)> aliases;
+
+        constexpr auto& operator[](this auto&& self, BufferAlias alias) {
+            return self.aliases[u32(alias)];
+        }
+    };
+
     Bindings& binding;
+    boost::container::small_vector<Id, 16> buf_type_ids;
     boost::container::small_vector<BufferDefinition, 16> buffers;
     BufferDefinition srt_flatbuf;
     boost::container::small_vector<TextureDefinition, 8> images;

@@ -279,6 +296,9 @@ private:
     SpirvAttribute GetAttributeInfo(AmdGpu::NumberFormat fmt, Id id, u32 num_components,
                                     bool output);
 
+    BufferSpv DefineBuffer(bool is_storage, bool is_written, u32 elem_shift,
+                           BufferType buffer_type, Id data_type);
+
     Id DefineFloat32ToUfloatM5(u32 mantissa_bits, std::string_view name);
     Id DefineUfloatM5ToFloat32(u32 mantissa_bits, std::string_view name);
 };
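The accessor added to BufferDefinition, constexpr auto& operator[](this auto&& self, BufferAlias alias), uses a C++23 explicit object parameter ("deducing this"): one overload serves both const and non-const objects and returns a reference whose constness follows the object it was called on. A minimal standalone sketch of the same idiom, illustrative only and assuming a C++23 compiler:

    #include <array>
    #include <cstddef>

    struct Table {
        std::array<int, 4> slots{};

        // One explicit-object overload replaces the usual const / non-const operator[] pair.
        constexpr auto& operator[](this auto&& self, std::size_t i) {
            return self.slots[i];
        }
    };

    int main() {
        Table t{};
        t[2] = 7;                 // self deduced as Table&, returns int&
        const Table& c = t;
        return c[2] == 7 ? 0 : 1; // self deduced as const Table&, returns const int&
    }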
@@ -37,12 +37,20 @@ enum class TextureType : u32 {
 };
 constexpr u32 NUM_TEXTURE_TYPES = 7;
 
+enum class BufferType : u32 {
+    Guest,
+    ReadConstUbo,
+    GdsBuffer,
+    SharedMemory,
+};
+
 struct Info;
 
 struct BufferResource {
     u32 sharp_idx;
     IR::Type used_types;
     AmdGpu::Buffer inline_cbuf;
+    BufferType buffer_type;
     bool is_gds_buffer{};
     bool is_instance_data{};
     u8 instance_attrib{};