shader_recompiler: Improve shader exports accuracy (part 1) (#3447)

* video_core: support for RT layer outputs

- support for RT layer outputs
- refactor for handling of export attributes
- move output->attribute mapping to a separate header

* export: Rework render target exports

- Centralize all code related to MRT exports into a single function to make it easier to follow
- Apply swizzle to output RGBA colors instead of the render target channel.
  This fixes swizzles on formats with < 4 channels

For example, with render target format R8_UNORM and COMP_SWAP ALT_REV, the previous code would output

frag_color.a = color.r;

instead of

frag_color.r = color.a;

which would result in incorrect output in some cases

* vk_pipeline_cache: Apply swizzle to write masks

---------

Co-authored-by: polyproxy <47796739+polybiusproxy@users.noreply.github.com>
This commit is contained in:
TheTurtle
2025-08-24 00:39:59 +03:00
committed by GitHub
parent d42f4fcc4f
commit 6dd2b3090c
17 changed files with 289 additions and 275 deletions

View File

@@ -272,6 +272,9 @@ void SetupCapabilities(const Info& info, const Profile& profile, const RuntimeIn
if (info.has_image_query) {
ctx.AddCapability(spv::Capability::ImageQuery);
}
if (info.has_layer_output) {
ctx.AddCapability(spv::Capability::ShaderLayer);
}
if ((info.uses_image_atomic_float_min_max && profile.supports_image_fp32_atomic_min_max) ||
(info.uses_buffer_atomic_float_min_max && profile.supports_buffer_fp32_atomic_min_max)) {
ctx.AddExtension("SPV_EXT_shader_atomic_float_min_max");

View File

@@ -16,39 +16,6 @@
namespace Shader::Backend::SPIRV {
namespace {
// Returns a pointer to the builtin output element that the given vertex
// output slot was configured to carry. Only clip/cull distances are valid
// here; anything else is a recompiler bug.
Id VsOutputAttrPointer(EmitContext& ctx, VsOutput output) {
    // ClipDist0..7 and CullDist0..7 occupy contiguous enum ranges, so the
    // array element is simply the offset from the first member of the range.
    if (output >= VsOutput::ClipDist0 && output <= VsOutput::ClipDist7) {
        ASSERT_MSG(Sirit::ValidId(ctx.clip_distances), "Clip distance used but not defined");
        const Id clip_num{ctx.ConstU32(u32(output) - u32(VsOutput::ClipDist0))};
        return ctx.OpAccessChain(ctx.output_f32, ctx.clip_distances, clip_num);
    }
    if (output >= VsOutput::CullDist0 && output <= VsOutput::CullDist7) {
        ASSERT_MSG(Sirit::ValidId(ctx.cull_distances), "Cull distance used but not defined");
        const Id cull_num{ctx.ConstU32(u32(output) - u32(VsOutput::CullDist0))};
        return ctx.OpAccessChain(ctx.output_f32, ctx.cull_distances, cull_num);
    }
    UNREACHABLE_MSG("Vertex output {}", u32(output));
}
Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
if (IR::IsParam(attr)) {
const u32 attr_index{u32(attr) - u32(IR::Attribute::Param0)};
@@ -76,15 +43,14 @@ Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
}
}
switch (attr) {
case IR::Attribute::Position0: {
case IR::Attribute::Position0:
return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, ctx.ConstU32(element));
}
case IR::Attribute::Position1:
case IR::Attribute::Position2:
case IR::Attribute::Position3: {
const u32 index = u32(attr) - u32(IR::Attribute::Position1);
return VsOutputAttrPointer(ctx, ctx.runtime_info.vs_info.outputs[index][element]);
}
case IR::Attribute::ClipDistance:
return ctx.OpAccessChain(ctx.output_f32, ctx.clip_distances, ctx.ConstU32(element));
case IR::Attribute::CullDistance:
return ctx.OpAccessChain(ctx.output_f32, ctx.cull_distances, ctx.ConstU32(element));
case IR::Attribute::RenderTargetId:
return ctx.output_layer;
case IR::Attribute::Depth:
return ctx.frag_depth;
default:
@@ -105,11 +71,13 @@ std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr
}
switch (attr) {
case IR::Attribute::Position0:
case IR::Attribute::Position1:
case IR::Attribute::Position2:
case IR::Attribute::Position3:
case IR::Attribute::ClipDistance:
case IR::Attribute::CullDistance:
case IR::Attribute::Depth:
return {ctx.F32[1], false};
case IR::Attribute::RenderTargetId:
case IR::Attribute::ViewportId:
return {ctx.S32[1], true};
default:
UNREACHABLE_MSG("Write attribute {}", attr);
}
@@ -270,14 +238,10 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 element) {
if (attr == IR::Attribute::Position1) {
LOG_WARNING(Render_Vulkan, "Ignoring pos1 export");
return;
}
const Id pointer{OutputAttrPointer(ctx, attr, element)};
const auto component_type{OutputAttrComponentType(ctx, attr)};
if (component_type.second) {
ctx.OpStore(pointer, ctx.OpBitcast(component_type.first, value));
const auto [component_type, is_integer]{OutputAttrComponentType(ctx, attr)};
if (is_integer) {
ctx.OpStore(pointer, ctx.OpBitcast(component_type, value));
} else {
ctx.OpStore(pointer, value);
}

View File

@@ -539,24 +539,26 @@ void EmitContext::DefineInputs() {
}
}
void EmitContext::DefineVertexBlock() {
output_position = DefineVariable(F32[4], spv::BuiltIn::Position, spv::StorageClass::Output);
if (info.stores.GetAny(IR::Attribute::ClipDistance)) {
clip_distances = DefineVariable(TypeArray(F32[1], ConstU32(8U)), spv::BuiltIn::ClipDistance,
spv::StorageClass::Output);
}
if (info.stores.GetAny(IR::Attribute::CullDistance)) {
cull_distances = DefineVariable(TypeArray(F32[1], ConstU32(8U)), spv::BuiltIn::CullDistance,
spv::StorageClass::Output);
}
if (info.stores.GetAny(IR::Attribute::RenderTargetId)) {
output_layer = DefineVariable(S32[1], spv::BuiltIn::Layer, spv::StorageClass::Output);
}
}
void EmitContext::DefineOutputs() {
switch (l_stage) {
case LogicalStage::Vertex: {
// No point in defining builtin outputs (i.e. position) unless next stage is fragment?
// Might cause problems linking with tcs
output_position = DefineVariable(F32[4], spv::BuiltIn::Position, spv::StorageClass::Output);
const bool has_extra_pos_stores = info.stores.Get(IR::Attribute::Position1) ||
info.stores.Get(IR::Attribute::Position2) ||
info.stores.Get(IR::Attribute::Position3);
if (has_extra_pos_stores) {
const Id type{TypeArray(F32[1], ConstU32(8U))};
clip_distances =
DefineVariable(type, spv::BuiltIn::ClipDistance, spv::StorageClass::Output);
cull_distances =
DefineVariable(type, spv::BuiltIn::CullDistance, spv::StorageClass::Output);
}
if (stage == Stage::Local) {
DefineVertexBlock();
if (stage == Shader::Stage::Local) {
const u32 num_attrs = Common::AlignUp(runtime_info.ls_info.ls_stride, 16) >> 4;
if (num_attrs > 0) {
const Id type{TypeArray(F32[4], ConstU32(num_attrs))};
@@ -615,17 +617,7 @@ void EmitContext::DefineOutputs() {
break;
}
case LogicalStage::TessellationEval: {
output_position = DefineVariable(F32[4], spv::BuiltIn::Position, spv::StorageClass::Output);
const bool has_extra_pos_stores = info.stores.Get(IR::Attribute::Position1) ||
info.stores.Get(IR::Attribute::Position2) ||
info.stores.Get(IR::Attribute::Position3);
if (has_extra_pos_stores) {
const Id type{TypeArray(F32[1], ConstU32(8U))};
clip_distances =
DefineVariable(type, spv::BuiltIn::ClipDistance, spv::StorageClass::Output);
cull_distances =
DefineVariable(type, spv::BuiltIn::CullDistance, spv::StorageClass::Output);
}
DefineVertexBlock();
for (u32 i = 0; i < IR::NumParams; i++) {
const IR::Attribute param{IR::Attribute::Param0 + i};
if (!info.stores.GetAny(param)) {
@@ -665,8 +657,7 @@ void EmitContext::DefineOutputs() {
break;
}
case LogicalStage::Geometry: {
output_position = DefineVariable(F32[4], spv::BuiltIn::Position, spv::StorageClass::Output);
DefineVertexBlock();
for (u32 attr_id = 0; attr_id < info.gs_copy_data.num_attrs; attr_id++) {
const Id id{DefineOutput(F32[4], attr_id)};
Name(id, fmt::format("out_attr{}", attr_id));

View File

@@ -245,6 +245,7 @@ public:
boost::container::small_vector<Id, 16> interfaces;
Id output_position{};
Id output_layer{};
Id primitive_id{};
Id vertex_index{};
Id instance_id{};
@@ -388,6 +389,7 @@ private:
void DefineArithmeticTypes();
void DefineInterfaces();
void DefineInputs();
void DefineVertexBlock();
void DefineOutputs();
void DefinePushDataBlock();
void DefineBuffers();

View File

@@ -2,134 +2,113 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "shader_recompiler/frontend/translate/translate.h"
#include "shader_recompiler/ir/position.h"
#include "shader_recompiler/ir/reinterpret.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Gcn {
// Resolves which output component the given logical component is routed to
// by the color buffer's component swizzle. Selectors that do not name a
// source channel (e.g. Zero/One) leave the component unchanged.
u32 SwizzleMrtComponent(const PsColorBuffer& color_buffer, u32 comp) {
    const auto [red, green, blue, alpha] = color_buffer.swizzle;
    const std::array channels = {red, green, blue, alpha};
    const u32 selector = static_cast<u32>(channels[comp]);
    constexpr u32 first_channel = static_cast<u32>(AmdGpu::CompSwizzle::Red);
    if (selector < first_channel) {
        // Not a real channel selector; keep the original component.
        return comp;
    }
    return selector - first_channel;
}
// Converts a single exported color component into the number format of the
// bound render target and stores it to the MRT output attribute.
void Translator::ExportMrtValue(IR::Attribute attribute, u32 comp, const IR::F32& value,
                                const PsColorBuffer& color_buffer) {
    auto result = ApplyWriteNumberConversion(ir, value, color_buffer.num_conversion);
    if (color_buffer.needs_unorm_fixup) {
        // FIXME: Fix-up for GPUs where float-to-unorm rounding is off from expected.
        result = ir.FPSub(result, ir.Imm32(1.f / 127500.f));
    }
    ir.SetAttribute(attribute, result, comp);
}
void Translator::ExportMrtCompressed(IR::Attribute attribute, u32 idx, const IR::U32& value) {
u32 color_buffer_idx =
static_cast<u32>(attribute) - static_cast<u32>(IR::Attribute::RenderTarget0);
if (runtime_info.fs_info.dual_source_blending && attribute == IR::Attribute::RenderTarget1) {
color_buffer_idx = 0;
}
const auto color_buffer = runtime_info.fs_info.color_buffers[color_buffer_idx];
AmdGpu::NumberFormat num_format;
switch (color_buffer.export_format) {
case AmdGpu::Liverpool::ShaderExportFormat::Zero:
// No export
return;
static AmdGpu::NumberFormat NumberFormatCompressed(
AmdGpu::Liverpool::ShaderExportFormat export_format) {
switch (export_format) {
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_FP16:
num_format = AmdGpu::NumberFormat::Float;
break;
return AmdGpu::NumberFormat::Float;
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_UNORM16:
num_format = AmdGpu::NumberFormat::Unorm;
break;
return AmdGpu::NumberFormat::Unorm;
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_SNORM16:
num_format = AmdGpu::NumberFormat::Snorm;
break;
return AmdGpu::NumberFormat::Snorm;
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_UINT16:
num_format = AmdGpu::NumberFormat::Uint;
break;
return AmdGpu::NumberFormat::Uint;
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_SINT16:
num_format = AmdGpu::NumberFormat::Sint;
break;
return AmdGpu::NumberFormat::Sint;
default:
UNREACHABLE_MSG("Unimplemented compressed MRT export format {}",
static_cast<u32>(color_buffer.export_format));
break;
static_cast<u32>(export_format));
}
const auto unpacked_value = ir.Unpack2x16(num_format, value);
const IR::F32 r = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
const IR::F32 g = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
const auto swizzled_r = SwizzleMrtComponent(color_buffer, idx * 2);
const auto swizzled_g = SwizzleMrtComponent(color_buffer, idx * 2 + 1);
ExportMrtValue(attribute, swizzled_r, r, color_buffer);
ExportMrtValue(attribute, swizzled_g, g, color_buffer);
}
void Translator::ExportMrtUncompressed(IR::Attribute attribute, u32 comp, const IR::F32& value) {
u32 color_buffer_idx =
static_cast<u32>(attribute) - static_cast<u32>(IR::Attribute::RenderTarget0);
if (runtime_info.fs_info.dual_source_blending && attribute == IR::Attribute::RenderTarget1) {
color_buffer_idx = 0;
}
const auto color_buffer = runtime_info.fs_info.color_buffers[color_buffer_idx];
const auto swizzled_comp = SwizzleMrtComponent(color_buffer, comp);
switch (color_buffer.export_format) {
case AmdGpu::Liverpool::ShaderExportFormat::Zero:
// No export
return;
static u32 MaskFromExportFormat(u8 mask, AmdGpu::Liverpool::ShaderExportFormat export_format) {
switch (export_format) {
case AmdGpu::Liverpool::ShaderExportFormat::R_32:
// Red only
if (swizzled_comp != 0) {
return;
}
break;
return mask & 1;
case AmdGpu::Liverpool::ShaderExportFormat::GR_32:
// Red and Green only
if (swizzled_comp != 0 && swizzled_comp != 1) {
return;
}
break;
return mask & 3;
case AmdGpu::Liverpool::ShaderExportFormat::AR_32:
// Red and Alpha only
if (swizzled_comp != 0 && swizzled_comp != 3) {
return;
}
break;
return mask & 9;
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_32:
// All components
break;
return mask;
default:
UNREACHABLE_MSG("Unimplemented uncompressed MRT export format {}",
static_cast<u32>(color_buffer.export_format));
break;
static_cast<u32>(export_format));
}
ExportMrtValue(attribute, swizzled_comp, value, color_buffer);
}
void Translator::ExportCompressed(IR::Attribute attribute, u32 idx, const IR::U32& value) {
if (IsMrt(attribute)) {
ExportMrtCompressed(attribute, idx, value);
return;
}
const IR::Value unpacked_value = ir.Unpack2x16(AmdGpu::NumberFormat::Float, value);
const IR::F32 r = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
const IR::F32 g = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
ir.SetAttribute(attribute, r, idx * 2);
ir.SetAttribute(attribute, g, idx * 2 + 1);
}
void Translator::ExportRenderTarget(const GcnInst& inst) {
const auto& exp = inst.control.exp;
const IR::Attribute mrt{exp.target};
info.mrt_mask |= 1u << static_cast<u8>(mrt);
void Translator::ExportUncompressed(IR::Attribute attribute, u32 comp, const IR::F32& value) {
if (IsMrt(attribute)) {
ExportMrtUncompressed(attribute, comp, value);
// Dual source blending uses MRT1 for exporting src1
u32 color_buffer_idx = static_cast<u32>(mrt) - static_cast<u32>(IR::Attribute::RenderTarget0);
if (runtime_info.fs_info.dual_source_blending && mrt == IR::Attribute::RenderTarget1) {
color_buffer_idx = 0;
}
const auto color_buffer = runtime_info.fs_info.color_buffers[color_buffer_idx];
if (color_buffer.export_format == AmdGpu::Liverpool::ShaderExportFormat::Zero || exp.en == 0) {
// No export
return;
}
ir.SetAttribute(attribute, value, comp);
std::array<IR::F32, 4> components{};
if (exp.compr) {
// Components are float16 packed into a VGPR
const auto num_format = NumberFormatCompressed(color_buffer.export_format);
// Export R, G
if (exp.en & 1) {
const IR::Value unpacked_value =
ir.Unpack2x16(num_format, ir.GetVectorReg(IR::VectorReg(inst.src[0].code)));
components[0] = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
components[1] = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
}
// Export B, A
if ((exp.en >> 2) & 1) {
const IR::Value unpacked_value =
ir.Unpack2x16(num_format, ir.GetVectorReg(IR::VectorReg(inst.src[1].code)));
components[2] = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
components[3] = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
}
} else {
// Components are float32 into separate VGPRS
u32 mask = MaskFromExportFormat(exp.en, color_buffer.export_format);
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;
}
components[i] = ir.GetVectorReg<IR::F32>(IR::VectorReg(inst.src[i].code));
}
}
// Swizzle components and export
for (u32 i = 0; i < 4; ++i) {
const u32 comp_swizzle = static_cast<u32>(color_buffer.swizzle.array[i]);
constexpr u32 min_swizzle = static_cast<u32>(AmdGpu::CompSwizzle::Red);
const auto swizzled_comp =
components[comp_swizzle >= min_swizzle ? comp_swizzle - min_swizzle : i];
if (swizzled_comp.IsEmpty()) {
continue;
}
auto converted = ApplyWriteNumberConversion(ir, swizzled_comp, color_buffer.num_conversion);
if (color_buffer.needs_unorm_fixup) {
// FIXME: Fix-up for GPUs where float-to-unorm rounding is off from expected.
converted = ir.FPSub(converted, ir.Imm32(1.f / 127500.f));
}
ir.SetAttribute(mrt, converted, i);
}
}
void Translator::EmitExport(const GcnInst& inst) {
@@ -139,40 +118,27 @@ void Translator::EmitExport(const GcnInst& inst) {
const auto& exp = inst.control.exp;
const IR::Attribute attrib{exp.target};
if (IR::IsMrt(attrib)) {
return ExportRenderTarget(inst);
}
ASSERT_MSG(!exp.compr, "Compressed exports only supported for render targets");
if (attrib == IR::Attribute::Depth && exp.en != 0 && exp.en != 1) {
LOG_WARNING(Render_Vulkan, "Unsupported depth export");
return;
}
const std::array vsrc = {
IR::VectorReg(inst.src[0].code),
IR::VectorReg(inst.src[1].code),
IR::VectorReg(inst.src[2].code),
IR::VectorReg(inst.src[3].code),
};
// Components are float16 packed into a VGPR
if (exp.compr) {
// Export R, G
if (exp.en & 1) {
ExportCompressed(attrib, 0, ir.GetVectorReg<IR::U32>(vsrc[0]));
u32 mask = exp.en;
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;
}
// Export B, A
if ((exp.en >> 2) & 1) {
ExportCompressed(attrib, 1, ir.GetVectorReg<IR::U32>(vsrc[1]));
const auto value = ir.GetVectorReg<IR::F32>(IR::VectorReg(inst.src[i].code));
if (IsPosition(attrib)) {
IR::ExportPosition(ir, runtime_info.vs_info, attrib, i, value);
} else {
ir.SetAttribute(attrib, value, i);
}
} else {
// Components are float32 into separate VGPRS
u32 mask = exp.en;
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;
}
ExportUncompressed(attrib, i, ir.GetVectorReg<IR::F32>(vsrc[i]));
}
}
if (IR::IsMrt(attrib)) {
info.mrt_mask |= 1u << u8(attrib);
}
}

View File

@@ -317,13 +317,7 @@ private:
IR::F32 SelectCubeResult(const IR::F32& x, const IR::F32& y, const IR::F32& z,
const IR::F32& x_res, const IR::F32& y_res, const IR::F32& z_res);
void ExportMrtValue(IR::Attribute attribute, u32 comp, const IR::F32& value,
const PsColorBuffer& color_buffer);
void ExportMrtCompressed(IR::Attribute attribute, u32 idx, const IR::U32& value);
void ExportMrtUncompressed(IR::Attribute attribute, u32 comp, const IR::F32& value);
void ExportCompressed(IR::Attribute attribute, u32 idx, const IR::U32& value);
void ExportUncompressed(IR::Attribute attribute, u32 comp, const IR::F32& value);
void ExportRenderTarget(const GcnInst& inst);
void LogMissingOpcode(const GcnInst& inst);
IR::VectorReg GetScratchVgpr(u32 offset);

View File

@@ -210,6 +210,7 @@ struct Info {
bool has_bitwise_xor{};
bool has_image_gather{};
bool has_image_query{};
bool has_layer_output{};
bool uses_buffer_atomic_float_min_max{};
bool uses_image_atomic_float_min_max{};
bool uses_lane_id{};

View File

@@ -4,6 +4,7 @@
#include "common/assert.h"
#include "shader_recompiler/ir/ir_emitter.h"
#include "shader_recompiler/ir/opcodes.h"
#include "shader_recompiler/ir/position.h"
#include "shader_recompiler/ir/program.h"
#include "shader_recompiler/ir/reg.h"
#include "shader_recompiler/recompiler.h"
@@ -142,11 +143,12 @@ void RingAccessElimination(const IR::Program& program, const RuntimeInfo& runtim
ASSERT(it != info.gs_copy_data.attr_map.cend());
const auto& [attr, comp] = it->second;
inst.ReplaceOpcode(IR::Opcode::SetAttribute);
inst.ClearArgs();
inst.SetArg(0, IR::Value{attr});
inst.SetArg(1, data);
inst.SetArg(2, ir.Imm32(comp));
inst.Invalidate();
if (IsPosition(attr)) {
ExportPosition(ir, runtime_info.gs_info, attr, comp, data);
} else {
ir.SetAttribute(attr, data, comp);
}
break;
}
default:

View File

@@ -160,6 +160,10 @@ void CollectShaderInfoPass(IR::Program& program, const Profile& profile) {
}
}
if (info.stores.GetAny(IR::Attribute::RenderTargetId)) {
info.has_layer_output = true;
}
// In case Flatbuf has not already been bound by IR and is needed
// to query buffer sizes, bind it now.
if (!profile.supports_robust_buffer_access && !info.uses_dma) {

View File

@@ -0,0 +1,53 @@
// SPDX-FileCopyrightText: Copyright 2025 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "shader_recompiler/ir/ir_emitter.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::IR {
/// Maps special position export to builtin attribute stores.
///
/// Position0 is the actual vertex position and is stored directly.
/// Position1-3 carry additional outputs (clip/cull distances, layer) whose
/// meaning is given by the per-stage output map in the runtime info.
///
/// @param ir        IR emitter to write the attribute stores with.
/// @param stage     Per-stage runtime info providing the `outputs` map.
/// @param attribute Position attribute being exported (Position0-3).
/// @param comp      Component index of the export (0-3).
/// @param value     Value being exported.
inline void ExportPosition(IREmitter& ir, const auto& stage, Attribute attribute, u32 comp,
                           const IR::F32& value) {
    if (attribute == Attribute::Position0) {
        ir.SetAttribute(attribute, value, comp);
        return;
    }
    // Look up which builtin this Position1-3 component was configured to carry.
    // Fix: use distinct names so the case-local indices below do not shadow
    // the position index (-Wshadow).
    const u32 pos_index = u32(attribute) - u32(Attribute::Position1);
    const auto output = stage.outputs[pos_index][comp];
    switch (output) {
    case Output::ClipDist0:
    case Output::ClipDist1:
    case Output::ClipDist2:
    case Output::ClipDist3:
    case Output::ClipDist4:
    case Output::ClipDist5:
    case Output::ClipDist6:
    case Output::ClipDist7: {
        // Clip distance element is the offset inside the contiguous enum range.
        const u32 clip_index = u32(output) - u32(Output::ClipDist0);
        ir.SetAttribute(IR::Attribute::ClipDistance, value, clip_index);
        break;
    }
    case Output::CullDist0:
    case Output::CullDist1:
    case Output::CullDist2:
    case Output::CullDist3:
    case Output::CullDist4:
    case Output::CullDist5:
    case Output::CullDist6:
    case Output::CullDist7: {
        // Cull distance element is the offset inside the contiguous enum range.
        const u32 cull_index = u32(output) - u32(Output::CullDist0);
        ir.SetAttribute(IR::Attribute::CullDistance, value, cull_index);
        break;
    }
    case Output::GsMrtIndex:
        // Render target index maps to the layer builtin output.
        ir.SetAttribute(IR::Attribute::RenderTargetId, value);
        break;
    default:
        UNREACHABLE_MSG("Unhandled output {} on attribute {}", u32(output), u32(attribute));
    }
}
} // namespace Shader::IR

View File

@@ -52,7 +52,7 @@ struct ExportRuntimeInfo {
auto operator<=>(const ExportRuntimeInfo&) const noexcept = default;
};
enum class VsOutput : u8 {
enum class Output : u8 {
None,
PointSprite,
EdgeFlag,
@@ -77,11 +77,11 @@ enum class VsOutput : u8 {
ClipDist6,
ClipDist7,
};
using VsOutputMap = std::array<VsOutput, 4>;
using OutputMap = std::array<Output, 4>;
struct VertexRuntimeInfo {
u32 num_outputs;
std::array<VsOutputMap, 3> outputs;
std::array<OutputMap, 3> outputs;
bool emulate_depth_negative_one_to_one{};
bool clip_disable{};
u32 step_rate_0;
@@ -145,6 +145,8 @@ struct HullRuntimeInfo {
static constexpr auto GsMaxOutputStreams = 4u;
using GsOutputPrimTypes = std::array<AmdGpu::GsOutputPrimitiveType, GsMaxOutputStreams>;
struct GeometryRuntimeInfo {
u32 num_outputs;
std::array<OutputMap, 3> outputs;
u32 num_invocations{};
u32 output_vertices{};
u32 in_vertex_data_size{};
@@ -179,7 +181,7 @@ struct PsColorBuffer {
u32 pad : 20;
AmdGpu::CompMapping swizzle;
auto operator<=>(const PsColorBuffer&) const noexcept = default;
bool operator==(const PsColorBuffer& other) const noexcept = default;
};
struct FragmentRuntimeInfo {
@@ -189,11 +191,11 @@ struct FragmentRuntimeInfo {
bool is_flat;
u8 default_value;
[[nodiscard]] bool IsDefault() const {
bool IsDefault() const {
return is_default && !is_flat;
}
auto operator<=>(const PsInput&) const noexcept = default;
bool operator==(const PsInput&) const noexcept = default;
};
AmdGpu::Liverpool::PsInput en_flags;
AmdGpu::Liverpool::PsInput addr_flags;