video_core: Address various UE bugs (#3559)

* vk_rasterizer: Reorder image query in fast clear elimination

Fixes missing clears when a texture is cleared using this method but never actually used for rendering, by ensuring the texture cache has at least a chance to register the cmask

* shader_recompiler: Partial support for ANCILLARY_ENA

* pixel_format: Add number conversion of BC6 srgb format

* texture_cache: Support aliases of 3D and 2D array images

Used by UE to render its post-processing LUT
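
A minimal Vulkan sketch of one way such aliasing can be expressed, assuming the 3D image was created with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT (illustrative only, not the emulator's actual texture cache code):

#include <vulkan/vulkan.h>

// Views the depth slices of a 3D image as layers of a 2D array image,
// letting a post-processing LUT be rendered slice by slice.
VkImageViewCreateInfo Make2DArrayViewOf3D(VkImage image_3d, VkFormat format,
                                          uint32_t depth_slices) {
    return VkImageViewCreateInfo{
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .image = image_3d, // created with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT
        .viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY,
        .format = format,
        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, depth_slices},
    };
}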

* pixel_format: Test BC6 srgb as unorm

Still not sure what is up with snorm/unorm; it can be useful to have both variants to compare for now

* video_core: Use attachment feedback loop layout instead of general when possible

UE games often run mipgen passes where the previous mip of the image being rendered to is bound for reading. This appears to cause corruption, so use the attachment feedback loop extension to ensure correct output (see the sketch after the next item)

* renderer_vulkan: Improve feedback loop code

* Set proper usage flag for feedback loop usage
* Add dynamic state extension and enable it for color aspect when necessary
* Check if image is bound instead of force_general for better code consistency
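
A minimal sketch of the extension usage described in the last two items, assuming VK_EXT_attachment_feedback_loop_layout and VK_EXT_attachment_feedback_loop_dynamic_state are supported (illustrative, not the actual renderer code):

#include <vulkan/vulkan.h>

// The image must be created with the feedback loop usage flag.
void AddFeedbackLoopUsage(VkImageCreateInfo& image_ci) {
    image_ci.usage |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
}

// When a bound texture is also the current render target: transition it to
// VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT (via the usual image
// barrier), then enable the loop dynamically for the color aspect.
void EnableColorFeedbackLoop(VkCommandBuffer cmdbuf) {
    // Extension entry point; must be loaded via vkGetDeviceProcAddr in real code.
    vkCmdSetAttachmentFeedbackLoopEnableEXT(cmdbuf, VK_IMAGE_ASPECT_COLOR_BIT);
}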

* shader_recompiler: Implement depth export properly

* shader_recompiler: Fix bug in output modifiers

* shader_recompiler: Fix sampling from MSAA images

This is not allowed by any graphics API, but the hardware seems to support it somehow and it can be encountered. To avoid glitched output, translate it to a texelFetch call on sample 0
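
A sketch of the coordinate handling this implies (mirroring the PatchImageSampleArgs change further down): keep an already-unnormalized coordinate, or manually unnormalize a normalized one using the queried image size, then read sample 0 directly.

#include <cstdint>

// Illustrative only: converts a sample coordinate into the integer texel
// coordinate used for the texelFetch-style image read at sample 0.
uint32_t MsaaTexelCoord(float coord, bool unnormalized, uint32_t dim) {
    return unnormalized ? static_cast<uint32_t>(coord)
                        : static_cast<uint32_t>(coord * static_cast<float>(dim));
}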

* clang format

* image: Add back missing code

* shader_recompiler: Better ancillary implementation

It is now implemented with a custom attribute that is constant-propagated depending on which parts of it are extracted. It will assert if an unknown part is used or if the attribute itself is not removed by dead code elimination
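
The bit layout the propagation assumes (taken from the FoldPackedAncillary hunk below) can be illustrated with a small decode helper — hypothetical host-side code, not part of the commit:

#include <cstdint>

struct Ancillary {
    uint32_t sample_index; // bits [8..11] of the packed ancillary VGPR
    uint32_t rt_index;     // bits [16..26], the render target array index
};

Ancillary UnpackAncillary(uint32_t packed) {
    return Ancillary{
        .sample_index = (packed >> 8) & 0xFu,
        .rt_index = (packed >> 16) & 0x7FFu,
    };
}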

* copy_shader: Ignore disabled export channels

* constant_propagation: Invalidate ancillary after successful elimination

* spirv: Fix f11/f10 conversion to f32

---------

Co-authored-by: georgemoralis <giorgosmrls@gmail.com>
Author: TheTurtle
Date: 2025-09-12 19:29:16 +03:00
Committed by: GitHub
Parent: de7652384d
Commit: 374c2194d4
30 changed files with 369 additions and 183 deletions

View File

@@ -305,19 +305,23 @@ void SetupCapabilities(const Info& info, const Profile& profile, const RuntimeIn
runtime_info.fs_info.addr_flags.persp_sample_ena) {
ctx.AddCapability(spv::Capability::SampleRateShading);
}
if (info.loads.GetAny(IR::Attribute::RenderTargetIndex)) {
ctx.AddCapability(spv::Capability::Geometry);
}
}
if (stage == LogicalStage::TessellationControl || stage == LogicalStage::TessellationEval) {
ctx.AddCapability(spv::Capability::Tessellation);
}
if (stage == LogicalStage::Vertex || stage == LogicalStage::TessellationControl ||
stage == LogicalStage::TessellationEval) {
if (info.has_layer_output) {
if (info.stores.GetAny(IR::Attribute::RenderTargetIndex)) {
ctx.AddCapability(spv::Capability::ShaderLayer);
}
if (info.has_viewport_index_output) {
if (info.stores.GetAny(IR::Attribute::ViewportIndex)) {
ctx.AddCapability(spv::Capability::ShaderViewportIndex);
}
} else if (stage == LogicalStage::Geometry && info.has_viewport_index_output) {
} else if (stage == LogicalStage::Geometry &&
info.stores.GetAny(IR::Attribute::ViewportIndex)) {
ctx.AddCapability(spv::Capability::MultiViewport);
}
if (info.uses_dma) {

View File

@@ -3,7 +3,6 @@
#include "common/assert.h"
#include "common/config.h"
#include "common/logging/log.h"
#include "shader_recompiler/backend/spirv/emit_spirv_bounds.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
@@ -14,55 +13,11 @@
#include <magic_enum/magic_enum.hpp>
namespace Shader::Backend::SPIRV {
namespace {
Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
if (IR::IsParam(attr)) {
const u32 attr_index{u32(attr) - u32(IR::Attribute::Param0)};
if (ctx.stage == Stage::Local) {
const auto component_ptr = ctx.TypePointer(spv::StorageClass::Output, ctx.F32[1]);
return ctx.OpAccessChain(component_ptr, ctx.output_attr_array, ctx.ConstU32(attr_index),
ctx.ConstU32(element));
} else {
const auto& info{ctx.output_params.at(attr_index)};
ASSERT(info.num_components > 0);
if (info.num_components == 1) {
return info.id;
} else {
return ctx.OpAccessChain(info.pointer_type, info.id, ctx.ConstU32(element));
}
}
}
if (IR::IsMrt(attr)) {
const u32 index{u32(attr) - u32(IR::Attribute::RenderTarget0)};
const auto& info{ctx.frag_outputs.at(index)};
if (info.num_components == 1) {
return info.id;
} else {
return ctx.OpAccessChain(info.pointer_type, info.id, ctx.ConstU32(element));
}
}
switch (attr) {
case IR::Attribute::Position0:
return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, ctx.ConstU32(element));
case IR::Attribute::ClipDistance:
return ctx.OpAccessChain(ctx.output_f32, ctx.clip_distances, ctx.ConstU32(element));
case IR::Attribute::CullDistance:
return ctx.OpAccessChain(ctx.output_f32, ctx.cull_distances, ctx.ConstU32(element));
case IR::Attribute::PointSize:
return ctx.output_point_size;
case IR::Attribute::RenderTargetIndex:
return ctx.output_layer;
case IR::Attribute::ViewportIndex:
return ctx.output_viewport_index;
case IR::Attribute::Depth:
return ctx.frag_depth;
default:
UNREACHABLE_MSG("Write attribute {}", attr);
}
}
using PointerType = EmitContext::PointerType;
using PointerSize = EmitContext::PointerSize;
std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr) {
static std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr) {
if (IR::IsParam(attr)) {
const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
const auto& info{ctx.output_params.at(index)};
@@ -82,15 +37,13 @@ std::pair<Id, bool> OutputAttrComponentType(EmitContext& ctx, IR::Attribute attr
return {ctx.F32[1], false};
case IR::Attribute::RenderTargetIndex:
case IR::Attribute::ViewportIndex:
return {ctx.S32[1], true};
case IR::Attribute::SampleMask:
case IR::Attribute::StencilRef:
return {ctx.U32[1], true};
default:
UNREACHABLE_MSG("Write attribute {}", attr);
}
}
} // Anonymous namespace
using PointerType = EmitContext::PointerType;
using PointerSize = EmitContext::PointerSize;
Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
const u32 index = ctx.binding.user_data + ctx.info.ud_mask.Index(reg);
@@ -212,6 +165,10 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
case IR::Attribute::IsFrontFace:
return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1[1], ctx.front_facing), ctx.u32_one_value,
ctx.u32_zero_value);
case IR::Attribute::SampleIndex:
return ctx.OpLoad(ctx.U32[1], ctx.sample_index);
case IR::Attribute::RenderTargetIndex:
return ctx.OpLoad(ctx.U32[1], ctx.output_layer);
case IR::Attribute::PrimitiveId:
return ctx.OpLoad(ctx.U32[1], ctx.primitive_id);
case IR::Attribute::InvocationId:
@@ -243,12 +200,62 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 element) {
const Id pointer{OutputAttrPointer(ctx, attr, element)};
const auto [component_type, is_integer]{OutputAttrComponentType(ctx, attr)};
if (is_integer) {
ctx.OpStore(pointer, ctx.OpBitcast(component_type, value));
} else {
ctx.OpStore(pointer, value);
const auto op_store = [&](Id pointer) {
const auto [component_type, is_integer] = OutputAttrComponentType(ctx, attr);
if (is_integer) {
ctx.OpStore(pointer, ctx.OpBitcast(component_type, value));
} else {
ctx.OpStore(pointer, value);
}
};
if (IR::IsParam(attr)) {
const u32 attr_index{u32(attr) - u32(IR::Attribute::Param0)};
if (ctx.stage == Stage::Local) {
const auto component_ptr = ctx.TypePointer(spv::StorageClass::Output, ctx.F32[1]);
return op_store(ctx.OpAccessChain(component_ptr, ctx.output_attr_array,
ctx.ConstU32(attr_index), ctx.ConstU32(element)));
} else {
const auto& info{ctx.output_params.at(attr_index)};
ASSERT(info.num_components > 0);
if (info.num_components == 1) {
return op_store(info.id);
} else {
return op_store(
ctx.OpAccessChain(info.pointer_type, info.id, ctx.ConstU32(element)));
}
}
}
if (IR::IsMrt(attr)) {
const u32 index{u32(attr) - u32(IR::Attribute::RenderTarget0)};
const auto& info{ctx.frag_outputs.at(index)};
if (info.num_components == 1) {
return op_store(info.id);
} else {
return op_store(ctx.OpAccessChain(info.pointer_type, info.id, ctx.ConstU32(element)));
}
}
switch (attr) {
case IR::Attribute::Position0:
return op_store(
ctx.OpAccessChain(ctx.output_f32, ctx.output_position, ctx.ConstU32(element)));
case IR::Attribute::ClipDistance:
return op_store(
ctx.OpAccessChain(ctx.output_f32, ctx.clip_distances, ctx.ConstU32(element)));
case IR::Attribute::CullDistance:
return op_store(
ctx.OpAccessChain(ctx.output_f32, ctx.cull_distances, ctx.ConstU32(element)));
case IR::Attribute::PointSize:
return op_store(ctx.output_point_size);
case IR::Attribute::RenderTargetIndex:
return op_store(ctx.output_layer);
case IR::Attribute::ViewportIndex:
return op_store(ctx.output_viewport_index);
case IR::Attribute::Depth:
return op_store(ctx.frag_depth);
case IR::Attribute::SampleMask:
return op_store(ctx.OpAccessChain(ctx.output_u32, ctx.sample_mask, ctx.u32_zero_value));
default:
UNREACHABLE_MSG("Write attribute {}", attr);
}
}

View File

@@ -28,7 +28,7 @@ void ConvertDepthMode(EmitContext& ctx) {
}
void ConvertPositionToClipSpace(EmitContext& ctx) {
ASSERT_MSG(!ctx.info.has_viewport_index_output,
ASSERT_MSG(!ctx.info.stores.GetAny(IR::Attribute::ViewportIndex),
"Multi-viewport with shader clip space conversion not yet implemented.");
const Id type{ctx.F32[1]};

View File

@@ -370,13 +370,18 @@ void EmitContext::DefineInputs() {
if (info.loads.GetAny(IR::Attribute::FragCoord)) {
frag_coord = DefineVariable(F32[4], spv::BuiltIn::FragCoord, spv::StorageClass::Input);
}
if (info.stores.Get(IR::Attribute::Depth)) {
frag_depth = DefineVariable(F32[1], spv::BuiltIn::FragDepth, spv::StorageClass::Output);
}
if (info.loads.Get(IR::Attribute::IsFrontFace)) {
front_facing =
DefineVariable(U1[1], spv::BuiltIn::FrontFacing, spv::StorageClass::Input);
}
if (info.loads.GetAny(IR::Attribute::RenderTargetIndex)) {
output_layer = DefineVariable(U32[1], spv::BuiltIn::Layer, spv::StorageClass::Input);
Decorate(output_layer, spv::Decoration::Flat);
}
if (info.loads.Get(IR::Attribute::SampleIndex)) {
sample_index = DefineVariable(U32[1], spv::BuiltIn::SampleId, spv::StorageClass::Input);
Decorate(sample_index, spv::Decoration::Flat);
}
if (info.loads.GetAny(IR::Attribute::BaryCoordSmooth)) {
if (profile.supports_amd_shader_explicit_vertex_parameter) {
bary_coord_smooth = DefineVariable(F32[2], spv::BuiltIn::BaryCoordSmoothAMD,
@@ -560,11 +565,11 @@ void EmitContext::DefineVertexBlock() {
DefineVariable(F32[1], spv::BuiltIn::PointSize, spv::StorageClass::Output);
}
if (info.stores.GetAny(IR::Attribute::RenderTargetIndex)) {
output_layer = DefineVariable(S32[1], spv::BuiltIn::Layer, spv::StorageClass::Output);
output_layer = DefineVariable(U32[1], spv::BuiltIn::Layer, spv::StorageClass::Output);
}
if (info.stores.GetAny(IR::Attribute::ViewportIndex)) {
output_viewport_index =
DefineVariable(S32[1], spv::BuiltIn::ViewportIndex, spv::StorageClass::Output);
DefineVariable(U32[1], spv::BuiltIn::ViewportIndex, spv::StorageClass::Output);
}
}
@@ -646,6 +651,13 @@ void EmitContext::DefineOutputs() {
break;
}
case LogicalStage::Fragment: {
if (info.stores.Get(IR::Attribute::Depth)) {
frag_depth = DefineVariable(F32[1], spv::BuiltIn::FragDepth, spv::StorageClass::Output);
}
if (info.stores.Get(IR::Attribute::SampleMask)) {
sample_mask = DefineVariable(TypeArray(U32[1], u32_one_value), spv::BuiltIn::SampleMask,
spv::StorageClass::Output);
}
u32 num_render_targets = 0;
for (u32 i = 0; i < IR::NumRenderTargets; i++) {
const IR::Attribute mrt{IR::Attribute::RenderTarget0 + i};
@@ -1080,36 +1092,26 @@ Id EmitContext::DefineUfloatM5ToFloat32(u32 mantissa_bits, const std::string_vie
Name(func, name);
AddLabel();
const auto raw_mantissa{
OpBitFieldUExtract(U32[1], value, ConstU32(0U), ConstU32(mantissa_bits))};
const auto mantissa{OpConvertUToF(F32[1], raw_mantissa)};
const auto exponent{OpBitcast(
S32[1], OpBitFieldSExtract(U32[1], value, ConstU32(mantissa_bits), ConstU32(5U)))};
const auto is_exp_neg_one{OpIEqual(U1[1], exponent, ConstS32(-1))};
const auto is_exp_zero{OpIEqual(U1[1], exponent, ConstS32(0))};
const auto is_zero{OpIEqual(U1[1], value, ConstU32(0u))};
const auto is_nan{
OpLogicalAnd(U1[1], is_exp_neg_one, OpINotEqual(U1[1], raw_mantissa, ConstU32(0u)))};
const auto is_inf{
OpLogicalAnd(U1[1], is_exp_neg_one, OpIEqual(U1[1], raw_mantissa, ConstU32(0u)))};
const auto is_denorm{
OpLogicalAnd(U1[1], is_exp_zero, OpINotEqual(U1[1], raw_mantissa, ConstU32(0u)))};
const auto denorm{OpFMul(F32[1], mantissa, ConstF32(1.f / (1 << 20)))};
const auto norm{OpLdexp(
F32[1],
OpFAdd(F32[1],
OpFMul(F32[1], mantissa, ConstF32(1.f / static_cast<float>(1 << mantissa_bits))),
ConstF32(1.f)),
exponent)};
const auto result{OpSelect(F32[1], is_zero, ConstF32(0.f),
OpSelect(F32[1], is_nan, ConstF32(NAN),
OpSelect(F32[1], is_inf, ConstF32(INFINITY),
OpSelect(F32[1], is_denorm, denorm, norm))))};
const Id exponent{OpBitFieldUExtract(U32[1], value, ConstU32(mantissa_bits), ConstU32(5U))};
const Id mantissa{OpBitFieldUExtract(U32[1], value, ConstU32(0U), ConstU32(mantissa_bits))};
const Id mantissa_f{OpConvertUToF(F32[1], mantissa)};
const Id a{OpSelect(F32[1], OpINotEqual(U1[1], mantissa, u32_zero_value),
OpFMul(F32[1], ConstF32(1.f / (1 << (14 + mantissa_bits))), mantissa_f),
f32_zero_value)};
const Id b{OpBitcast(F32[1], OpBitwiseOr(U32[1], mantissa, ConstU32(0x7f800000U)))};
const Id exponent_c{OpISub(U32[1], exponent, ConstU32(15U))};
const Id scale_a{
OpFDiv(F32[1], ConstF32(1.f),
OpConvertUToF(F32[1], OpShiftLeftLogical(U32[1], u32_one_value,
OpSNegate(U32[1], exponent_c))))};
const Id scale_b{OpConvertUToF(F32[1], OpShiftLeftLogical(U32[1], u32_one_value, exponent_c))};
const Id scale{
OpSelect(F32[1], OpSLessThan(U1[1], exponent_c, u32_zero_value), scale_a, scale_b)};
const Id c{OpFMul(F32[1], scale,
OpFAdd(F32[1], ConstF32(1.f),
OpFDiv(F32[1], mantissa_f, ConstF32(f32(1 << mantissa_bits)))))};
const Id result{OpSelect(F32[1], OpIEqual(U1[1], exponent, u32_zero_value), a,
OpSelect(F32[1], OpIEqual(U1[1], exponent, ConstU32(31U)), b, c))};
OpReturnValue(result);
OpFunctionEnd();
return func;
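
For reference, a scalar C++ version of the same UF11/UF10 decode (5-bit exponent with bias 15; 6 or 5 mantissa bits) — a sketch for checking the SPIR-V helper above, not code from the commit:

#include <cmath>
#include <cstdint>

float DecodeUfloatM5(uint32_t value, uint32_t mantissa_bits) {
    const uint32_t exponent = (value >> mantissa_bits) & 0x1Fu;
    const uint32_t mantissa = value & ((1u << mantissa_bits) - 1u);
    if (exponent == 0) {
        // Zero or denormal: mantissa * 2^(-14 - mantissa_bits).
        return std::ldexp(static_cast<float>(mantissa),
                          -14 - static_cast<int>(mantissa_bits));
    }
    if (exponent == 31) {
        // All-ones exponent: infinity when the mantissa is zero, NaN otherwise.
        return mantissa == 0 ? INFINITY : NAN;
    }
    // Normal: (1 + mantissa / 2^mantissa_bits) * 2^(exponent - 15).
    return std::ldexp(1.f + static_cast<float>(mantissa) /
                                static_cast<float>(1u << mantissa_bits),
                      static_cast<int>(exponent) - 15);
}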

View File

@@ -256,6 +256,8 @@ public:
Id frag_coord{};
Id front_facing{};
Id frag_depth{};
Id sample_mask{};
Id sample_index{};
Id clip_distances{};
Id cull_distances{};

View File

@@ -49,6 +49,9 @@ CopyShaderData ParseCopyShader(std::span<const u32> code) {
const auto& exp = inst.control.exp;
const IR::Attribute semantic = static_cast<IR::Attribute>(exp.target);
for (int i = 0; i < inst.src_count; ++i) {
if ((exp.en & (1 << i)) == 0) {
continue;
}
const auto ofs = offsets[inst.src[i].code];
if (ofs != -1) {
data.attr_map[ofs] = {semantic, i};

View File

@@ -22,7 +22,7 @@ static AmdGpu::NumberFormat NumberFormatCompressed(
case AmdGpu::Liverpool::ShaderExportFormat::ABGR_SINT16:
return AmdGpu::NumberFormat::Sint;
default:
UNREACHABLE_MSG("Unimplemented compressed MRT export format {}",
UNREACHABLE_MSG("Unimplemented compressed export format {}",
static_cast<u32>(export_format));
}
}
@@ -42,7 +42,7 @@ static u32 MaskFromExportFormat(u8 mask, AmdGpu::Liverpool::ShaderExportFormat e
// All components
return mask;
default:
UNREACHABLE_MSG("Unimplemented uncompressed MRT export format {}",
UNREACHABLE_MSG("Unimplemented uncompressed export format {}",
static_cast<u32>(export_format));
}
}
@@ -118,25 +118,68 @@ void Translator::ExportRenderTarget(const GcnInst& inst) {
}
}
void Translator::ExportDepth(const GcnInst& inst) {
const auto& exp = inst.control.exp;
if (exp.en == 0) {
// No export
return;
}
std::array<IR::F32, 4> components{};
if (exp.compr) {
// Components are float16 packed into a VGPR
const auto num_format = NumberFormatCompressed(runtime_info.fs_info.z_export_format);
// Export R, G
if (exp.en & 1) {
const IR::Value unpacked_value =
ir.Unpack2x16(num_format, ir.GetVectorReg(IR::VectorReg(inst.src[0].code)));
components[0] = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
components[1] = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
}
// Export B, A
if ((exp.en >> 2) & 1) {
const IR::Value unpacked_value =
ir.Unpack2x16(num_format, ir.GetVectorReg(IR::VectorReg(inst.src[1].code)));
components[2] = IR::F32{ir.CompositeExtract(unpacked_value, 0)};
// components[3] = IR::F32{ir.CompositeExtract(unpacked_value, 1)};
}
} else {
// Components are float32 values in separate VGPRs
u32 mask = MaskFromExportFormat(exp.en, runtime_info.fs_info.z_export_format);
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;
}
components[i] = ir.GetVectorReg<IR::F32>(IR::VectorReg(inst.src[i].code));
}
}
static constexpr std::array MrtzBuiltins = {IR::Attribute::Depth, IR::Attribute::StencilRef,
IR::Attribute::SampleMask, IR::Attribute::Null};
for (u32 i = 0; i < 4; ++i) {
if (components[i].IsEmpty()) {
continue;
}
ir.SetAttribute(MrtzBuiltins[i], components[i]);
}
}
void Translator::EmitExport(const GcnInst& inst) {
if (info.stage == Stage::Fragment && inst.control.exp.vm) {
ir.Discard(ir.LogicalNot(ir.GetExec()));
}
const auto& exp = inst.control.exp;
const IR::Attribute attrib{exp.target};
const IR::Attribute attrib{inst.control.exp.target};
if (IR::IsMrt(attrib)) {
return ExportRenderTarget(inst);
}
if (attrib == IR::Attribute::Depth && exp.en != 0 && exp.en != 1) {
LOG_WARNING(Render_Vulkan, "Unsupported depth export");
return;
if (attrib == IR::Attribute::Depth) {
return ExportDepth(inst);
}
ASSERT_MSG(!exp.compr, "Compressed exports only supported for render targets");
ASSERT_MSG(!inst.control.exp.compr, "Compressed exports only supported for render targets");
u32 mask = exp.en;
u32 mask = inst.control.exp.en;
for (u32 i = 0; i < 4; i++, mask >>= 1) {
if ((mask & 1) == 0) {
continue;

View File

@@ -171,6 +171,13 @@ void Translator::EmitPrologue(IR::Block* first_block) {
ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
}
}
if (runtime_info.fs_info.addr_flags.ancillary_ena) {
if (runtime_info.fs_info.en_flags.ancillary_ena) {
ir.SetVectorReg(dst_vreg++, ir.GetAttributeU32(IR::Attribute::PackedAncillary));
} else {
ir.SetVectorReg(dst_vreg++, ir.Imm32(0));
}
}
break;
case LogicalStage::TessellationControl: {
ir.SetVectorReg(IR::VectorReg::V0, ir.GetAttributeU32(IR::Attribute::PrimitiveId));
@@ -460,7 +467,7 @@ void Translator::SetDst(const InstOperand& operand, const IR::U32F32& value) {
result = ir.FPMul(result, ir.Imm32(operand.output_modifier.multiplier));
}
if (operand.output_modifier.clamp) {
result = ir.FPSaturate(value);
result = ir.FPSaturate(result);
}
}
@@ -490,7 +497,7 @@ void Translator::SetDst64(const InstOperand& operand, const IR::U64F64& value_ra
ir.FPMul(value_untyped, ir.Imm64(f64(operand.output_modifier.multiplier)));
}
if (operand.output_modifier.clamp) {
value_untyped = ir.FPSaturate(value_raw);
value_untyped = ir.FPSaturate(value_untyped);
}
}
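
The fix above applies the output modifiers in the intended order: multiply first, then clamp the multiplied result. A scalar sketch of the semantics (illustrative only):

#include <algorithm>

float ApplyOutputModifiers(float value, float multiplier, bool clamp) {
    float result = value * multiplier;
    if (clamp) {
        result = std::clamp(result, 0.f, 1.f); // saturate after multiplying
    }
    return result;
}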

View File

@@ -319,6 +319,7 @@ private:
const IR::F32& x_res, const IR::F32& y_res, const IR::F32& z_res);
void ExportRenderTarget(const GcnInst& inst);
void ExportDepth(const GcnInst& inst);
void LogMissingOpcode(const GcnInst& inst);
IR::VectorReg GetScratchVgpr(u32 offset);

View File

@@ -210,8 +210,6 @@ struct Info {
bool has_bitwise_xor{};
bool has_image_gather{};
bool has_image_query{};
bool has_layer_output{};
bool has_viewport_index_output{};
bool uses_buffer_atomic_float_min_max{};
bool uses_image_atomic_float_min_max{};
bool uses_lane_id{};

View File

@@ -160,6 +160,12 @@ std::string NameOf(Attribute attribute) {
return "TessFactorsBufferBase";
case Attribute::PointSize:
return "PointSize";
case Attribute::StencilRef:
return "StencilRef";
case Attribute::SampleMask:
return "SampleMask";
case Attribute::PackedAncillary:
return "PackedAncillary";
default:
break;
}

View File

@@ -88,6 +88,9 @@ enum class Attribute : u64 {
OffChipLdsBase = 91,
TessFactorsBufferBase = 92,
PointSize = 93,
StencilRef = 94,
SampleMask = 95,
PackedAncillary = 96,
Max,
};

View File

@@ -257,12 +257,50 @@ void FoldCmpClass(IR::Block& block, IR::Inst& inst) {
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
const IR::F32 value = IR::F32{inst.Arg(0)};
inst.ReplaceUsesWithAndRemove(
ir.LogicalNot(ir.LogicalOr(ir.FPIsInf(value), ir.FPIsInf(value))));
ir.LogicalNot(ir.LogicalOr(ir.FPIsNan(value), ir.FPIsInf(value))));
} else {
UNREACHABLE();
}
}
bool FoldPackedAncillary(IR::Block& block, IR::Inst& inst) {
if (inst.Arg(0).IsImmediate() || !inst.Arg(1).IsImmediate() || !inst.Arg(2).IsImmediate()) {
return false;
}
IR::Inst* value = inst.Arg(0).InstRecursive();
if (value->GetOpcode() != IR::Opcode::GetAttributeU32 ||
value->Arg(0).Attribute() != IR::Attribute::PackedAncillary) {
return false;
}
const u32 offset = inst.Arg(1).U32();
const u32 bits = inst.Arg(2).U32();
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
if (offset >= 8 && offset + bits <= 12) {
const auto sample_index = ir.GetAttributeU32(IR::Attribute::SampleIndex);
if (offset == 8 && bits == 4) {
inst.ReplaceUsesWithAndRemove(sample_index);
} else {
inst.ReplaceUsesWithAndRemove(
ir.BitFieldExtract(sample_index, ir.Imm32(offset - 8), ir.Imm32(bits)));
}
} else if (offset >= 16 && offset + bits <= 27) {
const auto mrt_index = ir.GetAttributeU32(IR::Attribute::RenderTargetIndex);
if (offset == 16 && bits == 11) {
inst.ReplaceUsesWithAndRemove(mrt_index);
} else {
inst.ReplaceUsesWithAndRemove(
ir.BitFieldExtract(mrt_index, ir.Imm32(offset - 16), ir.Imm32(bits)));
}
} else {
UNREACHABLE_MSG("Unhandled bitfield extract from ancillary VGPR offset={}, bits={}", offset,
bits);
}
value->ReplaceUsesWithAndRemove(ir.Imm32(0U));
return true;
}
void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::IAdd32:
@@ -475,6 +513,9 @@ void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
FoldWhenAllImmediates(inst, [](u64 a) { return static_cast<u32>(std::popcount(a)); });
return;
case IR::Opcode::BitFieldUExtract:
if (FoldPackedAncillary(block, inst)) {
return;
}
FoldWhenAllImmediates(inst, [](u32 base, u32 shift, u32 count) {
if (static_cast<size_t>(shift) + static_cast<size_t>(count) > 32) {
UNREACHABLE_MSG("Undefined result in {}({}, {}, {})", IR::Opcode::BitFieldUExtract,

View File

@@ -934,14 +934,25 @@ void PatchImageSampleArgs(IR::Block& block, IR::Inst& inst, Info& info,
}
}();
const auto unnormalized = sampler.force_unnormalized || inst_info.is_unnormalized;
// Query dimensions of image if needed for normalization.
// We can't use the image sharp because it could be bound to a different image later.
const bool is_msaa = view_type == AmdGpu::ImageType::Color2DMsaa ||
view_type == AmdGpu::ImageType::Color2DMsaaArray;
const bool unnormalized = sampler.force_unnormalized || inst_info.is_unnormalized;
const bool needs_dimensions = (!is_msaa && unnormalized) || (is_msaa && !unnormalized);
const auto dimensions =
unnormalized ? ir.ImageQueryDimension(handle, ir.Imm32(0u), ir.Imm1(false), inst_info)
: IR::Value{};
needs_dimensions ? ir.ImageQueryDimension(handle, ir.Imm32(0u), ir.Imm1(false), inst_info)
: IR::Value{};
const auto get_coord = [&](u32 coord_idx, u32 dim_idx) -> IR::Value {
const auto coord = get_addr_reg(coord_idx);
if (is_msaa) {
// For MSAA images preserve the unnormalized coord or manually unnormalize it
if (unnormalized) {
return ir.ConvertFToU(32, coord);
} else {
const auto dim =
ir.ConvertUToF(32, 32, IR::U32{ir.CompositeExtract(dimensions, dim_idx)});
return ir.ConvertFToU(32, ir.FPMul(coord, dim));
}
}
if (unnormalized) {
// Normalize the coordinate for sampling, dividing by its corresponding dimension.
const auto dim =
@@ -958,12 +969,10 @@ void PatchImageSampleArgs(IR::Block& block, IR::Inst& inst, Info& info,
addr_reg = addr_reg + 1;
return get_coord(addr_reg - 1, 0);
case AmdGpu::ImageType::Color1DArray: // x, slice
[[fallthrough]];
case AmdGpu::ImageType::Color2D: // x, y
case AmdGpu::ImageType::Color2D: // x, y
case AmdGpu::ImageType::Color2DMsaa: // x, y
addr_reg = addr_reg + 2;
return ir.CompositeConstruct(get_coord(addr_reg - 2, 0), get_coord(addr_reg - 1, 1));
case AmdGpu::ImageType::Color2DMsaa: // x, y, frag
[[fallthrough]];
case AmdGpu::ImageType::Color2DArray: // x, y, slice
addr_reg = addr_reg + 3;
// Note we can use FixCubeCoords with fallthrough cases since it checks for image type.
@@ -986,6 +995,9 @@ void PatchImageSampleArgs(IR::Block& block, IR::Inst& inst, Info& info,
const IR::F32 lod_clamp = inst_info.has_lod_clamp ? get_addr_reg(addr_reg++) : IR::F32{};
auto texel = [&] -> IR::Value {
if (is_msaa) {
return ir.ImageRead(handle, coords, ir.Imm32(0U), ir.Imm32(0U), inst_info);
}
if (inst_info.is_gather) {
if (inst_info.is_depth) {
return ir.ImageGatherDref(handle, coords, offset, dref, inst_info);

View File

@@ -160,13 +160,6 @@ void CollectShaderInfoPass(IR::Program& program, const Profile& profile) {
}
}
if (info.stores.GetAny(IR::Attribute::RenderTargetIndex)) {
info.has_layer_output = true;
}
if (info.stores.GetAny(IR::Attribute::ViewportIndex)) {
info.has_viewport_index_output = true;
}
// In case Flatbuf has not already been bound by IR and is needed
// to query buffer sizes, bind it now.
if (!profile.supports_robust_buffer_access && !info.uses_dma) {

View File

@@ -22,7 +22,7 @@ inline Value ApplySwizzle(IREmitter& ir, const Value& vector, const AmdGpu::Comp
}
/// Converts gamma corrected value to linear space
inline F32 ApplyGammaToLinear(IREmitter& ir, F32& c) {
inline F32 ApplyGammaToLinear(IREmitter& ir, const F32& c) {
const F32 a =
ir.FPPow(ir.FPMul(ir.FPAdd(c, ir.Imm32(0.055f)), ir.Imm32(1.0f / 1.055f)), ir.Imm32(2.4f));
const F32 b = ir.FPMul(c, ir.Imm32(1.0f / 12.92f));
@@ -80,6 +80,9 @@ inline F32 ApplyReadNumberConversion(IREmitter& ir, const F32& value,
const auto float_val = ir.ConvertUToF(32, 32, ir.BitCast<U32>(value));
return ir.FPDiv(float_val, ir.Imm32(static_cast<float>(std::numeric_limits<u32>::max())));
}
case AmdGpu::NumberConversion::SrgbToNorm: {
return ApplyGammaToLinear(ir, value);
}
default:
UNREACHABLE();
}

View File

@@ -205,12 +205,13 @@ struct FragmentRuntimeInfo {
u32 num_inputs;
std::array<PsInput, 32> inputs;
std::array<PsColorBuffer, MaxColorBuffers> color_buffers;
AmdGpu::Liverpool::ShaderExportFormat z_export_format;
bool dual_source_blending;
bool operator==(const FragmentRuntimeInfo& other) const noexcept {
return std::ranges::equal(color_buffers, other.color_buffers) &&
en_flags.raw == other.en_flags.raw && addr_flags.raw == other.addr_flags.raw &&
num_inputs == other.num_inputs &&
num_inputs == other.num_inputs && z_export_format == other.z_export_format &&
dual_source_blending == other.dual_source_blending &&
std::ranges::equal(inputs.begin(), inputs.begin() + num_inputs, other.inputs.begin(),
other.inputs.begin() + num_inputs);