From d89d937501fed5f2992c53f6aa01148a6a0cf287 Mon Sep 17 00:00:00 2001 From: Lander Gallastegi Date: Sun, 20 Apr 2025 18:25:17 +0200 Subject: [PATCH] clang-format --- .../spirv/emit_spirv_context_get_set.cpp | 27 ++++++++-------- .../backend/spirv/spirv_emit_context.cpp | 28 ++++++++++------ .../backend/spirv/spirv_emit_context.h | 32 ++++++++++++------- src/video_core/buffer_cache/buffer.cpp | 16 +++++----- src/video_core/buffer_cache/buffer_cache.cpp | 24 +++++++------- src/video_core/buffer_cache/buffer_cache.h | 2 +- .../renderer_vulkan/vk_rasterizer.cpp | 2 +- 7 files changed, 73 insertions(+), 58 deletions(-) diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp index a87c38fb0..6056df2e7 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp @@ -168,20 +168,19 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) { const Id base_sift = ctx.OpShiftLeftLogical(ctx.U64, base_hi, ctx.ConstU32(32u)); const Id base = ctx.OpBitwiseOr(ctx.U64, base_lo, base_sift); const Id address = ctx.OpIAdd(ctx.U64, base, ctx.OpUConvert(ctx.U64, offset)); - return ctx.EmitMemoryAccess( - ctx.U32[1], address, [&]() { - const u32 flatbuf_off_dw = inst->Flags<u32>(); - if (flatbuf_off_dw == 0) { - return ctx.u32_zero_value; - } else { - const auto& srt_flatbuf = ctx.buffers[ctx.flatbuf_index]; - ASSERT(srt_flatbuf.binding >= 0 > 0 && srt_flatbuf.buffer_type == BufferType::Flatbuf); - const auto [id, pointer_type] = srt_flatbuf[PointerType::U32]; - const Id ptr{ - ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, ctx.ConstU32(flatbuf_off_dw))}; - return ctx.OpLoad(ctx.U32[1], ptr); - } - }); + return ctx.EmitMemoryAccess(ctx.U32[1], address, [&]() { + const u32 flatbuf_off_dw = inst->Flags<u32>(); + if (flatbuf_off_dw == 0) { + return ctx.u32_zero_value; + } else { + const auto& 
srt_flatbuf = ctx.buffers[ctx.flatbuf_index]; + ASSERT(srt_flatbuf.binding >= 0 && srt_flatbuf.buffer_type == BufferType::Flatbuf); + const auto [id, pointer_type] = srt_flatbuf[PointerType::U32]; + const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, + ctx.ConstU32(flatbuf_off_dw))}; + return ctx.OpLoad(ctx.U32[1], ptr); + } + }); } Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) { diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp index cb9cb221d..3d6fa4bfa 100644 --- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp +++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp @@ -76,7 +76,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf } else { SetMemoryModel(spv::AddressingModel::Logical, spv::MemoryModel::GLSL450); } - + AddCapability(spv::Capability::Shader); DefineArithmeticTypes(); DefineInterfaces(); @@ -167,32 +167,40 @@ void EmitContext::DefineArithmeticTypes() { } if (True(info.dma_types & IR::Type::F64)) { - physical_pointer_types[PointerType::F64] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F64[1]); + physical_pointer_types[PointerType::F64] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, F64[1]); } if (True(info.dma_types & IR::Type::U64)) { - physical_pointer_types[PointerType::U64] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U64); + physical_pointer_types[PointerType::U64] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, U64); } if (True(info.dma_types & IR::Type::F32)) { - physical_pointer_types[PointerType::F32] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F32[1]); + physical_pointer_types[PointerType::F32] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, F32[1]); } if (True(info.dma_types & IR::Type::U32)) { - physical_pointer_types[PointerType::U32] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U32[1]); + 
physical_pointer_types[PointerType::U32] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, U32[1]); } if (True(info.dma_types & IR::Type::F16)) { - physical_pointer_types[PointerType::F16] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F16[1]); + physical_pointer_types[PointerType::F16] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, F16[1]); } if (True(info.dma_types & IR::Type::U16)) { - physical_pointer_types[PointerType::U16] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U16); + physical_pointer_types[PointerType::U16] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, U16); } if (True(info.dma_types & IR::Type::U8)) { - physical_pointer_types[PointerType::U8] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U8); + physical_pointer_types[PointerType::U8] = + TypePointer(spv::StorageClass::PhysicalStorageBuffer, U8); } - + if (info.dma_types != IR::Type::Void) { constexpr u64 host_access_mask = 0x1UL; constexpr u64 host_access_inv_mask = ~host_access_mask; - caching_pagebits_value = Constant(U64, static_cast<u64>(VideoCore::BufferCache::CACHING_PAGEBITS)); + caching_pagebits_value = + Constant(U64, static_cast<u64>(VideoCore::BufferCache::CACHING_PAGEBITS)); caching_pagemask_value = Constant(U64, VideoCore::BufferCache::CACHING_PAGESIZE - 1); host_access_mask_value = Constant(U64, host_access_mask); host_access_inv_mask_value = Constant(U64, host_access_inv_mask); diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h index a6496da63..627d86cb6 100644 --- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h +++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h @@ -37,7 +37,6 @@ struct VectorIds { class EmitContext final : public Sirit::Module { public: - explicit EmitContext(const Profile& profile, const RuntimeInfo& runtime_info, Info& info, Bindings& binding); ~EmitContext(); @@ -156,13 +155,20 @@ public: } PointerType 
PointerTypeFromType(Id type) { - if (type.value == U8.value) return PointerType::U8; - if (type.value == U16.value) return PointerType::U16; - if (type.value == F16[1].value) return PointerType::F16; - if (type.value == U32[1].value) return PointerType::U32; - if (type.value == F32[1].value) return PointerType::F32; - if (type.value == U64.value) return PointerType::U64; - if (type.value == F64[1].value) return PointerType::F64; + if (type.value == U8.value) + return PointerType::U8; + if (type.value == U16.value) + return PointerType::U16; + if (type.value == F16[1].value) + return PointerType::F16; + if (type.value == U32[1].value) + return PointerType::U32; + if (type.value == F32[1].value) + return PointerType::F32; + if (type.value == U64.value) + return PointerType::U64; + if (type.value == F64[1].value) + return PointerType::F64; UNREACHABLE_MSG("Unknown type for pointer"); } @@ -184,7 +190,8 @@ public: // Check if it's a host memory access const Id bda_and_host_access_mask = OpBitwiseAnd(U64, bda, host_access_mask_value); - const Id bda_host_access = OpINotEqual(U1[1], bda_and_host_access_mask, host_access_mask_value); + const Id bda_host_access = + OpINotEqual(U1[1], bda_and_host_access_mask, host_access_mask_value); OpSelectionMerge(after_host_access_label, spv::SelectionControlMask::MaskNone); OpBranchConditional(bda_host_access, host_access_label, after_host_access_label); @@ -195,8 +202,8 @@ public: const Id page_div8 = OpShiftRightLogical(U32[1], page32, u32_three_value); const Id page_mod8 = OpBitwiseAnd(U32[1], page32, u32_seven_value); const Id page_mask = OpShiftLeftLogical(U32[1], u32_one_value, page_mod8); - const Id fault_ptr = OpAccessChain(fault_pointer_type, fault_buffer_id, u32_zero_value, - page_div8); + const Id fault_ptr = + OpAccessChain(fault_pointer_type, fault_buffer_id, u32_zero_value, page_div8); const Id fault_value = OpLoad(U8, fault_ptr); const Id page_mask8 = OpUConvert(U8, page_mask); const Id fault_value_masked = 
OpBitwiseOr(U8, fault_value, page_mask8); @@ -227,7 +234,8 @@ public: // Merge AddLabel(merge_label); - const Id final_result = OpPhi(type, fallback_result, fallback_label, result, available_label); + const Id final_result = + OpPhi(type, fallback_result, fallback_label, result, available_label); return final_result; } diff --git a/src/video_core/buffer_cache/buffer.cpp b/src/video_core/buffer_cache/buffer.cpp index 665918dc4..76dddd198 100644 --- a/src/video_core/buffer_cache/buffer.cpp +++ b/src/video_core/buffer_cache/buffer.cpp @@ -71,7 +71,8 @@ UniqueBuffer::~UniqueBuffer() { void UniqueBuffer::Create(const vk::BufferCreateInfo& buffer_ci, MemoryUsage usage, VmaAllocationInfo* out_alloc_info) { const bool with_bda = bool(buffer_ci.usage & vk::BufferUsageFlagBits::eShaderDeviceAddress); - const VmaAllocationCreateFlags bda_flag = with_bda ? VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : 0; + const VmaAllocationCreateFlags bda_flag = + with_bda ? VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : 0; const VmaAllocationCreateInfo alloc_ci = { .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | bda_flag | MemoryUsageVmaFlags(usage), .usage = MemoryUsageVma(usage), @@ -145,8 +146,9 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_, u32 memory_type_index = UINT32_MAX; for (u32 i = 0; i < mem_props.memoryTypeCount; ++i) { if ((ptr_props.memoryTypeBits & (1 << i)) != 0) { - if (mem_props.memoryTypes[i].propertyFlags & (vk::MemoryPropertyFlagBits::eHostVisible | - vk::MemoryPropertyFlagBits::eHostCoherent)) { + if (mem_props.memoryTypes[i].propertyFlags & + (vk::MemoryPropertyFlagBits::eHostVisible | + vk::MemoryPropertyFlagBits::eHostCoherent)) { memory_type_index = i; // We prefer cache coherent memory types. 
if (mem_props.memoryTypes[i].propertyFlags & @@ -181,7 +183,7 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_, .allocationSize = size_bytes, .memoryTypeIndex = memory_type_index, }; - + auto buffer_result = instance->GetDevice().createBuffer(buffer_ci); ASSERT_MSG(buffer_result.result == vk::Result::eSuccess, "Failed creating imported host buffer with error {}", @@ -191,8 +193,7 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_, auto device_memory_result = instance->GetDevice().allocateMemory(alloc_ci); if (device_memory_result.result != vk::Result::eSuccess) { // May fail to import the host memory if it is backed by a file. (AMD on Linux) - LOG_WARNING(Render_Vulkan, - "Failed to import host memory at {} size {:#x}, Reason: {}", + LOG_WARNING(Render_Vulkan, "Failed to import host memory at {} size {:#x}, Reason: {}", cpu_addr, size_bytes, vk::to_string(device_memory_result.result)); instance->GetDevice().destroyBuffer(buffer); buffer = VK_NULL_HANDLE; @@ -202,8 +203,7 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_, device_memory = device_memory_result.value; auto result = instance->GetDevice().bindBufferMemory(buffer, device_memory, 0); - ASSERT_MSG(result == vk::Result::eSuccess, - "Failed binding imported host buffer with error {}", + ASSERT_MSG(result == vk::Result::eSuccess, "Failed binding imported host buffer with error {}", vk::to_string(result)); if (with_bda) { diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp index e89906e10..4cb47e36d 100644 --- a/src/video_core/buffer_cache/buffer_cache.cpp +++ b/src/video_core/buffer_cache/buffer_cache.cpp @@ -26,8 +26,10 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s staging_buffer{instance, scheduler, MemoryUsage::Upload, StagingBufferSize}, stream_buffer{instance, scheduler, MemoryUsage::Stream, UboStreamBufferSize}, gds_buffer{instance, 
scheduler, MemoryUsage::Stream, 0, AllFlags, DataShareBufferSize}, - bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, BDA_PAGETABLE_SIZE}, - fault_readback_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, FAULT_READBACK_SIZE), + bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal, + 0, AllFlags, BDA_PAGETABLE_SIZE}, + fault_readback_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, + FAULT_READBACK_SIZE), memory_tracker{&tracker} { Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer"); @@ -362,7 +364,7 @@ void BufferCache::ImportMemory(u64 start, u64 end) { const u64 range_size = (range_end - range_start) << CACHING_PAGEBITS; ImportedHostBuffer buffer(instance, scheduler, cpu_addr, range_size, vk::BufferUsageFlagBits::eShaderDeviceAddress | - vk::BufferUsageFlagBits::eStorageBuffer); + vk::BufferUsageFlagBits::eStorageBuffer); if (buffer.HasFailed()) { continue; } @@ -377,8 +379,8 @@ void BufferCache::ImportMemory(u64 start, u64 end) { // create a GPU local buffer. 
bda_addrs.push_back(bda_addr + (i << CACHING_PAGEBITS)); } - WriteDataBuffer(bda_pagetable_buffer, range_start * sizeof(vk::DeviceAddress), bda_addrs.data(), - bda_addrs.size() * sizeof(vk::DeviceAddress)); + WriteDataBuffer(bda_pagetable_buffer, range_start * sizeof(vk::DeviceAddress), + bda_addrs.data(), bda_addrs.size() * sizeof(vk::DeviceAddress)); imported_buffers.emplace_back(std::move(buffer)); // Mark the pages as covered imported_regions += range; @@ -840,7 +842,8 @@ void BufferCache::SynchronizeRange(VAddr device_addr, u32 size) { }); } -void BufferCache::InlineDataBuffer(Buffer& buffer, VAddr address, const void* value, u32 num_bytes) { +void BufferCache::InlineDataBuffer(Buffer& buffer, VAddr address, const void* value, + u32 num_bytes) { scheduler.EndRendering(); const auto cmdbuf = scheduler.CommandBuffer(); const vk::BufferMemoryBarrier2 pre_barrier = { @@ -889,12 +892,9 @@ void BufferCache::WriteDataBuffer(Buffer& buffer, VAddr address, const void* val } else { // For large one time transfers use a temporary host buffer. // RenderDoc can lag quite a bit if the stream buffer is too large. 
- Buffer temp_buffer{instance, - scheduler, - MemoryUsage::Upload, - 0, - vk::BufferUsageFlagBits::eTransferSrc, - num_bytes}; + Buffer temp_buffer{ + instance, scheduler, MemoryUsage::Upload, 0, vk::BufferUsageFlagBits::eTransferSrc, + num_bytes}; src_buffer = temp_buffer.Handle(); u8* const staging = temp_buffer.mapped_data.data(); std::memcpy(staging, value, num_bytes); diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 1c04ed249..36c15a55c 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -45,7 +45,7 @@ public: static constexpr u64 CACHING_NUMPAGES = u64{1} << (40 - CACHING_PAGEBITS); static constexpr u64 BDA_PAGETABLE_SIZE = CACHING_NUMPAGES * sizeof(vk::DeviceAddress); - static constexpr u64 FAULT_READBACK_SIZE = CACHING_NUMPAGES / 8; // Bit per page + static constexpr u64 FAULT_READBACK_SIZE = CACHING_NUMPAGES / 8; // Bit per page struct Traits { using Entry = BufferId; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index ea05fa2c1..d1449fa48 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -459,7 +459,7 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) { stage->PushUd(binding, push_data); BindBuffers(*stage, binding, push_data); BindTextures(*stage, binding); - + dma_enabled |= stage->dma_types != Shader::IR::Type::Void; }