From b1018901b65b57aa74fc1c875a489fced6db6e96 Mon Sep 17 00:00:00 2001
From: georgemoralis
Date: Tue, 13 May 2025 09:29:22 +0300
Subject: [PATCH] fixed clang format

---
 src/common/recursive_lock.cpp                 |  2 +-
 src/common/recursive_lock.h                   | 14 ++++----
 .../backend/spirv/spirv_emit_context.h        |  4 +--
 src/video_core/buffer_cache/buffer_cache.cpp  | 36 +++++++++----------
 src/video_core/buffer_cache/buffer_cache.h    |  9 ++---
 src/video_core/buffer_cache/range_set.h       | 11 +++---
 6 files changed, 35 insertions(+), 41 deletions(-)

diff --git a/src/common/recursive_lock.cpp b/src/common/recursive_lock.cpp
index 2bc195f48..2471a2ee0 100644
--- a/src/common/recursive_lock.cpp
+++ b/src/common/recursive_lock.cpp
@@ -34,4 +34,4 @@ bool DecrementRecursiveLock(void* mutex, RecursiveLockType type) {
     return false;
 }
 
-} // namespace Common
+} // namespace Common::Detail
diff --git a/src/common/recursive_lock.h b/src/common/recursive_lock.h
index 9f69dc39b..5a5fc6658 100644
--- a/src/common/recursive_lock.h
+++ b/src/common/recursive_lock.h
@@ -3,19 +3,15 @@
 
 #pragma once
 
-#include <optional>
 #include <mutex>
+#include <optional>
 #include <shared_mutex>
 
 namespace Common {
 namespace Detail {
 
-enum class RecursiveLockType {
-    None,
-    Shared,
-    Exclusive
-};
+enum class RecursiveLockType { None, Shared, Exclusive };
 
 bool IncrementRecursiveLock(void* mutex, RecursiveLockType type);
 bool DecrementRecursiveLock(void* mutex, RecursiveLockType type);
 
@@ -31,13 +27,14 @@ public:
             m_lock.emplace(m_mutex);
         }
     }
-    
+
     ~RecursiveScopedLock() {
         Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Exclusive);
         if (m_locked) {
             m_lock.reset();
         }
     }
+
 private:
     MutexType& m_mutex;
     std::optional<std::unique_lock<MutexType>> m_lock;
@@ -53,13 +50,14 @@ public:
             m_lock.emplace(m_mutex);
         }
     }
-    
+
     ~RecursiveSharedLock() {
         Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Shared);
         if (m_locked) {
             m_lock.reset();
         }
     }
+
 private:
     MutexType& m_mutex;
     std::optional<std::shared_lock<MutexType>> m_lock;
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
index 3368a3cef..09d2b9cf5 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
@@ -227,13 +227,13 @@ public:
         AddLabel(available_label);
         OpSelectionMerge(after_save_masked_label, spv::SelectionControlMask::MaskNone);
         OpBranchConditional(first_time, save_masked_label, after_save_masked_label);
-        
+
         // Save unmasked BDA
         AddLabel(save_masked_label);
         const Id masked_bda = OpBitwiseOr(U64, bda, bda_first_time_mask);
         OpStore(bda_ptr, masked_bda);
         OpBranch(after_save_masked_label);
-        
+
         // Load value
         AddLabel(after_save_masked_label);
         const Id unmasked_bda = OpBitwiseAnd(U64, bda, bda_first_time_inv_mask);
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index a3d25ae0e..5d7676df8 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -35,14 +35,12 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
       gds_buffer{instance, scheduler, MemoryUsage::Stream, 0, AllFlags, DataShareBufferSize},
       bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags,
                            BDA_PAGETABLE_SIZE},
-      fault_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags,
-                   FAULT_BUFFER_SIZE),
+      fault_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, FAULT_BUFFER_SIZE),
       memory_tracker{&tracker} {
     Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");
     Vulkan::SetObjectName(instance.GetDevice(), bda_pagetable_buffer.Handle(),
                           "BDA Page Table Buffer");
-    Vulkan::SetObjectName(instance.GetDevice(), fault_buffer.Handle(),
-                          "Fault Buffer");
+    Vulkan::SetObjectName(instance.GetDevice(), fault_buffer.Handle(), "Fault Buffer");
 
     // Ensure the first slot is used for the null buffer
     const auto null_id =
@@ -75,13 +73,11 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
     auto [desc_layout_result, desc_layout] =
         instance.GetDevice().createDescriptorSetLayoutUnique(desc_layout_ci);
     ASSERT_MSG(desc_layout_result == vk::Result::eSuccess,
-               "Failed to create descriptor set layout: {}",
-               vk::to_string(desc_layout_result));
+               "Failed to create descriptor set layout: {}", vk::to_string(desc_layout_result));
     fault_process_desc_layout = std::move(desc_layout);
-    
-    const auto& module = Vulkan::Compile(
-        HostShaders::FAULT_BUFFER_PROCESS_COMP, vk::ShaderStageFlagBits::eCompute,
-        instance.GetDevice());
+
+    const auto& module = Vulkan::Compile(HostShaders::FAULT_BUFFER_PROCESS_COMP,
+                                         vk::ShaderStageFlagBits::eCompute, instance.GetDevice());
     Vulkan::SetObjectName(instance.GetDevice(), module, "Fault Buffer Parser");
 
     const vk::SpecializationMapEntry specialization_map_entry = {
@@ -108,10 +104,8 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
         .setLayoutCount = 1U,
         .pSetLayouts = &(*fault_process_desc_layout),
     };
-    auto [layout_result, layout] =
-        instance.GetDevice().createPipelineLayoutUnique(layout_info);
-    ASSERT_MSG(layout_result == vk::Result::eSuccess,
-               "Failed to create pipeline layout: {}",
+    auto [layout_result, layout] = instance.GetDevice().createPipelineLayoutUnique(layout_info);
+    ASSERT_MSG(layout_result == vk::Result::eSuccess, "Failed to create pipeline layout: {}",
                vk::to_string(layout_result));
     fault_process_pipeline_layout = std::move(layout);
 
@@ -121,11 +115,11 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
     };
     auto [pipeline_result, pipeline] =
         instance.GetDevice().createComputePipelineUnique({}, pipeline_info);
-    ASSERT_MSG(pipeline_result == vk::Result::eSuccess,
-               "Failed to create compute pipeline: {}",
+    ASSERT_MSG(pipeline_result == vk::Result::eSuccess, "Failed to create compute pipeline: {}",
                vk::to_string(pipeline_result));
     fault_process_pipeline = std::move(pipeline);
-    Vulkan::SetObjectName(instance.GetDevice(), *fault_process_pipeline, "Fault Buffer Parser Pipeline");
+    Vulkan::SetObjectName(instance.GetDevice(), *fault_process_pipeline,
+                          "Fault Buffer Parser Pipeline");
 
     instance.GetDevice().destroyShaderModule(module);
 }
@@ -145,7 +139,8 @@ void BufferCache::InvalidateMemory(VAddr device_addr, u64 size, bool unmap) {
     {
         std::scoped_lock lock(dma_sync_ranges_mutex);
         const VAddr aligned_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE);
-        const u64 aligned_size = Common::AlignUp(device_addr + size, CACHING_PAGESIZE) - aligned_addr;
+        const u64 aligned_size =
+            Common::AlignUp(device_addr + size, CACHING_PAGESIZE) - aligned_addr;
         dma_sync_ranges.Add(device_addr, size);
     }
 }
@@ -640,7 +635,8 @@ void BufferCache::ProcessFaultBuffer() {
         .pBufferMemoryBarriers = barriers.data(),
     });
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, *fault_process_pipeline);
-    cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *fault_process_pipeline_layout, 0, writes);
+    cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *fault_process_pipeline_layout, 0,
+                                writes);
     constexpr u32 num_threads = CACHING_NUMPAGES / 32; // 1 bit per page, 32 pages per workgroup
     constexpr u32 num_workgroups = Common::DivCeil(num_threads, 64u);
     cmdbuf.dispatch(num_workgroups, 1, 1);
@@ -700,7 +696,7 @@ void BufferCache::ProcessFaultBuffer() {
             }
             // Buffer size is in 32 bits
             ASSERT_MSG((range.upper() - range.lower()) <= std::numeric_limits<u32>::max(),
-                      "Buffer size is too large");
+                       "Buffer size is too large");
             // Only create a buffer is the current range doesn't fit in an existing one
             FindBuffer(start, static_cast<u32>(end - start));
         }
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 223fcecaa..ef705d412 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -148,10 +148,11 @@ public:
 private:
     template <typename Func>
     void ForEachBufferInRange(VAddr device_addr, u64 size, Func&& func) {
-        buffer_ranges.ForEachInRange(device_addr, size, [&](u64 page_start, u64 page_end, BufferId id) {
-            Buffer& buffer = slot_buffers[id];
-            func(id, buffer);
-        });
+        buffer_ranges.ForEachInRange(device_addr, size,
+                                     [&](u64 page_start, u64 page_end, BufferId id) {
+                                         Buffer& buffer = slot_buffers[id];
+                                         func(id, buffer);
+                                     });
     }
 
     void DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 size);
diff --git a/src/video_core/buffer_cache/range_set.h b/src/video_core/buffer_cache/range_set.h
index 1b91fb893..5c8e78c7c 100644
--- a/src/video_core/buffer_cache/range_set.h
+++ b/src/video_core/buffer_cache/range_set.h
@@ -3,10 +3,10 @@
 
 #pragma once
 
+#include <boost/icl/interval_map.hpp>
 #include <boost/icl/interval_set.hpp>
 #include <boost/icl/split_interval_map.hpp>
 #include <boost/pool/pool.hpp>
-#include <boost/icl/interval_map.hpp>
 #include <boost/pool/pool_alloc.hpp>
 #include <boost/pool/poolfwd.hpp>
 #include "common/types.h"
@@ -221,11 +221,10 @@ private:
 template <typename T>
 class SplitRangeMap {
 public:
-    using IntervalMap =
-        boost::icl::split_interval_map<VAddr, T, boost::icl::total_absorber, std::less,
-                                       boost::icl::inplace_identity, boost::icl::inter_section,
-                                       ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
-                                       RangeSetsAllocator>;
+    using IntervalMap = boost::icl::split_interval_map<
+        VAddr, T, boost::icl::total_absorber, std::less, boost::icl::inplace_identity,
+        boost::icl::inter_section, ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
+        RangeSetsAllocator>;
     using IntervalType = typename IntervalMap::interval_type;
 
 public: