diff --git a/CMakeLists.txt b/CMakeLists.txt
index cdb835dea..42f15dbd8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -668,6 +668,8 @@ set(COMMON src/common/logging/backend.cpp
            src/common/polyfill_thread.h
            src/common/rdtsc.cpp
            src/common/rdtsc.h
+           src/common/recursive_lock.cpp
+           src/common/recursive_lock.h
            src/common/sha1.h
            src/common/signal_context.h
            src/common/signal_context.cpp
diff --git a/src/common/recursive_lock.cpp b/src/common/recursive_lock.cpp
new file mode 100644
index 000000000..2bc195f48
--- /dev/null
+++ b/src/common/recursive_lock.cpp
@@ -0,0 +1,37 @@
+// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <unordered_map>
+#include "common/assert.h"
+#include "common/recursive_lock.h"
+
+namespace Common::Detail {
+
+// Per-mutex recursion bookkeeping: lock kind currently held plus depth.
+struct RecursiveLockState {
+    RecursiveLockType type;
+    int count;
+};
+
+// thread_local: recursion depth is tracked per thread, keyed by mutex address,
+// so no synchronization is needed on this map itself.
+thread_local std::unordered_map<void*, RecursiveLockState> g_recursive_locks;
+
+// Returns true only on the outermost acquisition (depth 0 -> 1); the caller
+// must actually take the lock in that case. Mixing Shared/Exclusive on the
+// same mutex while already held is a programming error.
+bool IncrementRecursiveLock(void* mutex, RecursiveLockType type) {
+    auto& state = g_recursive_locks[mutex];
+    if (state.count == 0) {
+        ASSERT(state.type == RecursiveLockType::None);
+        state.type = type;
+    }
+    ASSERT(state.type == type);
+    return state.count++ == 0;
+}
+
+// Returns true only when the outermost hold is released (depth 1 -> 0);
+// the entry is erased so a later reacquisition starts from a clean state.
+bool DecrementRecursiveLock(void* mutex, RecursiveLockType type) {
+    auto& state = g_recursive_locks[mutex];
+    ASSERT(state.type == type && state.count > 0);
+    if (--state.count == 0) {
+        g_recursive_locks.erase(mutex);
+        return true;
+    }
+    return false;
+}
+
+} // namespace Common::Detail
diff --git a/src/common/recursive_lock.h b/src/common/recursive_lock.h
new file mode 100644
index 000000000..9f69dc39b
--- /dev/null
+++ b/src/common/recursive_lock.h
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <mutex>
+#include <optional>
+#include <shared_mutex>
+
+namespace Common {
+
+namespace Detail {
+
+enum class RecursiveLockType {
+    None,
+    Shared,
+    Exclusive
+};
+
+bool IncrementRecursiveLock(void* mutex, RecursiveLockType type);
+bool DecrementRecursiveLock(void* mutex, RecursiveLockType type);
+
+} // namespace Detail
+
+// RAII exclusive lock that tolerates re-entry from the same thread: only the
+// outermost instance physically locks the mutex; inner instances are no-ops.
+template <typename MutexType>
+class RecursiveScopedLock {
+public:
+    explicit RecursiveScopedLock(MutexType& mutex) : m_mutex(mutex), m_locked(false) {
+        if (Detail::IncrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Exclusive)) {
+            m_locked = true;
+            m_lock.emplace(m_mutex);
+        }
+    }
+
+    ~RecursiveScopedLock() {
+        Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Exclusive);
+        if (m_locked) {
+            m_lock.reset();
+        }
+    }
+private:
+    MutexType& m_mutex;
+    std::optional<std::unique_lock<MutexType>> m_lock;
+    bool m_locked = false;
+};
+
+// Shared (reader) counterpart of RecursiveScopedLock; same outermost-only
+// locking scheme via std::shared_lock.
+template <typename MutexType>
+class RecursiveSharedLock {
+public:
+    explicit RecursiveSharedLock(MutexType& mutex) : m_mutex(mutex), m_locked(false) {
+        if (Detail::IncrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Shared)) {
+            m_locked = true;
+            m_lock.emplace(m_mutex);
+        }
+    }
+
+    ~RecursiveSharedLock() {
+        Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Shared);
+        if (m_locked) {
+            m_lock.reset();
+        }
+    }
+private:
+    MutexType& m_mutex;
+    std::optional<std::shared_lock<MutexType>> m_lock;
+    bool m_locked = false;
+};
+
+} // namespace Common
\ No newline at end of file
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 78b88d21d..bfdd8d094 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -3,6 +3,7 @@
 
 #include "common/config.h"
 #include "common/debug.h"
+#include "common/recursive_lock.h"
 #include "core/memory.h"
 #include "shader_recompiler/runtime_info.h"
 #include "video_core/amdgpu/liverpool.h"
@@ -477,7 +478,7 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) {
     if (uses_dma) {
         // We only use fault buffer for DMA right now.
         {
-            std::shared_lock lock{dma_sync_mapped_ranges_mutex};
+            Common::RecursiveSharedLock lock(mapped_ranges_mutex);
             for (const auto& range : dma_sync_mapped_ranges) {
                 buffer_cache.SynchronizeBuffersInRange(range.lower(), range.upper() - range.lower());
             }
@@ -728,8 +729,7 @@ void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindin
 void Rasterizer::AddDmaSyncRanges(const boost::icl::interval_set<VAddr>& ranges) {
     dma_sync_ranges += ranges;
     {
-        std::scoped_lock lock{dma_sync_mapped_ranges_mutex};
-        std::shared_lock lock2(mapped_ranges_mutex);
+        std::scoped_lock lock(mapped_ranges_mutex);
         dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;
     }
 }
@@ -976,13 +976,13 @@ bool Rasterizer::IsMapped(VAddr addr, u64 size) {
     }
 
     const auto range = decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
-    std::shared_lock lock{mapped_ranges_mutex};
+    Common::RecursiveSharedLock lock{mapped_ranges_mutex};
     return boost::icl::contains(mapped_ranges, range);
 }
 
 void Rasterizer::MapMemory(VAddr addr, u64 size) {
     {
-        std::scoped_lock lock{mapped_ranges_mutex, dma_sync_mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges += decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
         dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;
     }
@@ -994,7 +994,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
     texture_cache.UnmapMemory(addr, size);
     page_manager.OnGpuUnmap(addr, size);
     {
-        std::scoped_lock lock{mapped_ranges_mutex, dma_sync_mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
         dma_sync_mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
     }
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 5d587d666..aaf81adcb 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -115,9 +115,7 @@ private:
     boost::icl::interval_set<VAddr> mapped_ranges;
     boost::icl::interval_set<VAddr> dma_sync_ranges;
     boost::icl::interval_set<VAddr> dma_sync_mapped_ranges;
-    // use 2 mutexes to avoid undefined behavior when using shared lock
     std::shared_mutex mapped_ranges_mutex;
-    std::shared_mutex dma_sync_mapped_ranges_mutex;
     PipelineCache pipeline_cache;
 
     boost::container::static_vector<