Implement recursive shared/scoped locks
parent 01a0e00dbb
commit a6c8dc2252
@@ -668,6 +668,8 @@ set(COMMON src/common/logging/backend.cpp
     src/common/polyfill_thread.h
     src/common/rdtsc.cpp
     src/common/rdtsc.h
+    src/common/recursive_lock.cpp
+    src/common/recursive_lock.h
     src/common/sha1.h
     src/common/signal_context.h
     src/common/signal_context.cpp
src/common/recursive_lock.cpp (new file, 37 lines)
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <unordered_map>
#include "common/assert.h"
#include "common/recursive_lock.h"

namespace Common::Detail {

struct RecursiveLockState {
    RecursiveLockType type;
    int count;
};

thread_local std::unordered_map<void*, RecursiveLockState> g_recursive_locks;

bool IncrementRecursiveLock(void* mutex, RecursiveLockType type) {
    auto& state = g_recursive_locks[mutex];
    if (state.count == 0) {
        ASSERT(state.type == RecursiveLockType::None);
        state.type = type;
    }
    ASSERT(state.type == type);
    return state.count++ == 0;
}

bool DecrementRecursiveLock(void* mutex, RecursiveLockType type) {
    auto& state = g_recursive_locks[mutex];
    ASSERT(state.type == type && state.count > 0);
    if (--state.count == 0) {
        g_recursive_locks.erase(mutex);
        return true;
    }
    return false;
}

} // namespace Common::Detail
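The thread_local map above reference-counts each mutex address per calling thread: IncrementRecursiveLock returns true only for the outermost acquisition on that thread, and DecrementRecursiveLock returns true only when the matching final release drops the count back to zero. A minimal sketch of that contract, assuming the header below is included; the ContractCheck function and the plain assert calls are illustrative, not part of the commit:

#include <cassert>
#include <shared_mutex>
#include "common/recursive_lock.h"

void ContractCheck() {
    std::shared_mutex m; // only the address is used as the per-thread key
    using namespace Common::Detail;
    assert(IncrementRecursiveLock(&m, RecursiveLockType::Shared));  // outermost acquire -> true
    assert(!IncrementRecursiveLock(&m, RecursiveLockType::Shared)); // nested acquire -> false
    assert(!DecrementRecursiveLock(&m, RecursiveLockType::Shared)); // inner release -> false, still held
    assert(DecrementRecursiveLock(&m, RecursiveLockType::Shared));  // final release -> true, state erased
}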
src/common/recursive_lock.h (new file, 69 lines)
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <optional>
#include <mutex>
#include <shared_mutex>

namespace Common {

namespace Detail {

enum class RecursiveLockType {
    None,
    Shared,
    Exclusive
};

bool IncrementRecursiveLock(void* mutex, RecursiveLockType type);
bool DecrementRecursiveLock(void* mutex, RecursiveLockType type);

} // namespace Detail

template <typename MutexType>
class RecursiveScopedLock {
public:
    explicit RecursiveScopedLock(MutexType& mutex) : m_mutex(mutex), m_locked(false) {
        if (Detail::IncrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Exclusive)) {
            m_locked = true;
            m_lock.emplace(m_mutex);
        }
    }

    ~RecursiveScopedLock() {
        Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Exclusive);
        if (m_locked) {
            m_lock.reset();
        }
    }
private:
    MutexType& m_mutex;
    std::optional<std::unique_lock<MutexType>> m_lock;
    bool m_locked = false;
};

template <typename MutexType>
class RecursiveSharedLock {
public:
    explicit RecursiveSharedLock(MutexType& mutex) : m_mutex(mutex), m_locked(false) {
        if (Detail::IncrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Shared)) {
            m_locked = true;
            m_lock.emplace(m_mutex);
        }
    }

    ~RecursiveSharedLock() {
        Detail::DecrementRecursiveLock(&m_mutex, Detail::RecursiveLockType::Shared);
        if (m_locked) {
            m_lock.reset();
        }
    }
private:
    MutexType& m_mutex;
    std::optional<std::shared_lock<MutexType>> m_lock;
    bool m_locked = false;
};

} // namespace Common
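Both wrappers are RAII guards over the helpers in Detail: only the outermost guard on a given thread engages the real std::unique_lock or std::shared_lock, and the destructor releases it only when the per-thread count returns to zero. A minimal usage sketch (the ranges_mutex variable and the Read* functions are hypothetical, not from the commit):

#include <shared_mutex>
#include "common/recursive_lock.h"

std::shared_mutex ranges_mutex;

void ReadNested() {
    // Re-entry on the same thread: the per-thread count is already non-zero,
    // so no second std::shared_lock is taken on ranges_mutex.
    Common::RecursiveSharedLock lock{ranges_mutex};
    // ... read state guarded by ranges_mutex ...
}

void ReadOuter() {
    // Outermost acquisition: this guard takes the actual std::shared_lock.
    Common::RecursiveSharedLock lock{ranges_mutex};
    ReadNested(); // safe, unlike nesting two plain std::shared_lock guards on one thread
}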
@@ -3,6 +3,7 @@
 
 #include "common/config.h"
 #include "common/debug.h"
+#include "common/recursive_lock.h"
 #include "core/memory.h"
 #include "shader_recompiler/runtime_info.h"
 #include "video_core/amdgpu/liverpool.h"
@@ -477,7 +478,7 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) {
     if (uses_dma) {
         // We only use fault buffer for DMA right now.
         {
-            std::shared_lock lock{dma_sync_mapped_ranges_mutex};
+            Common::RecursiveSharedLock lock(mapped_ranges_mutex);
             for (const auto& range : dma_sync_mapped_ranges) {
                 buffer_cache.SynchronizeBuffersInRange(range.lower(), range.upper() - range.lower());
             }
@@ -728,8 +729,7 @@ void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindin
 void Rasterizer::AddDmaSyncRanges(const boost::icl::interval_set<VAddr>& ranges) {
     dma_sync_ranges += ranges;
     {
-        std::scoped_lock lock{dma_sync_mapped_ranges_mutex};
-        std::shared_lock lock2(mapped_ranges_mutex);
+        std::scoped_lock lock(mapped_ranges_mutex);
         dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;
     }
 }
@@ -976,13 +976,13 @@ bool Rasterizer::IsMapped(VAddr addr, u64 size) {
     }
     const auto range = decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
 
-    std::shared_lock lock{mapped_ranges_mutex};
+    Common::RecursiveSharedLock lock{mapped_ranges_mutex};
     return boost::icl::contains(mapped_ranges, range);
 }
 
 void Rasterizer::MapMemory(VAddr addr, u64 size) {
     {
-        std::scoped_lock lock{mapped_ranges_mutex, dma_sync_mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges += decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
         dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;
     }
@@ -994,7 +994,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
     texture_cache.UnmapMemory(addr, size);
     page_manager.OnGpuUnmap(addr, size);
     {
-        std::scoped_lock lock{mapped_ranges_mutex, dma_sync_mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
         dma_sync_mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
     }
@@ -115,9 +115,7 @@ private:
     boost::icl::interval_set<VAddr> mapped_ranges;
     boost::icl::interval_set<VAddr> dma_sync_ranges;
     boost::icl::interval_set<VAddr> dma_sync_mapped_ranges;
-    // use 2 mutexes to avoid undefined behavior when using shared lock
     std::shared_mutex mapped_ranges_mutex;
-    std::shared_mutex dma_sync_mapped_ranges_mutex;
     PipelineCache pipeline_cache;
 
     boost::container::static_vector<
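For context on the comment and mutex removed above: a thread that already owns a std::shared_mutex and locks it again, in either mode, invokes undefined behavior, which is why the old code kept the DMA-sync ranges under a separate dma_sync_mapped_ranges_mutex rather than re-locking mapped_ranges_mutex. With the recursive wrappers, a single mutex suffices. A sketch of the re-entrant pattern the second mutex was working around (illustrative names only):

#include <shared_mutex>

std::shared_mutex m;

void Callee() {
    // Taking std::shared_lock{m} here while Caller still holds it on the
    // same thread would be undefined behavior for std::shared_mutex.
    // (With Common::RecursiveSharedLock used in both functions, the nesting is fine.)
    // std::shared_lock inner{m};
}

void Caller() {
    std::shared_lock outer{m};
    Callee();
}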