Possible fix for NieR deadlock

Lander Gallastegi 2025-04-22 19:08:12 +02:00
parent 359856d770
commit e2f56feb8b
4 changed files with 10 additions and 9 deletions
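The fix has two parts, both visible in the hunks below: the buffer cache's single std::shared_mutex mutex, which guarded both the coverage interval sets and the slot_buffers vector, is split into a dedicated covered_regions_mutex and slot_buffers_mutex, and the rasterizer's map/unmap paths switch from std::unique_lock to std::scoped_lock. A minimal sketch of the lock-splitting pattern, with illustrative names rather than the project's real API:

#include <cstdint>
#include <mutex>
#include <set>
#include <shared_mutex>
#include <vector>

// Hypothetical cache mirroring the shape of the patch: one coarse mutex
// serialized every access to two independent structures; per-structure
// mutexes keep unrelated critical sections from contending with (or
// waiting on) each other.
class Cache {
public:
    void RegisterBuffer(int id) {
        std::scoped_lock lk{slot_buffers_mutex}; // exclusive, slot data only
        slot_buffers.push_back(id);
    }
    bool IsCovered(std::uint64_t page) const {
        std::shared_lock lk{covered_regions_mutex}; // shared read is enough
        return covered_regions.count(page) != 0;
    }
    void Cover(std::uint64_t page) {
        std::scoped_lock lk{covered_regions_mutex}; // exclusive write
        covered_regions.insert(page);
    }

private:
    mutable std::shared_mutex covered_regions_mutex;
    std::shared_mutex slot_buffers_mutex;
    std::set<std::uint64_t> covered_regions;
    std::vector<int> slot_buffers;
};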

@@ -175,7 +175,7 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
         return ctx.u32_zero_value;
     } else {
         const auto& srt_flatbuf = ctx.buffers[ctx.flatbuf_index];
-        ASSERT(srt_flatbuf.binding >= && srt_flatbuf.buffer_type == BufferType::Flatbuf);
+        ASSERT(srt_flatbuf.binding >= 0 && srt_flatbuf.buffer_type == BufferType::Flatbuf);
         const auto [id, pointer_type] = srt_flatbuf[PointerType::U32];
         const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value,
                                        ctx.ConstU32(flatbuf_off_dw))};

@@ -294,7 +294,7 @@ bool BufferCache::IsRegionRegistered(VAddr addr, size_t size) {
             ++page;
             continue;
         }
-        std::shared_lock lk{mutex};
+        std::shared_lock lk{slot_buffers_mutex};
         Buffer& buffer = slot_buffers[buffer_id];
         const VAddr buf_start_addr = buffer.CpuAddr();
         const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
@@ -331,7 +331,7 @@ BufferId BufferCache::FindBuffer(VAddr device_addr, u32 size) {
 }
 void BufferCache::QueueMemoryCoverage(VAddr device_addr, u64 size) {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{covered_regions_mutex};
     const VAddr start = device_addr;
     const VAddr end = device_addr + size;
     auto queue_range = decltype(queued_coverages)::interval_type::right_open(start, end);
@@ -339,7 +339,7 @@ void BufferCache::QueueMemoryCoverage(VAddr device_addr, u64 size) {
 }
 void BufferCache::CoverQueuedRegions() {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{covered_regions_mutex};
     if (queued_coverages.empty()) {
         return;
     }
@@ -505,7 +505,7 @@ BufferId BufferCache::CreateBuffer(VAddr device_addr, u32 wanted_size) {
     const OverlapResult overlap = ResolveOverlaps(device_addr, wanted_size);
     const u32 size = static_cast<u32>(overlap.end - overlap.begin);
     const BufferId new_buffer_id = [&] {
-        std::scoped_lock lk{mutex};
+        std::scoped_lock lk{slot_buffers_mutex};
         return slot_buffers.insert(instance, scheduler, MemoryUsage::DeviceLocal, overlap.begin,
                                    AllFlags | vk::BufferUsageFlagBits::eShaderDeviceAddress, size);
     }();
@@ -521,7 +521,7 @@ BufferId BufferCache::CreateBuffer(VAddr device_addr, u32 wanted_size) {
                     bda_addrs.size() * sizeof(vk::DeviceAddress));
     {
         // Mark the pages as covered
-        std::scoped_lock lk{mutex};
+        std::scoped_lock lk{covered_regions_mutex};
         covered_regions += boost::icl::interval_set<u64>::interval_type::right_open(
             start_page, start_page + size_pages);
     }
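For readers unfamiliar with boost::icl: the += and -= operations on interval_set used in these hunks coalesce adjacent ranges and split covered ones automatically. A standalone demonstration with made-up page numbers, not values from the patch:

#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <iostream>

int main() {
    using IntervalSet = boost::icl::interval_set<std::uint64_t>;
    IntervalSet covered;

    covered += IntervalSet::interval_type::right_open(0, 4); // covers pages [0,4)
    covered += IntervalSet::interval_type::right_open(4, 8); // coalesces to [0,8)
    covered -= IntervalSet::interval_type::right_open(2, 3); // splits into [0,2) and [3,8)

    std::cout << covered << '\n'; // prints {[0,2)[3,8)}
}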

@@ -204,7 +204,8 @@ private:
     std::array<u8, FAULT_READBACK_SIZE> fault_readback_cpu;
     boost::icl::interval_set<VAddr> queued_coverages;
     boost::icl::interval_set<u64> covered_regions;
-    std::shared_mutex mutex;
+    std::shared_mutex covered_regions_mutex;
+    std::shared_mutex slot_buffers_mutex;
     Common::SlotVector<Buffer> slot_buffers;
     RangeSet gpu_modified_ranges;
     MemoryTracker memory_tracker;
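One caveat worth noting about the split (a general observation about std::scoped_lock, not something this diff does): if a future code path ever needs both mutexes at once, taking them one at a time in different orders across threads would reintroduce a deadlock, whereas passing both to a single std::scoped_lock acquires them with deadlock avoidance. A hypothetical sketch:

#include <mutex>
#include <shared_mutex>

std::shared_mutex covered_regions_mutex;
std::shared_mutex slot_buffers_mutex;

// Hypothetical path that must mutate both structures: locking both
// mutexes in one std::scoped_lock makes the acquisition order at each
// call site irrelevant.
void TouchBoth() {
    std::scoped_lock lk{covered_regions_mutex, slot_buffers_mutex};
    // ... update covered_regions and slot_buffers together ...
}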

@@ -964,7 +964,7 @@ bool Rasterizer::IsMapped(VAddr addr, u64 size) {
 void Rasterizer::MapMemory(VAddr addr, u64 size) {
     {
-        std::unique_lock lock{mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges += decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
     }
     page_manager.OnGpuMap(addr, size);
@@ -976,7 +976,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
     texture_cache.UnmapMemory(addr, size);
     page_manager.OnGpuUnmap(addr, size);
     {
-        std::unique_lock lock{mapped_ranges_mutex};
+        std::scoped_lock lock{mapped_ranges_mutex};
         mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
     }
 }
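The rasterizer change is behavior-neutral: on a std::shared_mutex, both std::unique_lock and std::scoped_lock acquire the exclusive lock on construction. std::scoped_lock is simply the leaner fit when, as here, the lock is never released early, moved, or paired with a condition variable. A standalone sketch of the difference, not project code:

#include <mutex>
#include <shared_mutex>

std::shared_mutex mapped_ranges_mutex;

void WithUniqueLock() {
    std::unique_lock lock{mapped_ranges_mutex}; // exclusive; ownership can be
    lock.unlock();                              // released early or transferred
}

void WithScopedLock() {
    std::scoped_lock lock{mapped_ranges_mutex}; // exclusive; strictly scoped,
                                                // no unlock/move interface
}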