diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 28798e040..bc659e177 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -68,7 +68,7 @@ void MemoryManager::SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1
 }
 
 u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
-    static constexpr u64 MinSizeToClamp = 2_MB;
+    static constexpr u64 MinSizeToClamp = 3_GB;
     // Dont bother with clamping if the size is small so we dont pay a map lookup on every buffer.
     if (size < MinSizeToClamp) {
         return size;
@@ -114,20 +114,14 @@ void MemoryManager::SetPrtArea(u32 id, VAddr address, u64 size) {
 void MemoryManager::CopySparseMemory(VAddr virtual_addr, u8* dest, u64 size) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
                "Attempted to access invalid address {:#x}", virtual_addr);
-    const bool is_sparse = std::ranges::any_of(
-        prt_areas, [&](const PrtArea& area) { return area.Overlaps(virtual_addr, size); });
-    if (!is_sparse) {
-        std::memcpy(dest, std::bit_cast<const u8*>(virtual_addr), size);
-        return;
-    }
 
     auto vma = FindVMA(virtual_addr);
     while (size) {
         u64 copy_size = std::min<u64>(vma->second.size - (virtual_addr - vma->first), size);
-        if (vma->second.IsFree()) {
-            std::memset(dest, 0, copy_size);
-        } else {
+        if (vma->second.IsMapped()) {
             std::memcpy(dest, std::bit_cast<const u8*>(virtual_addr), copy_size);
+        } else {
+            std::memset(dest, 0, copy_size);
         }
         size -= copy_size;
         virtual_addr += copy_size;
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index 41703dfe7..e3cb6cc81 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -923,7 +923,7 @@ vk::Buffer BufferCache::UploadCopies(Buffer& buffer, std::span<vk::BufferCopy> c
     for (auto& copy : copies) {
         u8* const src_pointer = staging + copy.srcOffset;
         const VAddr device_addr = buffer.CpuAddr() + copy.dstOffset;
-        std::memcpy(src_pointer, std::bit_cast<const u8*>(device_addr), copy.size);
+        memory->CopySparseMemory(device_addr, src_pointer, copy.size);
         // Apply the staging offset
         copy.srcOffset += offset;
     }
@@ -939,7 +939,7 @@ vk::Buffer BufferCache::UploadCopies(Buffer& buffer, std::span<vk::BufferCopy> c
     for (const auto& copy : copies) {
         u8* const src_pointer = staging + copy.srcOffset;
         const VAddr device_addr = buffer.CpuAddr() + copy.dstOffset;
-        std::memcpy(src_pointer, std::bit_cast<const u8*>(device_addr), copy.size);
+        memory->CopySparseMemory(device_addr, src_pointer, copy.size);
     }
     scheduler.DeferOperation([buffer = std::move(temp_buffer)]() mutable { buffer.reset(); });
     return src_buffer;
diff --git a/src/video_core/page_manager.cpp b/src/video_core/page_manager.cpp
index daa1218cc..2bf16afe0 100644
--- a/src/video_core/page_manager.cpp
+++ b/src/video_core/page_manager.cpp
@@ -248,9 +248,11 @@ struct PageManager::Impl {
         // Iterate requested pages
         const u64 aligned_addr = page << PAGE_BITS;
         const u64 aligned_end = page_end << PAGE_BITS;
-        ASSERT_MSG(rasterizer->IsMapped(aligned_addr, aligned_end - aligned_addr),
-                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.", aligned_addr,
-                   aligned_end - aligned_addr);
+        if (!rasterizer->IsMapped(aligned_addr, aligned_end - aligned_addr)) {
+            LOG_WARNING(Render,
+                        "Tracking memory region {:#x} - {:#x} which is not fully GPU mapped.",
+                        aligned_addr, aligned_end);
+        }
 
         for (; page != page_end; ++page) {
             PageState& state = cached_pages[page];
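
Note on the CopySparseMemory change: instead of the old PRT-area fast path, the buffer cache now always routes uploads through a VMA walk that memcpys mapped regions and zero-fills everything else, so reads through partially mapped ranges no longer fault. Below is a minimal standalone sketch of that walk under simplified assumptions: Vma, VmaMap, FindVma and CopySparse are illustrative stand-ins, not the emulator's API, and the map is assumed to fully cover the copied range with contiguous entries (as the real VMA map does).

// Standalone sketch of the sparse-copy loop introduced above. Unmapped
// regions are modeled as entries with mapped == false and are zero-filled,
// mirroring the IsMapped()/memset branch in the patch.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iterator>
#include <map>
#include <vector>

struct Vma {
    uint64_t size;
    const uint8_t* backing; // null when the region is not mapped
    bool mapped;
};

// Keyed by base address; each entry covers [base, base + size).
using VmaMap = std::map<uint64_t, Vma>;

// Find the VMA containing addr: the last entry whose base is <= addr.
// Caller must pass an address covered by the map.
VmaMap::const_iterator FindVma(const VmaMap& map, uint64_t addr) {
    return std::prev(map.upper_bound(addr));
}

void CopySparse(const VmaMap& map, uint64_t addr, uint8_t* dest, uint64_t size) {
    auto vma = FindVma(map, addr);
    while (size) {
        // Clamp the copy to whatever remains of the current VMA.
        const uint64_t offset_in_vma = addr - vma->first;
        const uint64_t copy_size = std::min(vma->second.size - offset_in_vma, size);
        if (vma->second.mapped) {
            std::memcpy(dest, vma->second.backing + offset_in_vma, copy_size);
        } else {
            std::memset(dest, 0, copy_size); // holes read as zeros
        }
        size -= copy_size;
        addr += copy_size;
        dest += copy_size;
        ++vma; // entries are contiguous, so the next VMA starts where this one ended
    }
}

int main() {
    std::vector<uint8_t> data(0x100, 0xAB);
    VmaMap map;
    map.emplace(0x1000, Vma{0x100, data.data(), true}); // mapped
    map.emplace(0x1100, Vma{0x100, nullptr, false});    // hole
    map.emplace(0x1200, Vma{0x100, data.data(), true}); // mapped again

    std::vector<uint8_t> out(0x300);
    CopySparse(map, 0x1000, out.data(), out.size());
    std::printf("%02x %02x %02x\n", out[0x000], out[0x180], out[0x250]); // ab 00 ab
}

The upper_bound/prev lookup is the usual way to find the containing entry in an address-sorted map, which is presumably what FindVMA does in the real code; the per-iteration clamp is the same arithmetic as the copy_size line in the diff.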