Remove memory coverage logic
This commit is contained in:
parent ef51446b36
commit 66e57a40e3
@@ -415,45 +415,6 @@ BufferId BufferCache::FindBuffer(VAddr device_addr, u32 size) {
     return CreateBuffer(device_addr, size);
 }
 
-void BufferCache::QueueMemoryCoverage(VAddr device_addr, u64 size) {
-    std::scoped_lock lk{covered_regions_mutex};
-    const VAddr start = device_addr;
-    const VAddr end = device_addr + size;
-    auto queue_range = decltype(queued_converages)::interval_type::right_open(start, end);
-    queued_converages += queue_range;
-}
-
-void BufferCache::CoverQueuedRegions() {
-    std::scoped_lock lk{covered_regions_mutex};
-    if (queued_converages.empty()) {
-        return;
-    }
-    for (const auto& range : queued_converages) {
-        CoverMemory(range.lower(), range.upper());
-    }
-    queued_converages.clear();
-}
-
-void BufferCache::CoverMemory(u64 start, u64 end) {
-    const u64 page_start = start >> CACHING_PAGEBITS;
-    const u64 page_end = Common::DivCeil(end, CACHING_PAGESIZE);
-    auto interval = decltype(convered_regions)::interval_type::right_open(page_start, page_end);
-    auto interval_set = boost::icl::interval_set<u64>{interval};
-    auto uncovered_ranges = interval_set - convered_regions;
-    if (uncovered_ranges.empty()) {
-        return;
-    }
-    // We fill any holes within the given range
-    for (const auto& range : uncovered_ranges) {
-        const u64 range_start = range.lower();
-        const u64 range_end = range.upper();
-        void* cpu_addr = reinterpret_cast<void*>(range_start << CACHING_PAGEBITS);
-        const u64 range_size = (range_end - range_start) << CACHING_PAGEBITS;
-        // Here to implement import of the mapped region
-        convered_regions += range;
-    }
-}
-
 BufferCache::OverlapResult BufferCache::ResolveOverlaps(VAddr device_addr, u32 wanted_size) {
     static constexpr int STREAM_LEAP_THRESHOLD = 16;
     boost::container::small_vector<BufferId, 16> overlap_ids;
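
A note on what the removed helpers were doing: the bookkeeping rides entirely on boost::icl interval arithmetic, where adjacent right-open intervals coalesce on insertion and set subtraction yields exactly the still-uncovered holes. Below is a minimal standalone sketch of that mechanism; VAddr, kPageBits, and the 4 KiB page size are illustrative stand-ins, not the shadPS4 definitions.

// Sketch of the interval bookkeeping behind QueueMemoryCoverage/CoverMemory.
// Assumptions: VAddr is a 64-bit address, kPageBits = 12 (4 KiB pages).
#include <cstdint>
#include <iostream>
#include <boost/icl/interval_set.hpp>

using VAddr = std::uint64_t;
constexpr VAddr kPageBits = 12;

int main() {
    boost::icl::interval_set<VAddr> queued;  // byte ranges queued for coverage
    boost::icl::interval_set<VAddr> covered; // page ranges already covered

    // Queue side: overlapping/adjacent right-open intervals coalesce to one.
    queued += boost::icl::interval<VAddr>::right_open(0x1000, 0x3000);
    queued += boost::icl::interval<VAddr>::right_open(0x3000, 0x5000); // merges

    covered += boost::icl::interval<VAddr>::right_open(2, 3); // page 2 already covered

    // Cover side: convert bytes to a page interval, subtract what is covered,
    // and only the holes remain to be imported.
    for (const auto& range : queued) {
        const VAddr page_start = range.lower() >> kPageBits;
        const VAddr page_end = (range.upper() + (1ULL << kPageBits) - 1) >> kPageBits;
        const boost::icl::interval_set<VAddr> want{
            boost::icl::interval<VAddr>::right_open(page_start, page_end)};
        for (const auto& hole : want - covered) {
            // Prints [1, 2) then [3, 5) for this input.
            std::cout << "uncovered pages [" << hole.lower() << ", " << hole.upper() << ")\n";
            covered += hole;
        }
    }
}

Here `queued` plays the role of `queued_converages` and `covered` that of `convered_regions` (identifier spellings as in the diff).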
@@ -608,12 +569,6 @@ BufferId BufferCache::CreateBuffer(VAddr device_addr, u32 wanted_size) {
     }
     WriteDataBuffer(bda_pagetable_buffer, start_page * sizeof(vk::DeviceAddress), bda_addrs.data(),
                     bda_addrs.size() * sizeof(vk::DeviceAddress));
-    {
-        // Mark the pages as covered
-        std::scoped_lock lk{covered_regions_mutex};
-        convered_regions += boost::icl::interval_set<u64>::interval_type::right_open(
-            start_page, start_page + size_pages);
-    }
     const size_t size_bytes = new_buffer.SizeBytes();
     const auto cmdbuf = scheduler.CommandBuffer();
     scheduler.EndRendering();
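
The start_page/size_pages arithmetic in this hunk, like the CACHING_PAGEBITS / Common::DivCeil math in the removed CoverMemory, is plain page rounding: floor the start address down to a page index, round the end up. A small self-contained sketch; the page size is assumed, and DivCeil is a local stand-in matching the usual ceiling-division contract rather than shadPS4's Common::DivCeil verbatim.

#include <cstdint>

// Stand-in for Common::DivCeil: ceiling division for unsigned integers.
template <typename T>
constexpr T DivCeil(T value, T divisor) {
    return (value + divisor - 1) / divisor;
}

constexpr std::uint64_t kPageBits = 12; // assumed page size (4 KiB)
constexpr std::uint64_t kPageSize = std::uint64_t{1} << kPageBits;

constexpr std::uint64_t PageStart(std::uint64_t addr) {
    return addr >> kPageBits; // floor to page index
}
constexpr std::uint64_t PageEnd(std::uint64_t addr, std::uint64_t size) {
    return DivCeil(addr + size, kPageSize); // one past the last touched page
}

static_assert(PageStart(0x1800) == 1);          // 0x1800 lies in page 1
static_assert(PageEnd(0x1800, 0x100) == 2);     // [0x1800, 0x1900) still ends in page 1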
@@ -133,12 +133,6 @@ public:
     /// Return buffer id for the specified region
     BufferId FindBuffer(VAddr device_addr, u32 size);
 
-    /// Queue a region for coverage for DMA.
-    void QueueMemoryCoverage(VAddr device_addr, u64 size);
-
-    /// Covers all queued regions.
-    void CoverQueuedRegions();
-
     /// Processes the fault buffer.
     void ProcessFaultBuffer();
 
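
Read together with the rasterizer hunks further down, the two declarations removed here implied a two-phase protocol: queue cheaply whenever guest memory is mapped, then drain the queue once before any DMA work. A hypothetical pair of wrappers to make that call order explicit (the free functions are illustrative, not shadPS4 code):

// Call order implied by the removed API, per the rasterizer hunks below.
void OnGuestMap(BufferCache& cache, VAddr addr, u64 size) {
    // Rasterizer::MapMemory did this: record the range, defer the work.
    cache.QueueMemoryCoverage(addr, size);
}

void OnDmaBind(BufferCache& cache) {
    // Rasterizer::BindResources did this before syncing mapped regions.
    cache.CoverQueuedRegions();
}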
@@ -191,8 +185,6 @@ private:
 
     void DeleteBuffer(BufferId buffer_id);
 
-    void CoverMemory(u64 start, u64 end);
-
     const Vulkan::Instance& instance;
     Vulkan::Scheduler& scheduler;
     Vulkan::Rasterizer& rasterizer;
@@ -205,9 +197,6 @@ private:
     Buffer gds_buffer;
     Buffer bda_pagetable_buffer;
     Buffer fault_buffer;
-    boost::icl::interval_set<VAddr> queued_converages;
-    boost::icl::interval_set<u64> convered_regions;
-    std::shared_mutex covered_regions_mutex;
     std::shared_mutex slot_buffers_mutex;
     Common::SlotVector<Buffer> slot_buffers;
     RangeSet gpu_modified_ranges;
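
The three members dropped here pair two interval sets with a std::shared_mutex. The removed functions only ever take that mutex exclusively via std::scoped_lock, but a shared_mutex also admits concurrent readers through std::shared_lock; a sketch of that pairing with illustrative stand-in types (the read path is an assumption, not code from this diff):

#include <cstdint>
#include <mutex>
#include <shared_mutex>
#include <boost/icl/interval_set.hpp>

struct CoverageState {
    boost::icl::interval_set<std::uint64_t> covered;
    std::shared_mutex mutex;

    void Add(std::uint64_t lo, std::uint64_t hi) {
        std::scoped_lock lk{mutex}; // exclusive: mutates the set
        covered += boost::icl::interval<std::uint64_t>::right_open(lo, hi);
    }

    bool Contains(std::uint64_t page) {
        std::shared_lock lk{mutex}; // shared: read-only query
        return boost::icl::contains(covered, page);
    }
};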
@@ -477,9 +477,6 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) {
     if (uses_dma) {
         fault_process_pending = true;
         // We only use fault buffer for DMA right now.
-        // First, import any queued host memory, then sync every mapped
-        // region that is cached on GPU memory.
-        buffer_cache.CoverQueuedRegions();
         {
             std::shared_lock lock{dma_sync_mapped_ranges_mutex};
             for (const auto& range : dma_sync_mapped_ranges) {
@@ -991,7 +988,6 @@ void Rasterizer::MapMemory(VAddr addr, u64 size) {
         dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;
     }
     page_manager.OnGpuMap(addr, size);
-    buffer_cache.QueueMemoryCoverage(addr, size);
 }
 
 void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
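
A closing note on the surviving context line `dma_sync_mapped_ranges = mapped_ranges & dma_sync_ranges;`: assuming those members are boost::icl interval sets (which the & suggests, though the diff does not show their declarations), operator& computes the set intersection. A minimal sketch:

#include <cstdint>
#include <iostream>
#include <boost/icl/interval_set.hpp>

int main() {
    using Set = boost::icl::interval_set<std::uint64_t>;
    Set mapped, dma_sync;
    mapped += boost::icl::interval<std::uint64_t>::right_open(0x0000, 0x4000);
    dma_sync += boost::icl::interval<std::uint64_t>::right_open(0x2000, 0x6000);
    const Set both = mapped & dma_sync; // intersection: [0x2000, 0x4000)
    std::cout << both << '\n';
}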