Mirror of https://github.com/shadps4-emu/shadPS4.git
liverpool: Use pending downloads API for rewind
Prevents creating huge buffers
commit e580073430
parent 35c6e57469
@@ -708,9 +708,7 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<c
         }
         case PM4ItOpcode::Rewind: {
             if (rasterizer) {
-                const VAddr flush_addr = VAddr(reinterpret_cast<const u32*>(header));
-                const u32 flush_size = dcb.size_bytes();
-                rasterizer->ReadMemory(flush_addr, flush_size);
+                rasterizer->CommitPendingDownloads(true);
             }
             const PM4CmdRewind* rewind = reinterpret_cast<const PM4CmdRewind*>(header);
             while (!rewind->Valid()) {
@@ -915,9 +913,7 @@ Liverpool::Task Liverpool::ProcessCompute(const u32* acb, u32 acb_dwords, u32 vq
         }
         case PM4ItOpcode::Rewind: {
             if (rasterizer) {
-                const VAddr flush_addr = VAddr(reinterpret_cast<const u32*>(header));
-                const u32 flush_size = acb_dwords * sizeof(u32);
-                rasterizer->ReadMemory(flush_addr, flush_size);
+                rasterizer->CommitPendingDownloads(true);
             }
             const PM4CmdRewind* rewind = reinterpret_cast<const PM4CmdRewind*>(header);
             while (!rewind->Valid()) {
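The two Rewind hunks above drop the whole-command-buffer readback (the old flush_size was dcb.size_bytes() or acb_dwords * sizeof(u32)) in favour of committing only the tracked pending download ranges. A minimal, self-contained model of that idea, using a hypothetical PendingDownloads type rather than the real buffer cache machinery:

// Hypothetical model, not shadPS4 code: only the ranges the GPU actually modified
// are tracked and copied back at a Rewind packet, instead of a region as large as
// the entire command buffer.
#include <cstdint>
#include <cstdio>
#include <vector>

using VAddr = std::uint64_t;

struct PendingDownloads {
    struct Range {
        VAddr addr;
        std::uint64_t size;
    };
    std::vector<Range> ranges;

    void Add(VAddr addr, std::uint64_t size) {
        ranges.push_back({addr, size});
    }

    // Drain the tracked ranges; returns the total number of bytes that would be
    // copied back to guest memory.
    std::uint64_t Commit() {
        std::uint64_t total = 0;
        for (const Range& r : ranges) {
            total += r.size;
        }
        ranges.clear();
        return total;
    }
};

int main() {
    PendingDownloads downloads;
    downloads.Add(0x10000, 4096);   // e.g. a GPU-written query or streamout buffer
    downloads.Add(0x24000, 16384);
    std::printf("rewind downloads %llu bytes instead of the whole command buffer\n",
                static_cast<unsigned long long>(downloads.Commit()));
}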
@@ -27,7 +27,7 @@ static constexpr size_t UboStreamBufferSize = 128_MB;
 static constexpr size_t DownloadBufferSize = 128_MB;
 static constexpr size_t DeviceBufferSize = 128_MB;
 static constexpr size_t MaxPageFaults = 1024;
-static constexpr size_t DownloadSizeThreshold = 2_MB;
+static constexpr size_t DownloadSizeThreshold = 1_MB;
 
 BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
                          AmdGpu::Liverpool* liverpool_, TextureCache& texture_cache_,
@@ -188,7 +188,7 @@ void BufferCache::DownloadBufferMemory(const Buffer& buffer, VAddr device_addr,
     }
 }
 
-bool BufferCache::CommitPendingDownloads() {
+bool BufferCache::CommitPendingDownloads(bool wait_done) {
     if (pending_download_ranges.Empty()) {
         return false;
     }
@@ -254,7 +254,11 @@ bool BufferCache::CommitPendingDownloads() {
             }
         }
     });
-    scheduler.Flush();
+    if (wait_done) {
+        scheduler.Finish();
+    } else {
+        scheduler.Flush();
+    }
     return true;
 }
 
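A standalone sketch of the wait_done semantics added in this hunk, using hypothetical FakeScheduler and FakeBufferCache types rather than the real classes: a blocking commit waits for the GPU before returning, a non-blocking commit only submits the copy work.

// Hypothetical model, not shadPS4 code.
#include <cstdio>
#include <vector>

struct FakeScheduler {
    void Flush() { std::puts("submit recorded copy commands, do not wait"); }
    void Finish() {
        Flush();
        std::puts("block until the GPU has executed them");
    }
};

struct FakeBufferCache {
    FakeScheduler& scheduler;
    std::vector<int> pending_download_ranges; // stand-in for the real range tracker

    bool CommitPendingDownloads(bool wait_done) {
        if (pending_download_ranges.empty()) {
            return false;
        }
        // ... record buffer-to-staging copies and defer the writeback ...
        pending_download_ranges.clear();
        if (wait_done) {
            scheduler.Finish(); // caller (e.g. the Rewind handler) needs the data now
        } else {
            scheduler.Flush();  // fire and forget
        }
        return true;
    }
};

int main() {
    FakeScheduler sched;
    FakeBufferCache cache{sched, {1}};
    cache.CommitPendingDownloads(true);
}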
@@ -128,7 +128,7 @@ public:
     void CopyBuffer(VAddr dst, VAddr src, u32 num_bytes, bool dst_gds, bool src_gds);
 
     /// Schedules pending GPU modified ranges since last commit to be copied back the host memory.
-    bool CommitPendingDownloads();
+    bool CommitPendingDownloads(bool wait_done);
 
     /// Obtains a buffer for the specified region.
     [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(VAddr gpu_addr, u32 size, bool is_written,
@@ -60,9 +60,9 @@ void Rasterizer::CpSync() {
                            vk::DependencyFlagBits::eByRegion, ib_barrier, {}, {});
 }
 
-bool Rasterizer::CommitPendingDownloads() {
+bool Rasterizer::CommitPendingDownloads(bool wait_done) {
     scheduler.PopPendingOperations();
-    return buffer_cache.CommitPendingDownloads();
+    return buffer_cache.CommitPendingDownloads(wait_done);
 }
 
 bool Rasterizer::FilterDraw() {
@@ -65,7 +65,7 @@ public:
     void UnmapMemory(VAddr addr, u64 size);
 
     void CpSync();
-    bool CommitPendingDownloads();
+    bool CommitPendingDownloads(bool wait_done = false);
     u64 Flush();
     void Finish();
     void ProcessFaults();
@@ -357,8 +357,8 @@ public:
     }
 
     /// Defers an operation until the gpu has reached the current cpu tick.
-    void DeferOperation(Common::UniqueFunction<void>&& func, bool prev_tick = false) {
-        pending_ops.emplace(std::move(func), prev_tick ? CurrentTick() - 1 : CurrentTick());
+    void DeferOperation(Common::UniqueFunction<void>&& func) {
+        pending_ops.emplace(std::move(func), CurrentTick());
    }
 
     static std::mutex submit_mutex;
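For context on the simplified DeferOperation above, a minimal standalone model with a hypothetical TickScheduler: each deferred operation is stamped with the current CPU tick and runs once the GPU reports that tick complete. How the real scheduler advances and signals ticks is not shown here.

// Hypothetical model, not shadPS4 code.
#include <cstdint>
#include <cstdio>
#include <functional>
#include <queue>
#include <utility>

struct TickScheduler {
    std::uint64_t current_tick = 1; // tick the CPU is currently recording
    std::uint64_t gpu_tick = 0;     // last tick the GPU has finished

    std::queue<std::pair<std::function<void()>, std::uint64_t>> pending_ops;

    void DeferOperation(std::function<void()>&& func) {
        pending_ops.emplace(std::move(func), current_tick);
    }

    // Run every deferred operation whose recorded tick the GPU has reached.
    void PopPendingOperations() {
        while (!pending_ops.empty() && pending_ops.front().second <= gpu_tick) {
            pending_ops.front().first();
            pending_ops.pop();
        }
    }
};

int main() {
    TickScheduler sched;
    sched.DeferOperation([] { std::puts("write downloaded data back to guest memory"); });
    sched.PopPendingOperations(); // nothing runs: the GPU has not reached tick 1 yet
    sched.gpu_tick = 1;
    sched.PopPendingOperations(); // now the deferred writeback runs
}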