Mirror of https://github.com/shadps4-emu/shadPS4.git, synced 2025-07-27 20:44:28 +00:00
Some fixes (and async testing (doesn't work))
This commit is contained in:
parent ce17f57954
commit 629dc6132e
@@ -553,7 +553,6 @@ void BufferCache::CreateFaultBuffers() {
         .offset = 0,
         .size = FAULT_READBACK_SIZE,
     };
-    staging_buffer.Commit();
     scheduler.EndRendering();
     const auto cmdbuf = scheduler.CommandBuffer();
     cmdbuf.pipelineBarrier2(vk::DependencyInfo{
@@ -562,35 +561,39 @@ void BufferCache::CreateFaultBuffers() {
         .pBufferMemoryBarriers = &barrier,
     });
     cmdbuf.copyBuffer(fault_readback_buffer.buffer, staging_buffer.Handle(), copy);
-    scheduler.Finish();
-    std::memcpy(fault_readback_cpu.data(), mapped, FAULT_READBACK_SIZE);
-    // Create the fault buffers batched
-    boost::icl::interval_set<VAddr> fault_ranges;
-    for (u64 i = 0; i < FAULT_READBACK_SIZE; ++i) {
-        if (fault_readback_cpu[i] == 0) {
-            continue;
-        }
-        // Each bit is a page
-        const u64 page = i * 8;
-        for (u8 j = 0; j < 8; ++j) {
-            if ((fault_readback_cpu[i] & (1 << j)) == 0) {
-                continue;
-            }
-            const VAddr start = (page + j) << CACHING_PAGEBITS;
-            const VAddr end = start + CACHING_PAGESIZE;
-            fault_ranges += boost::icl::interval_set<VAddr>::interval_type::right_open(start, end);
-            LOG_WARNING(Render_Vulkan, "Accessed non GPU-local memory at {:#x}", start);
-        }
-    }
-    for (const auto& range : fault_ranges) {
-        const VAddr start = range.lower();
-        const u64 size = range.upper() - start;
-        // Buffer size is 32 bits
-        for (VAddr addr = start; addr < size; addr += std::numeric_limits<u32>::max()) {
-            const u32 size_buffer = std::min<u32>(size, std::numeric_limits<u32>::max());
-            CreateBuffer(addr, size_buffer);
-        }
-    }
+    staging_buffer.Commit();
+    scheduler.DeferOperation([this, mapped]() {
+        std::memcpy(fault_readback_cpu.data(), mapped, FAULT_READBACK_SIZE);
+        // Create the fault buffers batched
+        boost::icl::interval_set<VAddr> fault_ranges;
+        for (u64 i = 0; i < FAULT_READBACK_SIZE; ++i) {
+            if (fault_readback_cpu[i] == 0) {
+                continue;
+            }
+            // Each bit is a page
+            const u64 page = i * 8;
+            for (u8 j = 0; j < 8; ++j) {
+                if ((fault_readback_cpu[i] & (1 << j)) == 0) {
+                    continue;
+                }
+                const VAddr start = (page + j) << CACHING_PAGEBITS;
+                const VAddr end = start + CACHING_PAGESIZE;
+                fault_ranges +=
+                    boost::icl::interval_set<VAddr>::interval_type::right_open(start, end);
+                LOG_WARNING(Render_Vulkan, "Accessed non GPU-local memory at {:#x}", start);
+            }
+        }
+        for (const auto& range : fault_ranges) {
+            const VAddr start = range.lower();
+            const VAddr end = range.upper();
+            // Buffer size is 32 bits
+            for (VAddr addr = start; addr < end; addr += std::numeric_limits<u32>::max()) {
+                const u32 size_buffer = std::min<u32>(end - addr, std::numeric_limits<u32>::max());
+                CreateBuffer(addr, size_buffer);
+            }
+        }
+    });
+    scheduler.Flush();
 }
 
 void BufferCache::ResetFaultReadbackBuffer() {
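Two things happen in this hunk. First, the chunking loop is fixed: the old code iterated with `addr < size` and clamped each chunk to `size`, where it should have compared against `end` and clamped to the bytes remaining (`end - addr`). Second, the CPU-side fault processing moves off the hot path: instead of stalling in scheduler.Finish(), the work is captured in a lambda handed to scheduler.DeferOperation, and the submission is kicked with the new scheduler.Flush(). DeferOperation itself is not shown in this diff, so the following is a self-contained C++ model of the defer/drain pattern with assumed names (PendingOp, current_tick), illustrating the intended semantics rather than the actual implementation.

#include <cstdint>
#include <functional>
#include <iostream>
#include <queue>

using u64 = std::uint64_t; // shadPS4's common typedef

// Assumed shape of a deferred operation, inferred from the Scheduler::Wait
// drain loop in the hunks below; the real definition is not in this diff.
struct PendingOp {
    std::function<void()> callback; // CPU work to run once the GPU catches up
    u64 gpu_tick;                   // timeline tick the work is ordered after
};

std::queue<PendingOp> pending_ops;
u64 current_tick = 1; // stands in for Scheduler::CurrentTick()

void DeferOperation(std::function<void()>&& callback) {
    // Tag the callback with the tick of the submission being recorded, so it
    // only runs once the GPU has signalled at least that tick.
    pending_ops.push({std::move(callback), current_tick});
}

void Wait(u64 tick) {
    // Real code first blocks in master_semaphore.Wait(tick); afterwards every
    // op recorded against a tick <= `tick` is safe to run on the CPU.
    while (!pending_ops.empty() && pending_ops.front().gpu_tick <= tick) {
        pending_ops.front().callback();
        pending_ops.pop();
    }
}

int main() {
    DeferOperation([] { std::cout << "process fault readback\n"; });
    current_tick = 2; // a later submission
    DeferOperation([] { std::cout << "runs only at tick 2\n"; });
    Wait(1); // drains just the first op
    Wait(2); // drains the rest
}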
@@ -70,6 +70,11 @@ void Scheduler::Flush(SubmitInfo& info) {
     SubmitExecution(info);
 }
 
+void Scheduler::Flush() {
+    SubmitInfo info{};
+    Flush(info);
+}
+
 void Scheduler::Finish() {
     // When finishing, we need to wait for the submission to have executed on the device.
     const u64 presubmit_tick = CurrentTick();
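The parameterless Flush() added here lets call sites submit without constructing a SubmitInfo and, unlike Finish(), without a host wait. A minimal model of the overload pattern follows; the real SubmitInfo carries semaphore/fence state that is elided here.

// Minimal model of the overload added in this hunk: the parameterless
// Flush() forwards to Flush(SubmitInfo&) with a default-constructed info.
struct SubmitInfo {};

class Scheduler {
public:
    void Flush(SubmitInfo& info) {
        // real code: batches the recorded command buffers and submits them
        // with `info`, returning immediately (no host wait, unlike Finish()).
        static_cast<void>(info);
    }
    void Flush() {
        SubmitInfo info{};
        Flush(info);
    }
};

This is what lets CreateFaultBuffers end with scheduler.Flush(): the readback copy is submitted right away, and the deferred callback picks up the result later instead of stalling in scheduler.Finish().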
@@ -85,6 +90,12 @@ void Scheduler::Wait(u64 tick) {
         Flush(info);
     }
     master_semaphore.Wait(tick);
+
+    // Only apply pending operations until the current tick.
+    while (!pending_ops.empty() && pending_ops.front().gpu_tick <= tick) {
+        pending_ops.front().callback();
+        pending_ops.pop();
+    }
 }
 
 void Scheduler::AllocateWorkerCommandBuffers() {
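The drain is correct because master_semaphore.Wait(tick) returns only after the GPU has signalled at least `tick`: every callback the loop pops belongs to a submission that has already completed, while ops deferred against later ticks stay queued. For context, this is a hedged sketch of what a timeline-semaphore host wait typically looks like in Vulkan-Hpp; shadPS4's actual MasterSemaphore lives elsewhere in the repo and may differ (the `device` and `semaphore` names are assumptions).

#include <limits>
#include <vulkan/vulkan.hpp>

// Blocks until the timeline semaphore's counter reaches at least `tick`,
// the mechanism plausibly behind master_semaphore.Wait(tick).
void WaitForTick(vk::Device device, vk::Semaphore semaphore, uint64_t tick) {
    const vk::SemaphoreWaitInfo wait_info{
        .semaphoreCount = 1,
        .pSemaphores = &semaphore,
        .pValues = &tick,
    };
    const vk::Result result =
        device.waitSemaphores(wait_info, std::numeric_limits<uint64_t>::max());
    static_cast<void>(result); // real code would check eSuccess vs. eTimeout
}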
@@ -307,6 +307,10 @@ public:
     /// and increments the scheduler timeline semaphore.
     void Flush(SubmitInfo& info);
 
+    /// Sends the current execution context to the GPU
+    /// and increments the scheduler timeline semaphore.
+    void Flush();
+
     /// Sends the current execution context to the GPU and waits for it to complete.
     void Finish();
 
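Note that the header hunk only declares the new Flush() overload; for the .cpp hunks to compile, the scheduler header must also gain a DeferOperation declaration and the pending_ops member. A hedged guess at their shape, with names inferred from the Wait() drain above (exact signatures and placement are not shown in this diff):

#include <cstdint>
#include <functional>
#include <queue>

using u64 = std::uint64_t;

class Scheduler {
public:
    /// Defers a callback until the GPU has executed past the current tick.
    void DeferOperation(std::function<void()>&& callback);

private:
    struct PendingOp {
        std::function<void()> callback;
        u64 gpu_tick;
    };
    std::queue<PendingOp> pending_ops;
};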