video_core: Scheduler priority pending operation queue (#3848)

* Priority pending ops

* Use priority operations on image download

* clang-format

* Simplify thread

* I'm tired, it's too late :(
This commit is contained in:
Lander Gallastegi
2025-12-02 22:27:01 +01:00
committed by GitHub
parent b135a056ba
commit 9db4642f66
4 changed files with 52 additions and 40 deletions

View File

@@ -3,6 +3,7 @@
#include "common/assert.h"
#include "common/debug.h"
#include "common/thread.h"
#include "imgui/renderer/texture_manager.h"
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -17,6 +18,8 @@ Scheduler::Scheduler(const Instance& instance)
profiler_scope = reinterpret_cast<tracy::VkCtxScope*>(std::malloc(sizeof(tracy::VkCtxScope)));
#endif
AllocateWorkerCommandBuffers();
priority_pending_ops_thread =
std::jthread(std::bind_front(&Scheduler::PriorityPendingOpsThread, this));
}
Scheduler::~Scheduler() {
@@ -167,6 +170,32 @@ void Scheduler::SubmitExecution(SubmitInfo& info) {
PopPendingOperations();
}
// Worker loop for the priority pending-operations thread. Drains ops from
// priority_pending_ops, waits for the GPU to reach each op's recorded tick,
// then invokes its callback. Terminates when the owning jthread requests stop.
void Scheduler::PriorityPendingOpsThread(std::stop_token stoken) {
Common::SetCurrentThreadName("shadPS4:GpuSchedPriorityPendingOpsRunner");
while (!stoken.stop_requested()) {
PendingOp op;
{
// Sleep until an op is queued or stop is requested; the stop_token-aware
// wait overload of condition_variable_any wakes on stop without a notify.
std::unique_lock lk(priority_pending_ops_mutex);
priority_pending_ops_cv.wait(lk, stoken,
[this] { return !priority_pending_ops.empty(); });
// Re-check after waking: a stop request may have interrupted the wait
// (predicate false), in which case we must not touch the empty queue.
if (stoken.stop_requested()) {
break;
}
// Move the front op out while still holding the lock, then release the
// lock before the potentially long GPU wait below.
op = std::move(priority_pending_ops.front());
priority_pending_ops.pop();
}
// Block until the GPU has reached the tick captured when the op was
// deferred. NOTE(review): this wait is not stop-token aware, so shutdown
// can be delayed until the semaphore advances — confirm acceptable.
master_semaphore.Wait(op.gpu_tick);
if (stoken.stop_requested()) {
break;
}
op.callback();
}
}
void DynamicState::Commit(const Instance& instance, const vk::CommandBuffer& cmdbuf) {
if (dirty_state.viewports) {
dirty_state.viewports = false;

View File

@@ -5,6 +5,7 @@
#include <condition_variable>
#include <mutex>
#include <thread>
#include <queue>
#include "common/unique_function.h"
@@ -401,10 +402,21 @@ public:
}
/// Queues an operation to execute once the gpu catches up to the cpu tick
/// current at call time. Drained on submit or via PopPendingOperations.
void DeferOperation(Common::UniqueFunction<void>&& func) {
const u64 tick = CurrentTick();
pending_ops.emplace(std::move(func), tick);
}
/// Queues an operation that a dedicated worker thread runs as soon as the
/// gpu has reached the cpu tick current at call time.
void DeferPriorityOperation(Common::UniqueFunction<void>&& func) {
{
// Hold the mutex only for the queue mutation; notify after releasing
// it so the woken worker doesn't immediately block on the lock.
std::scoped_lock lk{priority_pending_ops_mutex};
priority_pending_ops.emplace(std::move(func), CurrentTick());
}
priority_pending_ops_cv.notify_one();
}
static std::mutex submit_mutex;
private:
@@ -412,6 +424,8 @@ private:
void SubmitExecution(SubmitInfo& info);
void PriorityPendingOpsThread(std::stop_token stoken);
private:
const Instance& instance;
MasterSemaphore master_semaphore;
@@ -424,6 +438,10 @@ private:
u64 gpu_tick;
};
std::queue<PendingOp> pending_ops;
std::queue<PendingOp> priority_pending_ops;
std::mutex priority_pending_ops_mutex;
std::condition_variable_any priority_pending_ops_cv;
std::jthread priority_pending_ops_thread;
RenderState render_state;
bool is_rendering = false;
tracy::VkCtxScope* profiler_scope{};

View File

@@ -52,9 +52,6 @@ TextureCache::TextureCache(const Vulkan::Instance& instance_, Vulkan::Scheduler&
std::max<u64>(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
DEFAULT_CRITICAL_GC_MEMORY));
trigger_gc_memory = static_cast<u64>((device_local_memory - mem_threshold) / 2);
downloaded_images_thread =
std::jthread([&](const std::stop_token& token) { DownloadedImagesThread(token); });
}
TextureCache::~TextureCache() = default;
@@ -125,33 +122,11 @@ void TextureCache::DownloadImageMemory(ImageId image_id) {
cmdbuf.copyImageToBuffer(image.GetImage(), vk::ImageLayout::eTransferSrcOptimal,
download_buffer.Handle(), image_download);
{
std::unique_lock lock(downloaded_images_mutex);
downloaded_images_queue.emplace(scheduler.CurrentTick(), image.info.guest_address, download,
scheduler.DeferPriorityOperation(
[this, device_addr = image.info.guest_address, download, download_size] {
Core::Memory::Instance()->TryWriteBacking(std::bit_cast<u8*>(device_addr), download,
download_size);
downloaded_images_cv.notify_one();
}
}
void TextureCache::DownloadedImagesThread(const std::stop_token& token) {
auto* memory = Core::Memory::Instance();
while (!token.stop_requested()) {
DownloadedImage image;
{
std::unique_lock lock{downloaded_images_mutex};
downloaded_images_cv.wait(lock, token,
[this] { return !downloaded_images_queue.empty(); });
if (token.stop_requested()) {
break;
}
image = downloaded_images_queue.front();
downloaded_images_queue.pop();
}
scheduler.GetMasterSemaphore()->Wait(image.tick);
memory->TryWriteBacking(std::bit_cast<u8*>(image.device_addr), image.download,
image.download_size);
}
});
}
void TextureCache::MarkAsMaybeDirty(ImageId image_id, Image& image) {

View File

@@ -314,16 +314,6 @@ private:
Common::LeastRecentlyUsedCache<ImageId, u64> lru_cache;
PageTable page_table;
std::mutex mutex;
struct DownloadedImage {
u64 tick;
VAddr device_addr;
void* download;
size_t download_size;
};
std::queue<DownloadedImage> downloaded_images_queue;
std::mutex downloaded_images_mutex;
std::condition_variable_any downloaded_images_cv;
std::jthread downloaded_images_thread;
struct MetaDataInfo {
enum class Type {
CMask,