track potentially dirty images and hash them

Vladislav Mikhalin 2024-11-24 12:55:30 +03:00
parent 7ac7398ab4
commit 7936dd7386
2 changed files with 28 additions and 3 deletions


@@ -22,9 +22,10 @@ VK_DEFINE_HANDLE(VmaAllocator)
 namespace VideoCore {
 
 enum ImageFlagBits : u32 {
+    MaybeCpuDirty = 1 << 0, ///< The page this image is in was touched before the image address
     CpuDirty = 1 << 1,      ///< Contents have been modified from the CPU
     GpuDirty = 1 << 2,      ///< Contents have been modified from the GPU (valid data in buffer cache)
-    Dirty = CpuDirty | GpuDirty,
+    Dirty = CpuDirty | GpuDirty | MaybeCpuDirty,
     GpuModified = 1 << 3,   ///< Contents have been modified from the GPU
     Tracked = 1 << 4,       ///< Writes and reads are being hooked from the CPU
     Registered = 1 << 6,    ///< True when the image is registered
@@ -130,6 +131,7 @@ struct Image {
     std::vector<State> subresource_states{};
     boost::container::small_vector<u64, 14> mip_hashes{};
     u64 tick_accessed_last{0};
+    u64 hash{0};
 
     struct {
         union {
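Folding MaybeCpuDirty into the Dirty mask means existing "is this image dirty?" checks also pick up tentatively-invalidated images without further changes. A minimal standalone sketch of that behavior (the enum values are copied from the hunk above; everything else is illustrative, not project code):

// Sketch only: shows that a plain Dirty-mask test now also catches MaybeCpuDirty.
#include <cstdint>
#include <cstdio>

using u32 = uint32_t;

enum ImageFlagBits : u32 {
    MaybeCpuDirty = 1u << 0,
    CpuDirty = 1u << 1,
    GpuDirty = 1u << 2,
    Dirty = CpuDirty | GpuDirty | MaybeCpuDirty,
};

int main() {
    u32 flags = MaybeCpuDirty; // image only tentatively invalidated
    // The pre-existing "needs refresh" style test keeps working unchanged:
    std::printf("dirty? %s\n", (flags & Dirty) ? "yes" : "no"); // prints "yes"
    return 0;
}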


@@ -47,7 +47,17 @@ void TextureCache::InvalidateMemory(VAddr addr, VAddr addr_aligned, size_t size)
     std::scoped_lock lock{mutex};
     ForEachImageInRegion(addr_aligned, size, [&](ImageId image_id, Image& image) {
         const auto image_end = image.info.guest_address + image.info.guest_size_bytes;
-        if (addr < image_end) {
+        const auto page_end = addr_aligned + size;
+        if (addr < image.info.guest_address) {
+            // This page access may or may not modify the image.
+            // We should not mark it as dirty now. If it really was modified,
+            // it will receive more invalidations on subsequent pages.
+            if (image_end < page_end) {
+                // Image ends on this page, so it cannot receive any more invalidations.
+                // We will check its hash later to see if it really was modified.
+                image.flags |= ImageFlagBits::MaybeCpuDirty;
+            }
+        } else if (addr < image_end) {
             // Ensure image is reuploaded when accessed again.
             image.flags |= ImageFlagBits::CpuDirty;
         }
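The geometry behind the new branch: the handler only knows that some address addr inside the invalidated page [addr_aligned, addr_aligned + size) was written. If the image starts after addr, the write may not have reached it at all, so the image is only tentatively flagged, and only when it also ends inside this page (otherwise a later invalidation on a following page will settle the question). A standalone sketch with hypothetical addresses and a 4 KiB page, mirroring the same conditions:

// Sketch only: hypothetical addresses, not project code.
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t page_size = 0x1000;
    const uint64_t addr = 0x10010;         // faulting address inside the page
    const uint64_t addr_aligned = 0x10000; // page-aligned start of the invalidation
    const uint64_t page_end = addr_aligned + page_size;

    const uint64_t image_start = 0x10800;  // image begins after the faulting address
    const uint64_t image_end = 0x10A00;    // ...and ends before the page does

    if (addr < image_start) {
        if (image_end < page_end) {
            std::printf("MaybeCpuDirty: verify by hash on next use\n");
        } else {
            std::printf("spans into the next page: later invalidations decide\n");
        }
    } else if (addr < image_end) {
        std::printf("CpuDirty: the written address overlaps the image\n");
    }
    return 0;
}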
@@ -418,6 +428,19 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
         return;
     }
 
+    if (True(image.flags & ImageFlagBits::MaybeCpuDirty) &&
+        False(image.flags & ImageFlagBits::CpuDirty)) {
+        // The image size should be less than the page size to be considered MaybeCpuDirty,
+        // so this calculation should be very uncommon and reasonably fast.
+        ASSERT(image.info.guest_size_bytes <= 4_KB);
+        const u8* addr = std::bit_cast<u8*>(image.info.guest_address);
+        const u64 hash = XXH3_64bits(addr, image.info.guest_size_bytes);
+        if (image.hash == hash) {
+            return;
+        }
+        image.hash = hash;
+    }
+
     const auto& num_layers = image.info.resources.layers;
     const auto& num_mips = image.info.resources.levels;
     ASSERT(num_mips == image.info.mips_layout.size());
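The fast path above only recomputes a hash for small images that were tentatively flagged, and skips the reupload entirely when the backing bytes are unchanged. A minimal sketch of that idea, assuming xxHash is available (XXH3_64bits is a real xxHash entry point; GuestImage and NeedsReupload are hypothetical stand-ins, not project code):

// Sketch only: hash-check fast path for a tentatively dirty image.
#include <cstdint>
#include <cstdio>
#include <vector>
#include <xxhash.h>

struct GuestImage {
    std::vector<uint8_t> bytes; // stands in for the guest memory backing the image
    uint64_t hash = 0;          // last hash observed at refresh time
};

// Returns true only when the bytes actually changed since the last refresh.
bool NeedsReupload(GuestImage& image) {
    const uint64_t hash = XXH3_64bits(image.bytes.data(), image.bytes.size());
    if (image.hash == hash) {
        return false; // page was touched, but the image contents are identical
    }
    image.hash = hash;
    return true;
}

int main() {
    GuestImage image{std::vector<uint8_t>(4096, 0xAB)};
    std::printf("%d\n", NeedsReupload(image)); // 1: first refresh records the hash
    std::printf("%d\n", NeedsReupload(image)); // 0: unchanged bytes, reupload skipped
    image.bytes[100] = 0xCD;
    std::printf("%d\n", NeedsReupload(image)); // 1: real modification detected
    return 0;
}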