buffer_cache: Simplify invalidation scheme
commit f4966ba4af
parent cac23f7441

@@ -552,35 +552,17 @@ void BufferCache::SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size,
 }
 
 bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr, u32 size) {
-    boost::container::small_vector<ImageId, 8> image_ids;
-    const u32 inv_size = std::min(size, MaxInvalidateDist);
-    texture_cache.ForEachImageInRegion(device_addr, inv_size, [&](ImageId image_id, Image& image) {
-        // Only consider GPU modified images, i.e render targets or storage images.
-        // Also avoid any CPU modified images as the image data is likely to be stale.
-        if (True(image.flags & ImageFlagBits::CpuModified) ||
-            False(image.flags & ImageFlagBits::GpuModified)) {
-            return;
-        }
-        // Image must fully overlap with the provided buffer range.
-        if (image.cpu_addr < device_addr || image.cpu_addr_end > device_addr + size) {
-            return;
-        }
-        image_ids.push_back(image_id);
-    });
-    if (image_ids.empty()) {
+    static constexpr FindFlags find_flags = FindFlags::NoCreate | FindFlags::RelaxDim |
+                                            FindFlags::RelaxFmt | FindFlags::RelaxSize;
+    ImageInfo info{};
+    info.guest_address = device_addr;
+    info.guest_size_bytes = size;
+    const ImageId image_id = texture_cache.FindImage(info, find_flags);
+    if (!image_id) {
         return false;
     }
-    // Sort images by modification tick. If there are overlaps we want to
-    // copy from least to most recently modified.
-    std::ranges::sort(image_ids, [&](ImageId lhs_id, ImageId rhs_id) {
-        const Image& lhs = texture_cache.GetImage(lhs_id);
-        const Image& rhs = texture_cache.GetImage(rhs_id);
-        return lhs.tick_accessed_last < rhs.tick_accessed_last;
-    });
-    boost::container::small_vector<vk::BufferImageCopy, 8> copies;
-    for (const ImageId image_id : image_ids) {
-        copies.clear();
-        Image& image = texture_cache.GetImage(image_id);
-        u32 offset = buffer.Offset(image.cpu_addr);
-        const u32 num_layers = image.info.resources.layers;
-        for (u32 m = 0; m < image.info.resources.levels; m++) {
+    Image& image = texture_cache.GetImage(image_id);
+    boost::container::small_vector<vk::BufferImageCopy, 8> copies;
+    u32 offset = buffer.Offset(image.cpu_addr);
+    const u32 num_layers = image.info.resources.layers;
+    for (u32 m = 0; m < image.info.resources.levels; m++) {
@@ -609,7 +591,6 @@ bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr,
     const auto cmdbuf = scheduler.CommandBuffer();
     cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
                              copies);
-    }
     return true;
 }
 
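Note on the two hunks above: SynchronizeBufferFromImage no longer collects every GPU-modified image overlapping the buffer range, sorts the hits by access tick and copies each one; it now asks the texture cache for a single matching image (NoCreate plus relaxed dim/format/size matching) and copies only that, or bails out. A minimal standalone sketch of that control flow follows; FakeImage, FakeTextureCache and FindExisting are hypothetical stand-ins, not the real shadPS4 types.

#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical stand-ins for the real Image/TextureCache types.
struct FakeImage {
    uint64_t guest_address;
    uint64_t guest_size;
};

struct FakeTextureCache {
    std::vector<FakeImage> images;

    // Relaxed lookup: return an existing image starting at 'addr' (size may
    // differ, the RelaxSize idea) and never create one (the NoCreate idea).
    std::optional<FakeImage> FindExisting(uint64_t addr) const {
        for (const FakeImage& img : images) {
            if (img.guest_address == addr) {
                return img;
            }
        }
        return std::nullopt;
    }
};

// Sketch of the simplified control flow: one lookup, one copy, or bail out.
bool SynchronizeBufferFromImage(FakeTextureCache& cache, uint64_t addr, uint64_t size) {
    const auto image = cache.FindExisting(addr);
    if (!image) {
        return false; // nothing to copy from; caller falls back to a CPU upload
    }
    // ... build one BufferImageCopy per mip/layer and record the copy of 'size' bytes ...
    return true;
}

int main() {
    FakeTextureCache cache;
    cache.images.push_back({0x1000, 256});
    return SynchronizeBufferFromImage(cache, 0x1000, 256) ? 0 : 1;
}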
@@ -133,9 +133,6 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
             }
         }
         const u32 size = vsharp.GetSize();
-        if (desc.is_written) {
-            texture_cache.InvalidateMemory(address, size);
-        }
         const u32 alignment =
             is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
         const auto [vk_buffer, offset] =
@@ -196,7 +193,7 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
                 buffer_barriers.emplace_back(*barrier);
             }
             if (desc.is_written) {
-                texture_cache.InvalidateMemory(address, size);
+                texture_cache.MarkWritten(address, size);
             }
         }
         set_writes.push_back({
@@ -431,9 +431,6 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
                     dst_access, vk::PipelineStageFlagBits2::eVertexShader)) {
                 buffer_barriers.emplace_back(*barrier);
             }
-            if (desc.is_written) {
-                texture_cache.InvalidateMemory(address, size);
-            }
         }
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
@@ -51,6 +51,20 @@ void TextureCache::InvalidateMemory(VAddr address, size_t size) {
     });
 }
 
+void TextureCache::MarkWritten(VAddr address, size_t max_size) {
+    static constexpr FindFlags find_flags = FindFlags::NoCreate | FindFlags::RelaxDim |
+                                            FindFlags::RelaxFmt | FindFlags::RelaxSize;
+    ImageInfo info{};
+    info.guest_address = address;
+    info.guest_size_bytes = max_size;
+    const ImageId image_id = FindImage(info, find_flags);
+    if (!image_id) {
+        return;
+    }
+    // Ensure image is copied when accessed again.
+    slot_images[image_id].flags |= ImageFlagBits::CpuModified;
+}
+
 void TextureCache::UnmapMemory(VAddr cpu_addr, size_t size) {
     std::scoped_lock lk{mutex};
 
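Note on the new MarkWritten path: for buffers written by shaders, the pipelines no longer evict overlapping images up front; TextureCache::MarkWritten looks the image up (NoCreate, relaxed matching) and sets ImageFlagBits::CpuModified so the image is re-synchronized on its next use. A small standalone sketch of that deferred-dirty idea, with hypothetical MiniTextureCache/ImageSlot/DirtyFlag types in place of the real ones:

#include <cstdint>
#include <unordered_map>

// Hypothetical stand-ins; the real code uses ImageFlagBits and slot_images.
enum class DirtyFlag : uint32_t { Clean = 0, CpuModified = 1 };

struct ImageSlot {
    uint64_t guest_address = 0;
    DirtyFlag flags = DirtyFlag::Clean;
};

struct MiniTextureCache {
    std::unordered_map<uint64_t, ImageSlot> slots; // keyed by guest address

    // Deferred-dirty marking: find the image at 'address' if it exists and
    // flag it, so it is re-synchronized the next time it is accessed.
    void MarkWritten(uint64_t address) {
        const auto it = slots.find(address);
        if (it == slots.end()) {
            return; // no image there, nothing to mark (the NoCreate behaviour)
        }
        it->second.flags = DirtyFlag::CpuModified;
    }

    // On the next access, a dirty image would be refreshed from guest memory.
    bool NeedsRefresh(uint64_t address) const {
        const auto it = slots.find(address);
        return it != slots.end() && it->second.flags == DirtyFlag::CpuModified;
    }
};

int main() {
    MiniTextureCache cache;
    cache.slots[0x2000] = ImageSlot{0x2000};
    cache.MarkWritten(0x2000);                 // a shader wrote the aliased buffer
    return cache.NeedsRefresh(0x2000) ? 0 : 1; // next access re-uploads the image
}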
@@ -199,10 +213,14 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
             !IsVulkanFormatCompatible(info.pixel_format, cache_image.info.pixel_format)) {
             continue;
         }
-        ASSERT(cache_image.info.type == info.type);
+        ASSERT(cache_image.info.type == info.type || True(flags & FindFlags::RelaxFmt));
         image_id = cache_id;
     }
 
+    if (True(flags & FindFlags::NoCreate) && !image_id) {
+        return {};
+    }
+
     // Try to resolve overlaps (if any)
     if (!image_id) {
         for (const auto& cache_id : image_ids) {
@@ -211,10 +229,6 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
         }
     }
 
-    if (True(flags & FindFlags::NoCreate) && !image_id) {
-        return {};
-    }
-
     // Create and register a new image
     if (!image_id) {
         image_id = slot_images.insert(instance, scheduler, info);
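Note on the FindImage changes: the FindFlags::NoCreate early-return now happens before overlap resolution, and the type assertion is relaxed when RelaxFmt is set. The flags are an ordinary bitmask enum queried through a True() helper; a self-contained illustration of that idiom follows (the operator overloads and helper here are illustrative, not copied from the project's headers):

#include <cstdint>

// Illustrative bitmask-enum idiom, mirroring how find_flags is composed and tested.
enum class FindFlags : uint32_t {
    None = 0,
    NoCreate = 1 << 0,  // fail the lookup instead of creating a new image
    RelaxDim = 1 << 1,  // ignore dimension mismatches
    RelaxFmt = 1 << 2,  // ignore format mismatches
    RelaxSize = 1 << 3, // ignore size mismatches
};

constexpr FindFlags operator|(FindFlags a, FindFlags b) {
    return static_cast<FindFlags>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}
constexpr FindFlags operator&(FindFlags a, FindFlags b) {
    return static_cast<FindFlags>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}
// True(x) reads as "any bit set"; used in checks like True(flags & FindFlags::NoCreate).
constexpr bool True(FindFlags f) {
    return static_cast<uint32_t>(f) != 0;
}

int main() {
    constexpr FindFlags find_flags =
        FindFlags::NoCreate | FindFlags::RelaxDim | FindFlags::RelaxFmt | FindFlags::RelaxSize;
    // With NoCreate set, a failed lookup returns an empty id instead of allocating.
    return True(find_flags & FindFlags::NoCreate) ? 0 : 1;
}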
@@ -50,6 +50,9 @@ public:
     /// Invalidates any image in the logical page range.
     void InvalidateMemory(VAddr address, size_t size);
 
+    /// Marks an image as dirty if it exists at the provided address.
+    void MarkWritten(VAddr address, size_t max_size);
+
     /// Evicts any images that overlap the unmapped range.
     void UnmapMemory(VAddr cpu_addr, size_t size);
 