mirror of
https://github.com/shadps4-emu/shadPS4.git
synced 2025-08-04 16:32:39 +00:00
memory: Clamp large buffers to mapped area
Sometimes huge buffers can be bound that start on some valid mapping but aren't fully contained by it. It is not reasonable to expect the game to need all of that memory, so clamp the size to avoid the GPU tracking assert.
This commit is contained in:
parent
2a3cf25cce
commit
5c46989bba
@ -56,6 +56,22 @@ void MemoryManager::SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1
|
|||||||
total_flexible_size, total_direct_size);
|
total_flexible_size, total_direct_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
    // Only unusually large requests are worth the cost of a VMA lookup, so
    // small buffers are returned unchanged without touching the map.
    static constexpr u64 MinSizeToClamp = 1_GB;
    if (size < MinSizeToClamp) {
        return size;
    }

    // Locate the mapping that contains the start address of the range.
    const auto it = FindVMA(virtual_addr);
    ASSERT_MSG(it != vma_map.end(), "Attempted to access invalid GPU address {:#x}", virtual_addr);

    // Limit the range to the number of bytes remaining in that mapping,
    // measured from virtual_addr up to the mapping's end.
    const VAddr mapping_end = it->second.base + it->second.size;
    const u64 clamped_size = std::min<u64>(size, mapping_end - virtual_addr);
    if (clamped_size != size) {
        LOG_WARNING(Kernel_Vmm, "Clamped requested buffer range addr={:#x}, size={:#x} to {:#x}",
                    virtual_addr, size, clamped_size);
    }
    return clamped_size;
}
|
||||||
|
|
||||||
bool MemoryManager::TryWriteBacking(void* address, const void* data, u32 num_bytes) {
|
bool MemoryManager::TryWriteBacking(void* address, const void* data, u32 num_bytes) {
|
||||||
const VAddr virtual_addr = std::bit_cast<VAddr>(address);
|
const VAddr virtual_addr = std::bit_cast<VAddr>(address);
|
||||||
const auto& vma = FindVMA(virtual_addr)->second;
|
const auto& vma = FindVMA(virtual_addr)->second;
|
||||||
|
@ -164,6 +164,8 @@ public:
|
|||||||
return virtual_addr >= vma_map.begin()->first && virtual_addr < end_addr;
|
return virtual_addr >= vma_map.begin()->first && virtual_addr < end_addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u64 ClampRangeSize(VAddr virtual_addr, u64 size);
|
||||||
|
|
||||||
bool TryWriteBacking(void* address, const void* data, u32 num_bytes);
|
bool TryWriteBacking(void* address, const void* data, u32 num_bytes);
|
||||||
|
|
||||||
void SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1, bool use_extended_mem2);
|
void SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1, bool use_extended_mem2);
|
||||||
|
@ -503,16 +503,17 @@ void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Binding
|
|||||||
for (const auto& desc : stage.buffers) {
|
for (const auto& desc : stage.buffers) {
|
||||||
const auto vsharp = desc.GetSharp(stage);
|
const auto vsharp = desc.GetSharp(stage);
|
||||||
if (!desc.IsSpecial() && vsharp.base_address != 0 && vsharp.GetSize() > 0) {
|
if (!desc.IsSpecial() && vsharp.base_address != 0 && vsharp.GetSize() > 0) {
|
||||||
const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize());
|
const u64 size = memory->ClampRangeSize(vsharp.base_address, vsharp.GetSize());
|
||||||
buffer_bindings.emplace_back(buffer_id, vsharp);
|
const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, size);
|
||||||
|
buffer_bindings.emplace_back(buffer_id, vsharp, size);
|
||||||
} else {
|
} else {
|
||||||
buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp);
|
buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Second pass to re-bind buffers that were updated after binding
|
// Second pass to re-bind buffers that were updated after binding
|
||||||
for (u32 i = 0; i < buffer_bindings.size(); i++) {
|
for (u32 i = 0; i < buffer_bindings.size(); i++) {
|
||||||
const auto& [buffer_id, vsharp] = buffer_bindings[i];
|
const auto& [buffer_id, vsharp, size] = buffer_bindings[i];
|
||||||
const auto& desc = stage.buffers[i];
|
const auto& desc = stage.buffers[i];
|
||||||
const bool is_storage = desc.IsStorage(vsharp, pipeline_cache.GetProfile());
|
const bool is_storage = desc.IsStorage(vsharp, pipeline_cache.GetProfile());
|
||||||
// Buffer is not from the cache, either a special buffer or unbound.
|
// Buffer is not from the cache, either a special buffer or unbound.
|
||||||
@ -541,17 +542,15 @@ void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Binding
|
|||||||
buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE);
|
buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
const auto [vk_buffer, offset] =
|
const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
|
||||||
buffer_cache.ObtainBuffer(vsharp.base_address, vsharp.GetSize(), desc.is_written,
|
vsharp.base_address, size, desc.is_written, desc.is_formatted, buffer_id);
|
||||||
desc.is_formatted, buffer_id);
|
|
||||||
const u32 alignment =
|
const u32 alignment =
|
||||||
is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
|
is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
|
||||||
const u32 offset_aligned = Common::AlignDown(offset, alignment);
|
const u32 offset_aligned = Common::AlignDown(offset, alignment);
|
||||||
const u32 adjust = offset - offset_aligned;
|
const u32 adjust = offset - offset_aligned;
|
||||||
ASSERT(adjust % 4 == 0);
|
ASSERT(adjust % 4 == 0);
|
||||||
push_data.AddOffset(binding.buffer, adjust);
|
push_data.AddOffset(binding.buffer, adjust);
|
||||||
buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned,
|
buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, size + adjust);
|
||||||
vsharp.GetSize() + adjust);
|
|
||||||
if (auto barrier =
|
if (auto barrier =
|
||||||
vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
|
vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
|
||||||
: vk::AccessFlagBits2::eShaderRead,
|
: vk::AccessFlagBits2::eShaderRead,
|
||||||
@ -559,7 +558,7 @@ void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Binding
|
|||||||
buffer_barriers.emplace_back(*barrier);
|
buffer_barriers.emplace_back(*barrier);
|
||||||
}
|
}
|
||||||
if (desc.is_written && desc.is_formatted) {
|
if (desc.is_written && desc.is_formatted) {
|
||||||
texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, vsharp.GetSize());
|
texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -118,7 +118,7 @@ private:
|
|||||||
Pipeline::DescriptorWrites set_writes;
|
Pipeline::DescriptorWrites set_writes;
|
||||||
Pipeline::BufferBarriers buffer_barriers;
|
Pipeline::BufferBarriers buffer_barriers;
|
||||||
|
|
||||||
using BufferBindingInfo = std::pair<VideoCore::BufferId, AmdGpu::Buffer>;
|
using BufferBindingInfo = std::tuple<VideoCore::BufferId, AmdGpu::Buffer, u64>;
|
||||||
boost::container::static_vector<BufferBindingInfo, 32> buffer_bindings;
|
boost::container::static_vector<BufferBindingInfo, 32> buffer_bindings;
|
||||||
using ImageBindingInfo = std::pair<VideoCore::ImageId, VideoCore::TextureCache::TextureDesc>;
|
using ImageBindingInfo = std::pair<VideoCore::ImageId, VideoCore::TextureCache::TextureDesc>;
|
||||||
boost::container::static_vector<ImageBindingInfo, 64> image_bindings;
|
boost::container::static_vector<ImageBindingInfo, 64> image_bindings;
|
||||||
|
Loading…
Reference in New Issue
Block a user