Removed host buffers (may do it in another PR)

Also some random barrier fixes
Lander Gallastegi 2025-04-21 23:51:02 +02:00
parent e97c3e9653
commit 87b3771285
10 changed files with 57 additions and 255 deletions

View File

@@ -196,14 +196,10 @@ void EmitContext::DefineArithmeticTypes() {
}
if (info.dma_types != IR::Type::Void) {
constexpr u64 host_access_mask = 0x1UL;
constexpr u64 host_access_inv_mask = ~host_access_mask;
caching_pagebits_value =
Constant(U64, static_cast<u64>(VideoCore::BufferCache::CACHING_PAGEBITS));
caching_pagemask_value = Constant(U64, VideoCore::BufferCache::CACHING_PAGESIZE - 1);
host_access_mask_value = Constant(U64, host_access_mask);
host_access_inv_mask_value = Constant(U64, host_access_inv_mask);
// Used to calculate fault readback buffer position and mask
u32_three_value = ConstU32(3U);
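These constants drive the per-page BDA table lookup that DMA-enabled shaders perform. As a rough CPU-side sketch of the same math (the CACHING_PAGEBITS value and helper names below are illustrative assumptions, not part of the commit):

```cpp
#include <cstdint>

// Assumed value; the real constant lives in VideoCore::BufferCache.
constexpr std::uint64_t CACHING_PAGEBITS = 12;
constexpr std::uint64_t CACHING_PAGESIZE = 1ULL << CACHING_PAGEBITS;

// Slot in the BDA page table for a guest address (the caching_pagebits shift).
constexpr std::uint64_t PageIndex(std::uint64_t addr) {
    return addr >> CACHING_PAGEBITS;
}

// Byte offset within the page (the CACHING_PAGESIZE - 1 mask).
constexpr std::uint64_t PageOffset(std::uint64_t addr) {
    return addr & (CACHING_PAGESIZE - 1);
}
```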

View File

@@ -175,8 +175,6 @@ public:
template <typename Func>
Id EmitMemoryAccess(Id type, Id address, Func&& fallback) {
const Id host_access_label = OpLabel();
const Id after_host_access_label = OpLabel();
const Id fallback_label = OpLabel();
const Id available_label = OpLabel();
const Id merge_label = OpLabel();
@@ -189,15 +187,13 @@ public:
const Id bda_ptr = OpAccessChain(bda_pointer_type, bda_buffer_id, u32_zero_value, page32);
const Id bda = OpLoad(U64, bda_ptr);
// Check if it's a host memory access
const Id bda_and_host_access_mask = OpBitwiseAnd(U64, bda, host_access_mask_value);
const Id bda_host_access =
OpINotEqual(U1[1], bda_and_host_access_mask, host_access_mask_value);
OpSelectionMerge(after_host_access_label, spv::SelectionControlMask::MaskNone);
OpBranchConditional(bda_host_access, host_access_label, after_host_access_label);
// Check if the value is available
const Id bda_eq_zero = OpIEqual(U1[1], bda, u64_zero_value);
OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
OpBranchConditional(bda_eq_zero, fallback_label, available_label);
// Host access, set bit in fault readback buffer
AddLabel(host_access_label);
// Fallback (and mark the fault buffer)
AddLabel(fallback_label);
const auto& fault_buffer = buffers[fault_readback_index];
const auto [fault_buffer_id, fault_pointer_type] = fault_buffer[PointerType::U8];
const Id page_div8 = OpShiftRightLogical(U32[1], page32, u32_three_value);
@@ -209,24 +205,13 @@ public:
const Id page_mask8 = OpUConvert(U8, page_mask);
const Id fault_value_masked = OpBitwiseOr(U8, fault_value, page_mask8);
OpStore(fault_ptr, fault_value_masked);
OpBranch(after_host_access_label);
// Check if the value is available
AddLabel(after_host_access_label);
const Id bda_eq_zero = OpIEqual(U1[1], bda, u64_zero_value);
OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
OpBranchConditional(bda_eq_zero, fallback_label, available_label);
// Fallback
AddLabel(fallback_label);
const Id fallback_result = fallback();
OpBranch(merge_label);
// Get value from memory
AddLabel(available_label);
const Id untagged_bda = OpBitwiseAnd(U64, bda, host_access_inv_mask_value);
const Id offset_in_bda = OpBitwiseAnd(U64, address, caching_pagemask_value);
const Id addr = OpIAdd(U64, untagged_bda, offset_in_bda);
const Id addr = OpIAdd(U64, bda, offset_in_bda);
const PointerType pointer_type = PointerTypeFromType(type);
const Id pointer_type_id = physical_pointer_types[pointer_type];
const Id addr_ptr = OpConvertUToPtr(pointer_type_id, addr);
@@ -279,8 +264,6 @@ public:
Id caching_pagebits_value{};
Id caching_pagemask_value{};
Id host_access_mask_value{};
Id host_access_inv_mask_value{};
Id shared_u8{};
Id shared_u16{};
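The fault readback buffer packs one bit per caching page, which is why the emitter shifts the page index by u32_three_value (divide by eight) and ORs a one-bit mask into the byte. A CPU-side restatement of that sequence (the helper name is invented for illustration):

```cpp
#include <cstdint>

// Mirrors the SPIR-V above: byte = page / 8, bit = page % 8, then OR it in.
void MarkFaultPage(std::uint8_t* fault_buffer, std::uint32_t page) {
    const std::uint32_t byte_index = page >> 3;
    const std::uint8_t bit = static_cast<std::uint8_t>(1u << (page & 7u));
    fault_buffer[byte_index] |= bit;
}
```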

View File

@@ -124,110 +124,6 @@ Buffer::Buffer(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
is_coherent = property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
}
ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_,
Vulkan::Scheduler& scheduler_, void* cpu_addr_,
u64 size_bytes_, vk::BufferUsageFlags flags)
: cpu_addr{cpu_addr_}, size_bytes{size_bytes_}, instance{&instance_}, scheduler{&scheduler_} {
ASSERT_MSG(size_bytes > 0, "Size must be greater than 0");
ASSERT_MSG(cpu_addr != 0, "CPU address must not be null");
const vk::DeviceSize alignment = instance->GetExternalHostMemoryHostAlignment();
ASSERT_MSG(reinterpret_cast<u64>(cpu_addr) % alignment == 0,
"CPU address {:#x} is not aligned to {:#x}", cpu_addr, alignment);
ASSERT_MSG(size_bytes % alignment == 0, "Size {:#x} is not aligned to {:#x}", size_bytes,
alignment);
// Test log, should be removed
LOG_WARNING(Render_Vulkan, "Creating imported host buffer at {} size {:#x}", cpu_addr,
size_bytes);
const auto& mem_props = instance->GetMemoryProperties();
auto ptr_props_result = instance->GetDevice().getMemoryHostPointerPropertiesEXT(
vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT, cpu_addr);
ASSERT_MSG(ptr_props_result.result == vk::Result::eSuccess,
"Failed getting host pointer properties with error {}",
vk::to_string(ptr_props_result.result));
auto ptr_props = ptr_props_result.value;
u32 memory_type_index = UINT32_MAX;
for (u32 i = 0; i < mem_props.memoryTypeCount; ++i) {
if ((ptr_props.memoryTypeBits & (1 << i)) != 0) {
if (mem_props.memoryTypes[i].propertyFlags &
(vk::MemoryPropertyFlagBits::eHostVisible |
vk::MemoryPropertyFlagBits::eHostCoherent)) {
memory_type_index = i;
// We prefer cache coherent memory types.
if (mem_props.memoryTypes[i].propertyFlags &
vk::MemoryPropertyFlagBits::eHostCached) {
break;
}
}
}
}
ASSERT_MSG(memory_type_index != UINT32_MAX,
"Failed to find a host visible memory type for the imported host buffer");
const bool with_bda = bool(flags & vk::BufferUsageFlagBits::eShaderDeviceAddress);
vk::ExternalMemoryBufferCreateInfo external_info{
.handleTypes = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
};
vk::BufferCreateInfo buffer_ci{
.pNext = &external_info,
.size = size_bytes,
.usage = flags,
};
vk::ImportMemoryHostPointerInfoEXT import_info{
.handleType = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
.pHostPointer = reinterpret_cast<void*>(cpu_addr),
};
vk::MemoryAllocateFlagsInfo memory_flags_info{
.pNext = &import_info,
.flags = with_bda ? vk::MemoryAllocateFlagBits::eDeviceAddress : vk::MemoryAllocateFlags{},
};
vk::MemoryAllocateInfo alloc_ci{
.pNext = &memory_flags_info,
.allocationSize = size_bytes,
.memoryTypeIndex = memory_type_index,
};
auto buffer_result = instance->GetDevice().createBuffer(buffer_ci);
ASSERT_MSG(buffer_result.result == vk::Result::eSuccess,
"Failed creating imported host buffer with error {}",
vk::to_string(buffer_result.result));
buffer = buffer_result.value;
auto device_memory_result = instance->GetDevice().allocateMemory(alloc_ci);
if (device_memory_result.result != vk::Result::eSuccess) {
// May fail to import the host memory if it is backed by a file. (AMD on Linux)
LOG_WARNING(Render_Vulkan, "Failed to import host memory at {} size {:#x}, Reason: {}",
cpu_addr, size_bytes, vk::to_string(device_memory_result.result));
instance->GetDevice().destroyBuffer(buffer);
buffer = VK_NULL_HANDLE;
has_failed = true;
return;
}
device_memory = device_memory_result.value;
auto result = instance->GetDevice().bindBufferMemory(buffer, device_memory, 0);
ASSERT_MSG(result == vk::Result::eSuccess, "Failed binding imported host buffer with error {}",
vk::to_string(result));
if (with_bda) {
vk::BufferDeviceAddressInfo bda_info{
.buffer = buffer,
};
bda_addr = instance->GetDevice().getBufferAddress(bda_info);
ASSERT_MSG(bda_addr != 0, "Failed getting buffer device address");
}
}
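The memory-type loop in the removed constructor accepts any type permitted for the imported pointer that is host-visible or host-coherent, but keeps scanning in the hope of finding a host-cached one. The same selection in isolation (helper name and u32 alias assumed):

```cpp
#include <cstdint>
#include <vulkan/vulkan.hpp>

using u32 = std::uint32_t;

// Pick a memory type allowed by allowed_bits that is host-visible or
// host-coherent, preferring host-cached types; UINT32_MAX means no match.
u32 PickHostMemoryType(const vk::PhysicalDeviceMemoryProperties& props, u32 allowed_bits) {
    u32 chosen = UINT32_MAX;
    for (u32 i = 0; i < props.memoryTypeCount; ++i) {
        if (!(allowed_bits & (1u << i))) {
            continue;
        }
        const auto flags = props.memoryTypes[i].propertyFlags;
        if (flags & (vk::MemoryPropertyFlagBits::eHostVisible |
                     vk::MemoryPropertyFlagBits::eHostCoherent)) {
            chosen = i;
            if (flags & vk::MemoryPropertyFlagBits::eHostCached) {
                break; // prefer cached types: best case, stop here
            }
        }
    }
    return chosen;
}
```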
ImportedHostBuffer::~ImportedHostBuffer() {
if (!buffer) {
return;
}
const auto device = instance->GetDevice();
device.destroyBuffer(buffer);
device.freeMemory(device_memory);
}
constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;

View File

@@ -162,71 +162,6 @@ public:
vk::PipelineStageFlagBits2 stage{vk::PipelineStageFlagBits2::eAllCommands};
};
class ImportedHostBuffer {
public:
ImportedHostBuffer(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler,
void* cpu_addr_, u64 size_bytes_, vk::BufferUsageFlags flags);
~ImportedHostBuffer();
ImportedHostBuffer& operator=(const ImportedHostBuffer&) = delete;
ImportedHostBuffer(const ImportedHostBuffer&) = delete;
ImportedHostBuffer(ImportedHostBuffer&& other)
: size_bytes{std::exchange(other.size_bytes, 0)},
cpu_addr{std::exchange(other.cpu_addr, nullptr)},
bda_addr{std::exchange(other.bda_addr, 0)}, instance{other.instance},
scheduler{other.scheduler}, buffer{std::exchange(other.buffer, VK_NULL_HANDLE)},
device_memory{std::exchange(other.device_memory, VK_NULL_HANDLE)},
has_failed{std::exchange(other.has_failed, false)} {}
ImportedHostBuffer& operator=(ImportedHostBuffer&& other) {
size_bytes = std::exchange(other.size_bytes, 0);
cpu_addr = std::exchange(other.cpu_addr, nullptr);
bda_addr = std::exchange(other.bda_addr, 0);
instance = other.instance;
scheduler = other.scheduler;
buffer = std::exchange(other.buffer, VK_NULL_HANDLE);
device_memory = std::exchange(other.device_memory, VK_NULL_HANDLE);
has_failed = std::exchange(other.has_failed, false);
return *this;
}
/// Returns the base CPU address of the buffer
void* CpuAddr() const noexcept {
return cpu_addr;
}
// Returns the handle to the Vulkan buffer
vk::Buffer Handle() const noexcept {
return buffer;
}
// Returns the size of the buffer in bytes
size_t SizeBytes() const noexcept {
return size_bytes;
}
// Returns true if the buffer failed to be created
bool HasFailed() const noexcept {
return has_failed;
}
// Returns the Buffer Device Address of the buffer
vk::DeviceAddress BufferDeviceAddress() const noexcept {
ASSERT_MSG(bda_addr != 0, "Can't get BDA from a non BDA buffer");
return bda_addr;
}
private:
size_t size_bytes = 0;
void* cpu_addr = 0;
vk::DeviceAddress bda_addr = 0;
const Vulkan::Instance* instance;
Vulkan::Scheduler* scheduler;
vk::Buffer buffer;
vk::DeviceMemory device_memory;
bool has_failed = false;
};
class StreamBuffer : public Buffer {
public:
explicit StreamBuffer(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler,

View File

@@ -32,6 +32,10 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
FAULT_READBACK_SIZE),
memory_tracker{&tracker} {
Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");
Vulkan::SetObjectName(instance.GetDevice(), bda_pagetable_buffer.Handle(),
"BDA Page Table Buffer");
Vulkan::SetObjectName(instance.GetDevice(), fault_readback_buffer.Handle(),
"Fault Readback Buffer");
// Ensure the first slot is used for the null buffer
const auto null_id =
@@ -326,64 +330,42 @@ BufferId BufferCache::FindBuffer(VAddr device_addr, u32 size) {
return CreateBuffer(device_addr, size);
}
void BufferCache::QueueMemoryImport(VAddr device_addr, u64 size) {
void BufferCache::QueueMemoryCoverage(VAddr device_addr, u64 size) {
std::scoped_lock lk{mutex};
const VAddr start = device_addr;
const VAddr end = device_addr + size;
auto queue_range = decltype(queued_imports)::interval_type::right_open(start, end);
queued_imports += queue_range;
auto queue_range = decltype(queued_coverages)::interval_type::right_open(start, end);
queued_coverages += queue_range;
}
void BufferCache::ImportQueuedRegions() {
void BufferCache::CoverQueuedRegions() {
std::scoped_lock lk{mutex};
if (queued_imports.empty()) {
if (queued_coverages.empty()) {
return;
}
for (const auto& range : queued_imports) {
ImportMemory(range.lower(), range.upper());
for (const auto& range : queued_coverages) {
CoverMemory(range.lower(), range.upper());
}
queued_imports.clear();
queued_coverages.clear();
}
void BufferCache::ImportMemory(u64 start, u64 end) {
void BufferCache::CoverMemory(u64 start, u64 end) {
const u64 page_start = start >> CACHING_PAGEBITS;
const u64 page_end = Common::DivCeil(end, CACHING_PAGESIZE);
auto interval = decltype(imported_regions)::interval_type::right_open(page_start, page_end);
auto interval = decltype(covered_regions)::interval_type::right_open(page_start, page_end);
auto interval_set = boost::icl::interval_set<u64>{interval};
auto uncovered_ranges = interval_set - imported_regions;
auto uncovered_ranges = interval_set - covered_regions;
if (uncovered_ranges.empty()) {
return;
}
// We fill any holes within the given range
boost::container::small_vector<vk::DeviceAddress, 128> bda_addrs;
for (const auto& range : uncovered_ranges) {
// import host memory
const u64 range_start = range.lower();
const u64 range_end = range.upper();
void* cpu_addr = reinterpret_cast<void*>(range_start << CACHING_PAGEBITS);
const u64 range_size = (range_end - range_start) << CACHING_PAGEBITS;
ImportedHostBuffer buffer(instance, scheduler, cpu_addr, range_size,
vk::BufferUsageFlagBits::eShaderDeviceAddress |
vk::BufferUsageFlagBits::eStorageBuffer);
if (buffer.HasFailed()) {
continue;
}
// Update BDA page table
const u64 bda_addr = buffer.BufferDeviceAddress();
const u64 range_pages = range_end - range_start;
bda_addrs.clear();
bda_addrs.reserve(range_pages);
for (u64 i = 0; i < range_pages; ++i) {
// Leave the page untagged (not GPU-local) so the shader can tell
// and notify us when it accesses the page, letting us create a
// GPU-local buffer for it.
bda_addrs.push_back(bda_addr + (i << CACHING_PAGEBITS));
}
WriteDataBuffer(bda_pagetable_buffer, range_start * sizeof(vk::DeviceAddress),
bda_addrs.data(), bda_addrs.size() * sizeof(vk::DeviceAddress));
imported_buffers.emplace_back(std::move(buffer));
// Mark the pages as covered
imported_regions += range;
// TODO: implement import of the mapped region here
covered_regions += range;
}
}
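CoverMemory leans on boost::icl interval subtraction to find the holes that still need covering; everything already covered drops out of the set. The core step in isolation (type alias and helper name assumed):

```cpp
#include <cstdint>
#include <boost/icl/interval_set.hpp>

using u64 = std::uint64_t;
using IntervalSet = boost::icl::interval_set<u64>;

// Subtracting the covered set from the wanted page range yields exactly the
// sub-ranges that still need to be covered.
IntervalSet UncoveredPages(const IntervalSet& covered, u64 page_start, u64 page_end) {
    const auto wanted = IntervalSet::interval_type::right_open(page_start, page_end);
    return IntervalSet{wanted} - covered;
}
```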
@@ -533,15 +515,14 @@ BufferId BufferCache::CreateBuffer(VAddr device_addr, u32 wanted_size) {
const u64 size_pages = size >> CACHING_PAGEBITS;
bda_addrs.reserve(size_pages);
for (u64 i = 0; i < size_pages; ++i) {
// Here, we mark the page as backed by a GPU local buffer
bda_addrs.push_back((new_buffer.BufferDeviceAddress() + (i << CACHING_PAGEBITS)) | 0x1);
bda_addrs.push_back(new_buffer.BufferDeviceAddress() + (i << CACHING_PAGEBITS));
}
WriteDataBuffer(bda_pagetable_buffer, start_page * sizeof(vk::DeviceAddress), bda_addrs.data(),
bda_addrs.size() * sizeof(vk::DeviceAddress));
{
// Mark the pages as covered
std::scoped_lock lk{mutex};
imported_regions += boost::icl::interval_set<u64>::interval_type::right_open(
covered_regions += boost::icl::interval_set<u64>::interval_type::right_open(
start_page, start_page + size_pages);
}
const size_t size_bytes = new_buffer.SizeBytes();
@@ -563,9 +544,23 @@ void BufferCache::CreateFaultBuffers() {
.dstOffset = offset,
.size = FAULT_READBACK_SIZE,
};
vk::BufferMemoryBarrier2 barrier{
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eShaderWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.buffer = fault_readback_buffer.Handle(),
.offset = 0,
.size = FAULT_READBACK_SIZE,
};
staging_buffer.Commit();
scheduler.EndRendering();
const auto cmdbuf = scheduler.CommandBuffer();
cmdbuf.pipelineBarrier2(vk::DependencyInfo{
.dependencyFlags = vk::DependencyFlagBits::eByRegion,
.bufferMemoryBarrierCount = 1,
.pBufferMemoryBarriers = &barrier,
});
cmdbuf.copyBuffer(fault_readback_buffer.buffer, staging_buffer.Handle(), copy);
scheduler.Finish();
std::memcpy(fault_readback_cpu.data(), mapped, FAULT_READBACK_SIZE);
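Once the bitmap has been copied back into fault_readback_cpu, the cache can walk it to recover the faulted page indices. A hypothetical decoding helper (not part of this commit) for the same one-bit-per-page layout:

```cpp
#include <bit>
#include <cstddef>
#include <cstdint>
#include <span>

// Invoke func(page_index) for every set bit in the downloaded fault bitmap.
template <typename Func>
void ForEachFaultedPage(std::span<const std::uint8_t> bitmap, Func&& func) {
    for (std::size_t byte = 0; byte < bitmap.size(); ++byte) {
        std::uint8_t bits = bitmap[byte];
        while (bits != 0) {
            const int bit = std::countr_zero(bits);
            func(byte * 8 + static_cast<std::size_t>(bit));
            bits &= static_cast<std::uint8_t>(bits - 1); // clear lowest set bit
        }
    }
}
```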
@@ -834,8 +829,14 @@ void BufferCache::SynchronizeRange(VAddr device_addr, u32 size) {
if (device_addr == 0) {
return;
}
VAddr device_addr_end = device_addr + size;
ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
SynchronizeBuffer(buffer, buffer.CpuAddr(), buffer.SizeBytes(), false);
VAddr buffer_start = buffer.CpuAddr();
VAddr buffer_end = buffer_start + buffer.SizeBytes();
VAddr start = std::max(buffer_start, device_addr);
VAddr end = std::min(buffer_end, device_addr_end);
const u32 overlap_size = static_cast<u32>(end - start);
SynchronizeBuffer(buffer, start, overlap_size, false);
});
}

View File

@@ -130,10 +130,10 @@ public:
[[nodiscard]] BufferId FindBuffer(VAddr device_addr, u32 size);
/// Queues a region to be covered for DMA.
void QueueMemoryImport(VAddr device_addr, u64 size);
void QueueMemoryCoverage(VAddr device_addr, u64 size);
/// Covers all queued regions.
void ImportQueuedRegions();
void CoverQueuedRegions();
/// Creates buffers for "faulted" shader accesses to host memory.
void CreateFaultBuffers();
@@ -187,7 +187,7 @@ private:
void DeleteBuffer(BufferId buffer_id);
void ImportMemory(u64 start, u64 end);
void CoverMemory(u64 start, u64 end);
const Vulkan::Instance& instance;
Vulkan::Scheduler& scheduler;
@@ -200,11 +200,10 @@ private:
Buffer gds_buffer;
Buffer bda_pagetable_buffer;
Buffer fault_readback_buffer;
// We need to define here to avoid stack overflow
// Declared as a member here to avoid stack overflow
std::array<u8, FAULT_READBACK_SIZE> fault_readback_cpu;
boost::icl::interval_set<VAddr> queued_imports;
boost::icl::interval_set<u64> imported_regions;
std::vector<ImportedHostBuffer> imported_buffers;
boost::icl::interval_set<VAddr> queued_coverages;
boost::icl::interval_set<u64> covered_regions;
std::shared_mutex mutex;
Common::SlotVector<Buffer> slot_buffers;
RangeSet gpu_modified_ranges;

View File

@@ -121,6 +121,7 @@ void SetOutputDir(const std::filesystem::path& path, const std::string& prefix)
if (!rdoc_api) {
return;
}
LOG_WARNING(Common, "RenderDoc capture path: {}", (path / prefix).string());
rdoc_api->SetCaptureFilePathTemplate(fmt::UTF((path / prefix).u8string()).data.data());
}

View File

@@ -217,13 +217,10 @@ bool Instance::CreateDevice() {
const vk::StructureChain properties_chain = physical_device.getProperties2<
vk::PhysicalDeviceProperties2, vk::PhysicalDeviceVulkan11Properties,
vk::PhysicalDeviceVulkan12Properties, vk::PhysicalDevicePushDescriptorPropertiesKHR,
vk::PhysicalDeviceExternalMemoryHostPropertiesEXT>();
vk::PhysicalDeviceVulkan12Properties, vk::PhysicalDevicePushDescriptorPropertiesKHR>();
vk11_props = properties_chain.get<vk::PhysicalDeviceVulkan11Properties>();
vk12_props = properties_chain.get<vk::PhysicalDeviceVulkan12Properties>();
push_descriptor_props = properties_chain.get<vk::PhysicalDevicePushDescriptorPropertiesKHR>();
external_memory_host_props =
properties_chain.get<vk::PhysicalDeviceExternalMemoryHostPropertiesEXT>();
LOG_INFO(Render_Vulkan, "Physical device subgroup size {}", vk11_props.subgroupSize);
if (available_extensions.empty()) {

View File

@@ -313,11 +313,6 @@ public:
properties.limits.framebufferStencilSampleCounts;
}
/// Returns the minimum alignment for imported host memory.
vk::DeviceSize GetExternalHostMemoryHostAlignment() const {
return external_memory_host_props.minImportedHostPointerAlignment;
}
/// Returns whether disabling primitive restart is supported.
bool IsPrimitiveRestartDisableSupported() const {
return driver_id != vk::DriverId::eMoltenvk;
@@ -349,7 +344,6 @@ private:
vk::PhysicalDeviceVulkan11Properties vk11_props;
vk::PhysicalDeviceVulkan12Properties vk12_props;
vk::PhysicalDevicePushDescriptorPropertiesKHR push_descriptor_props;
vk::PhysicalDeviceExternalMemoryHostPropertiesEXT external_memory_host_props;
vk::PhysicalDeviceFeatures features;
vk::PhysicalDevicePortabilitySubsetFeaturesKHR portability_features;
vk::PhysicalDeviceExtendedDynamicState3FeaturesEXT dynamic_state_3_features;

View File

@@ -468,7 +468,7 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) {
if (dma_enabled) {
// First, cover any queued regions, then sync every mapped
// region that is cached on GPU memory.
buffer_cache.ImportQueuedRegions();
buffer_cache.CoverQueuedRegions();
{
std::shared_lock lock(mapped_ranges_mutex);
for (const auto& range : mapped_ranges) {
@@ -968,7 +968,7 @@ void Rasterizer::MapMemory(VAddr addr, u64 size) {
mapped_ranges += decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
}
page_manager.OnGpuMap(addr, size);
buffer_cache.QueueMemoryImport(addr, size);
buffer_cache.QueueMemoryCoverage(addr, size);
}
void Rasterizer::UnmapMemory(VAddr addr, u64 size) {