From 683849d33bc74e6eda777a861aa5bd2073165fc4 Mon Sep 17 00:00:00 2001 From: psucien Date: Fri, 15 Nov 2024 19:48:12 +0100 Subject: [PATCH] resources binding moved into vk_rasterizer --- src/core/linker.cpp | 2 +- src/shader_recompiler/runtime_info.h | 2 +- .../renderer_vulkan/vk_compute_pipeline.cpp | 92 +---- .../renderer_vulkan/vk_compute_pipeline.h | 5 - .../renderer_vulkan/vk_graphics_pipeline.cpp | 67 ---- .../renderer_vulkan/vk_graphics_pipeline.h | 12 +- .../renderer_vulkan/vk_pipeline_cache.h | 2 - .../renderer_vulkan/vk_pipeline_common.cpp | 279 ++----------- .../renderer_vulkan/vk_pipeline_common.h | 44 ++- .../renderer_vulkan/vk_rasterizer.cpp | 368 ++++++++++++++++-- .../renderer_vulkan/vk_rasterizer.h | 29 ++ src/video_core/texture_cache/texture_cache.h | 3 +- 12 files changed, 427 insertions(+), 478 deletions(-) diff --git a/src/core/linker.cpp b/src/core/linker.cpp index 987845393..9592bee0f 100644 --- a/src/core/linker.cpp +++ b/src/core/linker.cpp @@ -328,7 +328,7 @@ void* Linker::AllocateTlsForThread(bool is_primary) { void* addr_out{reinterpret_cast(KernelAllocBase)}; if (is_primary) { const size_t tls_aligned = Common::AlignUp(total_tls_size, 16_KB); - const int ret = ::Libraries::Kernel::sceKernelMapNamedFlexibleMemory( + const int ret = Libraries::Kernel::sceKernelMapNamedFlexibleMemory( &addr_out, tls_aligned, 3, 0, "SceKernelPrimaryTcbTls"); ASSERT_MSG(ret == 0, "Unable to allocate TLS+TCB for the primary thread"); } else { diff --git a/src/shader_recompiler/runtime_info.h b/src/shader_recompiler/runtime_info.h index 03936f3a8..4662def93 100644 --- a/src/shader_recompiler/runtime_info.h +++ b/src/shader_recompiler/runtime_info.h @@ -20,7 +20,7 @@ enum class Stage : u32 { Local, Compute, }; -constexpr u32 MaxStageTypes = 6; +constexpr u32 MaxStageTypes = 7; [[nodiscard]] constexpr Stage StageFromIndex(size_t index) noexcept { return static_cast(index); diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp index 4ab290780..09d4e4195 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp @@ -15,8 +15,10 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler DescriptorHeap& desc_heap_, vk::PipelineCache pipeline_cache, u64 compute_key_, const Shader::Info& info_, vk::ShaderModule module) - : Pipeline{instance_, scheduler_, desc_heap_, pipeline_cache}, compute_key{compute_key_}, - info{&info_} { + : Pipeline{instance_, scheduler_, desc_heap_, pipeline_cache, true}, compute_key{compute_key_} { + auto& info = stages[int(Shader::Stage::Compute)]; + info = &info_; + const vk::PipelineShaderStageCreateInfo shader_ci = { .stage = vk::ShaderStageFlagBits::eCompute, .module = module, @@ -118,90 +120,4 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler ComputePipeline::~ComputePipeline() = default; -bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache, - VideoCore::TextureCache& texture_cache) const { - // Bind resource buffers and textures. - boost::container::small_vector set_writes; - BufferBarriers buffer_barriers; - Shader::PushData push_data{}; - Shader::Backend::Bindings binding{}; - - info->PushUd(binding, push_data); - - buffer_infos.clear(); - buffer_views.clear(); - image_infos.clear(); - - // Most of the time when a metadata is updated with a shader it gets cleared. 
It means - // we can skip the whole dispatch and update the tracked state instead. Also, it is not - // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we - // will need its full emulation anyways. For cases of metadata read a warning will be logged. - const auto IsMetaUpdate = [&](const auto& desc) { - const VAddr address = desc.GetSharp(*info).base_address; - if (desc.is_written) { - if (texture_cache.TouchMeta(address, true)) { - LOG_TRACE(Render_Vulkan, "Metadata update skipped"); - return true; - } - } else { - if (texture_cache.IsMeta(address)) { - LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)"); - } - } - return false; - }; - - for (const auto& desc : info->buffers) { - if (desc.is_gds_buffer) { - continue; - } - if (IsMetaUpdate(desc)) { - return false; - } - } - for (const auto& desc : info->texture_buffers) { - if (IsMetaUpdate(desc)) { - return false; - } - } - - BindBuffers(buffer_cache, texture_cache, *info, binding, push_data, set_writes, - buffer_barriers); - - BindTextures(texture_cache, *info, binding, set_writes); - - if (set_writes.empty()) { - return false; - } - - const auto cmdbuf = scheduler.CommandBuffer(); - if (!buffer_barriers.empty()) { - const auto dependencies = vk::DependencyInfo{ - .dependencyFlags = vk::DependencyFlagBits::eByRegion, - .bufferMemoryBarrierCount = u32(buffer_barriers.size()), - .pBufferMemoryBarriers = buffer_barriers.data(), - }; - scheduler.EndRendering(); - cmdbuf.pipelineBarrier2(dependencies); - } - - cmdbuf.pushConstants(*pipeline_layout, vk::ShaderStageFlagBits::eCompute, 0u, sizeof(push_data), - &push_data); - - // Bind descriptor set. - if (uses_push_descriptors) { - cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *pipeline_layout, 0, - set_writes); - return true; - } - const auto desc_set = desc_heap.Commit(*desc_layout); - for (auto& set_write : set_writes) { - set_write.dstSet = desc_set; - } - instance.GetDevice().updateDescriptorSets(set_writes, {}); - cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, *pipeline_layout, 0, desc_set, {}); - - return true; -} - } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h index f1bc7285a..ca429b58d 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h @@ -24,13 +24,8 @@ public: vk::ShaderModule module); ~ComputePipeline(); - bool BindResources(VideoCore::BufferCache& buffer_cache, - VideoCore::TextureCache& texture_cache) const; - private: u64 compute_key; - const Shader::Info* info; - bool uses_push_descriptors{}; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 32e3bf8f8..d0d16ac75 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -16,10 +16,6 @@ namespace Vulkan { -static constexpr auto gp_stage_flags = vk::ShaderStageFlagBits::eVertex | - vk::ShaderStageFlagBits::eGeometry | - vk::ShaderStageFlagBits::eFragment; - GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& scheduler_, DescriptorHeap& desc_heap_, const GraphicsPipelineKey& key_, vk::PipelineCache pipeline_cache, @@ -389,67 +385,4 @@ void GraphicsPipeline::BuildDescSetLayout() { desc_layout = std::move(layout); } -void GraphicsPipeline::BindResources(const Liverpool::Regs& 
regs, - VideoCore::BufferCache& buffer_cache, - VideoCore::TextureCache& texture_cache) const { - // Bind resource buffers and textures. - boost::container::small_vector set_writes; - BufferBarriers buffer_barriers; - Shader::PushData push_data{}; - Shader::Backend::Bindings binding{}; - - buffer_infos.clear(); - buffer_views.clear(); - image_infos.clear(); - - for (const auto* stage : stages) { - if (!stage) { - continue; - } - if (stage->uses_step_rates) { - push_data.step0 = regs.vgt_instance_step_rate_0; - push_data.step1 = regs.vgt_instance_step_rate_1; - } - stage->PushUd(binding, push_data); - - BindBuffers(buffer_cache, texture_cache, *stage, binding, push_data, set_writes, - buffer_barriers); - - BindTextures(texture_cache, *stage, binding, set_writes); - } - - const auto cmdbuf = scheduler.CommandBuffer(); - SCOPE_EXIT { - cmdbuf.pushConstants(*pipeline_layout, gp_stage_flags, 0U, sizeof(push_data), &push_data); - cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, Handle()); - }; - - if (set_writes.empty()) { - return; - } - - if (!buffer_barriers.empty()) { - const auto dependencies = vk::DependencyInfo{ - .dependencyFlags = vk::DependencyFlagBits::eByRegion, - .bufferMemoryBarrierCount = u32(buffer_barriers.size()), - .pBufferMemoryBarriers = buffer_barriers.data(), - }; - scheduler.EndRendering(); - cmdbuf.pipelineBarrier2(dependencies); - } - - // Bind descriptor set. - if (uses_push_descriptors) { - cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eGraphics, *pipeline_layout, 0, - set_writes); - return; - } - const auto desc_set = desc_heap.Commit(*desc_layout); - for (auto& set_write : set_writes) { - set_write.dstSet = desc_set; - } - instance.GetDevice().updateDescriptorSets(set_writes, {}); - cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, *pipeline_layout, 0, desc_set, {}); -} - } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index f7762eb12..4f4abfd16 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h @@ -2,6 +2,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include + #include "common/types.h" #include "video_core/renderer_vulkan/liverpool_to_vk.h" #include "video_core/renderer_vulkan/vk_common.h" @@ -14,8 +15,8 @@ class TextureCache; namespace Vulkan { -static constexpr u32 MaxVertexBufferCount = 32; static constexpr u32 MaxShaderStages = 5; +static constexpr u32 MaxVertexBufferCount = 32; class Instance; class Scheduler; @@ -61,13 +62,6 @@ public: std::span modules); ~GraphicsPipeline(); - void BindResources(const Liverpool::Regs& regs, VideoCore::BufferCache& buffer_cache, - VideoCore::TextureCache& texture_cache) const; - - const Shader::Info& GetStage(Shader::Stage stage) const noexcept { - return *stages[u32(stage)]; - } - bool IsEmbeddedVs() const noexcept { static constexpr size_t EmbeddedVsHash = 0x9b2da5cf47f8c29f; return key.stage_hashes[u32(Shader::Stage::Vertex)] == EmbeddedVsHash; @@ -99,9 +93,7 @@ private: void BuildDescSetLayout(); private: - std::array stages{}; GraphicsPipelineKey key; - bool uses_push_descriptors{}; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 7e44bbf09..43facbae4 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -38,8 +38,6 @@ struct Program { }; class PipelineCache { 
- static constexpr size_t MaxShaderStages = 5; - public: explicit PipelineCache(const Instance& instance, Scheduler& scheduler, AmdGpu::Liverpool* liverpool); diff --git a/src/video_core/renderer_vulkan/vk_pipeline_common.cpp b/src/video_core/renderer_vulkan/vk_pipeline_common.cpp index 582c9445b..6b48a40a0 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_common.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_common.cpp @@ -12,270 +12,47 @@ namespace Vulkan { -boost::container::static_vector Pipeline::image_infos; -boost::container::static_vector Pipeline::buffer_views; -boost::container::static_vector Pipeline::buffer_infos; -boost::container::static_vector Pipeline::bound_images; - Pipeline::Pipeline(const Instance& instance_, Scheduler& scheduler_, DescriptorHeap& desc_heap_, - vk::PipelineCache pipeline_cache) - : instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_} {} + vk::PipelineCache pipeline_cache, bool is_compute_ /*= false*/) + : instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_}, is_compute{is_compute_} {} Pipeline::~Pipeline() = default; -void Pipeline::BindBuffers(VideoCore::BufferCache& buffer_cache, - VideoCore::TextureCache& texture_cache, const Shader::Info& stage, - Shader::Backend::Bindings& binding, Shader::PushData& push_data, - DescriptorWrites& set_writes, BufferBarriers& buffer_barriers) const { - using BufferBindingInfo = std::pair; - static boost::container::static_vector buffer_bindings; +void Pipeline::BindResources(DescriptorWrites& set_writes, const BufferBarriers& buffer_barriers, + const Shader::PushData& push_data) const { + const auto cmdbuf = scheduler.CommandBuffer(); + const auto bind_point = + IsCompute() ? vk::PipelineBindPoint::eCompute : vk::PipelineBindPoint::eGraphics; - buffer_bindings.clear(); - - for (const auto& desc : stage.buffers) { - const auto vsharp = desc.GetSharp(stage); - if (!desc.is_gds_buffer && vsharp.base_address != 0 && vsharp.GetSize() > 0) { - const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize()); - buffer_bindings.emplace_back(buffer_id, vsharp); - } else { - buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp); - } + if (!buffer_barriers.empty()) { + const auto dependencies = vk::DependencyInfo{ + .dependencyFlags = vk::DependencyFlagBits::eByRegion, + .bufferMemoryBarrierCount = u32(buffer_barriers.size()), + .pBufferMemoryBarriers = buffer_barriers.data(), + }; + scheduler.EndRendering(); + cmdbuf.pipelineBarrier2(dependencies); } - using TexBufferBindingInfo = std::pair; - static boost::container::static_vector texbuffer_bindings; + const auto stage_flags = IsCompute() ? vk::ShaderStageFlagBits::eCompute : gp_stage_flags; + cmdbuf.pushConstants(*pipeline_layout, stage_flags, 0u, sizeof(push_data), &push_data); - texbuffer_bindings.clear(); - - for (const auto& desc : stage.texture_buffers) { - const auto vsharp = desc.GetSharp(stage); - if (vsharp.base_address != 0 && vsharp.GetSize() > 0 && - vsharp.GetDataFmt() != AmdGpu::DataFormat::FormatInvalid) { - const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize()); - texbuffer_bindings.emplace_back(buffer_id, vsharp); - } else { - texbuffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp); - } + // Bind descriptor set. 
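+    // With VK_KHR_push_descriptor the writes can be recorded straight into the command
+    // buffer; otherwise a set is committed from the descriptor heap, patched into each
+    // write and bound conventionally.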
+ if (set_writes.empty()) { + return; } - // Bind the flattened user data buffer as a UBO so it's accessible to the shader - if (stage.has_readconst) { - const auto [vk_buffer, offset] = buffer_cache.ObtainHostUBO(stage.flattened_ud_buf); - buffer_infos.emplace_back(vk_buffer->Handle(), offset, - stage.flattened_ud_buf.size() * sizeof(u32)); - set_writes.push_back({ - .dstSet = VK_NULL_HANDLE, - .dstBinding = binding.unified++, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = vk::DescriptorType::eUniformBuffer, - .pBufferInfo = &buffer_infos.back(), - }); - ++binding.buffer; + if (uses_push_descriptors) { + cmdbuf.pushDescriptorSetKHR(bind_point, *pipeline_layout, 0, set_writes); + return; } - // Second pass to re-bind buffers that were updated after binding - for (u32 i = 0; i < buffer_bindings.size(); i++) { - const auto& [buffer_id, vsharp] = buffer_bindings[i]; - const auto& desc = stage.buffers[i]; - const bool is_storage = desc.IsStorage(vsharp); - if (!buffer_id) { - if (desc.is_gds_buffer) { - const auto* gds_buf = buffer_cache.GetGdsBuffer(); - buffer_infos.emplace_back(gds_buf->Handle(), 0, gds_buf->SizeBytes()); - } else if (instance.IsNullDescriptorSupported()) { - buffer_infos.emplace_back(VK_NULL_HANDLE, 0, VK_WHOLE_SIZE); - } else { - auto& null_buffer = buffer_cache.GetBuffer(VideoCore::NULL_BUFFER_ID); - buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE); - } - } else { - const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer( - vsharp.base_address, vsharp.GetSize(), desc.is_written, false, buffer_id); - const u32 alignment = - is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment(); - const u32 offset_aligned = Common::AlignDown(offset, alignment); - const u32 adjust = offset - offset_aligned; - ASSERT(adjust % 4 == 0); - push_data.AddOffset(binding.buffer, adjust); - buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, - vsharp.GetSize() + adjust); - } - - set_writes.push_back({ - .dstSet = VK_NULL_HANDLE, - .dstBinding = binding.unified++, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = is_storage ? vk::DescriptorType::eStorageBuffer - : vk::DescriptorType::eUniformBuffer, - .pBufferInfo = &buffer_infos.back(), - }); - ++binding.buffer; - } - - const auto null_buffer_view = - instance.IsNullDescriptorSupported() ? VK_NULL_HANDLE : buffer_cache.NullBufferView(); - for (u32 i = 0; i < texbuffer_bindings.size(); i++) { - const auto& [buffer_id, vsharp] = texbuffer_bindings[i]; - const auto& desc = stage.texture_buffers[i]; - vk::BufferView& buffer_view = buffer_views.emplace_back(null_buffer_view); - if (buffer_id) { - const u32 alignment = instance.TexelBufferMinAlignment(); - const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer( - vsharp.base_address, vsharp.GetSize(), desc.is_written, true, buffer_id); - const u32 fmt_stride = AmdGpu::NumBits(vsharp.GetDataFmt()) >> 3; - ASSERT_MSG(fmt_stride == vsharp.GetStride(), - "Texel buffer stride must match format stride"); - const u32 offset_aligned = Common::AlignDown(offset, alignment); - const u32 adjust = offset - offset_aligned; - ASSERT(adjust % fmt_stride == 0); - push_data.AddOffset(binding.buffer, adjust / fmt_stride); - buffer_view = - vk_buffer->View(offset_aligned, vsharp.GetSize() + adjust, desc.is_written, - vsharp.GetDataFmt(), vsharp.GetNumberFmt()); - if (auto barrier = - vk_buffer->GetBarrier(desc.is_written ? 
vk::AccessFlagBits2::eShaderWrite - : vk::AccessFlagBits2::eShaderRead, - vk::PipelineStageFlagBits2::eComputeShader)) { - buffer_barriers.emplace_back(*barrier); - } - if (desc.is_written) { - texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, vsharp.GetSize()); - } - } - - set_writes.push_back({ - .dstSet = VK_NULL_HANDLE, - .dstBinding = binding.unified++, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = desc.is_written ? vk::DescriptorType::eStorageTexelBuffer - : vk::DescriptorType::eUniformTexelBuffer, - .pTexelBufferView = &buffer_view, - }); - ++binding.buffer; - } -} - -void Pipeline::BindTextures(VideoCore::TextureCache& texture_cache, const Shader::Info& stage, - Shader::Backend::Bindings& binding, - DescriptorWrites& set_writes) const { - using ImageBindingInfo = std::pair; - static boost::container::static_vector image_bindings; - - image_bindings.clear(); - - for (const auto& image_desc : stage.images) { - const auto tsharp = image_desc.GetSharp(stage); - if (texture_cache.IsMeta(tsharp.Address())) { - LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a shader (texture)"); - } - - if (tsharp.GetDataFmt() == AmdGpu::DataFormat::FormatInvalid) { - image_bindings.emplace_back(std::piecewise_construct, std::tuple{}, std::tuple{}); - continue; - } - - auto& [image_id, desc] = image_bindings.emplace_back(std::piecewise_construct, std::tuple{}, - std::tuple{tsharp, image_desc}); - image_id = texture_cache.FindImage(desc); - auto& image = texture_cache.GetImage(image_id); - ASSERT(False(image.flags & VideoCore::ImageFlagBits::Virtual)); - if (image.binding.is_bound) { - // The image is already bound. In case if it is about to be used as storage we need - // to force general layout on it. - image.binding.force_general |= image_desc.is_storage; - } - if (image.binding.is_target) { - // The image is already bound as target. Since we read and output to it need to force - // general layout too. - image.binding.force_general = 1u; - } - image.binding.is_bound = 1u; - } - - // Second pass to re-bind images that were updated after binding - for (auto& [image_id, desc] : image_bindings) { - bool is_storage = desc.type == VideoCore::TextureCache::BindingType::Storage; - if (!image_id) { - if (instance.IsNullDescriptorSupported()) { - image_infos.emplace_back(VK_NULL_HANDLE, VK_NULL_HANDLE, vk::ImageLayout::eGeneral); - } else { - auto& null_image = texture_cache.GetImageView(VideoCore::NULL_IMAGE_VIEW_ID); - image_infos.emplace_back(VK_NULL_HANDLE, *null_image.image_view, - vk::ImageLayout::eGeneral); - } - } else { - if (auto& old_image = texture_cache.GetImage(image_id); - old_image.binding.needs_rebind) { - old_image.binding.Reset(); // clean up previous image binding state - image_id = texture_cache.FindImage(desc); - } - - bound_images.emplace_back(image_id); - - auto& image = texture_cache.GetImage(image_id); - auto& image_view = texture_cache.FindTexture(image_id, desc.view_info); - - if (image.binding.force_general || image.binding.is_target) { - image.Transit(vk::ImageLayout::eGeneral, - vk::AccessFlagBits2::eShaderRead | - (image.info.IsDepthStencil() - ? vk::AccessFlagBits2::eDepthStencilAttachmentWrite - : vk::AccessFlagBits2::eColorAttachmentWrite), - {}); - } else { - if (is_storage) { - image.Transit(vk::ImageLayout::eGeneral, - vk::AccessFlagBits2::eShaderRead | - vk::AccessFlagBits2::eShaderWrite, - desc.view_info.range); - } else { - const auto new_layout = image.info.IsDepthStencil() - ? 
vk::ImageLayout::eDepthStencilReadOnlyOptimal - : vk::ImageLayout::eShaderReadOnlyOptimal; - image.Transit(new_layout, vk::AccessFlagBits2::eShaderRead, - desc.view_info.range); - } - } - image.usage.storage |= is_storage; - image.usage.texture |= !is_storage; - - image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view, - image.last_state.layout); - } - - set_writes.push_back({ - .dstSet = VK_NULL_HANDLE, - .dstBinding = binding.unified++, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = - is_storage ? vk::DescriptorType::eStorageImage : vk::DescriptorType::eSampledImage, - .pImageInfo = &image_infos.back(), - }); - } - - for (const auto& sampler : stage.samplers) { - auto ssharp = sampler.GetSharp(stage); - if (sampler.disable_aniso) { - const auto& tsharp = stage.images[sampler.associated_image].GetSharp(stage); - if (tsharp.base_level == 0 && tsharp.last_level == 0) { - ssharp.max_aniso.Assign(AmdGpu::AnisoRatio::One); - } - } - const auto vk_sampler = texture_cache.GetSampler(ssharp); - image_infos.emplace_back(vk_sampler, VK_NULL_HANDLE, vk::ImageLayout::eGeneral); - set_writes.push_back({ - .dstSet = VK_NULL_HANDLE, - .dstBinding = binding.unified++, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = vk::DescriptorType::eSampler, - .pImageInfo = &image_infos.back(), - }); + const auto desc_set = desc_heap.Commit(*desc_layout); + for (auto& set_write : set_writes) { + set_write.dstSet = desc_set; } + instance.GetDevice().updateDescriptorSets(set_writes, {}); + cmdbuf.bindDescriptorSets(bind_point, *pipeline_layout, 0, desc_set, {}); } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_pipeline_common.h b/src/video_core/renderer_vulkan/vk_pipeline_common.h index a367e3da6..8c48c83f7 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_common.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_common.h @@ -14,6 +14,10 @@ class BufferCache; namespace Vulkan { +static constexpr auto gp_stage_flags = vk::ShaderStageFlagBits::eVertex | + vk::ShaderStageFlagBits::eGeometry | + vk::ShaderStageFlagBits::eFragment; + class Instance; class Scheduler; class DescriptorHeap; @@ -21,7 +25,7 @@ class DescriptorHeap; class Pipeline { public: Pipeline(const Instance& instance, Scheduler& scheduler, DescriptorHeap& desc_heap, - vk::PipelineCache pipeline_cache); + vk::PipelineCache pipeline_cache, bool is_compute = false); virtual ~Pipeline(); vk::Pipeline Handle() const noexcept { @@ -32,22 +36,27 @@ public: return *pipeline_layout; } + auto GetStages() const { + if (is_compute) { + return std::span{stages.cend() - 1, stages.cend()}; + } else { + return std::span{stages.cbegin(), stages.cend() - 1}; + } + } + + const Shader::Info& GetStage(Shader::Stage stage) const noexcept { + return *stages[u32(stage)]; + } + + bool IsCompute() const { + return is_compute; + } + using DescriptorWrites = boost::container::small_vector; using BufferBarriers = boost::container::small_vector; - void BindBuffers(VideoCore::BufferCache& buffer_cache, VideoCore::TextureCache& texture_cache, - const Shader::Info& stage, Shader::Backend::Bindings& binding, - Shader::PushData& push_data, DescriptorWrites& set_writes, - BufferBarriers& buffer_barriers) const; - - void BindTextures(VideoCore::TextureCache& texture_cache, const Shader::Info& stage, - Shader::Backend::Bindings& binding, DescriptorWrites& set_writes) const; - void ResetBindings(VideoCore::TextureCache& texture_cache) const { - for (auto& image_id : bound_images) { - 
texture_cache.GetImage(image_id).binding.Reset(); - } - bound_images.clear(); - } + void BindResources(DescriptorWrites& set_writes, const BufferBarriers& buffer_barriers, + const Shader::PushData& push_data) const; protected: const Instance& instance; @@ -56,10 +65,9 @@ protected: vk::UniquePipeline pipeline; vk::UniquePipelineLayout pipeline_layout; vk::UniqueDescriptorSetLayout desc_layout; - static boost::container::static_vector image_infos; - static boost::container::static_vector buffer_views; - static boost::container::static_vector buffer_infos; - static boost::container::static_vector bound_images; + std::array stages{}; + bool uses_push_descriptors{}; + const bool is_compute; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 697713d5b..ba605831f 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -116,7 +116,7 @@ RenderState Rasterizer::PrepareRenderState(u32 mrt_mask) { auto& [image_id, desc] = cb_descs.emplace_back(std::piecewise_construct, std::tuple{}, std::tuple{col_buf, hint}); const auto& image_view = texture_cache.FindRenderTarget(desc); - image_id = image_view.image_id; + image_id = bound_images.emplace_back(image_view.image_id); auto& image = texture_cache.GetImage(image_id); image.binding.is_target = 1u; @@ -149,7 +149,7 @@ RenderState Rasterizer::PrepareRenderState(u32 mrt_mask) { std::tuple{regs.depth_buffer, regs.depth_view, regs.depth_control, htile_address, hint}); const auto& image_view = texture_cache.FindDepthTarget(desc); - image_id = image_view.image_id; + image_id = bound_images.emplace_back(image_view.image_id); auto& image = texture_cache.GetImage(image_id); image.binding.is_target = 1u; @@ -181,7 +181,6 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) { return; } - const auto cmdbuf = scheduler.CommandBuffer(); const auto& regs = liverpool->regs; const GraphicsPipeline* pipeline = pipeline_cache.GetGraphicsPipeline(); if (!pipeline) { @@ -190,10 +189,8 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) { auto state = PrepareRenderState(pipeline->GetMrtMask()); - try { - pipeline->BindResources(regs, buffer_cache, texture_cache); - } catch (...) { - UNREACHABLE(); + if (!BindResources(pipeline)) { + return; } const auto& vs_info = pipeline->GetStage(Shader::Stage::Vertex); @@ -205,6 +202,9 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) { const auto [vertex_offset, instance_offset] = vs_info.GetDrawOffsets(); + const auto cmdbuf = scheduler.CommandBuffer(); + cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle()); + if (is_indexed) { cmdbuf.drawIndexed(num_indices, regs.num_instances.NumInstances(), 0, s32(vertex_offset), instance_offset); @@ -215,7 +215,7 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) { instance_offset); } - pipeline->ResetBindings(texture_cache); + ResetBindings(); } void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u32 stride, @@ -237,10 +237,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3 ASSERT_MSG(regs.primitive_type != AmdGpu::PrimitiveType::RectList, "Unsupported primitive type for indirect draw"); - try { - pipeline->BindResources(regs, buffer_cache, texture_cache); - } catch (...) 
{
-        UNREACHABLE;
+    if (!BindResources(pipeline)) {
+        return;
     }
 
     const auto& vs_info = pipeline->GetStage(Shader::Stage::Vertex);
@@ -263,6 +261,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
     // instance offsets will be automatically applied by Vulkan from indirect args buffer.
 
     const auto cmdbuf = scheduler.CommandBuffer();
+    cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
+
     if (is_indexed) {
         ASSERT(sizeof(VkDrawIndexedIndirectCommand) == stride);
@@ -283,7 +283,7 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
         }
     }
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 void Rasterizer::DispatchDirect() {
@@ -296,20 +296,15 @@ void Rasterizer::DispatchDirect() {
         return;
     }
 
-    try {
-        const auto has_resources = pipeline->BindResources(buffer_cache, texture_cache);
-        if (!has_resources) {
-            return;
-        }
-    } catch (...) {
-        UNREACHABLE;
+    if (!BindResources(pipeline)) {
+        return;
     }
 
     scheduler.EndRendering();
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->Handle());
     cmdbuf.dispatch(cs_program.dim_x, cs_program.dim_y, cs_program.dim_z);
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 void Rasterizer::DispatchIndirect(VAddr address, u32 offset, u32 size) {
@@ -322,13 +317,8 @@ void Rasterizer::DispatchIndirect(VAddr address, u32 offset, u32 size) {
         return;
     }
 
-    try {
-        const auto has_resources = pipeline->BindResources(buffer_cache, texture_cache);
-        if (!has_resources) {
-            return;
-        }
-    } catch (...) {
-        UNREACHABLE;
+    if (!BindResources(pipeline)) {
+        return;
     }
 
     scheduler.EndRendering();
 
     const auto [buffer, base] = buffer_cache.ObtainBuffer(address + offset, size, false);
     cmdbuf.dispatchIndirect(buffer->Handle(), base);
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 u64 Rasterizer::Flush() {
@@ -350,13 +340,328 @@ void Rasterizer::Finish() {
     scheduler.Finish();
 }
 
+bool Rasterizer::BindResources(const Pipeline* pipeline) {
+    buffer_infos.clear();
+    buffer_views.clear();
+    image_infos.clear();
+
+    const auto& regs = liverpool->regs;
+
+    if (pipeline->IsCompute()) {
+        const auto& info = pipeline->GetStage(Shader::Stage::Compute);
+
+        // Most of the time, when metadata is updated by a shader, it simply gets cleared.
+        // This means the whole dispatch can be skipped and the tracked state updated
+        // instead. Metadata is also not intended to be consumed directly; in the rare
+        // cases where it is (e.g. HTile introspection, CRAA) full emulation will be
+        // needed anyway. Metadata reads are logged with a warning.
+        const auto IsMetaUpdate = [&](const auto& desc) {
+            const VAddr address = desc.GetSharp(info).base_address;
+            if (desc.is_written) {
+                if (texture_cache.TouchMeta(address, true)) {
+                    LOG_TRACE(Render_Vulkan, "Metadata update skipped");
+                    return true;
+                }
+            } else {
+                if (texture_cache.IsMeta(address)) {
+                    LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
+                }
+            }
+            return false;
+        };
+
+        for (const auto& desc : info.buffers) {
+            if (desc.is_gds_buffer) {
+                continue;
+            }
+            if (IsMetaUpdate(desc)) {
+                return false;
+            }
+        }
+        for (const auto& desc : info.texture_buffers) {
+            if (IsMetaUpdate(desc)) {
+                return false;
+            }
+        }
+    }
+
+    set_writes.clear();
+    buffer_barriers.clear();
+
+    // Bind resource buffers and textures.
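+    // One unified descriptor set is built across all active stages: `binding` carries the
+    // running slot index from stage to stage, while `push_data` collects the instance step
+    // rates and the buffer offset adjustments produced during binding.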
+ Shader::PushData push_data{}; + Shader::Backend::Bindings binding{}; + + for (const auto* stage : pipeline->GetStages()) { + if (!stage) { + continue; + } + if (stage->uses_step_rates) { + push_data.step0 = regs.vgt_instance_step_rate_0; + push_data.step1 = regs.vgt_instance_step_rate_1; + } + stage->PushUd(binding, push_data); + + BindBuffers(*stage, binding, push_data, set_writes, buffer_barriers); + BindTextures(*stage, binding, set_writes); + } + + pipeline->BindResources(set_writes, buffer_barriers, push_data); + + return true; +} + +void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Bindings& binding, + Shader::PushData& push_data, Pipeline::DescriptorWrites& set_writes, + Pipeline::BufferBarriers& buffer_barriers) { + buffer_bindings.clear(); + + for (const auto& desc : stage.buffers) { + const auto vsharp = desc.GetSharp(stage); + if (!desc.is_gds_buffer && vsharp.base_address != 0 && vsharp.GetSize() > 0) { + const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize()); + buffer_bindings.emplace_back(buffer_id, vsharp); + } else { + buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp); + } + } + + texbuffer_bindings.clear(); + + for (const auto& desc : stage.texture_buffers) { + const auto vsharp = desc.GetSharp(stage); + if (vsharp.base_address != 0 && vsharp.GetSize() > 0 && + vsharp.GetDataFmt() != AmdGpu::DataFormat::FormatInvalid) { + const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize()); + texbuffer_bindings.emplace_back(buffer_id, vsharp); + } else { + texbuffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp); + } + } + + // Bind the flattened user data buffer as a UBO so it's accessible to the shader + if (stage.has_readconst) { + const auto [vk_buffer, offset] = buffer_cache.ObtainHostUBO(stage.flattened_ud_buf); + buffer_infos.emplace_back(vk_buffer->Handle(), offset, + stage.flattened_ud_buf.size() * sizeof(u32)); + set_writes.push_back({ + .dstSet = VK_NULL_HANDLE, + .dstBinding = binding.unified++, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &buffer_infos.back(), + }); + ++binding.buffer; + } + + // Second pass to re-bind buffers that were updated after binding + for (u32 i = 0; i < buffer_bindings.size(); i++) { + const auto& [buffer_id, vsharp] = buffer_bindings[i]; + const auto& desc = stage.buffers[i]; + const bool is_storage = desc.IsStorage(vsharp); + if (!buffer_id) { + if (desc.is_gds_buffer) { + const auto* gds_buf = buffer_cache.GetGdsBuffer(); + buffer_infos.emplace_back(gds_buf->Handle(), 0, gds_buf->SizeBytes()); + } else if (instance.IsNullDescriptorSupported()) { + buffer_infos.emplace_back(VK_NULL_HANDLE, 0, VK_WHOLE_SIZE); + } else { + auto& null_buffer = buffer_cache.GetBuffer(VideoCore::NULL_BUFFER_ID); + buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE); + } + } else { + const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer( + vsharp.base_address, vsharp.GetSize(), desc.is_written, false, buffer_id); + const u32 alignment = + is_storage ? 
instance.StorageMinAlignment() : instance.UniformMinAlignment();
+            const u32 offset_aligned = Common::AlignDown(offset, alignment);
+            const u32 adjust = offset - offset_aligned;
+            ASSERT(adjust % 4 == 0);
+            push_data.AddOffset(binding.buffer, adjust);
+            buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned,
+                                      vsharp.GetSize() + adjust);
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = is_storage ? vk::DescriptorType::eStorageBuffer
+                                         : vk::DescriptorType::eUniformBuffer,
+            .pBufferInfo = &buffer_infos.back(),
+        });
+        ++binding.buffer;
+    }
+
+    const auto null_buffer_view =
+        instance.IsNullDescriptorSupported() ? VK_NULL_HANDLE : buffer_cache.NullBufferView();
+    for (u32 i = 0; i < texbuffer_bindings.size(); i++) {
+        const auto& [buffer_id, vsharp] = texbuffer_bindings[i];
+        const auto& desc = stage.texture_buffers[i];
+        vk::BufferView& buffer_view = buffer_views.emplace_back(null_buffer_view);
+        if (buffer_id) {
+            const u32 alignment = instance.TexelBufferMinAlignment();
+            const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
+                vsharp.base_address, vsharp.GetSize(), desc.is_written, true, buffer_id);
+            const u32 fmt_stride = AmdGpu::NumBits(vsharp.GetDataFmt()) >> 3;
+            ASSERT_MSG(fmt_stride == vsharp.GetStride(),
+                       "Texel buffer stride must match format stride");
+            const u32 offset_aligned = Common::AlignDown(offset, alignment);
+            const u32 adjust = offset - offset_aligned;
+            ASSERT(adjust % fmt_stride == 0);
+            push_data.AddOffset(binding.buffer, adjust / fmt_stride);
+            buffer_view =
+                vk_buffer->View(offset_aligned, vsharp.GetSize() + adjust, desc.is_written,
+                                vsharp.GetDataFmt(), vsharp.GetNumberFmt());
+            if (auto barrier =
+                    vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
                                                           : vk::AccessFlagBits2::eShaderRead,
+                                          vk::PipelineStageFlagBits2::eComputeShader)) {
+                buffer_barriers.emplace_back(*barrier);
+            }
+            if (desc.is_written) {
+                texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, vsharp.GetSize());
+            }
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = desc.is_written ? vk::DescriptorType::eStorageTexelBuffer
+                                              : vk::DescriptorType::eUniformTexelBuffer,
+            .pTexelBufferView = &buffer_view,
+        });
+        ++binding.buffer;
+    }
+}
+
+void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindings& binding,
+                              Pipeline::DescriptorWrites& set_writes) {
+    image_bindings.clear();
+
+    for (const auto& image_desc : stage.images) {
+        const auto tsharp = image_desc.GetSharp(stage);
+        if (texture_cache.IsMeta(tsharp.Address())) {
+            LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a shader (texture)");
+        }
+
+        if (tsharp.GetDataFmt() == AmdGpu::DataFormat::FormatInvalid) {
+            image_bindings.emplace_back(std::piecewise_construct, std::tuple{}, std::tuple{});
+            continue;
+        }
+
+        auto& [image_id, desc] = image_bindings.emplace_back(std::piecewise_construct, std::tuple{},
+                                                             std::tuple{tsharp, image_desc});
+        image_id = texture_cache.FindImage(desc);
+        auto& image = texture_cache.GetImage(image_id);
+        ASSERT(False(image.flags & VideoCore::ImageFlagBits::Virtual));
+        if (image.binding.is_bound) {
+            // The image is already bound. If it is about to be used as storage we need to
+            // force general layout on it.
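+            // (eGeneral is the only image layout valid for simultaneous sampled and
+            // storage access, hence the fallback here and below.)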
+            image.binding.force_general |= image_desc.is_storage;
+        }
+        if (image.binding.is_target) {
+            // The image is already bound as a render target. Since we both read from and
+            // write to it, we need to force general layout as well.
+            image.binding.force_general = 1u;
+        }
+        image.binding.is_bound = 1u;
+    }
+
+    // Second pass to re-bind images that were updated after binding
+    for (auto& [image_id, desc] : image_bindings) {
+        bool is_storage = desc.type == VideoCore::TextureCache::BindingType::Storage;
+        if (!image_id) {
+            if (instance.IsNullDescriptorSupported()) {
+                image_infos.emplace_back(VK_NULL_HANDLE, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
+            } else {
+                auto& null_image = texture_cache.GetImageView(VideoCore::NULL_IMAGE_VIEW_ID);
+                image_infos.emplace_back(VK_NULL_HANDLE, *null_image.image_view,
+                                         vk::ImageLayout::eGeneral);
+            }
+        } else {
+            if (auto& old_image = texture_cache.GetImage(image_id);
+                old_image.binding.needs_rebind) {
+                old_image.binding.Reset(); // clean up previous image binding state
+                image_id = texture_cache.FindImage(desc);
+            }
+
+            bound_images.emplace_back(image_id);
+
+            auto& image = texture_cache.GetImage(image_id);
+            auto& image_view = texture_cache.FindTexture(image_id, desc.view_info);
+
+            if (image.binding.force_general || image.binding.is_target) {
+                image.Transit(vk::ImageLayout::eGeneral,
+                              vk::AccessFlagBits2::eShaderRead |
+                                  (image.info.IsDepthStencil()
+                                       ? vk::AccessFlagBits2::eDepthStencilAttachmentWrite
+                                       : vk::AccessFlagBits2::eColorAttachmentWrite),
+                              {});
+            } else {
+                if (is_storage) {
+                    image.Transit(vk::ImageLayout::eGeneral,
+                                  vk::AccessFlagBits2::eShaderRead |
+                                      vk::AccessFlagBits2::eShaderWrite,
+                                  desc.view_info.range);
+                } else {
+                    const auto new_layout = image.info.IsDepthStencil()
+                                                ? vk::ImageLayout::eDepthStencilReadOnlyOptimal
+                                                : vk::ImageLayout::eShaderReadOnlyOptimal;
+                    image.Transit(new_layout, vk::AccessFlagBits2::eShaderRead,
+                                  desc.view_info.range);
+                }
+            }
+            image.usage.storage |= is_storage;
+            image.usage.texture |= !is_storage;
+
+            image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view,
+                                     image.last_state.layout);
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType =
+                is_storage ?
vk::DescriptorType::eStorageImage : vk::DescriptorType::eSampledImage, + .pImageInfo = &image_infos.back(), + }); + } + + for (const auto& sampler : stage.samplers) { + auto ssharp = sampler.GetSharp(stage); + if (sampler.disable_aniso) { + const auto& tsharp = stage.images[sampler.associated_image].GetSharp(stage); + if (tsharp.base_level == 0 && tsharp.last_level == 0) { + ssharp.max_aniso.Assign(AmdGpu::AnisoRatio::One); + } + } + const auto vk_sampler = texture_cache.GetSampler(ssharp); + image_infos.emplace_back(vk_sampler, VK_NULL_HANDLE, vk::ImageLayout::eGeneral); + set_writes.push_back({ + .dstSet = VK_NULL_HANDLE, + .dstBinding = binding.unified++, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eSampler, + .pImageInfo = &image_infos.back(), + }); + } +} + void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& state) { int cb_index = 0; for (auto& [image_id, desc] : cb_descs) { if (auto& old_img = texture_cache.GetImage(image_id); old_img.binding.needs_rebind) { auto& view = texture_cache.FindRenderTarget(desc); ASSERT(view.image_id != image_id); - image_id = view.image_id; + image_id = bound_images.emplace_back(view.image_id); auto& image = texture_cache.GetImage(view.image_id); state.color_attachments[cb_index].imageView = *view.image_view; state.color_attachments[cb_index].imageLayout = image.last_state.layout; @@ -367,7 +672,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s state.height = std::min(state.height, std::max(image.info.size.height >> mip, 1u)); ASSERT(old_img.info.size.width == state.width); ASSERT(old_img.info.size.height == state.height); - old_img.binding.Reset(); } auto& image = texture_cache.GetImage(image_id); if (image.binding.force_general) { @@ -383,7 +687,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s } image.usage.render_target = 1u; state.color_attachments[cb_index].imageLayout = image.last_state.layout; - image.binding.Reset(); ++cb_index; } @@ -416,7 +719,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s state.depth_attachment.imageLayout = image.last_state.layout; image.usage.depth_target = true; image.usage.stencil = has_stencil; - image.binding.Reset(); } scheduler.BeginRendering(state); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index a83a37abf..5102cda38 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -65,6 +65,21 @@ private: bool FilterDraw(); + void BindBuffers(const Shader::Info& stage, Shader::Backend::Bindings& binding, + Shader::PushData& push_data, Pipeline::DescriptorWrites& set_writes, + Pipeline::BufferBarriers& buffer_barriers); + + void BindTextures(const Shader::Info& stage, Shader::Backend::Bindings& binding, + Pipeline::DescriptorWrites& set_writes); + + bool BindResources(const Pipeline* pipeline); + void ResetBindings() { + for (auto& image_id : bound_images) { + texture_cache.GetImage(image_id).binding.Reset(); + } + bound_images.clear(); + } + private: const Instance& instance; Scheduler& scheduler; @@ -79,6 +94,20 @@ private: std::pair, 8> cb_descs; std::optional> db_desc; + boost::container::static_vector image_infos; + boost::container::static_vector buffer_views; + boost::container::static_vector buffer_infos; + boost::container::static_vector bound_images; + + Pipeline::DescriptorWrites set_writes; + 
Pipeline::BufferBarriers buffer_barriers; + + using BufferBindingInfo = std::pair; + boost::container::static_vector buffer_bindings; + using TexBufferBindingInfo = std::pair; + boost::container::static_vector texbuffer_bindings; + using ImageBindingInfo = std::pair; + boost::container::static_vector image_bindings; }; } // namespace Vulkan diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 3527964ba..8ac603f06 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -71,8 +71,7 @@ public: struct RenderTargetDesc : public BaseDesc { RenderTargetDesc(const AmdGpu::Liverpool::ColorBuffer& buffer, const AmdGpu::Liverpool::CbDbExtent& hint = {}) - : BaseDesc{BindingType::RenderTarget, ImageInfo{buffer, hint}, - ImageViewInfo{buffer, false}} {} + : BaseDesc{BindingType::RenderTarget, ImageInfo{buffer, hint}, ImageViewInfo{buffer}} {} }; struct DepthTargetDesc : public BaseDesc {
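
After this patch the binding flow has a single owner: the rasterizer gathers descriptor
writes, buffer barriers and push constants for every active stage, Pipeline::BindResources
only records them, and per-command image binding state is cleared by
Rasterizer::ResetBindings. A condensed sketch of the shape every draw/dispatch entry point
now follows (simplified from the Rasterizer::Draw hunks above, not verbatim source;
render-state setup, vertex/index binding and the indirect variants are omitted):

    void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
        const GraphicsPipeline* pipeline = pipeline_cache.GetGraphicsPipeline();
        if (!pipeline) {
            return;
        }
        auto state = PrepareRenderState(pipeline->GetMrtMask());

        // Walks pipeline->GetStages(), fills set_writes/buffer_barriers and returns false
        // when a compute dispatch would only rewrite already-tracked metadata.
        if (!BindResources(pipeline)) {
            return;
        }

        BeginRendering(*pipeline, state);

        const auto cmdbuf = scheduler.CommandBuffer();
        cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
        // ... drawIndexed()/draw() as in the hunks above ...

        // Clear per-image binding state (is_bound/is_target/force_general) for the
        // images touched by this command.
        ResetBindings();
    }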