mirror of https://github.com/shadps4-emu/shadPS4.git
synced 2025-07-27 12:34:37 +00:00

fixed clang format

parent 60d3e3545d
commit b1018901b6
@@ -34,4 +34,4 @@ bool DecrementRecursiveLock(void* mutex, RecursiveLockType type) {
     return false;
 }
 
-} // namespace Common
+} // namespace Common::Detail
@@ -3,19 +3,15 @@
 
 #pragma once
 
-#include <optional>
 #include <mutex>
+#include <optional>
 #include <shared_mutex>
 
 namespace Common {
 
 namespace Detail {
 
-enum class RecursiveLockType {
-    None,
-    Shared,
-    Exclusive
-};
+enum class RecursiveLockType { None, Shared, Exclusive };
 
 bool IncrementRecursiveLock(void* mutex, RecursiveLockType type);
 bool DecrementRecursiveLock(void* mutex, RecursiveLockType type);
@@ -38,6 +34,7 @@ public:
             m_lock.reset();
         }
     }
+
 private:
     MutexType& m_mutex;
     std::optional<std::unique_lock<MutexType>> m_lock;
@@ -60,6 +57,7 @@ public:
             m_lock.reset();
         }
     }
+
 private:
     MutexType& m_mutex;
     std::optional<std::shared_lock<MutexType>> m_lock;
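Note: the two guard hunks above each hold a std::optional lock that is only engaged on the outermost acquisition, using the IncrementRecursiveLock/DecrementRecursiveLock counters declared in the header. A minimal sketch of that pattern, assuming Increment returns true only for the first acquisition on the current thread and Decrement returns true only for the final release (the guard name is hypothetical; only the declarations come from the diff):

    #include <mutex>
    #include <optional>

    // Declarations from the header hunk above.
    namespace Common::Detail {
    enum class RecursiveLockType { None, Shared, Exclusive };
    bool IncrementRecursiveLock(void* mutex, RecursiveLockType type);
    bool DecrementRecursiveLock(void* mutex, RecursiveLockType type);
    } // namespace Common::Detail

    // Hypothetical exclusive guard; the shared variant would swap in std::shared_lock.
    template <typename MutexType>
    class ScopedExclusiveLock {
    public:
        explicit ScopedExclusiveLock(MutexType& mutex) : m_mutex{mutex} {
            // Only take the real lock on the outermost acquisition (assumed semantics).
            if (Common::Detail::IncrementRecursiveLock(
                    &m_mutex, Common::Detail::RecursiveLockType::Exclusive)) {
                m_lock.emplace(m_mutex);
            }
        }
        ~ScopedExclusiveLock() {
            // Release the real lock only when the recursion count reaches zero.
            if (Common::Detail::DecrementRecursiveLock(
                    &m_mutex, Common::Detail::RecursiveLockType::Exclusive)) {
                m_lock.reset();
            }
        }

    private:
        MutexType& m_mutex;
        std::optional<std::unique_lock<MutexType>> m_lock;
    };

This keeps a non-recursive mutex safe to re-acquire on the same thread without paying for a full recursive mutex on every lock.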
@@ -35,14 +35,12 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
       gds_buffer{instance, scheduler, MemoryUsage::Stream, 0, AllFlags, DataShareBufferSize},
       bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal,
                            0, AllFlags, BDA_PAGETABLE_SIZE},
-      fault_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags,
-                   FAULT_BUFFER_SIZE),
+      fault_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, FAULT_BUFFER_SIZE),
       memory_tracker{&tracker} {
     Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");
     Vulkan::SetObjectName(instance.GetDevice(), bda_pagetable_buffer.Handle(),
                           "BDA Page Table Buffer");
-    Vulkan::SetObjectName(instance.GetDevice(), fault_buffer.Handle(),
-                          "Fault Buffer");
+    Vulkan::SetObjectName(instance.GetDevice(), fault_buffer.Handle(), "Fault Buffer");
 
     // Ensure the first slot is used for the null buffer
     const auto null_id =
@@ -75,13 +73,11 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
     auto [desc_layout_result, desc_layout] =
         instance.GetDevice().createDescriptorSetLayoutUnique(desc_layout_ci);
     ASSERT_MSG(desc_layout_result == vk::Result::eSuccess,
-               "Failed to create descriptor set layout: {}",
-               vk::to_string(desc_layout_result));
+               "Failed to create descriptor set layout: {}", vk::to_string(desc_layout_result));
     fault_process_desc_layout = std::move(desc_layout);
 
-    const auto& module = Vulkan::Compile(
-        HostShaders::FAULT_BUFFER_PROCESS_COMP, vk::ShaderStageFlagBits::eCompute,
-        instance.GetDevice());
+    const auto& module = Vulkan::Compile(HostShaders::FAULT_BUFFER_PROCESS_COMP,
+                                         vk::ShaderStageFlagBits::eCompute, instance.GetDevice());
     Vulkan::SetObjectName(instance.GetDevice(), module, "Fault Buffer Parser");
 
     const vk::SpecializationMapEntry specialization_map_entry = {
@@ -108,10 +104,8 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
         .setLayoutCount = 1U,
         .pSetLayouts = &(*fault_process_desc_layout),
     };
-    auto [layout_result, layout] =
-        instance.GetDevice().createPipelineLayoutUnique(layout_info);
-    ASSERT_MSG(layout_result == vk::Result::eSuccess,
-               "Failed to create pipeline layout: {}",
+    auto [layout_result, layout] = instance.GetDevice().createPipelineLayoutUnique(layout_info);
+    ASSERT_MSG(layout_result == vk::Result::eSuccess, "Failed to create pipeline layout: {}",
                vk::to_string(layout_result));
     fault_process_pipeline_layout = std::move(layout);
 
@@ -121,11 +115,11 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
     };
     auto [pipeline_result, pipeline] =
         instance.GetDevice().createComputePipelineUnique({}, pipeline_info);
-    ASSERT_MSG(pipeline_result == vk::Result::eSuccess,
-               "Failed to create compute pipeline: {}",
+    ASSERT_MSG(pipeline_result == vk::Result::eSuccess, "Failed to create compute pipeline: {}",
                vk::to_string(pipeline_result));
     fault_process_pipeline = std::move(pipeline);
-    Vulkan::SetObjectName(instance.GetDevice(), *fault_process_pipeline, "Fault Buffer Parser Pipeline");
+    Vulkan::SetObjectName(instance.GetDevice(), *fault_process_pipeline,
+                          "Fault Buffer Parser Pipeline");
 
     instance.GetDevice().destroyShaderModule(module);
 }
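Note: the recurring `auto [result, handle] = device.create...Unique(...)` shape in these hunks is the vulkan-hpp convention when exceptions are disabled (VULKAN_HPP_NO_EXCEPTIONS): creation calls return a vk::ResultValue carrying both the vk::Result and the unique handle, which a structured binding unpacks. A minimal sketch of that pattern with a hypothetical helper (the device and create-info are assumed to exist; the real code checks with ASSERT_MSG rather than abort):

    #define VULKAN_HPP_NO_EXCEPTIONS
    #include <vulkan/vulkan.hpp>

    #include <cstdlib>
    #include <utility>

    // Returns the layout or aborts, mirroring the ASSERT_MSG checks in the diff.
    vk::UniquePipelineLayout MakePipelineLayout(vk::Device device,
                                                const vk::PipelineLayoutCreateInfo& info) {
        // With exceptions disabled, createPipelineLayoutUnique returns a
        // vk::ResultValue that unpacks into (result, value).
        auto [result, layout] = device.createPipelineLayoutUnique(info);
        if (result != vk::Result::eSuccess) {
            std::abort(); // project code formats vk::to_string(result) into the message
        }
        return std::move(layout);
    }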
@@ -145,7 +139,8 @@ void BufferCache::InvalidateMemory(VAddr device_addr, u64 size, bool unmap) {
     {
         std::scoped_lock lock(dma_sync_ranges_mutex);
         const VAddr aligned_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE);
-        const u64 aligned_size = Common::AlignUp(device_addr + size, CACHING_PAGESIZE) - aligned_addr;
+        const u64 aligned_size =
+            Common::AlignUp(device_addr + size, CACHING_PAGESIZE) - aligned_addr;
         dma_sync_ranges.Add(device_addr, size);
     }
 }
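Note: the hunk above widens the invalidated span outward to CACHING_PAGESIZE boundaries before recording it. A worked example of the AlignDown/AlignUp arithmetic, using stand-in helpers and a hypothetical 4 KiB page size (the real constant is defined elsewhere in the cache):

    #include <cstdint>

    // Stand-ins for Common::AlignDown/AlignUp; both assume a power-of-two alignment.
    constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
        return value & ~(align - 1);
    }
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    constexpr std::uint64_t kPageSize = 0x1000; // hypothetical CACHING_PAGESIZE
    constexpr std::uint64_t device_addr = 0x1234;
    constexpr std::uint64_t size = 0x2000;

    constexpr std::uint64_t aligned_addr = AlignDown(device_addr, kPageSize); // 0x1000
    constexpr std::uint64_t aligned_size =
        AlignUp(device_addr + size, kPageSize) - aligned_addr; // 0x4000 - 0x1000 = 0x3000
    static_assert(aligned_addr == 0x1000 && aligned_size == 0x3000);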
@@ -640,7 +635,8 @@ void BufferCache::ProcessFaultBuffer() {
         .pBufferMemoryBarriers = barriers.data(),
     });
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, *fault_process_pipeline);
-    cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *fault_process_pipeline_layout, 0, writes);
+    cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *fault_process_pipeline_layout, 0,
+                                writes);
     constexpr u32 num_threads = CACHING_NUMPAGES / 32; // 1 bit per page, 32 pages per workgroup
     constexpr u32 num_workgroups = Common::DivCeil(num_threads, 64u);
     cmdbuf.dispatch(num_workgroups, 1, 1);
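Note: the dispatch size in this hunk packs one fault bit per tracked page, 32 bits per u32 word, one word per shader thread, and 64 threads per workgroup. A worked example with a stand-in DivCeil and a hypothetical page count (the real CACHING_NUMPAGES is defined in the cache headers):

    #include <cstdint>

    // Stand-in for Common::DivCeil: integer division rounding up.
    constexpr std::uint32_t DivCeil(std::uint32_t n, std::uint32_t d) {
        return (n + d - 1) / d;
    }

    constexpr std::uint32_t kNumPages = 1u << 20;                       // hypothetical CACHING_NUMPAGES
    constexpr std::uint32_t num_threads = kNumPages / 32;               // one 32-bit fault word per thread
    constexpr std::uint32_t num_workgroups = DivCeil(num_threads, 64u); // 64 threads per group
    static_assert(num_threads == 32768 && num_workgroups == 512);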
@@ -700,7 +696,7 @@ void BufferCache::ProcessFaultBuffer() {
         }
         // Buffer size is in 32 bits
         ASSERT_MSG((range.upper() - range.lower()) <= std::numeric_limits<u32>::max(),
                    "Buffer size is too large");
         // Only create a buffer is the current range doesn't fit in an existing one
         FindBuffer(start, static_cast<u32>(end - start));
     }
@@ -148,10 +148,11 @@ public:
 private:
     template <typename Func>
     void ForEachBufferInRange(VAddr device_addr, u64 size, Func&& func) {
-        buffer_ranges.ForEachInRange(device_addr, size, [&](u64 page_start, u64 page_end, BufferId id) {
-            Buffer& buffer = slot_buffers[id];
-            func(id, buffer);
-        });
+        buffer_ranges.ForEachInRange(device_addr, size,
+                                     [&](u64 page_start, u64 page_end, BufferId id) {
+                                         Buffer& buffer = slot_buffers[id];
+                                         func(id, buffer);
+                                     });
     }
 
     void DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 size);
@@ -3,10 +3,10 @@
 
 #pragma once
 
+#include <boost/icl/discrete_interval.hpp>
 #include <boost/icl/interval_map.hpp>
 #include <boost/icl/split_interval_map.hpp>
 #include <boost/icl/split_interval_set.hpp>
-#include <boost/icl/discrete_interval.hpp>
 #include <boost/pool/pool.hpp>
 #include <boost/pool/pool_alloc.hpp>
 #include <boost/pool/poolfwd.hpp>
@@ -221,11 +221,10 @@ private:
 template <typename T>
 class SplitRangeMap {
 public:
-    using IntervalMap =
-        boost::icl::split_interval_map<VAddr, T, boost::icl::total_absorber, std::less,
-                                       boost::icl::inplace_identity, boost::icl::inter_section,
-                                       ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
-                                       RangeSetsAllocator>;
+    using IntervalMap = boost::icl::split_interval_map<
+        VAddr, T, boost::icl::total_absorber, std::less, boost::icl::inplace_identity,
+        boost::icl::inter_section, ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
+        RangeSetsAllocator>;
     using IntervalType = typename IntervalMap::interval_type;
 
 public:
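Note: boost::icl::split_interval_map, the basis of the SplitRangeMap alias above, preserves the borders of every insertion instead of joining adjacent segments that carry equal values, which is what lets each inserted range keep its own identity. A minimal demonstration with int keys rather than VAddr:

    #include <boost/icl/split_interval_map.hpp>

    #include <iostream>
    #include <utility>

    int main() {
        using Map = boost::icl::split_interval_map<int, int>;
        using Interval = Map::interval_type;

        Map map;
        map += std::make_pair(Interval::right_open(0, 8), 1);
        map += std::make_pair(Interval::right_open(8, 16), 1);

        // Prints two segments, [0,8) -> 1 and [8,16) -> 1; a plain
        // interval_map would have joined them into a single [0,16) -> 1.
        for (const auto& [interval, value] : map) {
            std::cout << interval << " -> " << value << '\n';
        }
    }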