mirror of https://github.com/shadps4-emu/shadPS4.git
synced 2025-07-27 20:44:28 +00:00

clang-format

This commit is contained in:
parent 31df795701
commit d89d937501
@@ -168,8 +168,7 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
     const Id base_sift = ctx.OpShiftLeftLogical(ctx.U64, base_hi, ctx.ConstU32(32u));
     const Id base = ctx.OpBitwiseOr(ctx.U64, base_lo, base_sift);
     const Id address = ctx.OpIAdd(ctx.U64, base, ctx.OpUConvert(ctx.U64, offset));
-    return ctx.EmitMemoryAccess(
-        ctx.U32[1], address, [&]() {
+    return ctx.EmitMemoryAccess(ctx.U32[1], address, [&]() {
         const u32 flatbuf_off_dw = inst->Flags<u32>();
         if (flatbuf_off_dw == 0) {
             return ctx.u32_zero_value;
@@ -177,8 +176,8 @@ Id EmitReadConst(EmitContext& ctx, IR::Inst* inst, Id addr, Id offset) {
         const auto& srt_flatbuf = ctx.buffers[ctx.flatbuf_index];
         ASSERT(srt_flatbuf.binding >= 0 && srt_flatbuf.buffer_type == BufferType::Flatbuf);
         const auto [id, pointer_type] = srt_flatbuf[PointerType::U32];
-        const Id ptr{
-            ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, ctx.ConstU32(flatbuf_off_dw))};
+        const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value,
+                                       ctx.ConstU32(flatbuf_off_dw))};
         return ctx.OpLoad(ctx.U32[1], ptr);
     }
 });
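Note on the EmitReadConst hunks above: the emitted SPIR-V assembles a 64-bit buffer device address from two 32-bit SRT dwords, then offsets it before the guarded load. A minimal host-side sketch of the same arithmetic (function name hypothetical, not part of shadPS4):

#include <cstdint>

// Mirrors the SPIR-V in EmitReadConst: base_lo | (base_hi << 32), then
// add the zero-extended offset to form the final 64-bit address.
uint64_t MakeDeviceAddress(uint32_t base_lo, uint32_t base_hi, uint32_t offset) {
    const uint64_t base = static_cast<uint64_t>(base_lo) | (static_cast<uint64_t>(base_hi) << 32);
    return base + static_cast<uint64_t>(offset);
}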
@@ -167,32 +167,40 @@ void EmitContext::DefineArithmeticTypes() {
     }

     if (True(info.dma_types & IR::Type::F64)) {
-        physical_pointer_types[PointerType::F64] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F64[1]);
+        physical_pointer_types[PointerType::F64] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, F64[1]);
     }
     if (True(info.dma_types & IR::Type::U64)) {
-        physical_pointer_types[PointerType::U64] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U64);
+        physical_pointer_types[PointerType::U64] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, U64);
     }
     if (True(info.dma_types & IR::Type::F32)) {
-        physical_pointer_types[PointerType::F32] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F32[1]);
+        physical_pointer_types[PointerType::F32] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, F32[1]);
     }
     if (True(info.dma_types & IR::Type::U32)) {
-        physical_pointer_types[PointerType::U32] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U32[1]);
+        physical_pointer_types[PointerType::U32] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, U32[1]);
     }
     if (True(info.dma_types & IR::Type::F16)) {
-        physical_pointer_types[PointerType::F16] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, F16[1]);
+        physical_pointer_types[PointerType::F16] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, F16[1]);
     }
     if (True(info.dma_types & IR::Type::U16)) {
-        physical_pointer_types[PointerType::U16] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U16);
+        physical_pointer_types[PointerType::U16] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, U16);
     }
     if (True(info.dma_types & IR::Type::U8)) {
-        physical_pointer_types[PointerType::U8] = TypePointer(spv::StorageClass::PhysicalStorageBuffer, U8);
+        physical_pointer_types[PointerType::U8] =
+            TypePointer(spv::StorageClass::PhysicalStorageBuffer, U8);
     }

     if (info.dma_types != IR::Type::Void) {
         constexpr u64 host_access_mask = 0x1UL;
         constexpr u64 host_access_inv_mask = ~host_access_mask;

-        caching_pagebits_value = Constant(U64, static_cast<u64>(VideoCore::BufferCache::CACHING_PAGEBITS));
+        caching_pagebits_value =
+            Constant(U64, static_cast<u64>(VideoCore::BufferCache::CACHING_PAGEBITS));
         caching_pagemask_value = Constant(U64, VideoCore::BufferCache::CACHING_PAGESIZE - 1);
         host_access_mask_value = Constant(U64, host_access_mask);
         host_access_inv_mask_value = Constant(U64, host_access_inv_mask);
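The True(info.dma_types & IR::Type::...) guards above use the codebase's flag-enum idiom. A self-contained sketch of that pattern, with illustrative enum values (the real IR::Type lives in the shader recompiler):

#include <cstdint>

enum class Type : uint32_t {
    Void = 0,
    U8 = 1 << 0,
    U16 = 1 << 1,
    F16 = 1 << 2,
    U32 = 1 << 3,
    F32 = 1 << 4,
    U64 = 1 << 5,
    F64 = 1 << 6,
};

constexpr Type operator&(Type a, Type b) {
    return static_cast<Type>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}

// True() tests whether any bit survived the mask, as in the guards above.
constexpr bool True(Type t) {
    return static_cast<uint32_t>(t) != 0;
}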
@@ -37,7 +37,6 @@ struct VectorIds {

 class EmitContext final : public Sirit::Module {
 public:
-
     explicit EmitContext(const Profile& profile, const RuntimeInfo& runtime_info, Info& info,
                          Bindings& binding);
     ~EmitContext();
@@ -156,13 +155,20 @@ public:
     }

     PointerType PointerTypeFromType(Id type) {
-        if (type.value == U8.value) return PointerType::U8;
-        if (type.value == U16.value) return PointerType::U16;
-        if (type.value == F16[1].value) return PointerType::F16;
-        if (type.value == U32[1].value) return PointerType::U32;
-        if (type.value == F32[1].value) return PointerType::F32;
-        if (type.value == U64.value) return PointerType::U64;
-        if (type.value == F64[1].value) return PointerType::F64;
+        if (type.value == U8.value)
+            return PointerType::U8;
+        if (type.value == U16.value)
+            return PointerType::U16;
+        if (type.value == F16[1].value)
+            return PointerType::F16;
+        if (type.value == U32[1].value)
+            return PointerType::U32;
+        if (type.value == F32[1].value)
+            return PointerType::F32;
+        if (type.value == U64.value)
+            return PointerType::U64;
+        if (type.value == F64[1].value)
+            return PointerType::F64;
         UNREACHABLE_MSG("Unknown type for pointer");
     }

@@ -184,7 +190,8 @@ public:

         // Check if it's a host memory access
         const Id bda_and_host_access_mask = OpBitwiseAnd(U64, bda, host_access_mask_value);
-        const Id bda_host_access = OpINotEqual(U1[1], bda_and_host_access_mask, host_access_mask_value);
+        const Id bda_host_access =
+            OpINotEqual(U1[1], bda_and_host_access_mask, host_access_mask_value);
         OpSelectionMerge(after_host_access_label, spv::SelectionControlMask::MaskNone);
         OpBranchConditional(bda_host_access, host_access_label, after_host_access_label);

@@ -195,8 +202,8 @@ public:
         const Id page_div8 = OpShiftRightLogical(U32[1], page32, u32_three_value);
         const Id page_mod8 = OpBitwiseAnd(U32[1], page32, u32_seven_value);
         const Id page_mask = OpShiftLeftLogical(U32[1], u32_one_value, page_mod8);
-        const Id fault_ptr = OpAccessChain(fault_pointer_type, fault_buffer_id, u32_zero_value,
-                                           page_div8);
+        const Id fault_ptr =
+            OpAccessChain(fault_pointer_type, fault_buffer_id, u32_zero_value, page_div8);
         const Id fault_value = OpLoad(U8, fault_ptr);
         const Id page_mask8 = OpUConvert(U8, page_mask);
         const Id fault_value_masked = OpBitwiseOr(U8, fault_value, page_mask8);
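The page_div8/page_mod8/page_mask sequence above is a plain bitset update, one bit per faulted page in a byte buffer. The equivalent CPU-side computation, as a sketch:

#include <cstdint>

// Set the bit for `page` in a byte-granular fault bitmap:
// page >> 3 picks the byte (page_div8), page & 7 the bit (page_mod8),
// 1 << bit the mask (page_mask), OR'd into the loaded byte.
void MarkPageFaulted(uint8_t* fault_bitmap, uint32_t page) {
    fault_bitmap[page >> 3] |= static_cast<uint8_t>(1u << (page & 7));
}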
@@ -227,7 +234,8 @@ public:

         // Merge
         AddLabel(merge_label);
-        const Id final_result = OpPhi(type, fallback_result, fallback_label, result, available_label);
+        const Id final_result =
+            OpPhi(type, fallback_result, fallback_label, result, available_label);
         return final_result;
     }

@@ -71,7 +71,8 @@ UniqueBuffer::~UniqueBuffer() {
 void UniqueBuffer::Create(const vk::BufferCreateInfo& buffer_ci, MemoryUsage usage,
                           VmaAllocationInfo* out_alloc_info) {
     const bool with_bda = bool(buffer_ci.usage & vk::BufferUsageFlagBits::eShaderDeviceAddress);
-    const VmaAllocationCreateFlags bda_flag = with_bda ? VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : 0;
+    const VmaAllocationCreateFlags bda_flag =
+        with_bda ? VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : 0;
     const VmaAllocationCreateInfo alloc_ci = {
         .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | bda_flag | MemoryUsageVmaFlags(usage),
         .usage = MemoryUsageVma(usage),
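Context for the bda_flag rewrap above: buffers created with eShaderDeviceAddress get a dedicated VMA allocation. A hedged sketch of just that flag selection (helper name hypothetical; the real Create() also fills the rest of VmaAllocationCreateInfo):

#include <vk_mem_alloc.h>
#include <vulkan/vulkan.hpp>

// Pick VMA allocation flags: dedicated memory when the buffer will be
// referenced through buffer_device_address, always staying within budget.
VmaAllocationCreateFlags ChooseAllocFlags(vk::BufferUsageFlags usage) {
    const bool with_bda = static_cast<bool>(usage & vk::BufferUsageFlagBits::eShaderDeviceAddress);
    const VmaAllocationCreateFlags bda_flag =
        with_bda ? VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT : 0;
    return VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | bda_flag;
}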
@@ -145,7 +146,8 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_,
     u32 memory_type_index = UINT32_MAX;
     for (u32 i = 0; i < mem_props.memoryTypeCount; ++i) {
         if ((ptr_props.memoryTypeBits & (1 << i)) != 0) {
-            if (mem_props.memoryTypes[i].propertyFlags & (vk::MemoryPropertyFlagBits::eHostVisible |
-                                                          vk::MemoryPropertyFlagBits::eHostCoherent)) {
+            if (mem_props.memoryTypes[i].propertyFlags &
+                (vk::MemoryPropertyFlagBits::eHostVisible |
+                 vk::MemoryPropertyFlagBits::eHostCoherent)) {
                 memory_type_index = i;
                 // We prefer cache coherent memory types.
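The rewrapped condition above is the standard Vulkan memory-type search: intersect the resource's allowed memoryTypeBits with the property flags you want. A minimal standalone sketch (UINT32_MAX as the not-found sentinel, matching the surrounding code):

#include <cstdint>
#include <vulkan/vulkan.hpp>

// Find a memory type permitted by `type_bits` that has at least one of
// the `wanted` property flags, mirroring the any-of check in the diff.
uint32_t FindMemoryType(const vk::PhysicalDeviceMemoryProperties& mem_props, uint32_t type_bits,
                        vk::MemoryPropertyFlags wanted) {
    for (uint32_t i = 0; i < mem_props.memoryTypeCount; ++i) {
        if ((type_bits & (1u << i)) != 0 && (mem_props.memoryTypes[i].propertyFlags & wanted)) {
            return i;
        }
    }
    return UINT32_MAX; // no suitable type found
}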
@@ -191,8 +193,7 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_,
     auto device_memory_result = instance->GetDevice().allocateMemory(alloc_ci);
     if (device_memory_result.result != vk::Result::eSuccess) {
         // May fail to import the host memory if it is backed by a file. (AMD on Linux)
-        LOG_WARNING(Render_Vulkan,
-                    "Failed to import host memory at {} size {:#x}, Reason: {}",
+        LOG_WARNING(Render_Vulkan, "Failed to import host memory at {} size {:#x}, Reason: {}",
                     cpu_addr, size_bytes, vk::to_string(device_memory_result.result));
         instance->GetDevice().destroyBuffer(buffer);
         buffer = VK_NULL_HANDLE;
@@ -202,8 +203,7 @@ ImportedHostBuffer::ImportedHostBuffer(const Vulkan::Instance& instance_,
     device_memory = device_memory_result.value;

     auto result = instance->GetDevice().bindBufferMemory(buffer, device_memory, 0);
-    ASSERT_MSG(result == vk::Result::eSuccess,
-               "Failed binding imported host buffer with error {}",
+    ASSERT_MSG(result == vk::Result::eSuccess, "Failed binding imported host buffer with error {}",
               vk::to_string(result));

     if (with_bda) {
@@ -26,8 +26,10 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
       staging_buffer{instance, scheduler, MemoryUsage::Upload, StagingBufferSize},
       stream_buffer{instance, scheduler, MemoryUsage::Stream, UboStreamBufferSize},
       gds_buffer{instance, scheduler, MemoryUsage::Stream, 0, AllFlags, DataShareBufferSize},
-      bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, BDA_PAGETABLE_SIZE},
-      fault_readback_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags, FAULT_READBACK_SIZE),
+      bda_pagetable_buffer{instance, scheduler, MemoryUsage::DeviceLocal,
+                           0, AllFlags, BDA_PAGETABLE_SIZE},
+      fault_readback_buffer(instance, scheduler, MemoryUsage::DeviceLocal, 0, AllFlags,
+                            FAULT_READBACK_SIZE),
       memory_tracker{&tracker} {
     Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");

@@ -377,8 +379,8 @@ void BufferCache::ImportMemory(u64 start, u64 end) {
         // create a GPU local buffer.
         bda_addrs.push_back(bda_addr + (i << CACHING_PAGEBITS));
     }
-    WriteDataBuffer(bda_pagetable_buffer, range_start * sizeof(vk::DeviceAddress), bda_addrs.data(),
-                    bda_addrs.size() * sizeof(vk::DeviceAddress));
+    WriteDataBuffer(bda_pagetable_buffer, range_start * sizeof(vk::DeviceAddress),
+                    bda_addrs.data(), bda_addrs.size() * sizeof(vk::DeviceAddress));
     imported_buffers.emplace_back(std::move(buffer));
     // Mark the pages as covered
     imported_regions += range;
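The WriteDataBuffer rewrap above uploads page-table entries computed as the base device address plus the page index shifted by CACHING_PAGEBITS. A sketch of the fill loop (the constant's value here is an assumed placeholder; vk::DeviceAddress is a 64-bit value):

#include <cstdint>
#include <vector>

constexpr uint64_t kCachingPageBits = 12; // placeholder, not shadPS4's actual value

// One device address per cached page, as built before the WriteDataBuffer call.
std::vector<uint64_t> BuildBdaEntries(uint64_t bda_addr, uint64_t num_pages) {
    std::vector<uint64_t> bda_addrs;
    bda_addrs.reserve(num_pages);
    for (uint64_t i = 0; i < num_pages; ++i) {
        bda_addrs.push_back(bda_addr + (i << kCachingPageBits)); // page i -> base + i * page_size
    }
    return bda_addrs;
}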
@@ -840,7 +842,8 @@ void BufferCache::SynchronizeRange(VAddr device_addr, u32 size) {
         });
     }

-void BufferCache::InlineDataBuffer(Buffer& buffer, VAddr address, const void* value, u32 num_bytes) {
+void BufferCache::InlineDataBuffer(Buffer& buffer, VAddr address, const void* value,
+                                   u32 num_bytes) {
     scheduler.EndRendering();
     const auto cmdbuf = scheduler.CommandBuffer();
     const vk::BufferMemoryBarrier2 pre_barrier = {
@@ -889,11 +892,8 @@ void BufferCache::WriteDataBuffer(Buffer& buffer, VAddr address, const void* val
     } else {
         // For large one time transfers use a temporary host buffer.
         // RenderDoc can lag quite a bit if the stream buffer is too large.
-        Buffer temp_buffer{instance,
-                           scheduler,
-                           MemoryUsage::Upload,
-                           0,
-                           vk::BufferUsageFlagBits::eTransferSrc,
-                           num_bytes};
+        Buffer temp_buffer{
+            instance, scheduler, MemoryUsage::Upload, 0, vk::BufferUsageFlagBits::eTransferSrc,
+            num_bytes};
         src_buffer = temp_buffer.Handle();
         u8* const staging = temp_buffer.mapped_data.data();
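The temp_buffer rewrap sits on the slow path that the comments justify: large one-time writes bypass the persistent stream buffer so capture tools such as RenderDoc don't stall on an oversized stream buffer. A schematic of that branch (threshold and names hypothetical):

#include <cstdint>

enum class UploadPath { StreamBuffer, TempHostBuffer };

constexpr uint32_t kLargeTransferThreshold = 1u << 20; // hypothetical cutoff

// Small writes reuse the stream buffer; large one-shot writes get a
// throwaway host-visible upload buffer, as in WriteDataBuffer above.
UploadPath ChooseUploadPath(uint32_t num_bytes) {
    return num_bytes <= kLargeTransferThreshold ? UploadPath::StreamBuffer
                                                : UploadPath::TempHostBuffer;
}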