libkernel: Reduce log spam (#3569)

* Fix address formatting for invalid address asserts

* Reduce event flag message to trace
  This log effectively contributes nothing to debugging.

* Remove excess logging in sceKernelBatchMap
  Every one of these opcodes will just log from their individual function calls anyway, and if there were issues with batch map logic, we would've known for a while now.

* Log error return during sceKernelBatchMap
  May help with debugging some unstable UE games?
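Note on the assert-formatting fix: the LOG_* and ASSERT_MSG macros take fmt-style format strings, so a plain {} renders an integer address in decimal while {:#x} renders 0x-prefixed hex. A minimal standalone sketch of the difference, using the fmt library directly (the project's macro wrappers are assumed, not shown):

    #include <cstdint>
    #include <fmt/core.h>

    int main() {
        const std::uintptr_t addr = 0x7fff0000;
        // "{}" formats the address in decimal -- hard to match against a memory map:
        fmt::print("Attempted to access invalid address {}\n", addr);    // 2147418112
        // "{:#x}" is the alternate-form hex specifier the fix switches to:
        fmt::print("Attempted to access invalid address {:#x}\n", addr); // 0x7fff0000
    }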
@@ -344,39 +344,24 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEn
             result = sceKernelMapNamedDirectMemory(&entries[i].start, entries[i].length,
                                                    entries[i].protection, flags,
                                                    static_cast<s64>(entries[i].offset), 0, "anon");
-            LOG_INFO(Kernel_Vmm,
-                     "entry = {}, operation = {}, len = {:#x}, offset = {:#x}, type = {}, "
-                     "result = {}",
-                     i, entries[i].operation, entries[i].length, entries[i].offset,
-                     (u8)entries[i].type, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_UNMAP: {
             result = sceKernelMunmap(entries[i].start, entries[i].length);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_PROTECT: {
             result = sceKernelMprotect(entries[i].start, entries[i].length, entries[i].protection);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_MAP_FLEXIBLE: {
             result = sceKernelMapNamedFlexibleMemory(&entries[i].start, entries[i].length,
                                                      entries[i].protection, flags, "anon");
-            LOG_INFO(Kernel_Vmm,
-                     "entry = {}, operation = {}, len = {:#x}, type = {}, "
-                     "result = {}",
-                     i, entries[i].operation, entries[i].length, (u8)entries[i].type, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
             result = sceKernelMtypeprotect(entries[i].start, entries[i].length, entries[i].type,
                                            entries[i].protection);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         default: {
@@ -385,6 +370,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEn
         }

         if (result != ORBIS_OK) {
+            LOG_ERROR(Kernel_Vmm, "failed with error code {:#x}", result);
             break;
         }
     }
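Note: the net effect of the two hunks above is that each opcode relies on the logging already done inside the sce* call it dispatches to, and the batch loop itself only speaks up on failure. A hedged sketch of that control flow, with hypothetical stand-ins (do_operation for the dispatched call, fprintf for LOG_ERROR):

    #include <cstdio>

    // Hypothetical stand-in for the dispatched sceKernel* call,
    // which is assumed to do its own logging internally.
    static int do_operation(int entry) {
        return entry == 2 ? -22 : 0; // pretend the third entry fails
    }

    static int batch_map(int num_entries) {
        int result = 0;
        for (int i = 0; i < num_entries; ++i) {
            result = do_operation(i); // no per-entry success log here anymore
            if (result != 0) {
                // A single error line replaces the removed per-opcode LOG_INFO calls.
                std::fprintf(stderr, "failed with error code %#x\n", (unsigned)result);
                break; // stop the batch on first failure, matching the loop above
            }
        }
        return result;
    }

    int main() {
        return batch_map(5) == 0 ? 0 : 1;
    }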
@@ -182,8 +182,8 @@ struct OrbisKernelEventFlagOptParam {
 int PS4_SYSV_ABI sceKernelCreateEventFlag(OrbisKernelEventFlag* ef, const char* pName, u32 attr,
                                           u64 initPattern,
                                           const OrbisKernelEventFlagOptParam* pOptParam) {
-    LOG_INFO(Kernel_Event, "called name = {} attr = {:#x} initPattern = {:#x}", pName, attr,
-             initPattern);
+    LOG_TRACE(Kernel_Event, "called name = {} attr = {:#x} initPattern = {:#x}", pName, attr,
+              initPattern);
     if (ef == nullptr || pName == nullptr) {
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
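Note: demoting the creation message from LOG_INFO to LOG_TRACE keeps it available for deep debugging while hiding it at the usual filter level. A minimal sketch of severity-gated logging under that assumption (a conventional ordered filter, not shadPS4's actual backend):

    #include <cstdio>

    enum class Level { Trace = 0, Debug, Info, Warning, Error };

    // Assumed global filter: messages below this severity are dropped.
    static Level g_filter = Level::Info;

    static void log_at(Level lvl, const char* msg) {
        if (lvl < g_filter)
            return; // trace messages vanish under the default Info filter
        std::puts(msg);
    }

    int main() {
        log_at(Level::Info, "visible at the default filter");
        log_at(Level::Trace, "sceKernelCreateEventFlag called ... (hidden by default)");
    }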
@@ -259,7 +259,7 @@ void MemoryManager::Free(PAddr phys_addr, u64 size) {

 s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-               "Attempted to access invalid address {}", virtual_addr);
+               "Attempted to access invalid address {:#x}", virtual_addr);
     std::scoped_lock lk{mutex};

     const u64 alignment = 64_KB;
@@ -352,7 +352,7 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
     // On a PS4, the Fixed flag is ignored if address 0 is provided.
     if (True(flags & MemoryMapFlags::Fixed) && virtual_addr != 0) {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(mapped_addr)),
-                   "Attempted to access invalid address {}", mapped_addr);
+                   "Attempted to access invalid address {:#x}", mapped_addr);
         auto vma = FindVMA(mapped_addr)->second;
         // There's a possible edge case where we're mapping to a partially reserved range.
         // To account for this, unmap any reserved areas within this mapping range first.
@@ -432,7 +432,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

     VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(mapped_addr)),
-               "Attempted to access invalid address {}", mapped_addr);
+               "Attempted to access invalid address {:#x}", mapped_addr);
     const u64 size_aligned = Common::AlignUp(size, 16_KB);

     // Find first free area to map the file.
@@ -480,7 +480,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory

 s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-               "Attempted to access invalid address {}", virtual_addr);
+               "Attempted to access invalid address {:#x}", virtual_addr);
     std::scoped_lock lk{mutex};

     const auto it = FindVMA(virtual_addr);
@@ -586,7 +586,7 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {
     size = Common::AlignUp(size, 16_KB);
     do {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-                   "Attempted to access invalid address {}", virtual_addr);
+                   "Attempted to access invalid address {:#x}", virtual_addr);
         auto it = FindVMA(virtual_addr + unmapped_bytes);
         auto& vma_base = it->second;
         auto unmapped =
@@ -600,7 +600,7 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {

 s32 MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(addr)),
-               "Attempted to access invalid address {}", addr);
+               "Attempted to access invalid address {:#x}", addr);
     std::scoped_lock lk{mutex};

     const auto it = FindVMA(addr);
@@ -684,7 +684,7 @@ s32 MemoryManager::Protect(VAddr addr, u64 size, MemoryProt prot) {
     s64 protected_bytes = 0;
     while (protected_bytes < aligned_size) {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(aligned_addr)),
-                   "Attempted to access invalid address {}", aligned_addr);
+                   "Attempted to access invalid address {:#x}", aligned_addr);
         auto it = FindVMA(aligned_addr + protected_bytes);
         auto& vma_base = it->second;
         auto result = ProtectBytes(aligned_addr + protected_bytes, vma_base,
@@ -832,7 +832,7 @@ void MemoryManager::NameVirtualRange(VAddr virtual_addr, u64 size, std::string_v
     auto aligned_addr = Common::AlignDown(virtual_addr, 16_KB);

     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(aligned_addr)),
-               "Attempted to access invalid address {}", aligned_addr);
+               "Attempted to access invalid address {:#x}", aligned_addr);
     auto it = FindVMA(aligned_addr);
     s64 remaining_size = aligned_size;
     auto current_addr = aligned_addr;
@@ -874,7 +874,7 @@ s32 MemoryManager::GetDirectMemoryType(PAddr addr, s32* directMemoryTypeOut,

 s32 MemoryManager::IsStack(VAddr addr, void** start, void** end) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(addr)),
-               "Attempted to access invalid address {}", addr);
+               "Attempted to access invalid address {:#x}", addr);
     auto vma_handle = FindVMA(addr);

     const VirtualMemoryArea& vma = vma_handle->second;
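Note: every memory.cpp hunk above makes the same substitution, {} to {:#x}, in the invalid-address asserts. A hedged sketch of how a fmt-based assert macro would render that message on failure (hypothetical macro; the project's real ASSERT_MSG is not shown here):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <fmt/core.h>

    // Hypothetical fmt-based assert: aborts with a formatted message when cond is false.
    #define MY_ASSERT_MSG(cond, ...)                                                     \
        do {                                                                             \
            if (!(cond)) {                                                               \
                fmt::print(stderr, "Assertion failed: {}\n", fmt::format(__VA_ARGS__));  \
                std::abort();                                                            \
            }                                                                            \
        } while (0)

    int main() {
        const std::uintptr_t bad = 0xdeadbeef;
        // Prints "Assertion failed: Attempted to access invalid address 0xdeadbeef"
        MY_ASSERT_MSG(false, "Attempted to access invalid address {:#x}", bad);
    }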