libkernel: Reduce log spam (#3569)

* Fix address formatting for invalid address asserts

* Demote event flag message to trace

This log effectively contributes nothing to debugging.

* Remove excess logging in sceKernelBatchMap

Each of these opcodes already logs from its individual function call anyway, and if there were issues with the batch map logic, we would have known for a while now.

* Log error return during sceKernelBatchMap

May help with debugging some unstable UE games?
Stephen Miller authored 2025-09-10 19:38:56 -05:00, committed by GitHub
parent 4b0069b296
commit 99d0f85739
3 changed files with 12 additions and 26 deletions


@@ -344,39 +344,24 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEn
             result = sceKernelMapNamedDirectMemory(&entries[i].start, entries[i].length,
                                                    entries[i].protection, flags,
                                                    static_cast<s64>(entries[i].offset), 0, "anon");
-            LOG_INFO(Kernel_Vmm,
-                     "entry = {}, operation = {}, len = {:#x}, offset = {:#x}, type = {}, "
-                     "result = {}",
-                     i, entries[i].operation, entries[i].length, entries[i].offset,
-                     (u8)entries[i].type, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_UNMAP: {
             result = sceKernelMunmap(entries[i].start, entries[i].length);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_PROTECT: {
             result = sceKernelMprotect(entries[i].start, entries[i].length, entries[i].protection);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_MAP_FLEXIBLE: {
             result = sceKernelMapNamedFlexibleMemory(&entries[i].start, entries[i].length,
                                                      entries[i].protection, flags, "anon");
-            LOG_INFO(Kernel_Vmm,
-                     "entry = {}, operation = {}, len = {:#x}, type = {}, "
-                     "result = {}",
-                     i, entries[i].operation, entries[i].length, (u8)entries[i].type, result);
             break;
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
             result = sceKernelMtypeprotect(entries[i].start, entries[i].length, entries[i].type,
                                            entries[i].protection);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
             break;
         }
         default: {
@@ -385,6 +370,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEn
         }
         if (result != ORBIS_OK) {
+            LOG_ERROR(Kernel_Vmm, "failed with error code {:#x}", result);
             break;
         }
     }
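For context, a minimal sketch of the loop shape after this change (simplified from the diff above; the real sceKernelBatchMap2 handles more operations and validation). Each wrapped call already emits its own log line, so the batch loop itself only needs to surface failures:

// Simplified sketch, not the verbatim shadPS4 source: per-operation functions
// such as sceKernelMunmap log their own parameters and results, so the batch
// loop stays quiet until something goes wrong.
s32 result = ORBIS_OK;
for (s32 i = 0; i < numEntries; i++) {
    switch (entries[i].operation) {
    case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_UNMAP:
        result = sceKernelMunmap(entries[i].start, entries[i].length);
        break;
    // ... the remaining operations dispatch the same way ...
    }
    if (result != ORBIS_OK) {
        // The one log this commit adds: report the failing entry's error code.
        LOG_ERROR(Kernel_Vmm, "failed with error code {:#x}", result);
        break;
    }
}

This trades per-entry LOG_INFO lines for a single LOG_ERROR on the first failing entry.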


@@ -182,8 +182,8 @@ struct OrbisKernelEventFlagOptParam {
 int PS4_SYSV_ABI sceKernelCreateEventFlag(OrbisKernelEventFlag* ef, const char* pName, u32 attr,
                                           u64 initPattern,
                                           const OrbisKernelEventFlagOptParam* pOptParam) {
-    LOG_INFO(Kernel_Event, "called name = {} attr = {:#x} initPattern = {:#x}", pName, attr,
-             initPattern);
+    LOG_TRACE(Kernel_Event, "called name = {} attr = {:#x} initPattern = {:#x}", pName, attr,
+              initPattern);
     if (ef == nullptr || pName == nullptr) {
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
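The practical effect of demoting this message from LOG_INFO to LOG_TRACE depends on the logger's active severity filter. As an illustrative sketch only (the enum and threshold below are hypothetical stand-ins, not shadPS4's actual logger internals), a conventional leveled logger drops sub-threshold messages before they reach the output:

// Hypothetical leveled-logger sketch: trace ranks below info, so a message
// demoted to trace vanishes from default output but stays available when a
// user raises verbosity to chase a specific bug.
enum Level { Trace, Debug, Info, Warning, Error };

struct Logger {
    Level threshold = Info; // assumed default verbosity

    bool ShouldLog(Level lvl) const {
        return lvl >= threshold; // Trace < Info: trace messages are filtered
    }
};

Under a default info-level filter, the per-call "called name = ..." line from sceKernelCreateEventFlag no longer appears, which is exactly the spam reduction this commit is after.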


@@ -259,7 +259,7 @@ void MemoryManager::Free(PAddr phys_addr, u64 size) {
 s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-               "Attempted to access invalid address {}", virtual_addr);
+               "Attempted to access invalid address {:#x}", virtual_addr);
     std::scoped_lock lk{mutex};
     const u64 alignment = 64_KB;
@@ -352,7 +352,7 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
     // On a PS4, the Fixed flag is ignored if address 0 is provided.
     if (True(flags & MemoryMapFlags::Fixed) && virtual_addr != 0) {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(mapped_addr)),
-                   "Attempted to access invalid address {}", mapped_addr);
+                   "Attempted to access invalid address {:#x}", mapped_addr);
         auto vma = FindVMA(mapped_addr)->second;
         // There's a possible edge case where we're mapping to a partially reserved range.
         // To account for this, unmap any reserved areas within this mapping range first.
@@ -432,7 +432,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
     VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(mapped_addr)),
-               "Attempted to access invalid address {}", mapped_addr);
+               "Attempted to access invalid address {:#x}", mapped_addr);
     const u64 size_aligned = Common::AlignUp(size, 16_KB);
     // Find first free area to map the file.
@@ -480,7 +480,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
 s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-               "Attempted to access invalid address {}", virtual_addr);
+               "Attempted to access invalid address {:#x}", virtual_addr);
     std::scoped_lock lk{mutex};
     const auto it = FindVMA(virtual_addr);
@@ -586,7 +586,7 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {
     size = Common::AlignUp(size, 16_KB);
     do {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(virtual_addr)),
-                   "Attempted to access invalid address {}", virtual_addr);
+                   "Attempted to access invalid address {:#x}", virtual_addr);
         auto it = FindVMA(virtual_addr + unmapped_bytes);
         auto& vma_base = it->second;
         auto unmapped =
@@ -600,7 +600,7 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {
 s32 MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(addr)),
-               "Attempted to access invalid address {}", addr);
+               "Attempted to access invalid address {:#x}", addr);
     std::scoped_lock lk{mutex};
     const auto it = FindVMA(addr);
@@ -684,7 +684,7 @@ s32 MemoryManager::Protect(VAddr addr, u64 size, MemoryProt prot) {
     s64 protected_bytes = 0;
     while (protected_bytes < aligned_size) {
         ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(aligned_addr)),
-                   "Attempted to access invalid address {}", aligned_addr);
+                   "Attempted to access invalid address {:#x}", aligned_addr);
         auto it = FindVMA(aligned_addr + protected_bytes);
         auto& vma_base = it->second;
         auto result = ProtectBytes(aligned_addr + protected_bytes, vma_base,
@@ -832,7 +832,7 @@ void MemoryManager::NameVirtualRange(VAddr virtual_addr, u64 size, std::string_v
     auto aligned_addr = Common::AlignDown(virtual_addr, 16_KB);
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(aligned_addr)),
-               "Attempted to access invalid address {}", aligned_addr);
+               "Attempted to access invalid address {:#x}", aligned_addr);
     auto it = FindVMA(aligned_addr);
     s64 remaining_size = aligned_size;
     auto current_addr = aligned_addr;
@@ -874,7 +874,7 @@ s32 MemoryManager::GetDirectMemoryType(PAddr addr, s32* directMemoryTypeOut,
 s32 MemoryManager::IsStack(VAddr addr, void** start, void** end) {
     ASSERT_MSG(IsValidAddress(reinterpret_cast<void*>(addr)),
-               "Attempted to access invalid address {}", addr);
+               "Attempted to access invalid address {:#x}", addr);
     auto vma_handle = FindVMA(addr);
     const VirtualMemoryArea& vma = vma_handle->second;
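All of the memory.cpp changes are the same fix: the assert messages used fmt's default "{}" presentation, which prints an address as a decimal integer. The "{:#x}" spec formats it as hex with a "0x" prefix, matching how memory maps are normally read. A standalone illustration (the address value is arbitrary):

// Demonstrates the {} vs {:#x} difference behind these ASSERT_MSG edits.
// fmt formats integers as decimal by default; '#' adds the 0x prefix in hex.
#include <fmt/core.h>

int main() {
    const unsigned long long addr = 0x7fef4000ULL; // arbitrary example address
    fmt::print("{}\n", addr);    // "2146385920" -- unreadable as an address
    fmt::print("{:#x}\n", addr); // "0x7fef4000" -- matches memory-map notation
    return 0;
}

ASSERT_MSG evidently forwards its message through fmt-style formatting (the placeholders in the diff are fmt syntax), so the same format spec applies inside the asserts.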