Update sceKernelMemoryPoolExpand

Hardware tests show that this function is basically the same as sceKernelAllocateDirectMemory, with some minor differences.
Update the memory searching code to match my updated AllocateDirectMemory code, with appropriate error conditions.
Stephen Miller 2025-05-09 16:07:10 -05:00
parent 6477dc4f1e
commit 4f1a8c57fc
2 changed files with 39 additions and 20 deletions

@@ -383,13 +383,12 @@ s32 PS4_SYSV_ABI sceKernelMemoryPoolExpand(u64 searchStart, u64 searchEnd, size_
         LOG_ERROR(Kernel_Vmm, "Provided address range is invalid!");
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
-    const bool is_in_range = searchEnd - searchStart >= len;
-    if (len <= 0 || !Common::Is64KBAligned(len) || !is_in_range) {
-        LOG_ERROR(Kernel_Vmm, "Provided address range is invalid!");
+    if (len <= 0 || !Common::Is64KBAligned(len)) {
+        LOG_ERROR(Kernel_Vmm, "Provided length {:#x} is invalid!", len);
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
     if (alignment != 0 && !Common::Is64KBAligned(alignment)) {
-        LOG_ERROR(Kernel_Vmm, "Alignment value is invalid!");
+        LOG_ERROR(Kernel_Vmm, "Alignment {:#x} is invalid!", alignment);
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
     if (physAddrOut == nullptr) {
@@ -397,8 +396,21 @@ s32 PS4_SYSV_ABI sceKernelMemoryPoolExpand(u64 searchStart, u64 searchEnd, size_
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
 
+    const bool is_in_range = searchEnd - searchStart >= len;
+    if (searchEnd <= searchStart || searchEnd < len || !is_in_range) {
+        LOG_ERROR(Kernel_Vmm,
+                  "Provided address range is too small!"
+                  " searchStart = {:#x}, searchEnd = {:#x}, length = {:#x}",
+                  searchStart, searchEnd, len);
+        return ORBIS_KERNEL_ERROR_ENOMEM;
+    }
+
     auto* memory = Core::Memory::Instance();
     PAddr phys_addr = memory->PoolExpand(searchStart, searchEnd, len, alignment);
+    if (phys_addr == -1) {
+        return ORBIS_KERNEL_ERROR_ENOMEM;
+    }
+
     *physAddrOut = static_cast<s64>(phys_addr);
     LOG_INFO(Kernel_Vmm,

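For reference, here is the wrapper's validation order collected in one place: a minimal, self-contained sketch, not the actual implementation. Every name prefixed "Demo" is a stand-in introduced for illustration; the real function uses the ORBIS_KERNEL_ERROR_* constants, Common::Is64KBAligned, and Core::Memory::Instance() exactly as shown in the diff above, and the range check is condensed here from the searchEnd <= searchStart / searchEnd < len / is_in_range conditions.

    #include <cstddef>
    #include <cstdint>

    constexpr std::int32_t kDemoOk = 0;
    constexpr std::int32_t kDemoEinval = -1; // stands in for ORBIS_KERNEL_ERROR_EINVAL
    constexpr std::int32_t kDemoEnomem = -2; // stands in for ORBIS_KERNEL_ERROR_ENOMEM
    constexpr std::uint64_t kDemo64KB = 0x10000;

    std::int32_t DemoPoolExpandChecks(std::uint64_t search_start, std::uint64_t search_end,
                                      std::size_t len, std::size_t alignment,
                                      std::int64_t* phys_addr_out) {
        // Length must be non-zero and 64 KB aligned, otherwise EINVAL.
        if (len == 0 || len % kDemo64KB != 0) {
            return kDemoEinval;
        }
        // A non-zero alignment must itself be 64 KB aligned, otherwise EINVAL.
        if (alignment != 0 && alignment % kDemo64KB != 0) {
            return kDemoEinval;
        }
        // The output pointer must be valid, otherwise EINVAL.
        if (phys_addr_out == nullptr) {
            return kDemoEinval;
        }
        // A search range that cannot hold the requested length is ENOMEM, not EINVAL.
        if (search_end <= search_start || search_end - search_start < len) {
            return kDemoEnomem;
        }
        // The real wrapper then calls PoolExpand and also translates a -1 result into ENOMEM.
        return kDemoOk;
    }

The behavioral point of the change is that a too-small search range and a failed allocation now surface as ENOMEM rather than EINVAL or an assert, matching sceKernelAllocateDirectMemory.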
@@ -109,31 +109,38 @@ bool MemoryManager::TryWriteBacking(void* address, const void* data, u32 num_byt
 PAddr MemoryManager::PoolExpand(PAddr search_start, PAddr search_end, size_t size, u64 alignment) {
     std::scoped_lock lk{mutex};
+    alignment = alignment > 0 ? alignment : 64_KB;
 
     auto dmem_area = FindDmemArea(search_start);
+    auto mapping_start = search_start > dmem_area->second.base
+                             ? Common::AlignUp(search_start, alignment)
+                             : Common::AlignUp(dmem_area->second.base, alignment);
+    auto mapping_end = mapping_start + size;
 
-    const auto is_suitable = [&] {
-        const auto aligned_base = alignment > 0 ? Common::AlignUp(dmem_area->second.base, alignment)
-                                                : dmem_area->second.base;
-        const auto alignment_size = aligned_base - dmem_area->second.base;
-        const auto remaining_size =
-            dmem_area->second.size >= alignment_size ? dmem_area->second.size - alignment_size : 0;
-        return dmem_area->second.is_free && remaining_size >= size;
-    };
-    while (!is_suitable() && dmem_area->second.GetEnd() <= search_end) {
+    // Find the first free, large enough dmem area in the range.
+    while (!dmem_area->second.is_free || dmem_area->second.GetEnd() < mapping_end) {
+        // The current dmem_area isn't suitable, move to the next one.
         dmem_area++;
+        if (dmem_area == dmem_map.end()) {
+            break;
+        }
+
+        // Update local variables based on the new dmem_area
+        mapping_start = Common::AlignUp(dmem_area->second.base, alignment);
+        mapping_end = mapping_start + size;
     }
-    ASSERT_MSG(is_suitable(), "Unable to find free direct memory area: size = {:#x}", size);
 
-    // Align free position
-    PAddr free_addr = dmem_area->second.base;
-    free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;
+    if (dmem_area == dmem_map.end()) {
+        // There are no suitable mappings in this range
+        LOG_ERROR(Kernel_Vmm, "Unable to find free direct memory area: size = {:#x}", size);
+        return -1;
+    }
 
     // Add the allocated region to the list and commit its pages.
-    auto& area = CarveDmemArea(free_addr, size)->second;
+    auto& area = CarveDmemArea(mapping_start, size)->second;
     area.is_free = false;
     area.is_pooled = true;
-    return free_addr;
+    return mapping_start;
 }
 
 PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
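To make the new search strategy easier to follow in isolation, below is a minimal, self-contained sketch of the same first-fit walk over an area map. DemoArea, DemoPoolExpand, AlignUp, and the use of std::map::lower_bound in place of FindDmemArea are simplifications introduced here, not the emulator's actual types; only the loop structure mirrors the code above. As in the diff, the loop is no longer bounded by search_end and instead runs until it falls off the end of the map.

    #include <cstdint>
    #include <map>

    struct DemoArea {
        std::uint64_t base;
        std::uint64_t size;
        bool is_free;
        std::uint64_t End() const { return base + size; }
    };

    // Assumes align is a power of two, as 64 KB is.
    inline std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    // Returns the aligned base of the first free area that can hold `size` bytes,
    // or -1 if no area in the map is suitable.
    std::int64_t DemoPoolExpand(const std::map<std::uint64_t, DemoArea>& areas,
                                std::uint64_t search_start, std::uint64_t size,
                                std::uint64_t alignment) {
        alignment = alignment > 0 ? alignment : 0x10000; // default to 64 KB

        // lower_bound stands in for FindDmemArea; it yields the first area at or
        // beyond search_start rather than the area containing it.
        auto it = areas.lower_bound(search_start);
        if (it == areas.end()) {
            return -1;
        }

        std::uint64_t mapping_start = search_start > it->second.base
                                          ? AlignUp(search_start, alignment)
                                          : AlignUp(it->second.base, alignment);
        std::uint64_t mapping_end = mapping_start + size;

        // Walk forward until a free area can hold the aligned [mapping_start, mapping_end) range.
        while (!it->second.is_free || it->second.End() < mapping_end) {
            ++it;
            if (it == areas.end()) {
                return -1; // the caller converts this into an out-of-memory error
            }
            mapping_start = AlignUp(it->second.base, alignment);
            mapping_end = mapping_start + size;
        }
        return static_cast<std::int64_t>(mapping_start);
    }

Returning -1 on exhaustion, as the real PoolExpand now does, pushes the failure back to the libkernel wrapper, which reports ORBIS_KERNEL_ERROR_ENOMEM instead of tripping the previous ASSERT_MSG.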