From 49d1486d261c56b80a81db757ac19f7c86fddccc Mon Sep 17 00:00:00 2001
From: Stephen Miller
Date: Mon, 2 Jun 2025 19:45:47 -0500
Subject: [PATCH] Move prot validation outside loop

The prot variable shouldn't change during an mprotect call, so we can
check the flags once before protecting instead. This also cleans up the
prot validation code. This should improve performance, and is more
accurate to the FreeBSD code.
---
 src/core/memory.cpp | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 99da3472d..8550ece17 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -557,18 +557,6 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
         return adjusted_size;
     }
 
-    // Validate protection flags
-    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
-                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
-                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
-
-    MemoryProt invalid_flags = prot & ~valid_flags;
-    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
-        LOG_ERROR(Kernel_Vmm, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}",
-                  u32(prot), u32(invalid_flags));
-        return ORBIS_KERNEL_ERROR_EINVAL;
-    }
-
     // Change protection
     vma_base.prot = prot;
 
@@ -598,10 +586,24 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
 s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
     std::scoped_lock lk{mutex};
 
-    s64 protected_bytes = 0;
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (invalid_flags != MemoryProt::NoAccess) {
+        LOG_ERROR(Kernel_Vmm, "Invalid protection flags");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Align addr and size to the nearest page boundary.
     auto aligned_addr = Common::AlignDown(addr, 16_KB);
     auto aligned_size = Common::AlignUp(size + addr - aligned_addr, 16_KB);
+
+    // Protect all VMAs between aligned_addr and aligned_addr + aligned_size.
+    s64 protected_bytes = 0;
     while (protected_bytes < aligned_size) {
         auto it = FindVMA(aligned_addr + protected_bytes);
         auto& vma_base = it->second;