Move prot validation outside loop
The prot variable shouldn't change during an mprotect call, so we can check the flags once before protecting instead. Also cleans up the prot validation code. This should improve performance, and more closely matches the FreeBSD code.
This commit is contained in:
parent 5307e9d913
commit 49d1486d26
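To make the control-flow change easier to see, here is a minimal sketch of the idea rather than the emulator's actual code: Prot, Vma, and ValidateProt are invented stand-ins. The point is that the validity of prot is loop-invariant, so the check can run once before the per-VMA loop instead of on every iteration.

    #include <cstdint>
    #include <vector>

    enum class Prot : std::uint32_t { NoAccess = 0, Read = 1, Write = 2 };

    struct Vma {
        Prot prot = Prot::NoAccess;
    };

    // The result depends only on prot, so it is loop-invariant.
    static bool ValidateProt(Prot prot) {
        return static_cast<std::uint32_t>(prot) <= 3;
    }

    // Before: the check ran inside the per-VMA loop, once per ProtectBytes call.
    int ProtectOld(std::vector<Vma>& vmas, Prot prot) {
        for (auto& vma : vmas) {
            if (!ValidateProt(prot)) {
                return -1; // EINVAL
            }
            vma.prot = prot;
        }
        return 0;
    }

    // After: prot cannot change while the manager's mutex is held, so check once.
    int ProtectNew(std::vector<Vma>& vmas, Prot prot) {
        if (!ValidateProt(prot)) {
            return -1; // EINVAL
        }
        for (auto& vma : vmas) {
            vma.prot = prot;
        }
        return 0;
    }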
@@ -557,18 +557,6 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
        return adjusted_size;
    }

    // Validate protection flags
    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;

    MemoryProt invalid_flags = prot & ~valid_flags;
    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
        LOG_ERROR(Kernel_Vmm, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}",
                  u32(prot), u32(invalid_flags));
        return ORBIS_KERNEL_ERROR_EINVAL;
    }

    // Change protection
    vma_base.prot = prot;
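The validation itself is a simple mask test: clearing every recognized bit from prot should leave nothing behind. Below is a small self-contained sketch of that pattern using plain u32 values rather than the MemoryProt enum; the flag values are illustrative only, not necessarily the real constants.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Illustrative flag values; not necessarily the real MemoryProt constants.
        constexpr std::uint32_t CpuRead = 0x01, CpuReadWrite = 0x02, GpuRead = 0x10,
                                GpuWrite = 0x20, GpuReadWrite = 0x30;
        constexpr std::uint32_t valid_flags =
            CpuRead | CpuReadWrite | GpuRead | GpuWrite | GpuReadWrite;

        // A request with one recognized bit and one unknown bit (0x80).
        std::uint32_t prot = CpuReadWrite | 0x80;

        // Clear every allowed bit; anything left over is unknown to the API.
        std::uint32_t invalid_flags = prot & ~valid_flags;
        if (invalid_flags != 0) {
            std::printf("invalid bits %#x -> reject with EINVAL\n", invalid_flags);
        }
        return 0;
    }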
@@ -598,10 +586,24 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s

s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
    std::scoped_lock lk{mutex};
    s64 protected_bytes = 0;

    // Validate protection flags
    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;

    MemoryProt invalid_flags = prot & ~valid_flags;
    if (invalid_flags != MemoryProt::NoAccess) {
        LOG_ERROR(Kernel_Vmm, "Invalid protection flags");
        return ORBIS_KERNEL_ERROR_EINVAL;
    }

    // Align addr and size to the nearest page boundary.
    auto aligned_addr = Common::AlignDown(addr, 16_KB);
    auto aligned_size = Common::AlignUp(size + addr - aligned_addr, 16_KB);

    // Protect all VMAs between aligned_addr and aligned_addr + aligned_size.
    s64 protected_bytes = 0;
    while (protected_bytes < aligned_size) {
        auto it = FindVMA(aligned_addr + protected_bytes);
        auto& vma_base = it->second;
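For the alignment step, a worked example may help. The helpers below re-implement what Common::AlignDown and Common::AlignUp compute so the snippet stands alone, and the sample address and size are made up.

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
        return v & ~(a - 1);
    }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return (v + a - 1) & ~(a - 1);
    }

    int main() {
        constexpr std::uint64_t page = 16 * 1024; // the 16_KB granularity used above
        std::uint64_t addr = 0x10005000;          // made-up address, not page-aligned
        std::uint64_t size = 0x2000;

        std::uint64_t aligned_addr = AlignDown(addr, page);                     // 0x10004000
        std::uint64_t aligned_size = AlignUp(size + addr - aligned_addr, page); // 0x4000

        // addr is not page-aligned, so the range grows to cover the whole
        // containing 16 KB page before any VMA is touched.
        std::printf("protect %#llx bytes at %#llx\n",
                    static_cast<unsigned long long>(aligned_size),
                    static_cast<unsigned long long>(aligned_addr));
        return 0;
    }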