Add partial unmap support

This commit is contained in:
Borchev 2024-07-26 18:27:00 -07:00
parent 159be2c7f4
commit ed6a6ff6f4
4 changed files with 42 additions and 14 deletions

View File

@@ -133,7 +133,6 @@ struct AddressSpace::Impl {
ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region"); ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region");
ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + size <= it->upper(), ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + size <= it->upper(),
"Map range must be fully contained in a placeholder"); "Map range must be fully contained in a placeholder");
// Windows only allows splitting a placeholder into two. // Windows only allows splitting a placeholder into two.
// This means that if the map range is fully // This means that if the map range is fully
// contained in the placeholder we need to perform two split operations, // contained in the placeholder we need to perform two split operations,
@@ -454,8 +453,28 @@ void* AddressSpace::MapFile(VAddr virtual_addr, size_t size, size_t offset, u32
#endif #endif
} }
void AddressSpace::Unmap(VAddr virtual_addr, size_t size, bool has_backing) { void AddressSpace::Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VAddr end_in_vma,
return impl->Unmap(virtual_addr, size, has_backing); PAddr phys_base, bool is_exec, bool has_backing) {
#ifdef _WIN32
// There does not appear to be comparable support for partial unmapping on Windows.
// Unfortunately, at least one title was found to require this. The workaround is to unmap
// the entire allocation and remap the portions outside of the requested unmapping range.
impl->Unmap(virtual_addr, size, has_backing);
// TODO: Determine if any titles require partial unmapping support for flexible allocations.
ASSERT_MSG(!has_backing && (start_in_vma != 0 || end_in_vma != size),
"Partial unmapping of flexible allocations is not supported");
if (start_in_vma != 0) {
Map(virtual_addr, start_in_vma, 0, phys_base, is_exec);
}
if (end_in_vma != size) {
Map(virtual_addr + end_in_vma, size - end_in_vma, 0, phys_base + end_in_vma, is_exec);
}
#else
impl->Unmap(virtual_addr + start_in_vma, end_in_vma - start_in_vma, has_backing);
#endif
} }
void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) { void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {

View File

@@ -91,7 +91,8 @@ public:
void* MapFile(VAddr virtual_addr, size_t size, size_t offset, u32 prot, uintptr_t fd); void* MapFile(VAddr virtual_addr, size_t size, size_t offset, u32 prot, uintptr_t fd);
/// Unmaps specified virtual memory area. /// Unmaps specified virtual memory area.
void Unmap(VAddr virtual_addr, size_t size, bool has_backing); void Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VAddr end_in_vma,
PAddr phys_base, bool is_exec, bool has_backing);
void Protect(VAddr virtual_addr, size_t size, MemoryPermission perms); void Protect(VAddr virtual_addr, size_t size, MemoryPermission perms);

View File

@@ -54,7 +54,7 @@ PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size,
free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr; free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;
// Add the allocated region to the list and commit its pages. // Add the allocated region to the list and commit its pages.
auto& area = CarveDmemArea(free_addr, size); auto& area = CarveDmemArea(free_addr, size)->second;
area.memory_type = memory_type; area.memory_type = memory_type;
area.is_free = false; area.is_free = false;
return free_addr; return free_addr;
@@ -63,9 +63,8 @@ PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size,
void MemoryManager::Free(PAddr phys_addr, size_t size) { void MemoryManager::Free(PAddr phys_addr, size_t size) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
const auto dmem_area = FindDmemArea(phys_addr); auto dmem_area = CarveDmemArea(phys_addr, size);
ASSERT(dmem_area != dmem_map.end() && dmem_area->second.base == phys_addr && ASSERT(dmem_area != dmem_map.end() && dmem_area->second.size >= size);
dmem_area->second.size == size);
// Release any dmem mappings that reference this physical block. // Release any dmem mappings that reference this physical block.
std::vector<std::pair<VAddr, u64>> remove_list; std::vector<std::pair<VAddr, u64>> remove_list;
@@ -169,6 +168,7 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
new_vma.prot = prot; new_vma.prot = prot;
new_vma.name = name; new_vma.name = name;
new_vma.type = type; new_vma.type = type;
new_vma.is_exec = is_exec;
if (type == VMAType::Direct) { if (type == VMAType::Direct) {
new_vma.phys_base = phys_addr; new_vma.phys_base = phys_addr;
@@ -216,10 +216,16 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
const auto it = FindVMA(virtual_addr); const auto it = FindVMA(virtual_addr);
ASSERT_MSG(it->second.Contains(virtual_addr, size), const auto& vma_base = it->second;
ASSERT_MSG(vma_base.Contains(virtual_addr, size),
"Existing mapping does not contain requested unmap range"); "Existing mapping does not contain requested unmap range");
const auto type = it->second.type; const auto vma_base_addr = vma_base.base;
const auto vma_base_size = vma_base.size;
const auto phys_base = vma_base.phys_base;
const bool is_exec = vma_base.is_exec;
const auto start_in_vma = virtual_addr - vma_base_addr;
const auto type = vma_base.type;
const bool has_backing = type == VMAType::Direct || type == VMAType::File; const bool has_backing = type == VMAType::Direct || type == VMAType::File;
if (type == VMAType::Direct) { if (type == VMAType::Direct) {
UnmapVulkanMemory(virtual_addr, size); UnmapVulkanMemory(virtual_addr, size);
@@ -239,7 +245,8 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
MergeAdjacent(vma_map, new_it); MergeAdjacent(vma_map, new_it);
// Unmap the memory region. // Unmap the memory region.
impl.Unmap(virtual_addr, size, has_backing); impl.Unmap(vma_base_addr, vma_base_size, start_in_vma, start_in_vma + size, phys_base, is_exec,
has_backing);
TRACK_FREE(virtual_addr, "VMEM"); TRACK_FREE(virtual_addr, "VMEM");
} }
@@ -404,7 +411,7 @@ MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, size_t size
return vma_handle; return vma_handle;
} }
DirectMemoryArea& MemoryManager::CarveDmemArea(PAddr addr, size_t size) { MemoryManager::DMemHandle MemoryManager::CarveDmemArea(PAddr addr, size_t size) {
auto dmem_handle = FindDmemArea(addr); auto dmem_handle = FindDmemArea(addr);
ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map"); ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map");
@@ -425,7 +432,7 @@ DirectMemoryArea& MemoryManager::CarveDmemArea(PAddr addr, size_t size) {
dmem_handle = Split(dmem_handle, start_in_area); dmem_handle = Split(dmem_handle, start_in_area);
} }
return dmem_handle->second; return dmem_handle;
} }
MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) { MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) {

View File

@@ -87,6 +87,7 @@ struct VirtualMemoryArea {
bool disallow_merge = false; bool disallow_merge = false;
std::string name = ""; std::string name = "";
uintptr_t fd = 0; uintptr_t fd = 0;
bool is_exec = false;
bool Contains(VAddr addr, size_t size) const { bool Contains(VAddr addr, size_t size) const {
return addr >= base && (addr + size) <= (base + this->size); return addr >= base && (addr + size) <= (base + this->size);
@@ -212,7 +213,7 @@ private:
VMAHandle CarveVMA(VAddr virtual_addr, size_t size); VMAHandle CarveVMA(VAddr virtual_addr, size_t size);
DirectMemoryArea& CarveDmemArea(PAddr addr, size_t size); DMemHandle CarveDmemArea(PAddr addr, size_t size);
VMAHandle Split(VMAHandle vma_handle, size_t offset_in_vma); VMAHandle Split(VMAHandle vma_handle, size_t offset_in_vma);