Sync page manager protections

Lander Gallastegi 2025-06-19 21:22:43 +02:00
parent 4c279f64c7
commit 71d5d27ab2


@@ -57,12 +57,6 @@ struct PageManager::Impl {
         }
     };
 
-    struct UpdateProtectRange {
-        VAddr addr;
-        u64 size;
-        Core::MemoryPermission perms;
-    };
-
     static constexpr size_t ADDRESS_BITS = 40;
     static constexpr size_t NUM_ADDRESS_PAGES = 1ULL << (40 - PAGE_BITS);
     inline static Vulkan::Rasterizer* rasterizer;
@@ -195,69 +189,60 @@ struct PageManager::Impl {
     template <bool track>
     void UpdatePageWatchers(VAddr addr, u64 size) {
         RENDERER_TRACE;
-        boost::container::small_vector<UpdateProtectRange, 16> update_ranges;
-        {
-            std::scoped_lock lk(lock);
-
-            size_t page = addr >> PAGE_BITS;
-            auto perms = cached_pages[page].Perm();
-            u64 range_begin = 0;
-            u64 range_bytes = 0;
-
-            const auto release_pending = [&] {
-                if (range_bytes > 0) {
-                    RENDERER_TRACE;
-                    // Add pending (un)protect action
-                    update_ranges.push_back({range_begin << PAGE_BITS, range_bytes, perms});
-                    range_bytes = 0;
-                }
-            };
-
-            // Iterate requested pages
-            const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-            const u64 aligned_addr = page << PAGE_BITS;
-            const u64 aligned_end = page_end << PAGE_BITS;
-            ASSERT_MSG(rasterizer->IsMapped(aligned_addr, aligned_end - aligned_addr),
-                       "Attempted to track non-GPU memory at address {:#x}, size {:#x}.",
-                       aligned_addr, aligned_end - aligned_addr);
-
-            for (; page != page_end; ++page) {
-                PageState& state = cached_pages[page];
-
-                // Apply the change to the page state
-                const u8 new_count = state.AddDelta<track ? 1 : -1>();
-
-                if (auto new_perms = state.Perm(); new_perms != perms) [[unlikely]] {
-                    // If the protection changed add pending (un)protect action
-                    release_pending();
-                    perms = new_perms;
-                } else if (range_bytes != 0) {
-                    // If the protection did not change, extend the current range
-                    range_bytes += PAGE_SIZE;
-                }
-
-                // Only start a new range if the page must be (un)protected
-                if (range_bytes == 0 && ((new_count == 0 && !track) || (new_count == 1 && track))) {
-                    range_begin = page;
-                    range_bytes = PAGE_SIZE;
-                }
-            }
-
-            // Add pending (un)protect action
-            release_pending();
-        }
-
-        // Flush deferred protects
-        for (const auto& range : update_ranges) {
-            Protect(range.addr, range.size, range.perms);
-        }
+
+        size_t page = addr >> PAGE_BITS;
+        auto perms = cached_pages[page].Perm();
+        u64 range_begin = 0;
+        u64 range_bytes = 0;
+
+        const auto release_pending = [&] {
+            if (range_bytes > 0) {
+                RENDERER_TRACE;
+                // Perform pending (un)protect action
+                Protect(range_begin << PAGE_BITS, range_bytes, perms);
+                range_bytes = 0;
+            }
+        };
+
+        std::scoped_lock lk(lock);
+
+        // Iterate requested pages
+        const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
+        const u64 aligned_addr = page << PAGE_BITS;
+        const u64 aligned_end = page_end << PAGE_BITS;
+        ASSERT_MSG(rasterizer->IsMapped(aligned_addr, aligned_end - aligned_addr),
+                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.",
+                   aligned_addr, aligned_end - aligned_addr);
+
+        for (; page != page_end; ++page) {
+            PageState& state = cached_pages[page];
+
+            // Apply the change to the page state
+            const u8 new_count = state.AddDelta<track ? 1 : -1>();
+
+            if (auto new_perms = state.Perm(); new_perms != perms) [[unlikely]] {
+                // If the protection changed add pending (un)protect action
+                release_pending();
+                perms = new_perms;
+            } else if (range_bytes != 0) {
+                // If the protection did not change, extend the current range
+                range_bytes += PAGE_SIZE;
+            }
+
+            // Only start a new range if the page must be (un)protected
+            if (range_bytes == 0 && ((new_count == 0 && !track) || (new_count == 1 && track))) {
+                range_begin = page;
+                range_bytes = PAGE_SIZE;
+            }
+        }
+
+        // Add pending (un)protect action
+        release_pending();
     }
 
     template <bool track>
     void UpdatePageWatchersMasked(VAddr base_addr, RegionBits& mask) {
         RENDERER_TRACE;
-        boost::container::small_vector<UpdateProtectRange, 16> update_ranges;
 
         auto start_range = mask.FirstRange();
         auto end_range = mask.LastRange();
@@ -270,60 +255,54 @@ struct PageManager::Impl {
             return;
         }
 
-        {
-            std::scoped_lock lk(lock);
-
-            size_t base_page = (base_addr >> PAGE_BITS);
-            auto perms = cached_pages[base_page + start_range.first].Perm();
-            u64 range_begin = 0;
-            u64 range_bytes = 0;
-
-            const auto release_pending = [&] {
-                if (range_bytes > 0) {
-                    RENDERER_TRACE;
-                    // Add pending (un)protect action
-                    update_ranges.push_back({range_begin << PAGE_BITS, range_bytes, perms});
-                    range_bytes = 0;
-                }
-            };
-
-            for (size_t page = start_range.first; page < end_range.second; ++page) {
-                PageState& state = cached_pages[base_page + page];
-                const bool update = mask.Get(page);
-
-                // Apply the change to the page state
-                const u8 new_count =
-                    update ? state.AddDelta<track ? 1 : -1>() : state.AddDelta<0>();
-
-                if (auto new_perms = state.Perm(); new_perms != perms) [[unlikely]] {
-                    // If the protection changed add pending (un)protect action
-                    release_pending();
-                    perms = new_perms;
-                } else if (range_bytes != 0) {
-                    // If the protection did not change, extend the current range
-                    range_bytes += PAGE_SIZE;
-                }
-
-                // If the page is not being updated, skip it
-                if (!update) {
-                    continue;
-                }
-
-                // Only start a new range if the page must be (un)protected
-                if (range_bytes == 0 && ((new_count == 0 && !track) || (new_count == 1 && track))) {
-                    range_begin = base_page + page;
-                    range_bytes = PAGE_SIZE;
-                }
-            }
-
-            // Add pending (un)protect action
-            release_pending();
-        }
-
-        // Flush deferred protects
-        for (const auto& range : update_ranges) {
-            Protect(range.addr, range.size, range.perms);
-        }
+        size_t base_page = (base_addr >> PAGE_BITS);
+        auto perms = cached_pages[base_page + start_range.first].Perm();
+        u64 range_begin = 0;
+        u64 range_bytes = 0;
+
+        const auto release_pending = [&] {
+            if (range_bytes > 0) {
+                RENDERER_TRACE;
+                // Perform pending (un)protect action
+                Protect((range_begin << PAGE_BITS), range_bytes, perms);
+                range_bytes = 0;
+            }
+        };
+
+        std::scoped_lock lk(lock);
+
+        // Iterate pages
+        for (size_t page = start_range.first; page < end_range.second; ++page) {
+            PageState& state = cached_pages[base_page + page];
+            const bool update = mask.Get(page);
+
+            // Apply the change to the page state
+            const u8 new_count =
+                update ? state.AddDelta<track ? 1 : -1>() : state.AddDelta<0>();
+
+            if (auto new_perms = state.Perm(); new_perms != perms) [[unlikely]] {
+                // If the protection changed add pending (un)protect action
+                release_pending();
+                perms = new_perms;
+            } else if (range_bytes != 0) {
+                // If the protection did not change, extend the current range
+                range_bytes += PAGE_SIZE;
+            }
+
+            // If the page is not being updated, skip it
+            if (!update) {
+                continue;
+            }
+
+            // Only start a new range if the page must be (un)protected
+            if (range_bytes == 0 && ((new_count == 0 && !track) || (new_count == 1 && track))) {
+                range_begin = base_page + page;
+                range_bytes = PAGE_SIZE;
+            }
+        }
+
+        // Add pending (un)protect action
+        release_pending();
     }
 
     std::array<PageState, NUM_ADDRESS_PAGES> cached_pages{};
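Both hunks follow the same pattern after this change: walk the affected pages, apply the +1/-1 watcher delta, coalesce consecutive pages whose count transition requires the same (un)protect action into one range, and call Protect for that range while the lock is still held, instead of queueing UpdateProtectRange entries in update_ranges and flushing them after the lock is released. Below is a minimal, self-contained sketch of that pattern under simplified assumptions (a plain counter table, a single read-only/writable flag, and the invented names PageTable, ProtectFn and UpdateWatchers); it is illustrative only and not the PageManager implementation above.

// Minimal sketch of the coalescing pattern used above, with illustrative
// names (PageTable, ProtectFn, UpdateWatchers); it is not the PageManager API.
#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

constexpr std::uint64_t kPageBits = 12;
constexpr std::uint64_t kPageSize = 1ULL << kPageBits;

struct PageTable {
    std::vector<std::uint8_t> counts; // watcher count per page
    std::mutex lock;
};

// Stand-in for the Protect() call (e.g. an mprotect-style callback).
using ProtectFn = std::function<void(std::uint64_t addr, std::uint64_t size, bool read_only)>;

template <bool track>
void UpdateWatchers(PageTable& table, std::uint64_t addr, std::uint64_t size,
                    const ProtectFn& protect) {
    std::uint64_t page = addr >> kPageBits;
    const std::uint64_t page_end = (addr + size + kPageSize - 1) >> kPageBits;

    std::uint64_t range_begin = 0;
    std::uint64_t range_bytes = 0;
    // Tracking write-protects pages, untracking restores write access.
    const bool read_only = track;

    const auto release_pending = [&] {
        if (range_bytes > 0) {
            // Apply the pending (un)protect action for the coalesced range.
            protect(range_begin << kPageBits, range_bytes, read_only);
            range_bytes = 0;
        }
    };

    // As in the commit, the protection change is issued while the lock is
    // held, so the counts and the real page protections cannot diverge.
    std::scoped_lock lk(table.lock);

    for (; page != page_end; ++page) {
        std::uint8_t& count = table.counts.at(static_cast<std::size_t>(page));
        count = static_cast<std::uint8_t>(track ? count + 1 : count - 1);

        // Only the 0 <-> 1 transitions change the required protection.
        const bool needs_change = track ? (count == 1) : (count == 0);
        if (needs_change) {
            if (range_bytes == 0) {
                range_begin = page; // start a new contiguous range
            }
            range_bytes += kPageSize;
        } else {
            release_pending(); // flush the range accumulated so far
        }
    }
    release_pending();
}

Calling UpdateWatchers<true>(table, addr, size, protect) bumps the count for every page touched by [addr, addr + size) and issues one protect call per maximal run of pages whose count just became 1; UpdateWatchers<false> does the reverse. Issuing the protection change under the lock is the substance of the commit: before it, the deferred update_ranges flush ran after the lock was released, so the applied protections could lag behind the recorded page state.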