Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 105
-rw-r--r--  src/core/hle/kernel/k_page_table.h   |  19
2 files changed, 124 insertions(+), 0 deletions(-)
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0a932334f..e47d9ce29 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -2068,4 +2068,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
     return ResultSuccess;
 }
 
+ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
+                                         size_t size, KMemoryState state_mask, KMemoryState state,
+                                         KMemoryPermission perm_mask, KMemoryPermission perm,
+                                         KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                         KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+    // Validate basic preconditions.
+    ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+    ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    // Validate the lock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check that the output page group is empty, if it exists.
+    if (out_pg) {
+        ASSERT(out_pg->GetNumPages() == 0);
+    }
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Get the physical address, if we're supposed to.
+    if (out_paddr != nullptr) {
+        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+    }
+
+    // Make the page group, if we're supposed to.
+    if (out_pg != nullptr) {
+        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryPermission new_perm,
+                                    KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+    // Validate basic preconditions.
+    ASSERT((attr_mask & lock_attr) == lock_attr);
+    ASSERT((attr & lock_attr) == lock_attr);
+
+    // Validate the unlock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Check the page group.
+    if (pg != nullptr) {
+        UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
 } // namespace Kernel
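
The two helpers above form a bracket pair: LockMemoryAndOpen re-checks the region's state under the table lock, optionally pins the pages into a page group and reports the backing physical address, then ORs lock_attr into the block attributes; UnlockMemory requires that same attribute to still be set, clears it, and restores permissions. A minimal sketch of how a caller would pair them — the wrapper names and the exact state/permission mask choices below are illustrative assumptions, not part of this diff:

ResultCode KPageTable::LockForCodeMemory(VAddr addr, size_t size) {
    // Hypothetical caller: require code-memory-capable, user-RW, attribute-free
    // pages, then drop them to kernel-only access and tag the range as Locked.
    return this->LockMemoryAndOpen(
        nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
        KMemoryPermission::KernelReadWrite, KMemoryAttribute::Locked);
}

ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, size_t size) {
    // Matching unlock: the range must still carry the Locked attribute; restore
    // user read/write and clear the bit. Passing nullptr for pg skips the
    // (currently unimplemented) page group comparison.
    return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
                              KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
                              KMemoryPermission::None, KMemoryAttribute::All,
                              KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
                              KMemoryAttribute::Locked, nullptr);
}
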
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index b61a39145..bfabdf38c 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -164,6 +164,17 @@ private:
                                      attr_mask, attr, ignore_attr);
     }
 
+    ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+                                 KMemoryState state_mask, KMemoryState state,
+                                 KMemoryPermission perm_mask, KMemoryPermission perm,
+                                 KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                 KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+    ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+                            const KPageLinkedList* pg);
+
     ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
@@ -176,6 +187,14 @@ private:
         return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
     }
 
+    bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        *out = GetPhysicalAddr(virt_addr);
+
+        return *out != 0;
+    }
+
     mutable KLightLock general_lock;
     mutable KLightLock map_physical_memory_lock;
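
Both lock and unlock funnel their validation through CheckMemoryState, which applies a mask/value convention to each field: a check passes only if (field & mask) == expected. That convention is what makes the pair mutually exclusive — the lock path asks for the lock attribute to be clear, the unlock path asks for it to be set. A self-contained sketch of the convention using simplified stand-in types (nothing below is part of the kernel code):

#include <cstdint>
#include <cstdio>

// Simplified stand-in for KMemoryAttribute; values are illustrative only.
enum class Attr : std::uint32_t {
    None = 0,
    Locked = 1u << 0,
    IpcLocked = 1u << 1,
    DeviceShared = 1u << 2,
};

constexpr Attr operator&(Attr a, Attr b) {
    return static_cast<Attr>(static_cast<std::uint32_t>(a) & static_cast<std::uint32_t>(b));
}

// The CheckMemoryState convention: the masked bits of the current value must
// equal the expected value exactly.
constexpr bool CheckMasked(Attr current, Attr mask, Attr expected) {
    return (current & mask) == expected;
}

int main() {
    const Attr region = Attr::Locked; // a region that was previously locked
    // A second lock attempt (attr_mask = Locked, attr = None) is rejected.
    std::printf("can lock again: %d\n", CheckMasked(region, Attr::Locked, Attr::None));
    // The matching unlock (attr_mask = Locked, attr = Locked) is accepted.
    std::printf("can unlock:     %d\n", CheckMasked(region, Attr::Locked, Attr::Locked));
    return 0;
}
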