Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp |  6
-rw-r--r--  src/core/hle/kernel/kernel.cpp          |  5
-rw-r--r--  src/core/hle/kernel/process.cpp         | 18
-rw-r--r--  src/core/hle/kernel/process.h           | 11
-rw-r--r--  src/core/hle/kernel/scheduler.cpp       | 60
-rw-r--r--  src/core/hle/kernel/scheduler.h         |  6
-rw-r--r--  src/core/hle/kernel/svc.cpp             | 36
-rw-r--r--  src/core/hle/kernel/thread.cpp          |  5
-rw-r--r--  src/core/hle/kernel/thread.h            |  3
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp      | 76
-rw-r--r--  src/core/hle/kernel/vm_manager.h        | 55

11 files changed, 167 insertions(+), 114 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
// them all.
std::size_t last = waiting_threads.size();
if (num_to_wake > 0) {
- last = num_to_wake;
+ last = std::min(last, static_cast<std::size_t>(num_to_wake));
}
// Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
// Determine the modified value depending on the waiting count.
s32 updated_value;
if (waiting_threads.empty()) {
- updated_value = value - 1;
- } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value + 1;
+ } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
+ updated_value = value - 1;
} else {
updated_value = value;
}
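
Note: the swap above fixes the inverted updated_value adjustments, and the earlier hunk clamps num_to_wake so it can never exceed the number of waiters. A minimal standalone sketch of the clamping rule (ThreadsToWake is an illustrative name, not part of the codebase):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// How many waiting threads should be woken for a given request.
// A non-positive num_to_wake conventionally means "wake them all".
std::size_t ThreadsToWake(std::size_t num_waiting, std::int32_t num_to_wake) {
    std::size_t last = num_waiting;
    if (num_to_wake > 0) {
        // Clamp so the wake loop never indexes past the waiting-thread list.
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }
    return last;
}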
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index a7e4ddc05..6baeb3494 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -34,7 +34,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
const auto& system = Core::System::GetInstance();
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
SharedPtr<Thread> thread =
system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
thread->GetWaitHandle() != 0) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
+ thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->SetMutexWaitAddress(0);
thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 0d782e4ba..b0b7af76b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,6 +5,7 @@
#include <algorithm>
#include <memory>
#include <random>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
@@ -75,6 +76,10 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}
+u64 Process::GetTotalPhysicalMemoryUsed() const {
+ return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
+}
+
ResultCode Process::ClearSignalState() {
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -107,14 +112,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
return handle_table.SetSize(capabilities.GetHandleTableSize());
}
-void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
+void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
+ // The kernel always ensures that the given stack size is page aligned.
+ main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
+
// Allocate and map the main thread stack
// TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
// of the user address space.
+ const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
vm_manager
- .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
- std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
- MemoryState::Stack)
+ .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
+ 0, main_thread_stack_size, MemoryState::Stack)
.Unwrap();
vm_manager.LogLayout();
@@ -224,6 +232,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
+ code_memory_size += module_.memory->size();
+
// Clear instruction cache in CPU JIT
system.InvalidateCpuInstructionCaches();
}
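
Note: Run() now records the page-aligned main thread stack size, which GetTotalPhysicalMemoryUsed() sums together with the heap and loaded code sizes. A sketch of the rounding Common::AlignUp performs here, assuming the usual 0x1000-byte Memory::PAGE_SIZE and a power-of-two alignment (kPageSize and AlignUpToPage are illustrative names):

#include <cstdint>

constexpr std::uint64_t kPageSize = 0x1000;

// Round size up to the next multiple of the page size.
constexpr std::uint64_t AlignUpToPage(std::uint64_t size) {
    return (size + kPageSize - 1) & ~(kPageSize - 1);
}

static_assert(AlignUpToPage(0x1234) == 0x2000, "rounds up to the next page");
static_assert(AlignUpToPage(0x1000) == 0x1000, "aligned sizes are unchanged");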
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index a0217d3d8..732d12170 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -186,6 +186,9 @@ public:
return random_entropy.at(index);
}
+ /// Retrieves the total physical memory used by this process in bytes.
+ u64 GetTotalPhysicalMemoryUsed() const;
+
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
@@ -210,7 +213,7 @@ public:
/**
* Applies address space changes and launches the process main thread.
*/
- void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
+ void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
/**
* Prepares a process for termination by stopping all of its threads
@@ -247,6 +250,12 @@ private:
/// Memory manager for this process.
Kernel::VMManager vm_manager;
+ /// Size of the main thread's stack in bytes.
+ u64 main_thread_stack_size = 0;
+
+ /// Size of the loaded code memory in bytes.
+ u64 code_memory_size = 0;
+
/// Current status of the process
ProcessStatus status;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
}
bool Scheduler::HaveReadyThreads() const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
- return ready_queue.get_first() != nullptr;
+ std::lock_guard lock{scheduler_mutex};
+ return !ready_queue.empty();
}
Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
+ if (ready_queue.empty()) {
+ return thread;
+ }
// We have to do better than the current thread.
// This call returns null when that's not possible.
- next = ready_queue.pop_first_better(thread->GetPriority());
- if (!next) {
- // Otherwise just keep going with the current thread
+ next = ready_queue.front();
+ if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
- next = ready_queue.pop_first();
+ if (ready_queue.empty()) {
+ return nullptr;
+ }
+ next = ready_queue.front();
}
return next;
}
void Scheduler::SwitchContext(Thread* new_thread) {
- Thread* const previous_thread = GetCurrentThread();
+ Thread* previous_thread = GetCurrentThread();
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
- ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+ ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
current_thread = new_thread;
- ready_queue.remove(new_thread->GetPriority(), new_thread);
+ ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
}
void Scheduler::Reschedule() {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
Thread* cur = GetCurrentThread();
Thread* next = PopNextReadyThread();
@@ -143,51 +148,54 @@ void Scheduler::Reschedule() {
SwitchContext(next);
}
-void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+void Scheduler::AddThread(SharedPtr<Thread> thread) {
+ std::lock_guard lock{scheduler_mutex};
thread_list.push_back(std::move(thread));
- ready_queue.prepare(priority);
}
void Scheduler::RemoveThread(Thread* thread) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.push_back(priority, thread);
+ ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.remove(priority, thread);
+ ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
+ if (thread->GetPriority() == priority) {
+ return;
+ }
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
- ready_queue.move(thread, thread->GetPriority(), priority);
- else
- ready_queue.prepare(priority);
+ ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
const u32 mask = 1U << core;
- return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
- return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
- });
+ for (auto* thread : ready_queue) {
+ if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
+ return thread;
+ }
+ }
+ return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
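
Note: the rewritten PopNextReadyThread keeps the running thread unless the front of the ready queue strictly outranks it (a lower value means a higher priority). A reduced sketch of that selection rule, with the thread type boiled down to a priority field (FakeThread and PickNext are illustrative):

#include <deque>

struct FakeThread {
    unsigned priority; // lower value = higher priority
};

// Pass current == nullptr when no thread is running.
FakeThread* PickNext(FakeThread* current, const std::deque<FakeThread*>& ready) {
    if (ready.empty()) {
        return current; // nothing better available (may be nullptr)
    }
    FakeThread* next = ready.front();
    if (current != nullptr && next->priority >= current->priority) {
        return current; // ties keep the current thread running
    }
    return next;
}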
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
#include <mutex>
#include <vector>
#include "common/common_types.h"
-#include "common/thread_queue_list.h"
+#include "common/multi_level_queue.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
@@ -38,7 +38,7 @@ public:
u64 GetLastContextSwitchTicks() const;
/// Adds a new thread to the scheduler
- void AddThread(SharedPtr<Thread> thread, u32 priority);
+ void AddThread(SharedPtr<Thread> thread);
/// Removes a thread from the scheduler
void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
- Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
+ Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;
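
Note: Common::MultiLevelQueue replaces ThreadQueueList — conceptually one FIFO per priority level plus a bitset of non-empty levels, so the scheduler can find the highest-priority ready thread without walking empty levels element by element (the real container locates the level with a bit scan rather than the loop below). A minimal sketch of the idea, not the actual interface:

#include <array>
#include <bitset>
#include <cstddef>
#include <list>

template <typename T, std::size_t Depth>
class SimpleMultiLevelQueue {
public:
    // at_back = false mimics pushing a preempted thread back to the front.
    void add(T item, std::size_t priority, bool at_back = true) {
        auto& level = levels[priority];
        if (at_back) {
            level.push_back(item);
        } else {
            level.push_front(item);
        }
        used.set(priority);
    }

    void remove(T item, std::size_t priority) {
        auto& level = levels[priority];
        level.remove(item);
        if (level.empty()) {
            used.reset(priority);
        }
    }

    bool empty() const {
        return used.none();
    }

    // Front of the highest-priority (lowest-index) non-empty level.
    T front() const {
        for (std::size_t p = 0; p < Depth; ++p) {
            if (used.test(p)) {
                return levels[p].front();
            }
        }
        return T{}; // empty queue
    }

private:
    std::array<std::list<T>, Depth> levels{};
    std::bitset<Depth> used{};
};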
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index e5e7f99e1..76a8b0191 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -175,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
return ERR_INVALID_SIZE;
}
- auto& vm_manager = Core::CurrentProcess()->VMManager();
- const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
- const auto alloc_result =
- vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
-
+ auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
+ const auto alloc_result = vm_manager.SetHeapSize(heap_size);
if (alloc_result.Failed()) {
return alloc_result.Code();
}
@@ -712,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
HeapRegionBaseAddr = 4,
HeapRegionSize = 5,
TotalMemoryUsage = 6,
- TotalHeapUsage = 7,
+ TotalPhysicalMemoryUsed = 7,
IsCurrentProcessBeingDebugged = 8,
RegisterResourceLimit = 9,
IdleTickCount = 10,
@@ -748,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionBaseAddr:
case GetInfoType::NewMapRegionSize:
case GetInfoType::TotalMemoryUsage:
- case GetInfoType::TotalHeapUsage:
+ case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
case GetInfoType::PersonalMmHeapUsage:
case GetInfoType::TitleId:
@@ -808,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = process->VMManager().GetTotalMemoryUsage();
return RESULT_SUCCESS;
- case GetInfoType::TotalHeapUsage:
- *result = process->VMManager().GetTotalHeapUsage();
+ case GetInfoType::TotalPhysicalMemoryUsed:
+ *result = process->GetTotalPhysicalMemoryUsed();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1356,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->SetStatus(ThreadStatus::WaitCondVar);
current_thread->InvalidateWakeupCallback();
current_thread->WakeAfterDelay(nano_seconds);
@@ -1400,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// them all.
std::size_t last = waiting_threads.size();
if (target != -1)
- last = target;
+ last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
// If there are no threads waiting on this condition variable, just exit
- if (last > waiting_threads.size())
+ if (last == 0)
return RESULT_SUCCESS;
for (std::size_t index = 0; index < last; ++index) {
@@ -1411,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
+ // Release the thread from the condition variable.
+ thread->SetCondVarWaitAddress(0);
+
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -1429,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
}
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
thread->GetWaitHandle()));
-
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->ResumeFromWait();
auto* const lock_owner = thread->GetLockOwner();
@@ -1442,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
thread->SetLockOwner(nullptr);
thread->SetMutexWaitAddress(0);
- thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
@@ -1462,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
auto owner = handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->InvalidateWakeupCallback();
+ thread->SetStatus(ThreadStatus::WaitMutex);
owner->AddMutexWaiter(thread);
-
- Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
}
}
@@ -2140,7 +2138,7 @@ void CallSVC(u32 immediate) {
MICROPROFILE_SCOPE(Kernel_SVC);
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
const FunctionDef* info = GetSVCInfo(immediate);
if (info) {
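
Note: the svc.cpp changes split the old, overloaded WaitMutex state — a thread blocked in WaitProcessWideKeyAtomic now waits in WaitCondVar, and only transitions to WaitMutex if, once signalled, it loses the race for the mutex. A compressed sketch of the new status flow (Status and OnSignal are illustrative names):

enum class Status { Running, Ready, WaitCondVar, WaitMutex };

// Called for a thread currently in Status::WaitCondVar when its condition
// variable is signalled, mirroring SignalProcessWideKey above.
Status OnSignal(bool acquired_mutex) {
    // mutex free  -> thread resumes (ResumeFromWait in the real code)
    // contended   -> thread is queued on the mutex owner and waits on it
    return acquired_mutex ? Status::Ready : Status::WaitMutex;
}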
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3b22e8e0d..fa3ac3abc 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -105,6 +105,7 @@ void Thread::ResumeFromWait() {
case ThreadStatus::WaitSleep:
case ThreadStatus::WaitIPC:
case ThreadStatus::WaitMutex:
+ case ThreadStatus::WaitCondVar:
case ThreadStatus::WaitArb:
break;
@@ -198,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
thread->scheduler = &system.Scheduler(processor_id);
- thread->scheduler->AddThread(thread, priority);
+ thread->scheduler->AddThread(thread);
thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
@@ -351,7 +352,7 @@ void Thread::ChangeScheduler() {
if (*new_processor_id != processor_id) {
// Remove thread from previous core's scheduler
scheduler->RemoveThread(this);
- next_scheduler.AddThread(this, current_priority);
+ next_scheduler.AddThread(this);
}
processor_id = *new_processor_id;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index faad5f391..9c684758c 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
WaitIPC, ///< Waiting for the reply from an IPC request
WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
- WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc
+ WaitMutex, ///< Waiting due to an ArbitrateLock svc
+ WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
Dormant, ///< Created but not yet made ready
Dead ///< Run to completion, or forcefully terminated
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 22bf55ce7..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
return RESULT_SUCCESS;
}
-ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
+ if (size > GetHeapRegionSize()) {
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ // No need to do any additional work if the heap is already the given size.
+ if (size == GetCurrentHeapSize()) {
+ return MakeResult(heap_region_base);
}
if (heap_memory == nullptr) {
// Initialize heap
- heap_memory = std::make_shared<std::vector<u8>>();
- heap_start = heap_end = target;
+ heap_memory = std::make_shared<std::vector<u8>>(size);
+ heap_end = heap_region_base + size;
} else {
- UnmapRange(heap_start, heap_end - heap_start);
- }
-
- // If necessary, expand backing vector to cover new heap extents.
- if (target < heap_start) {
- heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
- heap_start = target;
- RefreshMemoryBlockMappings(heap_memory.get());
- }
- if (target + size > heap_end) {
- heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
- heap_end = target + size;
- RefreshMemoryBlockMappings(heap_memory.get());
+ UnmapRange(heap_region_base, GetCurrentHeapSize());
}
- ASSERT(heap_end - heap_start == heap_memory->size());
- CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
- MemoryState::Heap));
- Reprotect(vma, perms);
+ // If necessary, expand backing vector to cover new heap extents in
+ // the case of allocating. Otherwise, shrink the backing memory,
+ // if a smaller heap has been requested.
+ const u64 old_heap_size = GetCurrentHeapSize();
+ if (size > old_heap_size) {
+ const u64 alloc_size = size - old_heap_size;
- heap_used = size;
-
- return MakeResult<VAddr>(heap_end - size);
-}
+ heap_memory->insert(heap_memory->end(), alloc_size, 0);
+ RefreshMemoryBlockMappings(heap_memory.get());
+ } else if (size < old_heap_size) {
+ heap_memory->resize(size);
+ heap_memory->shrink_to_fit();
-ResultCode VMManager::HeapFree(VAddr target, u64 size) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ RefreshMemoryBlockMappings(heap_memory.get());
}
- if (size == 0) {
- return RESULT_SUCCESS;
- }
+ heap_end = heap_region_base + size;
+ ASSERT(GetCurrentHeapSize() == heap_memory->size());
- const ResultCode result = UnmapRange(target, size);
- if (result.IsError()) {
- return result;
+ const auto mapping_result =
+ MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
+ if (mapping_result.Failed()) {
+ return mapping_result.Code();
}
- heap_used -= size;
- return RESULT_SUCCESS;
+ return MakeResult<VAddr>(heap_region_base);
}
MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_base = map_region_end;
heap_region_end = heap_region_base + heap_region_size;
+ heap_end = heap_region_base;
new_map_region_base = heap_region_end;
new_map_region_end = new_map_region_base + new_map_region_size;
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
return 0xF8000000;
}
-u64 VMManager::GetTotalHeapUsage() const {
- return heap_used;
-}
-
VAddr VMManager::GetAddressSpaceBaseAddress() const {
return address_space_base;
}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
return heap_region_end - heap_region_base;
}
+u64 VMManager::GetCurrentHeapSize() const {
+ return heap_end - heap_region_base;
+}
+
bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
GetHeapRegionEndAddress());
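
Note: SetHeapSize folds HeapAllocate and HeapFree into one entry point — growing extends the backing vector, shrinking trims it, and both remap the block at heap_region_base. A hedged usage sketch against the interface shown in this diff (Example is a hypothetical caller; ASSERT and the ResultVal accessors are used as elsewhere in the tree):

#include "common/assert.h"
#include "core/hle/kernel/vm_manager.h"

void Example(Kernel::VMManager& vm) {
    // Grow (or create) the heap; on success the heap base address is returned.
    const auto grow = vm.SetHeapSize(32 * 1024 * 1024);
    ASSERT(grow.Succeeded());

    // Shrinking goes through the same call; the backing memory is trimmed.
    const auto shrink = vm.SetHeapSize(16 * 1024 * 1024);
    ASSERT(shrink.Succeeded());

    // Requests beyond the heap region fail with ERR_OUT_OF_MEMORY.
    const auto too_big = vm.SetHeapSize(vm.GetHeapRegionSize() + 1);
    ASSERT(too_big.Failed());
}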
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 7cdff6094..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -380,11 +380,41 @@ public:
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
- ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
- ResultCode HeapFree(VAddr target, u64 size);
-
ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+ /// Attempts to allocate a heap with the given size.
+ ///
+ /// @param size The size of the heap to allocate in bytes.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size that is equal to the size of the current heap,
+ /// then this function will do nothing and return the current
+ /// heap's starting address, as there's no need to perform
+ /// any additional heap allocation work.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size less than the current heap's size, then
+ /// this function will attempt to shrink the heap.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size larger than the current heap's size, then
+ /// this function will attempt to extend the size of the heap.
+ ///
+ /// @returns A result indicating either success or failure.
+ /// <p>
+ /// If successful, this function will return a result
+ /// containing the starting address to the allocated heap.
+ /// <p>
+ /// If unsuccessful, this function will return a result
+ /// containing an error code.
+ ///
+ /// @pre The given size must lie within the allowable heap
+ /// memory region managed by this VMManager instance.
+ /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
+ /// being returned as the result.
+ ///
+ ResultVal<VAddr> SetHeapSize(u64 size);
+
/// Queries the memory manager for information about the given address.
///
/// @param address The address to query the memory manager about for information.
@@ -418,9 +448,6 @@ public:
/// Gets the total memory usage, used by svcGetInfo
u64 GetTotalMemoryUsage() const;
- /// Gets the total heap usage, used by svcGetInfo
- u64 GetTotalHeapUsage() const;
-
/// Gets the address space base address
VAddr GetAddressSpaceBaseAddress() const;
@@ -469,6 +496,13 @@ public:
/// Gets the total size of the heap region in bytes.
u64 GetHeapRegionSize() const;
+ /// Gets the total size of the current heap in bytes.
+ ///
+ /// @note This is the current allocated heap size, not the size
+ /// of the region it's allowed to exist within.
+ ///
+ u64 GetCurrentHeapSize() const;
+
/// Determines whether or not the specified range is within the heap region.
bool IsWithinHeapRegion(VAddr address, u64 size) const;
@@ -617,9 +651,6 @@ private:
VAddr new_map_region_base = 0;
VAddr new_map_region_end = 0;
- VAddr main_code_region_base = 0;
- VAddr main_code_region_end = 0;
-
VAddr tls_io_region_base = 0;
VAddr tls_io_region_end = 0;
@@ -628,9 +659,9 @@ private:
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
- // The left/right bounds of the address space covered by heap_memory.
- VAddr heap_start = 0;
+
+ // The end of the currently allocated heap. This is not an inclusive
+ // end of the range. This is essentially 'base_address + current_size'.
VAddr heap_end = 0;
- u64 heap_used = 0;
};
} // namespace Kernel