Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/address_arbiter.cpp |   6
 src/core/hle/kernel/code_set.cpp        |  12
 src/core/hle/kernel/code_set.h          |  90
 src/core/hle/kernel/kernel.cpp          |  13
 src/core/hle/kernel/kernel.h            |   6
 src/core/hle/kernel/mutex.cpp           |  35
 src/core/hle/kernel/mutex.h             |  20
 src/core/hle/kernel/object.cpp          |   2
 src/core/hle/kernel/object.h            |   2
 src/core/hle/kernel/process.cpp         |  51
 src/core/hle/kernel/process.h           |  95
 src/core/hle/kernel/readable_event.cpp  |   2
 src/core/hle/kernel/readable_event.h    |   2
 src/core/hle/kernel/resource_limit.cpp  |   7
 src/core/hle/kernel/resource_limit.h    |  11
 src/core/hle/kernel/scheduler.cpp       |  68
 src/core/hle/kernel/scheduler.h         |   6
 src/core/hle/kernel/server_port.cpp     |   2
 src/core/hle/kernel/server_port.h       |   2
 src/core/hle/kernel/server_session.cpp  |   2
 src/core/hle/kernel/server_session.h    |   2
 src/core/hle/kernel/shared_memory.cpp   |  11
 src/core/hle/kernel/shared_memory.h     |  10
 src/core/hle/kernel/svc.cpp             | 302
 src/core/hle/kernel/svc_wrap.h          |   8
 src/core/hle/kernel/thread.cpp          |  93
 src/core/hle/kernel/thread.h            |  32
 src/core/hle/kernel/transfer_memory.cpp |  81
 src/core/hle/kernel/transfer_memory.h   | 103
 src/core/hle/kernel/vm_manager.cpp      | 102
 src/core/hle/kernel/vm_manager.h        |  69
 src/core/hle/kernel/wait_object.h       |   2
 32 files changed, 918 insertions(+), 331 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
// them all.
std::size_t last = waiting_threads.size();
if (num_to_wake > 0) {
- last = num_to_wake;
+ last = std::min(last, static_cast<std::size_t>(num_to_wake));
}
// Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
// Determine the modified value depending on the waiting count.
s32 updated_value;
if (waiting_threads.empty()) {
- updated_value = value - 1;
- } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value + 1;
+ } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
+ updated_value = value - 1;
} else {
updated_value = value;
}
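
A minimal sketch of the clamping behavior this hunk introduces, written as a free function for illustration (WakeCount is not a name from the change; num_to_wake <= 0 is treated as "wake every waiting thread"):

    #include <algorithm>
    #include <cstddef>

    #include "common/common_types.h" // s32

    // Positive wake counts are clamped to the number of waiting threads so
    // the signal loop can never index past the end of the waiter list.
    std::size_t WakeCount(std::size_t num_waiting, s32 num_to_wake) {
        if (num_to_wake <= 0) {
            return num_waiting;
        }
        return std::min(num_waiting, static_cast<std::size_t>(num_to_wake));
    }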
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
new file mode 100644
index 000000000..1f434e9af
--- /dev/null
+++ b/src/core/hle/kernel/code_set.cpp
@@ -0,0 +1,12 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/code_set.h"
+
+namespace Kernel {
+
+CodeSet::CodeSet() = default;
+CodeSet::~CodeSet() = default;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
new file mode 100644
index 000000000..879957dcb
--- /dev/null
+++ b/src/core/hle/kernel/code_set.h
@@ -0,0 +1,90 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+/**
+ * Represents executable data that may be loaded into a kernel process.
+ *
+ * A code set consists of three basic segments:
+ * - A code (AKA text) segment,
+ * - A read-only data segment (rodata)
+ * - A data segment
+ *
+ * The code segment is the portion of the object file that contains
+ * executable instructions.
+ *
+ * The read-only data segment is the portion of the object file that
+ * contains (as one would expect) read-only data, such as fixed constant
+ * values and data structures.
+ *
+ * The data segment is similar to the read-only data segment -- it contains
+ * variables and data structures that have predefined values, however,
+ * entities within this segment can be modified.
+ */
+struct CodeSet final {
+ /// A single segment within a code set.
+ struct Segment final {
+ /// The byte offset that this segment is located at.
+ std::size_t offset = 0;
+
+ /// The address to map this segment to.
+ VAddr addr = 0;
+
+ /// The size of this segment in bytes.
+ u32 size = 0;
+ };
+
+ explicit CodeSet();
+ ~CodeSet();
+
+ CodeSet(const CodeSet&) = delete;
+ CodeSet& operator=(const CodeSet&) = delete;
+
+ CodeSet(CodeSet&&) = default;
+ CodeSet& operator=(CodeSet&&) = default;
+
+ Segment& CodeSegment() {
+ return segments[0];
+ }
+
+ const Segment& CodeSegment() const {
+ return segments[0];
+ }
+
+ Segment& RODataSegment() {
+ return segments[1];
+ }
+
+ const Segment& RODataSegment() const {
+ return segments[1];
+ }
+
+ Segment& DataSegment() {
+ return segments[2];
+ }
+
+ const Segment& DataSegment() const {
+ return segments[2];
+ }
+
+ /// The overall data that backs this code set.
+ std::vector<u8> memory;
+
+ /// The segments that comprise this code set.
+ std::array<Segment, 3> segments;
+
+ /// The entry point address for this code set.
+ VAddr entrypoint = 0;
+};
+
+} // namespace Kernel
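
A hypothetical loader-side sketch of how the new header is used; BuildCodeSet, program_image, and the size parameters are illustrative names, not part of this change:

    #include <utility>
    #include <vector>

    #include "core/hle/kernel/code_set.h"

    Kernel::CodeSet BuildCodeSet(std::vector<u8> program_image, u32 text_size,
                                 u32 ro_size, u32 data_size) {
        Kernel::CodeSet code_set;
        // Segments are byte ranges into the single backing buffer.
        code_set.CodeSegment().offset = 0;
        code_set.CodeSegment().size = text_size;
        code_set.RODataSegment().offset = text_size;
        code_set.RODataSegment().size = ro_size;
        code_set.DataSegment().offset = text_size + ro_size;
        code_set.DataSegment().size = data_size;
        // The segment addr fields and code_set.entrypoint would be filled in
        // from the image headers before handing the set to
        // Process::LoadModule(std::move(code_set), base_addr).
        code_set.memory = std::move(program_image);
        return code_set;
    }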
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 4d224d01d..3f14bfa86 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,12 +29,12 @@ namespace Kernel {
* @param thread_handle The handle of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) {
+static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
const auto proper_handle = static_cast<Handle>(thread_handle);
const auto& system = Core::System::GetInstance();
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
SharedPtr<Thread> thread =
system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
thread->GetWaitHandle() != 0) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
+ thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->SetMutexWaitAddress(0);
thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
@@ -114,7 +115,7 @@ struct KernelCore::Impl {
// Creates the default system resource limit
void InitializeSystemResourceLimit(KernelCore& kernel) {
- system_resource_limit = ResourceLimit::Create(kernel, "System");
+ system_resource_limit = ResourceLimit::Create(kernel);
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
@@ -190,6 +191,10 @@ const Process* KernelCore::CurrentProcess() const {
return impl->current_process;
}
+const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
+ return impl->process_list;
+}
+
void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
impl->named_ports.emplace(std::move(name), std::move(port));
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index ff17ff865..6b8738599 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,9 +8,6 @@
#include <unordered_map>
#include "core/hle/kernel/object.h"
-template <typename T>
-class ResultVal;
-
namespace Core {
class System;
}
@@ -75,6 +72,9 @@ public:
/// Retrieves a const pointer to the current process.
const Process* CurrentProcess() const;
+ /// Retrieves the list of processes.
+ const std::vector<SharedPtr<Process>>& GetProcessList() const;
+
/// Adds a port to the named port table
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 0743670ad..98e87313b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <map>
#include <utility>
#include <vector>
@@ -10,8 +9,11 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t
}
}
-ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle,
+Mutex::Mutex(Core::System& system) : system{system} {}
+Mutex::~Mutex() = default;
+
+ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ERR_INVALID_ADDRESS;
}
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Thread* const current_thread = system.CurrentScheduler().GetCurrentThread();
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
// TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another
// thread.
- ASSERT(requesting_thread == GetCurrentThread());
+ ASSERT(requesting_thread == current_thread);
- u32 addr_value = Memory::Read32(address);
+ const u32 addr_value = Memory::Read32(address);
// If the mutex isn't being held, just return success.
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
return RESULT_SUCCESS;
}
- if (holding_thread == nullptr)
+ if (holding_thread == nullptr) {
return ERR_INVALID_HANDLE;
+ }
// Wait until the mutex is released
- GetCurrentThread()->SetMutexWaitAddress(address);
- GetCurrentThread()->SetWaitHandle(requesting_thread_handle);
+ current_thread->SetMutexWaitAddress(address);
+ current_thread->SetWaitHandle(requesting_thread_handle);
- GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
- GetCurrentThread()->InvalidateWakeupCallback();
+ current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->InvalidateWakeupCallback();
// Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(GetCurrentThread());
+ holding_thread->AddMutexWaiter(current_thread);
- Core::System::GetInstance().PrepareReschedule();
+ system.PrepareReschedule();
return RESULT_SUCCESS;
}
@@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) {
return ERR_INVALID_ADDRESS;
}
- auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
// There are no more threads waiting for the mutex, release it completely.
if (thread == nullptr) {
@@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) {
}
// Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, GetCurrentThread(), thread);
+ TransferMutexOwnership(address, current_thread, thread);
u32 mutex_value = thread->GetWaitHandle();
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index 81e62d497..b904de2e8 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -5,32 +5,34 @@
#pragma once
#include "common/common_types.h"
-#include "core/hle/kernel/object.h"
union ResultCode;
-namespace Kernel {
+namespace Core {
+class System;
+}
-class HandleTable;
-class Thread;
+namespace Kernel {
class Mutex final {
public:
+ explicit Mutex(Core::System& system);
+ ~Mutex();
+
/// Flag that indicates that a mutex still has threads waiting for it.
static constexpr u32 MutexHasWaitersFlag = 0x40000000;
/// Mask of the bits in a mutex address value that contain the mutex owner.
static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
/// Attempts to acquire a mutex at the specified address.
- static ResultCode TryAcquire(HandleTable& handle_table, VAddr address,
- Handle holding_thread_handle, Handle requesting_thread_handle);
+ ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
+ Handle requesting_thread_handle);
/// Releases the mutex at the specified address.
- static ResultCode Release(VAddr address);
+ ResultCode Release(VAddr address);
private:
- Mutex() = default;
- ~Mutex() = default;
+ Core::System& system;
};
} // namespace Kernel
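
For reference, a sketch of how a guest mutex word decomposes under the two constants above (reader side only; assumes core/memory.h is included and the address is 4-byte aligned):

    const u32 raw = Memory::Read32(mutex_address);
    const Handle owner_handle = raw & Mutex::MutexOwnerMask;           // owner, flag bit cleared
    const bool has_waiters = (raw & Mutex::MutexHasWaitersFlag) != 0;  // bit 30
    // raw == 0 means the mutex is currently free.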
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 8870463d0..10431e94c 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -23,7 +23,7 @@ bool Object::IsWaitable() const {
case HandleType::Unknown:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
- case HandleType::AddressArbiter:
+ case HandleType::TransferMemory:
case HandleType::ResourceLimit:
case HandleType::ClientPort:
case HandleType::ClientSession:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 4c2505908..332876c27 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -22,9 +22,9 @@ enum class HandleType : u32 {
WritableEvent,
ReadableEvent,
SharedMemory,
+ TransferMemory,
Thread,
Process,
- AddressArbiter,
ResourceLimit,
ClientPort,
ServerPort,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 49fced7b1..041267318 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,10 +5,12 @@
#include <algorithm>
#include <memory>
#include <random>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
@@ -31,7 +33,7 @@ namespace {
*/
void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
// Setup page table so we can write to memory
- SetCurrentPageTable(&owner_process.VMManager().page_table);
+ Memory::SetCurrentPageTable(&owner_process.VMManager().page_table);
// Initialize new "main" thread
const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
@@ -50,9 +52,6 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi
}
} // Anonymous namespace
-CodeSet::CodeSet() = default;
-CodeSet::~CodeSet() = default;
-
SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) {
auto& kernel = system.Kernel();
@@ -77,6 +76,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}
+u64 Process::GetTotalPhysicalMemoryUsed() const {
+ return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
+}
+
+void Process::RegisterThread(const Thread* thread) {
+ thread_list.push_back(thread);
+}
+
+void Process::UnregisterThread(const Thread* thread) {
+ thread_list.remove(thread);
+}
+
ResultCode Process::ClearSignalState() {
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -109,14 +120,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
return handle_table.SetSize(capabilities.GetHandleTableSize());
}
-void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
+void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
+ // The kernel always ensures that the given stack size is page aligned.
+ main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
+
// Allocate and map the main thread stack
// TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
// of the user address space.
+ const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
vm_manager
- .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
- std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
- MemoryState::Stack)
+ .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
+ 0, main_thread_stack_size, MemoryState::Stack)
.Unwrap();
vm_manager.LogLayout();
@@ -212,33 +226,38 @@ void Process::FreeTLSSlot(VAddr tls_address) {
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
- const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+ const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
+
+ const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
const auto vma = vm_manager
- .MapMemoryBlock(segment.addr + base_addr, module_.memory,
- segment.offset, segment.size, memory_state)
+ .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
+ segment.size, memory_state)
.Unwrap();
vm_manager.Reprotect(vma, permissions);
};
// Map CodeSet segments
- MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic);
- MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable);
- MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable);
+ MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
+ MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
+ MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
+
+ code_memory_size += module_.memory.size();
// Clear instruction cache in CPU JIT
system.InvalidateCpuInstructionCaches();
}
Process::Process(Core::System& system)
- : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {}
+ : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+
Process::~Process() = default;
void Process::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
}
-bool Process::ShouldWait(Thread* thread) const {
+bool Process::ShouldWait(const Thread* thread) const {
return !is_signaled;
}
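
A worked example of the new accounting, assuming Memory::PAGE_SIZE == 0x1000, a 1 MiB heap, a requested stack of 0x1234 bytes, and a 0x42000-byte module image (all values illustrative):

    main_thread_stack_size = Common::AlignUp(0x1234, 0x1000);  // = 0x2000
    code_memory_size += 0x42000;                                // in LoadModule()
    // GetTotalPhysicalMemoryUsed() = 0x100000 + 0x2000 + 0x42000 = 0x144000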
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 47ffd4ad3..f060f2a3b 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,13 +7,14 @@
#include <array>
#include <bitset>
#include <cstddef>
-#include <memory>
+#include <list>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/kernel/wait_object.h"
@@ -33,13 +34,7 @@ class KernelCore;
class ResourceLimit;
class Thread;
-struct AddressMapping {
- // Address and size must be page-aligned
- VAddr address;
- u64 size;
- bool read_only;
- bool unk_flag;
-};
+struct CodeSet;
enum class MemoryRegion : u16 {
APPLICATION = 1,
@@ -65,46 +60,6 @@ enum class ProcessStatus {
DebugBreak,
};
-struct CodeSet final {
- struct Segment {
- std::size_t offset = 0;
- VAddr addr = 0;
- u32 size = 0;
- };
-
- explicit CodeSet();
- ~CodeSet();
-
- Segment& CodeSegment() {
- return segments[0];
- }
-
- const Segment& CodeSegment() const {
- return segments[0];
- }
-
- Segment& RODataSegment() {
- return segments[1];
- }
-
- const Segment& RODataSegment() const {
- return segments[1];
- }
-
- Segment& DataSegment() {
- return segments[2];
- }
-
- const Segment& DataSegment() const {
- return segments[2];
- }
-
- std::shared_ptr<std::vector<u8>> memory;
-
- std::array<Segment, 3> segments;
- VAddr entrypoint = 0;
-};
-
class Process final : public WaitObject {
public:
enum : u64 {
@@ -165,6 +120,16 @@ public:
return address_arbiter;
}
+ /// Gets a reference to the process' mutex lock.
+ Mutex& GetMutex() {
+ return mutex;
+ }
+
+ /// Gets a const reference to the process' mutex lock
+ const Mutex& GetMutex() const {
+ return mutex;
+ }
+
/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
@@ -222,6 +187,22 @@ public:
return random_entropy.at(index);
}
+ /// Retrieves the total physical memory used by this process in bytes.
+ u64 GetTotalPhysicalMemoryUsed() const;
+
+ /// Gets the list of all threads created with this process as their owner.
+ const std::list<const Thread*>& GetThreadList() const {
+ return thread_list;
+ }
+
+ /// Registers a thread as being created under this process,
+ /// adding it to this process' thread list.
+ void RegisterThread(const Thread* thread);
+
+ /// Unregisters a thread from this process, removing it
+ /// from this process' thread list.
+ void UnregisterThread(const Thread* thread);
+
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
@@ -246,7 +227,7 @@ public:
/**
* Applies address space changes and launches the process main thread.
*/
- void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
+ void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
/**
* Prepares a process for termination by stopping all of its threads
@@ -270,7 +251,7 @@ private:
~Process() override;
/// Checks if the specified thread should wait until this process is available.
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
/// Acquires/locks this process for the specified thread if it's available.
void Acquire(Thread* thread) override;
@@ -283,6 +264,12 @@ private:
/// Memory manager for this process.
Kernel::VMManager vm_manager;
+ /// Size of the main thread's stack in bytes.
+ u64 main_thread_stack_size = 0;
+
+ /// Size of the loaded code memory in bytes.
+ u64 code_memory_size = 0;
+
/// Current status of the process
ProcessStatus status;
@@ -327,9 +314,17 @@ private:
/// Per-process address arbiter.
AddressArbiter address_arbiter;
+ /// The per-process mutex lock instance used for handling various
+ /// forms of services, such as lock arbitration, and condition
+ /// variable related facilities.
+ Mutex mutex;
+
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
+ /// List of threads that are running with this process as their owner.
+ std::list<const Thread*> thread_list;
+
/// System context
Core::System& system;
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 0e5083f70..c2b798a4e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,7 +14,7 @@ namespace Kernel {
ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {}
ReadableEvent::~ReadableEvent() = default;
-bool ReadableEvent::ShouldWait(Thread* thread) const {
+bool ReadableEvent::ShouldWait(const Thread* thread) const {
return !signaled;
}
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 77a9c362c..2eb9dcbb7 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -36,7 +36,7 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
/// Unconditionally clears the readable event's state.
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 2f9695005..173f69915 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
ResourceLimit::~ResourceLimit() = default;
-SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) {
- SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel));
-
- resource_limit->name = std::move(name);
- return resource_limit;
+SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
+ return new ResourceLimit(kernel);
}
s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 59dc11c22..70e09858a 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) {
class ResourceLimit final : public Object {
public:
- /**
- * Creates a resource limit object.
- */
- static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown");
+ /// Creates a resource limit object.
+ static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
std::string GetTypeName() const override {
return "ResourceLimit";
}
std::string GetName() const override {
- return name;
+ return GetTypeName();
}
static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
@@ -95,9 +93,6 @@ private:
ResourceArray limits{};
/// Current resource limit values.
ResourceArray values{};
-
- /// Name of resource limit object.
- std::string name;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5fccfd9f4..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
}
bool Scheduler::HaveReadyThreads() const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
- return ready_queue.get_first() != nullptr;
+ std::lock_guard lock{scheduler_mutex};
+ return !ready_queue.empty();
}
Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
+ if (ready_queue.empty()) {
+ return thread;
+ }
// We have to do better than the current thread.
// This call returns null when that's not possible.
- next = ready_queue.pop_first_better(thread->GetPriority());
- if (!next) {
- // Otherwise just keep going with the current thread
+ next = ready_queue.front();
+ if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
- next = ready_queue.pop_first();
+ if (ready_queue.empty()) {
+ return nullptr;
+ }
+ next = ready_queue.front();
}
return next;
}
void Scheduler::SwitchContext(Thread* new_thread) {
- Thread* const previous_thread = GetCurrentThread();
+ Thread* previous_thread = GetCurrentThread();
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
- ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+ ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -90,13 +95,13 @@ void Scheduler::SwitchContext(Thread* new_thread) {
current_thread = new_thread;
- ready_queue.remove(new_thread->GetPriority(), new_thread);
+ ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
if (previous_process != thread_owner_process) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
- SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
+ Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
}
cpu_core.LoadContext(new_thread->GetContext());
@@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
}
void Scheduler::Reschedule() {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
Thread* cur = GetCurrentThread();
Thread* next = PopNextReadyThread();
@@ -143,51 +148,54 @@ void Scheduler::Reschedule() {
SwitchContext(next);
}
-void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+void Scheduler::AddThread(SharedPtr<Thread> thread) {
+ std::lock_guard lock{scheduler_mutex};
thread_list.push_back(std::move(thread));
- ready_queue.prepare(priority);
}
void Scheduler::RemoveThread(Thread* thread) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.push_back(priority, thread);
+ ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.remove(priority, thread);
+ ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
+ if (thread->GetPriority() == priority) {
+ return;
+ }
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
- ready_queue.move(thread, thread->GetPriority(), priority);
- else
- ready_queue.prepare(priority);
+ ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
const u32 mask = 1U << core;
- return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
- return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
- });
+ for (auto* thread : ready_queue) {
+ if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
+ return thread;
+ }
+ }
+ return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
@@ -199,8 +207,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
// Yield this thread -- sleep for zero time and force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
}
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
@@ -215,8 +222,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
ASSERT(priority < THREADPRIO_COUNT);
// Sleep for zero time to be able to force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
Thread* suggested_thread = nullptr;
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
#include <mutex>
#include <vector>
#include "common/common_types.h"
-#include "common/thread_queue_list.h"
+#include "common/multi_level_queue.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
@@ -38,7 +38,7 @@ public:
u64 GetLastContextSwitchTicks() const;
/// Adds a new thread to the scheduler
- void AddThread(SharedPtr<Thread> thread, u32 priority);
+ void AddThread(SharedPtr<Thread> thread);
/// Removes a thread from the scheduler
void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
- Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
+ Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;
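
The operations the scheduler now relies on (add, remove, adjust, front, empty, plus iteration in GetNextSuggestedThread) suggest one FIFO list per priority level with a bitset tracking occupied levels, so each operation is independent of the total thread count. A simplified sketch of that shape, assuming this structure rather than quoting the real common/multi_level_queue.h (iteration support omitted):

    #include <array>
    #include <bitset>
    #include <cstddef>
    #include <list>

    template <typename T, std::size_t Levels>
    class MultiLevelQueueSketch {
    public:
        // push_back=false mirrors ready_queue.add(thread, priority, false),
        // which re-queues a preempted thread at the front of its level.
        void add(T item, std::size_t priority, bool push_back = true) {
            auto& level = levels[priority];
            if (push_back) {
                level.push_back(item);
            } else {
                level.push_front(item);
            }
            used.set(priority);
        }

        void remove(T item, std::size_t priority) {
            auto& level = levels[priority];
            level.remove(item);
            if (level.empty()) {
                used.reset(priority);
            }
        }

        void adjust(T item, std::size_t old_priority, std::size_t new_priority) {
            remove(item, old_priority);
            add(item, new_priority);
        }

        // First element of the highest-priority (lowest index) occupied level.
        T front() const {
            for (std::size_t p = 0; p < Levels; ++p) {
                if (used.test(p)) {
                    return levels[p].front();
                }
            }
            return T{};
        }

        bool empty() const {
            return used.none();
        }

    private:
        std::array<std::list<T>, Levels> levels{};
        std::bitset<Levels> used{};
    };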
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index 0e1515c89..708fdf9e1 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -30,7 +30,7 @@ void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session)
pending_sessions.push_back(std::move(pending_session));
}
-bool ServerPort::ShouldWait(Thread* thread) const {
+bool ServerPort::ShouldWait(const Thread* thread) const {
// If there are no pending sessions, we wait until a new one is added.
return pending_sessions.empty();
}
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 9bc667cf2..76293cb8b 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -75,7 +75,7 @@ public:
/// waiting to be accepted by this port.
void AppendPendingSession(SharedPtr<ServerSession> pending_session);
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
private:
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4d8a337a7..40cec143e 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st
return MakeResult(std::move(server_session));
}
-bool ServerSession::ShouldWait(Thread* thread) const {
+bool ServerSession::ShouldWait(const Thread* thread) const {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
if (parent->client == nullptr)
return false;
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index aea4ccfeb..79b84bade 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -82,7 +82,7 @@ public:
*/
ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 62861da36..f15c5ee36 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -9,7 +9,6 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
-#include "core/memory.h"
namespace Kernel {
@@ -119,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi
ConvertPermissions(permissions));
}
-ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) {
+ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
+ if (unmap_size != size) {
+ LOG_ERROR(Kernel,
+ "Invalid size passed to Unmap. Size must be equal to the size of the "
+ "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
+ size, unmap_size);
+ return ERR_INVALID_SIZE;
+ }
+
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.VMManager().UnmapRange(address, size);
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index dab2a6bea..37e18c443 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -104,11 +104,17 @@ public:
/**
* Unmaps a shared memory block from the specified address in system memory
+ *
* @param target_process Process from which to unmap the memory block.
- * @param address Address in system memory where the shared memory block is mapped
+ * @param address Address in system memory where the shared memory block is mapped.
+ * @param unmap_size The amount of bytes to unmap from this shared memory instance.
+ *
* @return Result code of the unmap operation
+ *
+ * @pre The given size to unmap must be the same size as the amount of memory managed by
+ * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
*/
- ResultCode Unmap(Process& target_process, VAddr address);
+ ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
/**
* Gets a pointer to the shared memory block
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 77d0e3d96..2fd07ab34 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
return ERR_INVALID_SIZE;
}
- auto& vm_manager = Core::CurrentProcess()->VMManager();
- const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
- const auto alloc_result =
- vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
-
+ auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
+ const auto alloc_result = vm_manager.SetHeapSize(heap_size);
if (alloc_result.Failed()) {
return alloc_result.Code();
}
@@ -551,9 +549,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
return ERR_INVALID_ADDRESS;
}
- auto& handle_table = Core::CurrentProcess()->GetHandleTable();
- return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
- requesting_thread_handle);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
+ requesting_thread_handle);
}
/// Unlock a mutex
@@ -571,7 +569,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
return ERR_INVALID_ADDRESS;
}
- return Mutex::Release(mutex_addr);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().Release(mutex_addr);
}
enum class BreakType : u32 {
@@ -710,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
HeapRegionBaseAddr = 4,
HeapRegionSize = 5,
TotalMemoryUsage = 6,
- TotalHeapUsage = 7,
+ TotalPhysicalMemoryUsed = 7,
IsCurrentProcessBeingDebugged = 8,
RegisterResourceLimit = 9,
IdleTickCount = 10,
@@ -746,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionBaseAddr:
case GetInfoType::NewMapRegionSize:
case GetInfoType::TotalMemoryUsage:
- case GetInfoType::TotalHeapUsage:
+ case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
case GetInfoType::PersonalMmHeapUsage:
case GetInfoType::TitleId:
@@ -806,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = process->VMManager().GetTotalMemoryUsage();
return RESULT_SUCCESS;
- case GetInfoType::TotalHeapUsage:
- *result = process->VMManager().GetTotalHeapUsage();
+ case GetInfoType::TotalPhysicalMemoryUsed:
+ *result = process->GetTotalPhysicalMemoryUsed();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1141,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
return ERR_INVALID_MEMORY_RANGE;
}
- return shared_memory->Unmap(*current_process, addr);
+ return shared_memory->Unmap(*current_process, addr, size);
}
static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
@@ -1284,10 +1283,14 @@ static ResultCode StartThread(Handle thread_handle) {
/// Called when a thread exits
static void ExitThread() {
- LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());
+ auto& system = Core::System::GetInstance();
- ExitCurrentThread();
- Core::System::GetInstance().PrepareReschedule();
+ LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ current_thread->Stop();
+ system.CurrentScheduler().RemoveThread(current_thread);
+ system.PrepareReschedule();
}
/// Sleep the current thread
@@ -1300,32 +1303,32 @@ static void SleepThread(s64 nanoseconds) {
YieldAndWaitForLoadBalancing = -2,
};
+ auto& system = Core::System::GetInstance();
+ auto& scheduler = system.CurrentScheduler();
+ auto* const current_thread = scheduler.GetCurrentThread();
+
if (nanoseconds <= 0) {
- auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing:
- scheduler.YieldWithoutLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithoutLoadBalancing(current_thread);
break;
case SleepType::YieldWithLoadBalancing:
- scheduler.YieldWithLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithLoadBalancing(current_thread);
break;
case SleepType::YieldAndWaitForLoadBalancing:
- scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread());
+ scheduler.YieldAndWaitForLoadBalancing(current_thread);
break;
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
} else {
- // Sleep current thread and check for next thread to schedule
- WaitCurrentThread_Sleep();
-
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- GetCurrentThread()->WakeAfterDelay(nanoseconds);
+ current_thread->Sleep(nanoseconds);
}
// Reschedule all CPU cores
- for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i)
- Core::System::GetInstance().CpuCore(i).PrepareReschedule();
+ for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
+ system.CpuCore(i).PrepareReschedule();
+ }
}
/// Wait process wide key atomic
@@ -1336,17 +1339,35 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
- const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
+ if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+ LOG_ERROR(
+ Kernel_SVC,
+ "Given mutex address must not be within the kernel address space. address=0x{:016X}",
+ mutex_addr);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ if (!Common::IsWordAligned(mutex_addr)) {
+ LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
+ mutex_addr);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
- CASCADE_CODE(Mutex::Release(mutex_addr));
+ const auto release_result = current_process->GetMutex().Release(mutex_addr);
+ if (release_result.IsError()) {
+ return release_result;
+ }
SharedPtr<Thread> current_thread = GetCurrentThread();
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->SetStatus(ThreadStatus::WaitCondVar);
current_thread->InvalidateWakeupCallback();
current_thread->WakeAfterDelay(nano_seconds);
@@ -1390,10 +1411,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// them all.
std::size_t last = waiting_threads.size();
if (target != -1)
- last = target;
+ last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
// If there are no threads waiting on this condition variable, just exit
- if (last > waiting_threads.size())
+ if (last == 0)
return RESULT_SUCCESS;
for (std::size_t index = 0; index < last; ++index) {
@@ -1401,6 +1422,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
+ // Release the thread from its condition variable wait.
+ thread->SetCondVarWaitAddress(0);
+
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -1419,10 +1443,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
}
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
thread->GetWaitHandle()));
-
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->ResumeFromWait();
auto* const lock_owner = thread->GetLockOwner();
@@ -1432,8 +1455,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
thread->SetLockOwner(nullptr);
thread->SetMutexWaitAddress(0);
- thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
@@ -1452,12 +1475,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
auto owner = handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->InvalidateWakeupCallback();
+ thread->SetStatus(ThreadStatus::WaitMutex);
owner->AddMutexWaiter(thread);
-
- Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
}
}
@@ -1577,14 +1599,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
}
auto& kernel = Core::System::GetInstance().Kernel();
- auto process = kernel.CurrentProcess();
- auto& handle_table = process->GetHandleTable();
- const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
+ auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
- CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const auto result = handle_table.Create(std::move(transfer_mem_handle));
+ if (result.Failed()) {
+ return result.Code();
+ }
+
+ *handle = *result;
return RESULT_SUCCESS;
}
+static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
+ handle, address, size, permission_raw);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto permissions = static_cast<MemoryPermission>(permission_raw);
+ if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
+ permissions != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
+ permission_raw);
+ return ERR_INVALID_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->MapMemory(address, size, permissions);
+}
+
+static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
+ address, size);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->UnmapMemory(address, size);
+}
+
static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1868,6 +1997,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
return RESULT_SUCCESS;
}
+static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
+ LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
+ out_process_ids, out_process_ids_size);
+
+ // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
+ if ((out_process_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC,
+ "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
+ out_process_ids_size);
+ return ERR_OUT_OF_RANGE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto& vm_manager = kernel.CurrentProcess()->VMManager();
+ const auto total_copy_size = out_process_ids_size * sizeof(u64);
+
+ if (out_process_ids_size > 0 &&
+ !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_process_ids, out_process_ids + total_copy_size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& process_list = kernel.GetProcessList();
+ const auto num_processes = process_list.size();
+ const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
+
+ for (std::size_t i = 0; i < copy_amount; ++i) {
+ Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
+ out_process_ids += sizeof(u64);
+ }
+
+ *out_num_processes = static_cast<u32>(num_processes);
+ return RESULT_SUCCESS;
+}
+
+static ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size,
+ Handle debug_handle) {
+ // TODO: Handle this case when debug events are supported.
+ UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+
+ LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
+ out_thread_ids, out_thread_ids_size);
+
+ // If the size is negative or larger than INT32_MAX / sizeof(u64)
+ if ((out_thread_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
+ out_thread_ids_size);
+ return ERR_OUT_OF_RANGE;
+ }
+
+ const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ const auto& vm_manager = current_process->VMManager();
+ const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+
+ if (out_thread_ids_size > 0 &&
+ !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_thread_ids, out_thread_ids + total_copy_size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& thread_list = current_process->GetThreadList();
+ const auto num_threads = thread_list.size();
+ const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
+
+ auto list_iter = thread_list.cbegin();
+ for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
+ Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
+ out_thread_ids += sizeof(u64);
+ }
+
+ *out_num_threads = static_cast<u32>(num_threads);
+ return RESULT_SUCCESS;
+}
+
namespace {
struct FunctionDef {
using Func = void();
@@ -1960,8 +2166,8 @@ static const FunctionDef SVC_Table[] = {
{0x4E, nullptr, "ReadWriteRegister"},
{0x4F, nullptr, "SetProcessActivity"},
{0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
- {0x51, nullptr, "MapTransferMemory"},
- {0x52, nullptr, "UnmapTransferMemory"},
+ {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
+ {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
{0x53, nullptr, "CreateInterruptEvent"},
{0x54, nullptr, "QueryPhysicalAddress"},
{0x55, nullptr, "QueryIoMapping"},
@@ -1980,8 +2186,8 @@ static const FunctionDef SVC_Table[] = {
{0x62, nullptr, "TerminateDebugProcess"},
{0x63, nullptr, "GetDebugEvent"},
{0x64, nullptr, "ContinueDebugEvent"},
- {0x65, nullptr, "GetProcessList"},
- {0x66, nullptr, "GetThreadList"},
+ {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
+ {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
{0x67, nullptr, "GetDebugThreadContext"},
{0x68, nullptr, "SetDebugThreadContext"},
{0x69, nullptr, "QueryDebugProcessMemory"},
@@ -2023,7 +2229,7 @@ void CallSVC(u32 immediate) {
MICROPROFILE_SCOPE(Kernel_SVC);
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
const FunctionDef* info = GetSVCInfo(immediate);
if (info) {
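
The 0xF0000000 masks in GetProcessList and GetThreadList above encode the range check described in their comments: INT32_MAX / sizeof(u64) == 0x7FFFFFFF / 8 == 0x0FFFFFFF, so any size with a bit set in the top nibble is out of range. An equivalent standalone predicate (IsValidOutListSize is an illustrative name):

    #include "common/common_types.h" // u32

    constexpr bool IsValidOutListSize(u32 size) {
        return (size & 0xF0000000) == 0; // i.e. size <= 0x0FFFFFFF
    }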
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2a2c2c5ea..b3733680f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -78,6 +78,14 @@ void SvcWrap() {
FuncReturn(retval);
}
+template <ResultCode func(u32*, u64, u32)>
+void SvcWrap() {
+ u32 param_1 = 0;
+ const u32 retval = func(&param_1, Param(1), static_cast<u32>(Param(2))).raw;
+ Core::CurrentArmInterface().SetReg(1, param_1);
+ FuncReturn(retval);
+}
+
template <ResultCode func(u64*, u32)>
void SvcWrap() {
u64 param_1 = 0;
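
The new wrapper serves SVC signatures of the form ResultCode(u32*, u64, u32), such as GetProcessList(u32* out_num_processes, VAddr out_process_ids, u32 out_process_ids_size): Param(1) and Param(2) carry the guest's second and third argument registers, and the count written through the u32 out-parameter is handed back to the guest in W1 via SetReg(1, param_1).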
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index eb54d6651..1b891f632 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -7,8 +7,6 @@
#include <optional>
#include <vector>
-#include <boost/range/algorithm_ext/erase.hpp>
-
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
@@ -30,7 +28,7 @@
namespace Kernel {
-bool Thread::ShouldWait(Thread* thread) const {
+bool Thread::ShouldWait(const Thread* thread) const {
return status != ThreadStatus::Dead;
}
@@ -64,21 +62,12 @@ void Thread::Stop() {
}
wait_objects.clear();
+ owner_process->UnregisterThread(this);
+
// Mark the TLS slot in the thread's page as free.
owner_process->FreeTLSSlot(tls_address);
}
-void WaitCurrentThread_Sleep() {
- Thread* thread = GetCurrentThread();
- thread->SetStatus(ThreadStatus::WaitSleep);
-}
-
-void ExitCurrentThread() {
- Thread* thread = GetCurrentThread();
- thread->Stop();
- Core::System::GetInstance().CurrentScheduler().RemoveThread(thread);
-}
-
void Thread::WakeAfterDelay(s64 nanoseconds) {
// Don't schedule a wakeup if the thread wants to wait forever
if (nanoseconds == -1)
@@ -118,6 +107,7 @@ void Thread::ResumeFromWait() {
case ThreadStatus::WaitSleep:
case ThreadStatus::WaitIPC:
case ThreadStatus::WaitMutex:
+ case ThreadStatus::WaitCondVar:
case ThreadStatus::WaitArb:
break;
@@ -211,9 +201,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
thread->scheduler = &system.Scheduler(processor_id);
- thread->scheduler->AddThread(thread, priority);
+ thread->scheduler->AddThread(thread);
thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
+ thread->owner_process->RegisterThread(thread.get());
+
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);
@@ -241,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
context.cpu_registers[1] = output;
}
-s32 Thread::GetWaitObjectIndex(WaitObject* object) const {
+s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
- auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
+ const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
}
VAddr Thread::GetCommandBufferAddress() const {
// Offset from the start of TLS at which the IPC command buffer begins.
- static constexpr int CommandHeaderOffset = 0x80;
- return GetTLSAddress() + CommandHeaderOffset;
+ constexpr u64 command_header_offset = 0x80;
+ return GetTLSAddress() + command_header_offset;
}
void Thread::SetStatus(ThreadStatus new_status) {
@@ -269,8 +261,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
if (thread->lock_owner == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
// waiters list is consistent and return without doing anything.
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
return;
}
@@ -278,11 +270,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == nullptr);
// Ensure that the thread is not already in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr == wait_mutex_threads.end());
-
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter == wait_mutex_threads.end());
+
+ // Keep the waiter list sorted by priority, so the highest-priority thread is at the front.
+ const auto insertion_point = std::find_if(
+ wait_mutex_threads.begin(), wait_mutex_threads.end(),
+ [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
+ wait_mutex_threads.insert(insertion_point, thread);
thread->lock_owner = this;
- wait_mutex_threads.emplace_back(std::move(thread));
+
UpdatePriority();
}
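The ordered insert maintains the invariant that wait_mutex_threads.front() is the highest-priority waiter (lowest numeric value), which UpdatePriority below relies on. A standalone sketch of the insertion pattern, with plain integers standing in for threads:

    #include <algorithm>
    #include <vector>

    // Insert `prio` keeping the vector sorted ascending, i.e. the highest
    // priority (lowest value) stays at the front, as in the waiter list.
    void InsertSorted(std::vector<unsigned>& waiters, unsigned prio) {
        const auto pos = std::find_if(waiters.begin(), waiters.end(),
                                      [prio](unsigned entry) { return entry > prio; });
        waiters.insert(pos, prio);
    }

    // After inserting 3, 1, 2 in any order, the front element is 1.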
@@ -290,32 +287,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == this);
// Ensure that the thread is in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
+
+ wait_mutex_threads.erase(iter);
- boost::remove_erase(wait_mutex_threads, thread);
thread->lock_owner = nullptr;
UpdatePriority();
}
void Thread::UpdatePriority() {
- // Find the highest priority among all the threads that are waiting for this thread's lock
+ // If any of the threads waiting on the mutex have a higher priority
+ // (taking into account priority inheritance), then this thread inherits
+ // that thread's priority.
u32 new_priority = nominal_priority;
- for (const auto& thread : wait_mutex_threads) {
- if (thread->nominal_priority < new_priority)
- new_priority = thread->nominal_priority;
+ if (!wait_mutex_threads.empty()) {
+ if (wait_mutex_threads.front()->current_priority < new_priority) {
+ new_priority = wait_mutex_threads.front()->current_priority;
+ }
}
- if (new_priority == current_priority)
+ if (new_priority == current_priority) {
return;
+ }
scheduler->SetThreadPriority(this, new_priority);
-
current_priority = new_priority;
+ if (!lock_owner) {
+ return;
+ }
+
+ // Ensure that this thread is sorted into the correct position in its lock owner's waiter list.
+ auto old_owner = lock_owner;
+ lock_owner->RemoveMutexWaiter(this);
+ old_owner->AddMutexWaiter(this);
+
// Recursively update the priority of the thread that depends on the priority of this one.
- if (lock_owner)
- lock_owner->UpdatePriority();
+ lock_owner->UpdatePriority();
}
void Thread::ChangeCore(u32 core, u64 mask) {
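The re-sort plus recursive call in UpdatePriority above is what propagates priority inheritance up a chain of lock owners. A toy model of that propagation (hypothetical types; list re-sorting and scheduler interaction omitted):

    struct Waiter {
        unsigned nominal_priority;
        unsigned current_priority;
        Waiter* lock_owner = nullptr;      // thread whose mutex we wait on
        Waiter* highest_waiter = nullptr;  // stands in for wait_mutex_threads.front()

        void UpdatePriority() {
            unsigned new_priority = nominal_priority;
            if (highest_waiter && highest_waiter->current_priority < new_priority) {
                new_priority = highest_waiter->current_priority;
            }
            if (new_priority == current_priority) {
                return; // nothing changed; stop the chain here
            }
            current_priority = new_priority;
            if (lock_owner) {
                lock_owner->UpdatePriority(); // walk up the ownership chain
            }
        }
    };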
@@ -347,7 +356,7 @@ void Thread::ChangeScheduler() {
if (*new_processor_id != processor_id) {
// Remove thread from previous core's scheduler
scheduler->RemoveThread(this);
- next_scheduler.AddThread(this, current_priority);
+ next_scheduler.AddThread(this);
}
processor_id = *new_processor_id;
@@ -362,7 +371,7 @@ void Thread::ChangeScheduler() {
system.CpuCore(processor_id).PrepareReschedule();
}
-bool Thread::AllWaitObjectsReady() {
+bool Thread::AllWaitObjectsReady() const {
return std::none_of(
wait_objects.begin(), wait_objects.end(),
[this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
@@ -391,6 +400,14 @@ void Thread::SetActivity(ThreadActivity value) {
}
}
+void Thread::Sleep(s64 nanoseconds) {
+ // Mark the thread as sleeping so the scheduler selects another thread to run.
+ SetStatus(ThreadStatus::WaitSleep);
+
+ // Create an event to wake the thread up after the specified nanosecond delay has passed
+ WakeAfterDelay(nanoseconds);
+}
+
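Thread::Sleep folds the removed WaitCurrentThread_Sleep free function and the follow-up WakeAfterDelay call into one helper. A hedged sketch of how an SVC sleep handler might use it (the reschedule call is an assumption, not taken from this patch):

    // Hypothetical svcSleepThread body:
    Thread* const current = GetCurrentThread();
    current->Sleep(nanoseconds); // WaitSleep status + wakeup after `nanoseconds`
    Core::System::GetInstance().PrepareReschedule(); // assumed scheduler nudge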
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c48b21aba..73e5d1bb4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
WaitIPC, ///< Waiting for the reply from an IPC request
WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
- WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc
+ WaitMutex, ///< Waiting due to an ArbitrateLock svc
+ WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
Dormant, ///< Created but not yet made ready
Dead ///< Run to completion, or forcefully terminated
@@ -110,7 +111,7 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
/**
@@ -204,7 +205,7 @@ public:
* object in the list.
* @param object Object to query the index of.
*/
- s32 GetWaitObjectIndex(WaitObject* object) const;
+ s32 GetWaitObjectIndex(const WaitObject* object) const;
/**
* Stops a thread, invalidating it from further use
@@ -298,7 +299,7 @@ public:
}
/// Determines whether all the objects this thread is waiting on are ready.
- bool AllWaitObjectsReady();
+ bool AllWaitObjectsReady() const;
const MutexWaitingThreads& GetMutexWaitingThreads() const {
return wait_mutex_threads;
@@ -383,6 +384,9 @@ public:
void SetActivity(ThreadActivity value);
+ /// Sleeps this thread for the given number of nanoseconds.
+ void Sleep(s64 nanoseconds);
+
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
@@ -398,8 +402,14 @@ private:
VAddr entry_point = 0;
VAddr stack_top = 0;
- u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application
- u32 current_priority = 0; ///< Current thread priority, can be temporarily changed
+ /// Nominal thread priority, as set by the emulated application.
+ /// The nominal priority is the thread priority without priority
+ /// inheritance taken into account.
+ u32 nominal_priority = 0;
+
+ /// Current thread priority. This may change over the course of the
+ /// thread's lifetime in order to facilitate priority inheritance.
+ u32 current_priority = 0;
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
@@ -460,14 +470,4 @@ private:
*/
Thread* GetCurrentThread();
-/**
- * Waits the current thread on a sleep
- */
-void WaitCurrentThread_Sleep();
-
-/**
- * Stops the current thread and removes it from the thread_list
- */
-void ExitCurrentThread();
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
new file mode 100644
index 000000000..26c4e5e67
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -0,0 +1,81 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/shared_memory.h"
+#include "core/hle/kernel/transfer_memory.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
+TransferMemory::~TransferMemory() = default;
+
+SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, u64 size,
+ MemoryPermission permissions) {
+ SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
+
+ transfer_memory->base_address = base_address;
+ transfer_memory->memory_size = size;
+ transfer_memory->owner_permissions = permissions;
+ transfer_memory->owner_process = kernel.CurrentProcess();
+
+ return transfer_memory;
+}
+
+const u8* TransferMemory::GetPointer() const {
+ return backing_block->data();
+}
+
+u64 TransferMemory::GetSize() const {
+ return memory_size;
+}
+
+ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ if (owner_permissions != permissions) {
+ return ERR_INVALID_STATE;
+ }
+
+ if (is_mapped) {
+ return ERR_INVALID_STATE;
+ }
+
+ backing_block = std::make_shared<std::vector<u8>>(size);
+
+ const auto map_state = owner_permissions == MemoryPermission::None
+ ? MemoryState::TransferMemoryIsolated
+ : MemoryState::TransferMemory;
+ auto& vm_manager = owner_process->VMManager();
+ const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state);
+ if (map_result.Failed()) {
+ return map_result.Code();
+ }
+
+ is_mapped = true;
+ return RESULT_SUCCESS;
+}
+
+ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ auto& vm_manager = owner_process->VMManager();
+ const auto result = vm_manager.UnmapRange(address, size);
+
+ if (result.IsError()) {
+ return result;
+ }
+
+ is_mapped = false;
+ return RESULT_SUCCESS;
+}
+
+} // namespace Kernel
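Taken together, the lifecycle is create, map, unmap, with size/permission mismatches and double-maps rejected before any mapping work happens. A hedged usage sketch (addresses and sizes are made up; `kernel` is a KernelCore reference in scope):

    auto tmem = TransferMemory::Create(kernel, /*base_address=*/0x10000000,
                                       /*size=*/0x1000, MemoryPermission::ReadWrite);
    if (tmem->MapMemory(0x20000000, 0x1000, MemoryPermission::ReadWrite).IsSuccess()) {
        // The backing block now exists and is mapped into the owner process.
        tmem->UnmapMemory(0x20000000, 0x1000);
    }
    // A second MapMemory before UnmapMemory would fail with ERR_INVALID_STATE.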
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
new file mode 100644
index 000000000..a140b1e2b
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -0,0 +1,103 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "core/hle/kernel/object.h"
+
+union ResultCode;
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+
+enum class MemoryPermission : u32;
+
+/// Defines the interface for transfer memory objects.
+///
+/// Transfer memory is typically used to share memory
+/// between separate process instances, hence the name.
+///
+class TransferMemory final : public Object {
+public:
+ static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
+
+ static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, u64 size,
+ MemoryPermission permissions);
+
+ TransferMemory(const TransferMemory&) = delete;
+ TransferMemory& operator=(const TransferMemory&) = delete;
+
+ TransferMemory(TransferMemory&&) = delete;
+ TransferMemory& operator=(TransferMemory&&) = delete;
+
+ std::string GetTypeName() const override {
+ return "TransferMemory";
+ }
+
+ std::string GetName() const override {
+ return GetTypeName();
+ }
+
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ /// Gets a pointer to the backing block of this instance.
+ const u8* GetPointer() const;
+
+ /// Gets the size of the memory backing this instance in bytes.
+ u64 GetSize() const;
+
+ /// Attempts to map transfer memory with the given range and memory permissions.
+ ///
+ /// @param address The base address to begin mapping memory at.
+ /// @param size The size of the memory to map, in bytes.
+ /// @param permissions The memory permissions to check against when mapping memory.
+ ///
+ /// @pre The given size and memory permissions must match the values
+ /// that were given when creating the transfer memory instance.
+ ///
+ ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions);
+
+ /// Unmaps the transfer memory within the given range.
+ ///
+ /// @param address The base address to begin unmapping memory at.
+ /// @param size The size of the memory to unmap, in bytes.
+ ///
+ /// @pre The given address and size must be the same as the ones used
+ /// when mapping this transfer memory instance.
+ ///
+ ResultCode UnmapMemory(VAddr address, u64 size);
+
+private:
+ explicit TransferMemory(KernelCore& kernel);
+ ~TransferMemory() override;
+
+ /// Memory block backing this instance.
+ std::shared_ptr<std::vector<u8>> backing_block;
+
+ /// The base address for the memory managed by this instance.
+ VAddr base_address = 0;
+
+ /// Size of the memory, in bytes, that this instance manages.
+ u64 memory_size = 0;
+
+ /// The memory permissions that are applied to this instance.
+ MemoryPermission owner_permissions{};
+
+ /// The process that this transfer memory instance was created under.
+ Process* owner_process = nullptr;
+
+ /// Whether or not this transfer memory instance has mapped memory.
+ bool is_mapped = false;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 05c59af34..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -7,29 +7,29 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
#include "core/memory_setup.h"
namespace Kernel {
namespace {
const char* GetMemoryStateName(MemoryState state) {
static constexpr const char* names[] = {
- "Unmapped", "Io",
- "Normal", "CodeStatic",
- "CodeMutable", "Heap",
- "Shared", "Unknown1",
- "ModuleCodeStatic", "ModuleCodeMutable",
- "IpcBuffer0", "Stack",
- "ThreadLocal", "TransferMemoryIsolated",
- "TransferMemory", "ProcessMemory",
- "Inaccessible", "IpcBuffer1",
- "IpcBuffer3", "KernelStack",
+ "Unmapped", "Io",
+ "Normal", "Code",
+ "CodeData", "Heap",
+ "Shared", "Unknown1",
+ "ModuleCode", "ModuleCodeData",
+ "IpcBuffer0", "Stack",
+ "ThreadLocal", "TransferMemoryIsolated",
+ "TransferMemory", "ProcessMemory",
+ "Inaccessible", "IpcBuffer1",
+ "IpcBuffer3", "KernelStack",
};
return names[ToSvcMemoryState(state)];
@@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
- Memory::MemoryHookPointer mmio_handler) {
+ Common::MemoryHookPointer mmio_handler) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
return RESULT_SUCCESS;
}
-ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
+ if (size > GetHeapRegionSize()) {
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ // No need to do any additional work if the heap is already the given size.
+ if (size == GetCurrentHeapSize()) {
+ return MakeResult(heap_region_base);
}
if (heap_memory == nullptr) {
// Initialize heap
- heap_memory = std::make_shared<std::vector<u8>>();
- heap_start = heap_end = target;
+ heap_memory = std::make_shared<std::vector<u8>>(size);
+ heap_end = heap_region_base + size;
} else {
- UnmapRange(heap_start, heap_end - heap_start);
- }
-
- // If necessary, expand backing vector to cover new heap extents.
- if (target < heap_start) {
- heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
- heap_start = target;
- RefreshMemoryBlockMappings(heap_memory.get());
- }
- if (target + size > heap_end) {
- heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
- heap_end = target + size;
- RefreshMemoryBlockMappings(heap_memory.get());
+ UnmapRange(heap_region_base, GetCurrentHeapSize());
}
- ASSERT(heap_end - heap_start == heap_memory->size());
- CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
- MemoryState::Heap));
- Reprotect(vma, perms);
+ // If necessary, expand the backing vector to cover the new heap extent
+ // when growing. Otherwise, shrink the backing memory if a smaller heap
+ // has been requested.
+ const u64 old_heap_size = GetCurrentHeapSize();
+ if (size > old_heap_size) {
+ const u64 alloc_size = size - old_heap_size;
- heap_used = size;
-
- return MakeResult<VAddr>(heap_end - size);
-}
+ heap_memory->insert(heap_memory->end(), alloc_size, 0);
+ RefreshMemoryBlockMappings(heap_memory.get());
+ } else if (size < old_heap_size) {
+ heap_memory->resize(size);
+ heap_memory->shrink_to_fit();
-ResultCode VMManager::HeapFree(VAddr target, u64 size) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ RefreshMemoryBlockMappings(heap_memory.get());
}
- if (size == 0) {
- return RESULT_SUCCESS;
- }
+ heap_end = heap_region_base + size;
+ ASSERT(GetCurrentHeapSize() == heap_memory->size());
- const ResultCode result = UnmapRange(target, size);
- if (result.IsError()) {
- return result;
+ const auto mapping_result =
+ MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
+ if (mapping_result.Failed()) {
+ return mapping_result.Code();
}
- heap_used -= size;
- return RESULT_SUCCESS;
+ return MakeResult<VAddr>(heap_region_base);
}
MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_base = map_region_end;
heap_region_end = heap_region_base + heap_region_size;
+ heap_end = heap_region_base;
new_map_region_base = heap_region_end;
new_map_region_end = new_map_region_base + new_map_region_size;
@@ -624,7 +618,7 @@ void VMManager::ClearPageTable() {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
page_table.special_regions.clear();
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
- Memory::PageType::Unmapped);
+ Common::PageType::Unmapped);
}
VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
return 0xF8000000;
}
-u64 VMManager::GetTotalHeapUsage() const {
- return heap_used;
-}
-
VAddr VMManager::GetAddressSpaceBaseAddress() const {
return address_space_base;
}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
return heap_region_end - heap_region_base;
}
+u64 VMManager::GetCurrentHeapSize() const {
+ return heap_end - heap_region_base;
+}
+
bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
GetHeapRegionEndAddress());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 88e0b3c02..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,9 +9,10 @@
#include <tuple>
#include <vector>
#include "common/common_types.h"
+#include "common/memory_hook.h"
+#include "common/page_table.h"
#include "core/hle/result.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
namespace FileSys {
enum class ProgramAddressSpaceType : u8;
@@ -164,12 +165,12 @@ enum class MemoryState : u32 {
Unmapped = 0x00,
Io = 0x01 | FlagMapped,
Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
- CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
- CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+ Code = 0x03 | CodeFlags | FlagMapProcess,
+ CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
Heap = 0x05 | DataFlags | FlagCodeMemory,
Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
- ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
- ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+ ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+ ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
// Settings for type = MMIO
/// Physical address of the register area this VMA maps to.
PAddr paddr = 0;
- Memory::MemoryHookPointer mmio_handler = nullptr;
+ Common::MemoryHookPointer mmio_handler = nullptr;
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -368,7 +369,7 @@ public:
* @param mmio_handler The handler that will implement read and write for this MMIO region.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
- Memory::MemoryHookPointer mmio_handler);
+ Common::MemoryHookPointer mmio_handler);
/// Unmaps a range of addresses, splitting VMAs as necessary.
ResultCode UnmapRange(VAddr target, u64 size);
@@ -379,11 +380,41 @@ public:
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
- ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
- ResultCode HeapFree(VAddr target, u64 size);
-
ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+ /// Attempts to allocate a heap with the given size.
+ ///
+ /// @param size The size of the heap to allocate in bytes.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size that is equal to the size of the current heap,
+ /// then this function will do nothing and return the current
+ /// heap's starting address, as there's no need to perform
+ /// any additional heap allocation work.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size less than the current heap's size, then
+ /// this function will attempt to shrink the heap.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size larger than the current heap's size, then
+ /// this function will attempt to extend the size of the heap.
+ ///
+ /// @returns A result indicating either success or failure.
+ /// <p>
+ /// If successful, this function will return a result
+ /// containing the starting address to the allocated heap.
+ /// <p>
+ /// If unsuccessful, this function will return a result
+ /// containing an error code.
+ ///
+ /// @pre The given size must lie within the allowable heap
+ /// memory region managed by this VMManager instance.
+ /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
+ /// being returned as the result.
+ ///
+ ResultVal<VAddr> SetHeapSize(u64 size);
+
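The notes above reduce to four caller-visible cases: equal size is a no-op, smaller shrinks, larger grows, and anything beyond the heap region fails. A hedged sketch (sizes and the vm_manager reference are illustrative):

    const auto grown  = vm_manager.SetHeapSize(0x200000);  // allocates or extends
    const auto same   = vm_manager.SetHeapSize(0x200000);  // no-op; same base returned
    const auto shrunk = vm_manager.SetHeapSize(0x100000);  // shrinks the backing block
    const auto failed = vm_manager.SetHeapSize(~u64{0});   // ERR_OUT_OF_MEMORY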
/// Queries the memory manager for information about the given address.
///
/// @param address The address to query the memory manager about for information.
@@ -417,9 +448,6 @@ public:
/// Gets the total memory usage, used by svcGetInfo
u64 GetTotalMemoryUsage() const;
- /// Gets the total heap usage, used by svcGetInfo
- u64 GetTotalHeapUsage() const;
-
/// Gets the address space base address
VAddr GetAddressSpaceBaseAddress() const;
@@ -468,6 +496,13 @@ public:
/// Gets the total size of the heap region in bytes.
u64 GetHeapRegionSize() const;
+ /// Gets the total size of the current heap in bytes.
+ ///
+ /// @note This is the current allocated heap size, not the size
+ /// of the region it's allowed to exist within.
+ ///
+ u64 GetCurrentHeapSize() const;
+
/// Determines whether or not the specified range is within the heap region.
bool IsWithinHeapRegion(VAddr address, u64 size) const;
@@ -509,7 +544,7 @@ public:
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
- Memory::PageTable page_table;
+ Common::PageTable page_table{Memory::PAGE_BITS};
private:
using VMAIter = VMAMap::iterator;
@@ -624,9 +659,9 @@ private:
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
- // The left/right bounds of the address space covered by heap_memory.
- VAddr heap_start = 0;
+
+ // The end of the currently allocated heap. This is an exclusive bound,
+ // essentially 'heap_region_base + current heap size'.
VAddr heap_end = 0;
- u64 heap_used = 0;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 5987fb971..04464a51a 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -24,7 +24,7 @@ public:
* @param thread The thread about which we're deciding.
* @return True if the current thread should wait due to this object being unavailable
*/
- virtual bool ShouldWait(Thread* thread) const = 0;
+ virtual bool ShouldWait(const Thread* thread) const = 0;
/// Acquire/lock the object for the specified thread if it is available
virtual void Acquire(Thread* thread) = 0;