Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h     2
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp    204
-rw-r--r--  src/core/hle/kernel/k_page_table.h       11
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h    2
-rw-r--r--  src/core/hle/kernel/k_process.cpp        26
-rw-r--r--  src/core/hle/kernel/k_process.h           8
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp      45
-rw-r--r--  src/core/hle/kernel/k_thread.cpp         11
-rw-r--r--  src/core/hle/kernel/svc.cpp               6
9 files changed, 154 insertions, 161 deletions
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
index b906895fc..cf704ce87 100644
--- a/src/core/hle/kernel/k_affinity_mask.h
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -31,8 +31,6 @@ public:
}
constexpr void SetAffinity(s32 core, bool set) {
- ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
-
if (set) {
this->mask |= GetCoreBit(core);
} else {
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index b650ea31d..2ebbc0819 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -276,22 +276,23 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
- std::lock_guard lock{page_table_lock};
-
const u64 size{num_pages * PageSize};
- if (!CanContain(addr, size, state)) {
- return ResultInvalidCurrentMemory;
- }
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
- if (IsRegionMapped(addr, size)) {
- return ResultInvalidCurrentMemory;
- }
+ // Lock the table.
+ std::lock_guard lock{page_table_lock};
+
+ // Verify that the destination memory is unmapped.
+ R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
KPageLinkedList page_linked_list;
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
- allocation_option));
- CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
+ R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
+ allocation_option));
+ R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
block_manager->Update(addr, num_pages, state, perm);
@@ -395,39 +396,12 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
-void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
- auto node{page_linked_list.Nodes().begin()};
- PAddr map_addr{node->GetAddress()};
- std::size_t src_num_pages{node->GetNumPages()};
-
- block_manager->IterateForRange(start, end, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- return;
- }
-
- std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize};
- VAddr dst_addr{GetAddressInRange(info, start)};
-
- while (dst_num_pages) {
- if (!src_num_pages) {
- node = std::next(node);
- map_addr = node->GetAddress();
- src_num_pages = node->GetNumPages();
- }
-
- const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
- Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
- map_addr);
-
- dst_addr += num_pages * PageSize;
- map_addr += num_pages * PageSize;
- src_num_pages -= num_pages;
- dst_num_pages -= num_pages;
- }
- });
-}
ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ // Lock the physical memory lock.
+ std::lock_guard phys_lk(map_physical_memory_lock);
+
+ // Lock the table.
std::lock_guard lock{page_table_lock};
std::size_t mapped_size{};
@@ -463,7 +437,35 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
- MapPhysicalMemory(page_linked_list, addr, end_addr);
+ // Map the memory.
+ auto node{page_linked_list.Nodes().begin()};
+ PAddr map_addr{node->GetAddress()};
+ std::size_t src_num_pages{node->GetNumPages()};
+ block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
+ if (info.state != KMemoryState::Free) {
+ return;
+ }
+
+ std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
+ VAddr dst_addr{GetAddressInRange(info, addr)};
+
+ while (dst_num_pages) {
+ if (!src_num_pages) {
+ node = std::next(node);
+ map_addr = node->GetAddress();
+ src_num_pages = node->GetNumPages();
+ }
+
+ const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
+ Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
+ map_addr);
+
+ dst_addr += num_pages * PageSize;
+ map_addr += num_pages * PageSize;
+ src_num_pages -= num_pages;
+ dst_num_pages -= num_pages;
+ }
+ });
mapped_physical_memory_size += remaining_size;
@@ -503,23 +505,8 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}
- CASCADE_CODE(UnmapMemory(addr, size));
-
- auto process{system.Kernel().CurrentProcess()};
- process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
- mapped_physical_memory_size -= mapped_size;
-
- return ResultSuccess;
-}
-
-ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
-
- const VAddr end_addr{addr + size};
- ResultCode result{ResultSuccess};
- KPageLinkedList page_linked_list;
-
// Unmap each region within the range
+ KPageLinkedList page_linked_list;
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
@@ -535,7 +522,6 @@ ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
}
}
});
-
if (result.IsError()) {
return result;
}
@@ -546,10 +532,14 @@ ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
block_manager->Update(addr, num_pages, KMemoryState::Free);
+ auto process{system.Kernel().CurrentProcess()};
+ process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
+ mapped_physical_memory_size -= mapped_size;
+
return ResultSuccess;
}
-ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
KMemoryState src_state{};
@@ -588,7 +578,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
KMemoryState src_state{};
@@ -652,24 +642,26 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
- KMemoryPermission perm) {
- std::lock_guard lock{page_table_lock};
-
+ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
+ KMemoryState state, KMemoryPermission perm) {
+ // Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
- if (!CanContain(addr, size, state)) {
- return ResultInvalidCurrentMemory;
- }
+ // Lock the table.
+ std::lock_guard lock{page_table_lock};
- if (IsRegionMapped(addr, num_pages * PageSize)) {
- return ResultInvalidCurrentMemory;
- }
+ // Check the memory state.
+ R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
- CASCADE_CODE(MapPages(addr, page_linked_list, perm));
+ // Map the pages.
+ R_TRY(MapPages(address, page_linked_list, perm));
- block_manager->Update(addr, num_pages, state, perm);
+ // Update the blocks.
+ block_manager->Update(address, num_pages, state, perm);
return ResultSuccess;
}
@@ -693,21 +685,23 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
KMemoryState state) {
- std::lock_guard lock{page_table_lock};
-
+ // Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
- if (!CanContain(addr, size, state)) {
- return ResultInvalidCurrentMemory;
- }
+ // Lock the table.
+ std::lock_guard lock{page_table_lock};
- if (IsRegionMapped(addr, num_pages * PageSize)) {
- return ResultInvalidCurrentMemory;
- }
+ // Check the memory state.
+ R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
- CASCADE_CODE(UnmapPages(addr, page_linked_list));
+ // Perform the unmap.
+ R_TRY(UnmapPages(addr, page_linked_list));
+ // Update the blocks.
block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
return ResultSuccess;
@@ -765,7 +759,6 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
// Ensure cache coherency, if we're setting pages as executable.
if (is_x) {
- // Memory execution state is changing, invalidate CPU cache range
system.InvalidateCpuInstructionCacheRange(addr, size);
}
@@ -793,12 +786,12 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
KMemoryState state{};
KMemoryAttribute attribute{};
- CASCADE_CODE(CheckMemoryState(
- &state, nullptr, &attribute, nullptr, addr, size,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
- KMemoryAttribute::IpcAndDeviceMapped));
+ R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
+ KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+ KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Mask, KMemoryAttribute::None,
+ KMemoryAttribute::IpcAndDeviceMapped));
block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
@@ -810,12 +803,11 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KMemoryState state{};
- CASCADE_CODE(
- CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+ R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
+ KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+ KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
+ KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
return ResultSuccess;
@@ -871,8 +863,9 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
// Determine the new attribute.
- const auto new_attr = ((old_attr & static_cast<KMemoryAttribute>(~mask)) |
- static_cast<KMemoryAttribute>(attr & mask));
+ const KMemoryAttribute new_attr =
+ static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
+ static_cast<KMemoryAttribute>(attr & mask)));
// Perform operation.
this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
@@ -896,6 +889,9 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
}
ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+ // Lock the physical memory lock.
+ std::lock_guard phys_lk(map_physical_memory_lock);
+
// Try to perform a reduction in heap, instead of an extension.
VAddr cur_address{};
std::size_t allocation_size{};
@@ -1025,12 +1021,12 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
}
if (is_map_only) {
- CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
+ R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageLinkedList page_group;
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages,
- memory_pool, allocation_option));
- CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+ R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool,
+ allocation_option));
+ R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
block_manager->Update(addr, needed_num_pages, state, perm);
@@ -1186,7 +1182,7 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
OperationType operation) {
- std::lock_guard lock{page_table_lock};
+ ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(addr, PageSize));
ASSERT(num_pages > 0);
@@ -1211,7 +1207,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr) {
- std::lock_guard lock{page_table_lock};
+ ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
ASSERT(Common::IsAligned(addr, PageSize));
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f67986e91..60ae9b9e8 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -37,9 +37,8 @@ public:
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode UnmapMemory(VAddr addr, std::size_t size);
- ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
@@ -88,7 +87,6 @@ private:
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
- void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
@@ -148,6 +146,7 @@ private:
}
std::recursive_mutex page_table_lock;
+ std::mutex map_physical_memory_lock;
std::unique_ptr<KMemoryBlockManager> block_manager;
public:
@@ -249,7 +248,9 @@ public:
return !IsOutsideASLRRegion(address, size);
}
constexpr PAddr GetPhysicalAddr(VAddr addr) {
- return page_table_impl.backing_addr[addr >> PageBits] + addr;
+ const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
+ ASSERT(backing_addr);
+ return backing_addr + addr;
}
constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1;
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 0b894c8cf..bd779739d 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -258,7 +258,7 @@ private:
private:
constexpr void ClearAffinityBit(u64& affinity, s32 core) {
- affinity &= ~(u64(1) << core);
+ affinity &= ~(UINT64_C(1) << core);
}
constexpr s32 GetNextCore(u64& affinity) {
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 265ac6fa1..85c506979 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -146,6 +146,13 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
// Open a reference to the resource limit.
process->resource_limit->Open();
+ // Clear remaining fields.
+ process->num_running_threads = 0;
+ process->is_signaled = false;
+ process->exception_thread = nullptr;
+ process->is_suspended = false;
+ process->schedule_count = 0;
+
return ResultSuccess;
}
@@ -157,20 +164,17 @@ KResourceLimit* KProcess::GetResourceLimit() const {
return resource_limit;
}
-void KProcess::IncrementThreadCount() {
- ASSERT(num_threads >= 0);
- num_created_threads++;
-
- if (const auto count = ++num_threads; count > peak_num_threads) {
- peak_num_threads = count;
- }
+void KProcess::IncrementRunningThreadCount() {
+ ASSERT(num_running_threads.load() >= 0);
+ ++num_running_threads;
}
-void KProcess::DecrementThreadCount() {
- ASSERT(num_threads > 0);
+void KProcess::DecrementRunningThreadCount() {
+ ASSERT(num_running_threads.load() > 0);
- if (const auto count = --num_threads; count == 0) {
- LOG_WARNING(Kernel, "Process termination is not fully implemented.");
+ if (const auto prev = num_running_threads--; prev == 1) {
+ // TODO(bunnei): Process termination to be implemented when multiprocess is supported.
+ UNIMPLEMENTED_MSG("KProcess termination is not implemented!");
}
}
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index c2a672021..38b446350 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -235,8 +235,8 @@ public:
++schedule_count;
}
- void IncrementThreadCount();
- void DecrementThreadCount();
+ void IncrementRunningThreadCount();
+ void DecrementRunningThreadCount();
void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
running_threads[core] = thread;
@@ -473,9 +473,7 @@ private:
bool is_suspended{};
bool is_initialized{};
- std::atomic<s32> num_created_threads{};
- std::atomic<u16> num_threads{};
- u16 peak_num_threads{};
+ std::atomic<u16> num_running_threads{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b32d4f285..c96520828 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -710,23 +710,19 @@ void KScheduler::Unload(KThread* thread) {
}
void KScheduler::Reload(KThread* thread) {
- LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+ LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
- if (thread) {
- ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
-
- Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
}
void KScheduler::SwitchContextStep2() {
// Load context of new thread
- Reload(current_thread.load());
+ Reload(GetCurrentThread());
RescheduleCurrentCore();
}
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
KThread* previous_thread = GetCurrentThread();
KThread* next_thread = state.highest_priority_thread;
- state.needs_scheduling = false;
+ state.needs_scheduling.store(false);
// We never want to schedule a null thread, so use the idle thread if we don't have a next.
if (next_thread == nullptr) {
next_thread = idle_thread;
}
+ if (next_thread->GetCurrentCore() != core_id) {
+ next_thread->SetCurrentCore(core_id);
+ }
+
// We never want to schedule a dummy thread, as these are only used by host threads for locking.
if (next_thread->GetThreadType() == ThreadType::Dummy) {
ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
return;
}
- if (next_thread->GetCurrentCore() != core_id) {
- next_thread->SetCurrentCore(core_id);
- }
-
- current_thread.store(next_thread);
-
+ // Update the CPU time tracking variables.
KProcess* const previous_process = system.Kernel().CurrentProcess();
-
UpdateLastContextSwitchTime(previous_thread, previous_process);
// Save context for previous thread
@@ -770,6 +764,10 @@ void KScheduler::ScheduleImpl() {
std::shared_ptr<Common::Fiber>* old_context;
old_context = &previous_thread->GetHostContext();
+
+ // Set the new thread.
+ current_thread.store(next_thread);
+
guard.Unlock();
Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
do {
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
- next_thread->context_guard.Lock();
- if (next_thread->GetRawState() != ThreadState::Runnable) {
+ const auto locked = next_thread->context_guard.TryLock();
+ if (state.needs_scheduling.load()) {
next_thread->context_guard.Unlock();
break;
}
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
next_thread->context_guard.Unlock();
break;
}
+ if (!locked) {
+ continue;
+ }
}
auto thread = next_thread ? next_thread : idle_thread;
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index f42abb8a1..de3ffe0c7 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -215,7 +215,6 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
parent = owner;
parent->Open();
- parent->IncrementThreadCount();
}
// Initialize thread context.
@@ -327,11 +326,6 @@ void KThread::Finalize() {
}
}
- // Decrement the parent process's thread count.
- if (parent != nullptr) {
- parent->DecrementThreadCount();
- }
-
// Perform inherited finalization.
KSynchronizationObject::Finalize();
}
@@ -1011,7 +1005,7 @@ ResultCode KThread::Run() {
if (IsUserThread() && IsSuspended()) {
this->UpdateState();
}
- owner->IncrementThreadCount();
+ owner->IncrementRunningThreadCount();
}
// Set our state and finish.
@@ -1026,10 +1020,11 @@ ResultCode KThread::Run() {
void KThread::Exit() {
ASSERT(this == GetCurrentThreadPointer(kernel));
- // Release the thread resource hint from parent.
+ // Release the thread resource hint and running thread count from parent.
if (parent != nullptr) {
parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1);
resource_limit_release_hint = true;
+ parent->DecrementRunningThreadCount();
}
// Perform termination.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index c7f5140f4..4f7aebf3f 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -230,7 +230,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return result;
}
- return page_table.Map(dst_addr, src_addr, size);
+ return page_table.MapMemory(dst_addr, src_addr, size);
}
static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
@@ -249,7 +249,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return result;
}
- return page_table.Unmap(dst_addr, src_addr, size);
+ return page_table.UnmapMemory(dst_addr, src_addr, size);
}
static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
@@ -2613,7 +2613,7 @@ static const FunctionDef SVC_Table_32[] = {
{0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
{0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
{0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
- {0x36, nullptr, "Unknown"},
+ {0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"},
{0x37, nullptr, "Unknown"},
{0x38, nullptr, "Unknown"},
{0x39, nullptr, "Unknown"},