From 39c8d18feba8eafcd43fbb55e73ae150a1947aad Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Tue, 13 Oct 2020 08:10:50 -0400
Subject: core/CMakeLists: Make some warnings errors

Makes our error coverage a little more consistent across the board by
applying it to the Linux side of things as well. This also makes it more
consistent with the warning settings in other libraries in the project.

This also updates httplib to 0.7.9, as it contains several warning
cleanups that allow us to enable several warnings as errors.
---
 src/core/hle/kernel/handle_table.cpp |  2 +-
 src/core/hle/kernel/scheduler.cpp    | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..3e745c18b 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -118,7 +118,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
 
 void HandleTable::Clear() {
     for (u16 i = 0; i < table_size; ++i) {
-        generations[i] = i + 1;
+        generations[i] = static_cast<u16>(i + 1);
         objects[i] = nullptr;
     }
     next_free_slot = 0;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5cbd3b912..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -72,7 +72,7 @@ u32 GlobalScheduler::SelectThreads() {
         if (top_thread != nullptr) {
             // TODO(Blinkhawk): Implement Thread Pinning
         } else {
-            idle_cores |= (1ul << core);
+            idle_cores |= (1U << core);
         }
         top_threads[core] = top_thread;
     }
@@ -126,7 +126,7 @@ u32 GlobalScheduler::SelectThreads() {
             top_threads[core_id] = suggested;
         }
 
-        idle_cores &= ~(1ul << core_id);
+        idle_cores &= ~(1U << core_id);
     }
     u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
@@ -134,7 +134,7 @@ u32 GlobalScheduler::SelectThreads() {
         Scheduler& sched = kernel.Scheduler(core);
         ASSERT(top_threads[core] == nullptr ||
                static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
         if (update_thread(top_threads[core], sched)) {
-            cores_needing_context_switch |= (1ul << core);
+            cores_needing_context_switch |= (1U << core);
         }
     }
     return cores_needing_context_switch;
@@ -364,7 +364,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         } else {
             must_context_switch = true;
         }
-        cores_pending_reschedule &= ~(1ul << core);
+        cores_pending_reschedule &= ~(1U << core);
     }
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
@@ -767,7 +767,7 @@ void Scheduler::SwitchToCurrent() {
                     current_thread->context_guard.unlock();
                     break;
                 }
-                if (current_thread->GetProcessorID() != core_id) {
+                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                     current_thread->context_guard.unlock();
                     break;
                 }
--
cgit v1.2.3


From b9a9b83bee124a86501905d0b75def4ccb1cb966 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Tue, 13 Oct 2020 18:00:25 -0300
Subject: kernel: Implement host thread register methods without locking

Locks on GetCurrentHostThreadID were causing performance issues according
to Visual Studio's profiler. It was consuming twice as much time as
arm_interface.Run(). The cost was not in the function itself but in the
locking it required.

Reimplement these functions using atomics and static storage instead of an
unordered_map. As a side effect of avoiding locks, reads no longer walk
linked lists: the unordered_map is replaced with a linear search over flat
arrays.
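To make the approach above concrete, here is a minimal, self-contained sketch of this style of lock-free registry: an atomic counter publishes entries into fixed-size key/value arrays, and lookups are a lock-free linear scan over the published prefix. The Registry, Register, and Lookup names, the 64-entry bound, and the release/acquire ordering are illustrative assumptions for this sketch, not yuzu's actual API; the real implementation lives in KernelCore::Impl in the diff below.

#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <optional>
#include <thread>
#include <vector>

class Registry {
public:
    static constexpr std::size_t MaxThreads = 64; // fixed upper bound, as in the patch

    // Writers claim a slot with fetch_add, write the value, then publish the
    // key last. Entries are never removed, so a visible key implies its value
    // was fully written beforehand.
    void Register(std::uint32_t value) {
        const std::size_t index = num_threads.fetch_add(1, std::memory_order_relaxed);
        if (index >= MaxThreads) {
            std::abort(); // the real code asserts on overflow instead
        }
        values[index] = value;
        keys[index].store(std::this_thread::get_id(), std::memory_order_release);
    }

    // Readers take no lock: scan the published prefix for their own id.
    std::optional<std::uint32_t> Lookup() const {
        const std::size_t count =
            std::min(num_threads.load(std::memory_order_acquire), MaxThreads);
        const std::thread::id this_id = std::this_thread::get_id();
        for (std::size_t i = 0; i < count; ++i) {
            if (keys[i].load(std::memory_order_acquire) == this_id) {
                return values[i];
            }
        }
        return std::nullopt;
    }

private:
    std::atomic<std::size_t> num_threads{0};
    std::array<std::atomic<std::thread::id>, MaxThreads> keys{};
    std::array<std::uint32_t, MaxThreads> values{};
};

int main() {
    Registry registry;
    std::vector<std::thread> workers;
    for (std::uint32_t i = 0; i < 4; ++i) {
        workers.emplace_back([&registry, i] {
            registry.Register(i);
            if (const auto value = registry.Lookup()) {
                std::printf("registered as %u\n", static_cast<unsigned>(*value));
            }
        });
    }
    for (auto& worker : workers) {
        worker.join();
    }
    return 0;
}

A linear scan over at most 64 entries can beat an unordered_map here because registration is rare while lookup sits on the hot path: the scan touches two small contiguous arrays and never contends on a mutex.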
--- src/core/hle/kernel/kernel.cpp | 66 ++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 28 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f2b0fe2fd..96ca01194 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -107,7 +106,11 @@ struct KernelCore::Impl { cores.clear(); exclusive_monitor.reset(); - host_thread_ids.clear(); + + num_host_threads = 0; + std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(), + std::thread::id{}); + std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0); } void InitializePhysicalCores() { @@ -177,54 +180,56 @@ struct KernelCore::Impl { void MakeCurrentProcess(Process* process) { current_process = process; - if (process == nullptr) { return; } - - u32 core_id = GetCurrentHostThreadID(); + const u32 core_id = GetCurrentHostThreadID(); if (core_id < Core::Hardware::NUM_CPU_CORES) { system.Memory().SetCurrentPageTable(*process, core_id); } } void RegisterCoreThread(std::size_t core_id) { - std::unique_lock lock{register_thread_mutex}; + const std::thread::id this_id = std::this_thread::get_id(); if (!is_multicore) { - single_core_thread_id = std::this_thread::get_id(); + single_core_thread_id = this_id; } - const std::thread::id this_id = std::this_thread::get_id(); - const auto it = host_thread_ids.find(this_id); + const auto end = register_host_thread_keys.begin() + num_host_threads; + const auto it = std::find(register_host_thread_keys.begin(), end, this_id); ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - ASSERT(it == host_thread_ids.end()); + ASSERT(it == end); ASSERT(!registered_core_threads[core_id]); - host_thread_ids[this_id] = static_cast(core_id); + InsertHostThread(static_cast(core_id)); registered_core_threads.set(core_id); } void RegisterHostThread() { - std::unique_lock lock{register_thread_mutex}; const std::thread::id this_id = std::this_thread::get_id(); - const auto it = host_thread_ids.find(this_id); - if (it != host_thread_ids.end()) { - return; + const auto end = register_host_thread_keys.begin() + num_host_threads; + const auto it = std::find(register_host_thread_keys.begin(), end, this_id); + if (it == end) { + InsertHostThread(registered_thread_ids++); } - host_thread_ids[this_id] = registered_thread_ids++; } - u32 GetCurrentHostThreadID() const { + void InsertHostThread(u32 value) { + const size_t index = num_host_threads++; + ASSERT_MSG(index < NUM_REGISTRABLE_HOST_THREADS, "Too many host threads"); + register_host_thread_values[index] = value; + register_host_thread_keys[index] = std::this_thread::get_id(); + } + + [[nodiscard]] u32 GetCurrentHostThreadID() const { const std::thread::id this_id = std::this_thread::get_id(); - if (!is_multicore) { - if (single_core_thread_id == this_id) { - return static_cast(system.GetCpuManager().CurrentCore()); - } + if (!is_multicore && single_core_thread_id == this_id) { + return static_cast(system.GetCpuManager().CurrentCore()); } - std::unique_lock lock{register_thread_mutex}; - const auto it = host_thread_ids.find(this_id); - if (it == host_thread_ids.end()) { + const auto end = register_host_thread_keys.begin() + num_host_threads; + const auto it = std::find(register_host_thread_keys.begin(), end, this_id); + if (it == end) { return Core::INVALID_HOST_THREAD_ID; } - return it->second; + return 
register_host_thread_values[std::distance(register_host_thread_keys.begin(), it)]; } Core::EmuThreadHandle GetCurrentEmuThreadID() const { @@ -322,10 +327,15 @@ struct KernelCore::Impl { std::vector cores; // 0-3 IDs represent core threads, >3 represent others - std::unordered_map host_thread_ids; - u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; + std::atomic registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; std::bitset registered_core_threads; - mutable std::mutex register_thread_mutex; + + // Number of host threads is a relatively high number to avoid overflowing + static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 64; + std::atomic num_host_threads{0}; + std::array, NUM_REGISTRABLE_HOST_THREADS> + register_host_thread_keys{}; + std::array, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_values{}; // Kernel memory management std::unique_ptr memory_manager; -- cgit v1.2.3 From be1954e04cb5a0c3a526f78ed5490a5e65310280 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 15 Oct 2020 14:49:45 -0400 Subject: core: Fix clang build Recent changes to the build system that made more warnings be flagged as errors caused building via clang to break. Fixes #4795 --- src/core/hle/kernel/address_arbiter.cpp | 4 +- src/core/hle/kernel/handle_table.cpp | 2 +- src/core/hle/kernel/hle_ipc.cpp | 2 +- src/core/hle/kernel/kernel.cpp | 2 +- src/core/hle/kernel/memory/address_space_info.cpp | 2 + src/core/hle/kernel/memory/memory_manager.cpp | 4 +- src/core/hle/kernel/memory/page_heap.cpp | 17 +++--- src/core/hle/kernel/memory/page_heap.h | 18 +++--- src/core/hle/kernel/memory/page_table.cpp | 6 +- src/core/hle/kernel/physical_core.h | 2 +- src/core/hle/kernel/process.cpp | 17 +++--- src/core/hle/kernel/resource_limit.cpp | 4 +- src/core/hle/kernel/scheduler.cpp | 72 ++++++++++++++++------- src/core/hle/kernel/svc.cpp | 7 ++- src/core/hle/kernel/svc_wrap.h | 17 +++--- src/core/hle/kernel/synchronization.cpp | 4 +- src/core/hle/kernel/thread.cpp | 2 +- src/core/hle/kernel/thread.h | 6 +- 18 files changed, 114 insertions(+), 74 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index b882eaa0f..b6ebc5329 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a auto& monitor = system.Monitor(); s32 updated_value; do { - updated_value = monitor.ExclusiveRead32(current_core, address); + updated_value = static_cast(monitor.ExclusiveRead32(current_core, address)); if (updated_value != value) { return ERR_INVALID_STATE; @@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a updated_value = value; } } - } while (!monitor.ExclusiveWrite32(current_core, address, updated_value)); + } while (!monitor.ExclusiveWrite32(current_core, address, static_cast(updated_value))); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 3e745c18b..fe4988f84 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -68,7 +68,7 @@ ResultVal HandleTable::Create(std::shared_ptr obj) { generations[slot] = generation; objects[slot] = std::move(obj); - Handle handle = generation | (slot << 15); + const auto handle = static_cast(generation | static_cast(slot << 15)); return MakeResult(handle); } diff --git 
a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 81f85643b..0a2de4270 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -58,7 +58,7 @@ std::shared_ptr HLERequestContext::SleepClientThread( { Handle event_handle = InvalidHandle; - SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); + SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast(timeout)); thread->SetHLECallback( [context = *this, callback](std::shared_ptr thread) mutable -> bool { ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f2b0fe2fd..c04b23eff 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -168,7 +168,7 @@ struct KernelCore::Impl { const auto type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND); auto thread_res = - Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast(i), 0, + Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast(i), 0, nullptr, std::move(init_func), init_func_parameter); suspend_threads[i] = std::move(thread_res).Unwrap(); diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp index e4288cab4..6cf43ba24 100644 --- a/src/core/hle/kernel/memory/address_space_info.cpp +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) { return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; } UNREACHABLE(); + return 0; } std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { @@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; } UNREACHABLE(); + return 0; } } // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp index acf13585c..a96157c37 100644 --- a/src/core/hle/kernel/memory/memory_manager.cpp +++ b/src/core/hle/kernel/memory/memory_manager.cpp @@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align } // If we allocated more than we need, free some - const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)}; + const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast(heap_index))}; if (allocated_pages > num_pages) { chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); } @@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa // Keep allocating until we've allocated all our pages for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { - const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)}; + const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast(index))}; while (num_pages >= pages_per_alloc) { // Allocate a block diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp index 0ab1f7205..7890b8c1a 100644 --- a/src/core/hle/kernel/memory/page_heap.cpp +++ b/src/core/hle/kernel/memory/page_heap.cpp @@ -33,11 +33,12 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_ } VAddr PageHeap::AllocateBlock(s32 index) { - const std::size_t needed_size{blocks[index].GetSize()}; + const auto u_index = static_cast(index); + const 
auto needed_size{blocks[u_index].GetSize()}; - for (s32 i{index}; i < static_cast(MemoryBlockPageShifts.size()); i++) { - if (const VAddr addr{blocks[i].PopBlock()}; addr) { - if (const std::size_t allocated_size{blocks[i].GetSize()}; + for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) { + if (const VAddr addr = blocks[i].PopBlock(); addr != 0) { + if (const std::size_t allocated_size = blocks[i].GetSize(); allocated_size > needed_size) { Free(addr + needed_size, (allocated_size - needed_size) / PageSize); } @@ -50,7 +51,7 @@ VAddr PageHeap::AllocateBlock(s32 index) { void PageHeap::FreeBlock(VAddr block, s32 index) { do { - block = blocks[index++].PushBlock(block); + block = blocks[static_cast(index++)].PushBlock(block); } while (block != 0); } @@ -69,7 +70,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { VAddr after_start{end}; VAddr after_end{end}; while (big_index >= 0) { - const std::size_t block_size{blocks[big_index].GetSize()}; + const std::size_t block_size{blocks[static_cast(big_index)].GetSize()}; const VAddr big_start{Common::AlignUp((start), block_size)}; const VAddr big_end{Common::AlignDown((end), block_size)}; if (big_start < big_end) { @@ -87,7 +88,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { // Free space before the big blocks for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[i].GetSize()}; + const std::size_t block_size{blocks[static_cast(i)].GetSize()}; while (before_start + block_size <= before_end) { before_end -= block_size; FreeBlock(before_end, i); @@ -96,7 +97,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { // Free space after the big blocks for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[i].GetSize()}; + const std::size_t block_size{blocks[static_cast(i)].GetSize()}; while (after_start + block_size <= after_end) { FreeBlock(after_start, i); after_start += block_size; diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h index 22b0de860..92a2bce04 100644 --- a/src/core/hle/kernel/memory/page_heap.h +++ b/src/core/hle/kernel/memory/page_heap.h @@ -34,7 +34,9 @@ public: static constexpr s32 GetBlockIndex(std::size_t num_pages) { for (s32 i{static_cast(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { - if (num_pages >= (static_cast(1) << MemoryBlockPageShifts[i]) / PageSize) { + const auto shift_index = static_cast(i); + if (num_pages >= + (static_cast(1) << MemoryBlockPageShifts[shift_index]) / PageSize) { return i; } } @@ -86,7 +88,7 @@ private: // Set the bitmap pointers for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) { - bit_storages[depth] = storage; + bit_storages[static_cast(depth)] = storage; size = Common::AlignUp(size, 64) / 64; storage += size; } @@ -99,7 +101,7 @@ private: s32 depth{}; do { - const u64 v{bit_storages[depth][offset]}; + const u64 v{bit_storages[static_cast(depth)][offset]}; if (v == 0) { // Non-zero depth indicates that a previous level had a free block ASSERT(depth == 0); @@ -125,7 +127,7 @@ private: constexpr bool ClearRange(std::size_t offset, std::size_t count) { const s32 depth{GetHighestDepthIndex()}; const auto bit_ind{offset / 64}; - u64* bits{bit_storages[depth]}; + u64* bits{bit_storages[static_cast(depth)]}; if (count < 64) { const auto shift{offset % 64}; ASSERT(shift + count <= 64); @@ -177,11 +179,11 @@ private: const auto which{offset % 64}; const u64 mask{1ULL << which}; - u64* bit{std::addressof(bit_storages[depth][ind])}; + u64* 
bit{std::addressof(bit_storages[static_cast(depth)][ind])}; const u64 v{*bit}; ASSERT((v & mask) == 0); *bit = v | mask; - if (v) { + if (v != 0) { break; } offset = ind; @@ -195,12 +197,12 @@ private: const auto which{offset % 64}; const u64 mask{1ULL << which}; - u64* bit{std::addressof(bit_storages[depth][ind])}; + u64* bit{std::addressof(bit_storages[static_cast(depth)][ind])}; u64 v{*bit}; ASSERT((v & mask) != 0); v &= ~mask; *bit = v; - if (v) { + if (v != 0) { break; } offset = ind; diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp index a3fadb533..4f759d078 100644 --- a/src/core/hle/kernel/memory/page_table.cpp +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -414,7 +414,8 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { const std::size_t remaining_pages{remaining_size / PageSize}; if (process->GetResourceLimit() && - !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) { + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, + static_cast(remaining_size))) { return ERR_RESOURCE_LIMIT_EXCEEDED; } @@ -778,7 +779,8 @@ ResultVal PageTable::SetHeapSize(std::size_t size) { auto process{system.Kernel().CurrentProcess()}; if (process->GetResourceLimit() && delta != 0 && - !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) { + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, + static_cast(delta))) { return ERR_RESOURCE_LIMIT_EXCEEDED; } diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index d7a7a951c..6cb59d0fc 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -34,7 +34,7 @@ public: PhysicalCore& operator=(const PhysicalCore&) = delete; PhysicalCore(PhysicalCore&&) = default; - PhysicalCore& operator=(PhysicalCore&&) = default; + PhysicalCore& operator=(PhysicalCore&&) = delete; void Idle(); /// Interrupt this physical core. 
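Most hunks in this clang-build patch fix the same -Wsign-conversion pattern: an index that must stay signed (typically so a countdown loop can terminate below zero) is used to subscript a container whose operator[] takes std::size_t. A short standalone illustration of the pattern, with invented names rather than yuzu's:

#include <array>
#include <cstddef>

using u64 = unsigned long long; // stand-in for yuzu's u64 typedef
using s32 = int;                // stand-in for yuzu's s32 typedef

std::array<u64, 8> blocks{};

u64 SumDownFrom(s32 big_index) {
    u64 total = 0;
    // The loop index stays signed so `i >= 0` can become false, but each
    // subscript is converted explicitly, which is what fixes the
    // sign-conversion diagnostic once it is promoted to an error.
    for (s32 i = big_index; i >= 0; --i) {
        total += blocks[static_cast<std::size_t>(i)];
    }
    return total;
}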
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index ff9d9248b..0b39f2955 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -137,9 +137,10 @@ std::shared_ptr Process::GetResourceLimit() const { } u64 Process::GetTotalPhysicalMemoryAvailable() const { - const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) + - page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + - main_thread_stack_size}; + const u64 capacity{ + static_cast(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) + + page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + + main_thread_stack_size}; if (capacity < memory_usage_capacity) { return capacity; @@ -279,12 +280,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, // Set initial resource limits resource_limit->SetLimitValue( ResourceType::PhysicalMemory, - kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)); + static_cast(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application))); resource_limit->SetLimitValue(ResourceType::Threads, 608); resource_limit->SetLimitValue(ResourceType::Events, 700); resource_limit->SetLimitValue(ResourceType::TransferMemory, 128); resource_limit->SetLimitValue(ResourceType::Sessions, 894); - ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size)); + ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast(code_size))); // Create TLS region tls_region_address = CreateTLSRegion(); @@ -300,9 +301,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { ChangeStatus(ProcessStatus::Running); - SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); + SetupMainThread(system, *this, static_cast(main_thread_priority), main_thread_stack_top); resource_limit->Reserve(ResourceType::Threads, 1); - resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); + resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast(main_thread_stack_size)); } void Process::PrepareForTermination() { @@ -363,7 +364,7 @@ VAddr Process::CreateTLSRegion() { ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize, Memory::MemoryState::ThreadLocal, Memory::MemoryPermission::ReadAndWrite, tls_map_addr) - .ValueOr(0)}; + .ValueOr(0U)}; ASSERT(tls_page_addr); diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index 212e442f4..e94093f24 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) { void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) { const std::size_t index{ResourceTypeToIndex(resource)}; - current[index] -= used_amount; - available[index] -= available_amount; + current[index] -= static_cast(used_amount); + available[index] -= static_cast(available_amount); } std::shared_ptr ResourceLimit::Create(KernelCore& kernel) { diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 6b7db5372..4a9a762f3 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -89,9 +89,11 @@ u32 GlobalScheduler::SelectThreads() { while (iter != suggested_queue[core_id].end()) { suggested = *iter; iter++; - s32 suggested_core_id = suggested->GetProcessorID(); - Thread* top_thread = - 
suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr; + const s32 suggested_core_id = suggested->GetProcessorID(); + Thread* top_thread = suggested_core_id >= 0 + ? top_threads[static_cast(suggested_core_id)] + : nullptr; + if (top_thread != suggested) { if (top_thread != nullptr && top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) { @@ -102,16 +104,19 @@ u32 GlobalScheduler::SelectThreads() { TransferToCore(suggested->GetPriority(), static_cast(core_id), suggested); break; } + suggested = nullptr; migration_candidates[num_candidates++] = suggested_core_id; } + // Step 3: Select a suggested thread from another core if (suggested == nullptr) { for (std::size_t i = 0; i < num_candidates; i++) { - s32 candidate_core = migration_candidates[i]; + const auto candidate_core = static_cast(migration_candidates[i]); suggested = top_threads[candidate_core]; auto it = scheduled_queue[candidate_core].begin(); - it++; + ++it; + Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr; if (next != nullptr) { TransferToCore(suggested->GetPriority(), static_cast(core_id), @@ -128,7 +133,8 @@ u32 GlobalScheduler::SelectThreads() { idle_cores &= ~(1U << core_id); } - u32 cores_needing_context_switch{}; + + u32 cores_needing_context_switch = 0; for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { Scheduler& sched = kernel.Scheduler(core); ASSERT(top_threads[core] == nullptr || @@ -186,13 +192,16 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { for (auto& thread : suggested_queue[core_id]) { const s32 source_core = thread->GetProcessorID(); if (source_core >= 0) { - if (current_threads[source_core] != nullptr) { - if (thread == current_threads[source_core] || - current_threads[source_core]->GetPriority() < min_regular_priority) { + const auto sanitized_source_core = static_cast(source_core); + + if (current_threads[sanitized_source_core] != nullptr) { + if (thread == current_threads[sanitized_source_core] || + current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) { continue; } } } + if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || next_thread->GetPriority() < thread->GetPriority()) { if (thread->GetPriority() <= priority) { @@ -240,17 +249,25 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread for (std::size_t i = 0; i < current_threads.size(); i++) { current_threads[i] = scheduled_queue[i].empty() ? 
nullptr : scheduled_queue[i].front(); } + for (auto& thread : suggested_queue[core_id]) { const s32 source_core = thread->GetProcessorID(); - if (source_core < 0 || thread == current_threads[source_core]) { + if (source_core < 0) { + continue; + } + + const auto sanitized_source_core = static_cast(source_core); + if (thread == current_threads[sanitized_source_core]) { continue; } - if (current_threads[source_core] == nullptr || - current_threads[source_core]->GetPriority() >= min_regular_priority) { + + if (current_threads[sanitized_source_core] == nullptr || + current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) { winner = thread; } break; } + if (winner != nullptr) { if (winner != yielding_thread) { TransferToCore(winner->GetPriority(), static_cast(core_id), winner); @@ -292,17 +309,22 @@ void GlobalScheduler::PreemptThreads() { if (thread->GetPriority() != priority) { continue; } + if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() + const auto sanitized_source_core = static_cast(source_core); + Thread* next_thread = scheduled_queue[sanitized_source_core].empty() ? nullptr - : scheduled_queue[source_core].front(); + : scheduled_queue[sanitized_source_core].front(); + if (next_thread != nullptr && next_thread->GetPriority() < 2) { break; } + if (next_thread == thread) { continue; } } + if (current_thread != nullptr && current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { winner = thread; @@ -322,17 +344,22 @@ void GlobalScheduler::PreemptThreads() { if (thread->GetPriority() < priority) { continue; } + if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() + const auto sanitized_source_core = static_cast(source_core); + Thread* next_thread = scheduled_queue[sanitized_source_core].empty() ? 
nullptr - : scheduled_queue[source_core].front(); + : scheduled_queue[sanitized_source_core].front(); + if (next_thread != nullptr && next_thread->GetPriority() < 2) { break; } + if (next_thread == thread) { continue; } } + if (current_thread != nullptr && current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { winner = thread; @@ -352,11 +379,11 @@ void GlobalScheduler::PreemptThreads() { void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, Core::EmuThreadHandle global_thread) { - u32 current_core = global_thread.host_handle; + const u32 current_core = global_thread.host_handle; bool must_context_switch = global_thread.guest_handle != InvalidHandle && (current_core < Core::Hardware::NUM_CPU_CORES); while (cores_pending_reschedule != 0) { - u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); + const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); ASSERT(core < Core::Hardware::NUM_CPU_CORES); if (!must_context_switch || core != current_core) { auto& phys_core = kernel.PhysicalCore(core); @@ -366,6 +393,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, } cores_pending_reschedule &= ~(1U << core); } + if (must_context_switch) { auto& core_scheduler = kernel.CurrentScheduler(); kernel.ExitSVCProfile(); @@ -803,9 +831,11 @@ void Scheduler::Initialize() { std::string name = "Idle Thread Id:" + std::to_string(core_id); std::function init_func = Core::CpuManager::GetIdleThreadStartFunc(); void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - ThreadType type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); - auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast(core_id), 0, - nullptr, std::move(init_func), init_func_parameter); + const auto type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); + auto thread_res = + Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast(core_id), 0, + nullptr, std::move(init_func), init_func_parameter); + idle_thread = std::move(thread_res).Unwrap(); } diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index bafd1ced7..b8623e831 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -482,7 +482,8 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, s32 handle_count, u32 timeout_high, Handle* index) { const s64 nano_seconds{(static_cast(timeout_high) << 32) | static_cast(timeout_low)}; - return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); + return WaitSynchronization(system, index, handles_address, static_cast(handle_count), + nano_seconds); } /// Resumes a thread waiting on WaitSynchronization @@ -2002,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, return ERR_INVALID_HANDLE; } - *core = thread->GetIdealCore(); + *core = static_cast(thread->GetIdealCore()); *mask = thread->GetAffinityMask(); return RESULT_SUCCESS; @@ -2070,7 +2071,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, return ERR_INVALID_HANDLE; } - return thread->SetCoreAndAffinityMask(core, affinity_mask); + return thread->SetCoreAndAffinityMask(static_cast(core), affinity_mask); } static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, diff --git 
a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 0b6dd9df0..9284a4c84 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -11,11 +11,11 @@ namespace Kernel { -static inline u64 Param(const Core::System& system, int n) { +static inline u64 Param(const Core::System& system, std::size_t n) { return system.CurrentArmInterface().GetReg(n); } -static inline u32 Param32(const Core::System& system, int n) { +static inline u32 Param32(const Core::System& system, std::size_t n) { return static_cast(system.CurrentArmInterface().GetReg(n)); } @@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) { } static inline void FuncReturn32(Core::System& system, u32 result) { - system.CurrentArmInterface().SetReg(0, (u64)result); + system.CurrentArmInterface().SetReg(0, static_cast(result)); } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -386,9 +386,10 @@ template void SvcWrap32(Core::System& system) { Handle param_1 = 0; - const u32 retval = func(system, ¶m_1, Param32(system, 0), Param32(system, 1), - Param32(system, 2), Param32(system, 3), Param32(system, 4)) - .raw; + const u32 retval = + func(system, ¶m_1, Param32(system, 0), Param32(system, 1), Param32(system, 2), + Param32(system, 3), static_cast(Param32(system, 4))) + .raw; system.CurrentArmInterface().SetReg(1, param_1); FuncReturn(system, retval); @@ -542,8 +543,8 @@ void SvcWrap32(Core::System& system) { template void SvcWrap32(Core::System& system) { u32 param_1 = 0; - const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2), - Param32(system, 3), ¶m_1) + const u32 retval = func(system, Param32(system, 0), Param32(system, 1), + static_cast(Param32(system, 2)), Param32(system, 3), ¶m_1) .raw; system.CurrentArmInterface().SetReg(1, param_1); FuncReturn(system, retval); diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 8b875d853..653f722b3 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -51,7 +51,7 @@ std::pair Synchronization::WaitFor( // We found a ready object, acquire it and set the result value SynchronizationObject* object = itr->get(); object->Acquire(thread); - const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); + const auto index = static_cast(std::distance(sync_objects.begin(), itr)); lock.CancelSleep(); return {RESULT_SUCCESS, index}; } @@ -105,7 +105,7 @@ std::pair Synchronization::WaitFor( }); ASSERT(itr != sync_objects.end()); signaling_object->Acquire(thread); - const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); + const auto index = static_cast(std::distance(sync_objects.begin(), itr)); return {signaling_result, index}; } return {signaling_result, -1}; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index d132aba34..323e740e9 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { if (old_affinity_mask != new_affinity_mask) { const s32 old_core = processor_id; if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { - if (static_cast(ideal_core) < 0) { + if (ideal_core < 0) { processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); } else { processor_id = ideal_core; diff --git a/src/core/hle/kernel/thread.h 
b/src/core/hle/kernel/thread.h index 8daf79fac..21b22ca45 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -470,7 +470,7 @@ public:
 
     bool InvokeHLECallback(std::shared_ptr<Thread> thread);
 
-    u32 GetIdealCore() const {
+    s32 GetIdealCore() const {
         return ideal_core;
     }
 
@@ -654,8 +654,8 @@ private:
 
     Scheduler* scheduler = nullptr;
 
-    u32 ideal_core{0xFFFFFFFF};
-    u64 affinity_mask{0x1};
+    s32 ideal_core = -1;
+    u64 affinity_mask = 1;
 
     s32 ideal_core_override = -1;
     u64 affinity_mask_override = 0x1;
--
cgit v1.2.3


From fdd91540695594c4b015f234325a950a5e6566e9 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Tue, 20 Oct 2020 20:22:33 -0400
Subject: kernel: Fix build with recent compiler flag changes

This slipped through the cracks due to another change being merged before
the compiler flag changes.
---
 src/core/hle/kernel/kernel.cpp | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ed30854ee..56e14da6b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -194,7 +194,8 @@ struct KernelCore::Impl {
         if (!is_multicore) {
             single_core_thread_id = this_id;
         }
-        const auto end = register_host_thread_keys.begin() + num_host_threads;
+        const auto end =
+            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
         const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
         ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
         ASSERT(it == end);
@@ -205,7 +206,8 @@ struct KernelCore::Impl {
 
     void RegisterHostThread() {
         const std::thread::id this_id = std::this_thread::get_id();
-        const auto end = register_host_thread_keys.begin() + num_host_threads;
+        const auto end =
+            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
         const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
         if (it == end) {
             InsertHostThread(registered_thread_ids++);
@@ -224,12 +226,14 @@ struct KernelCore::Impl {
         if (!is_multicore && single_core_thread_id == this_id) {
             return static_cast<u32>(system.GetCpuManager().CurrentCore());
         }
-        const auto end = register_host_thread_keys.begin() + num_host_threads;
+        const auto end =
+            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
         const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
         if (it == end) {
             return Core::INVALID_HOST_THREAD_ID;
         }
-        return register_host_thread_values[std::distance(register_host_thread_keys.begin(), it)];
+        return register_host_thread_values[static_cast<size_t>(
+            std::distance(register_host_thread_keys.begin(), it))];
     }
 
     Core::EmuThreadHandle GetCurrentEmuThreadID() const {
--
cgit v1.2.3


From 3d592972dc3fd61cc88771b889eff237e4e03e0f Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 20 Oct 2020 19:07:39 -0700
Subject: Revert "core: Fix clang build"
---
 src/core/hle/kernel/address_arbiter.cpp           |  4 +-
 src/core/hle/kernel/handle_table.cpp              |  2 +-
 src/core/hle/kernel/hle_ipc.cpp                   |  2 +-
 src/core/hle/kernel/kernel.cpp                    |  2 +-
 src/core/hle/kernel/memory/address_space_info.cpp |  2 -
 src/core/hle/kernel/memory/memory_manager.cpp     |  4 +-
 src/core/hle/kernel/memory/page_heap.cpp          | 17 +++---
 src/core/hle/kernel/memory/page_heap.h            | 18 +++---
 src/core/hle/kernel/memory/page_table.cpp         |  6 +-
 src/core/hle/kernel/physical_core.h               |  2 +-
 src/core/hle/kernel/process.cpp                   | 17 +++---
 src/core/hle/kernel/resource_limit.cpp            |  4 +-
 src/core/hle/kernel/scheduler.cpp                 | 72 +++++++----------------
 src/core/hle/kernel/svc.cpp                       |  7 +--
src/core/hle/kernel/svc_wrap.h | 17 +++--- src/core/hle/kernel/synchronization.cpp | 4 +- src/core/hle/kernel/thread.cpp | 2 +- src/core/hle/kernel/thread.h | 6 +- 18 files changed, 74 insertions(+), 114 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index b6ebc5329..b882eaa0f 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a auto& monitor = system.Monitor(); s32 updated_value; do { - updated_value = static_cast(monitor.ExclusiveRead32(current_core, address)); + updated_value = monitor.ExclusiveRead32(current_core, address); if (updated_value != value) { return ERR_INVALID_STATE; @@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a updated_value = value; } } - } while (!monitor.ExclusiveWrite32(current_core, address, static_cast(updated_value))); + } while (!monitor.ExclusiveWrite32(current_core, address, updated_value)); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index fe4988f84..3e745c18b 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -68,7 +68,7 @@ ResultVal HandleTable::Create(std::shared_ptr obj) { generations[slot] = generation; objects[slot] = std::move(obj); - const auto handle = static_cast(generation | static_cast(slot << 15)); + Handle handle = generation | (slot << 15); return MakeResult(handle); } diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 0a2de4270..81f85643b 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -58,7 +58,7 @@ std::shared_ptr HLERequestContext::SleepClientThread( { Handle event_handle = InvalidHandle; - SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast(timeout)); + SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); thread->SetHLECallback( [context = *this, callback](std::shared_ptr thread) mutable -> bool { ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 56e14da6b..b2b5b8adf 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -171,7 +171,7 @@ struct KernelCore::Impl { const auto type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND); auto thread_res = - Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast(i), 0, + Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast(i), 0, nullptr, std::move(init_func), init_func_parameter); suspend_threads[i] = std::move(thread_res).Unwrap(); diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp index 6cf43ba24..e4288cab4 100644 --- a/src/core/hle/kernel/memory/address_space_info.cpp +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -96,7 +96,6 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) { return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; } UNREACHABLE(); - return 0; } std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { @@ -113,7 +112,6 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) return 
AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; } UNREACHABLE(); - return 0; } } // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp index a96157c37..acf13585c 100644 --- a/src/core/hle/kernel/memory/memory_manager.cpp +++ b/src/core/hle/kernel/memory/memory_manager.cpp @@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align } // If we allocated more than we need, free some - const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast(heap_index))}; + const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)}; if (allocated_pages > num_pages) { chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); } @@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa // Keep allocating until we've allocated all our pages for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { - const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast(index))}; + const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)}; while (num_pages >= pages_per_alloc) { // Allocate a block diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp index 7890b8c1a..0ab1f7205 100644 --- a/src/core/hle/kernel/memory/page_heap.cpp +++ b/src/core/hle/kernel/memory/page_heap.cpp @@ -33,12 +33,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_ } VAddr PageHeap::AllocateBlock(s32 index) { - const auto u_index = static_cast(index); - const auto needed_size{blocks[u_index].GetSize()}; + const std::size_t needed_size{blocks[index].GetSize()}; - for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) { - if (const VAddr addr = blocks[i].PopBlock(); addr != 0) { - if (const std::size_t allocated_size = blocks[i].GetSize(); + for (s32 i{index}; i < static_cast(MemoryBlockPageShifts.size()); i++) { + if (const VAddr addr{blocks[i].PopBlock()}; addr) { + if (const std::size_t allocated_size{blocks[i].GetSize()}; allocated_size > needed_size) { Free(addr + needed_size, (allocated_size - needed_size) / PageSize); } @@ -51,7 +50,7 @@ VAddr PageHeap::AllocateBlock(s32 index) { void PageHeap::FreeBlock(VAddr block, s32 index) { do { - block = blocks[static_cast(index++)].PushBlock(block); + block = blocks[index++].PushBlock(block); } while (block != 0); } @@ -70,7 +69,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { VAddr after_start{end}; VAddr after_end{end}; while (big_index >= 0) { - const std::size_t block_size{blocks[static_cast(big_index)].GetSize()}; + const std::size_t block_size{blocks[big_index].GetSize()}; const VAddr big_start{Common::AlignUp((start), block_size)}; const VAddr big_end{Common::AlignDown((end), block_size)}; if (big_start < big_end) { @@ -88,7 +87,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { // Free space before the big blocks for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[static_cast(i)].GetSize()}; + const std::size_t block_size{blocks[i].GetSize()}; while (before_start + block_size <= before_end) { before_end -= block_size; FreeBlock(before_end, i); @@ -97,7 +96,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) { // Free space after the big blocks for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[static_cast(i)].GetSize()}; + const std::size_t block_size{blocks[i].GetSize()}; while 
(after_start + block_size <= after_end) { FreeBlock(after_start, i); after_start += block_size; diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h index 92a2bce04..22b0de860 100644 --- a/src/core/hle/kernel/memory/page_heap.h +++ b/src/core/hle/kernel/memory/page_heap.h @@ -34,9 +34,7 @@ public: static constexpr s32 GetBlockIndex(std::size_t num_pages) { for (s32 i{static_cast(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { - const auto shift_index = static_cast(i); - if (num_pages >= - (static_cast(1) << MemoryBlockPageShifts[shift_index]) / PageSize) { + if (num_pages >= (static_cast(1) << MemoryBlockPageShifts[i]) / PageSize) { return i; } } @@ -88,7 +86,7 @@ private: // Set the bitmap pointers for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) { - bit_storages[static_cast(depth)] = storage; + bit_storages[depth] = storage; size = Common::AlignUp(size, 64) / 64; storage += size; } @@ -101,7 +99,7 @@ private: s32 depth{}; do { - const u64 v{bit_storages[static_cast(depth)][offset]}; + const u64 v{bit_storages[depth][offset]}; if (v == 0) { // Non-zero depth indicates that a previous level had a free block ASSERT(depth == 0); @@ -127,7 +125,7 @@ private: constexpr bool ClearRange(std::size_t offset, std::size_t count) { const s32 depth{GetHighestDepthIndex()}; const auto bit_ind{offset / 64}; - u64* bits{bit_storages[static_cast(depth)]}; + u64* bits{bit_storages[depth]}; if (count < 64) { const auto shift{offset % 64}; ASSERT(shift + count <= 64); @@ -179,11 +177,11 @@ private: const auto which{offset % 64}; const u64 mask{1ULL << which}; - u64* bit{std::addressof(bit_storages[static_cast(depth)][ind])}; + u64* bit{std::addressof(bit_storages[depth][ind])}; const u64 v{*bit}; ASSERT((v & mask) == 0); *bit = v | mask; - if (v != 0) { + if (v) { break; } offset = ind; @@ -197,12 +195,12 @@ private: const auto which{offset % 64}; const u64 mask{1ULL << which}; - u64* bit{std::addressof(bit_storages[static_cast(depth)][ind])}; + u64* bit{std::addressof(bit_storages[depth][ind])}; u64 v{*bit}; ASSERT((v & mask) != 0); v &= ~mask; *bit = v; - if (v != 0) { + if (v) { break; } offset = ind; diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp index 4f759d078..a3fadb533 100644 --- a/src/core/hle/kernel/memory/page_table.cpp +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -414,8 +414,7 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { const std::size_t remaining_pages{remaining_size / PageSize}; if (process->GetResourceLimit() && - !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, - static_cast(remaining_size))) { + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) { return ERR_RESOURCE_LIMIT_EXCEEDED; } @@ -779,8 +778,7 @@ ResultVal PageTable::SetHeapSize(std::size_t size) { auto process{system.Kernel().CurrentProcess()}; if (process->GetResourceLimit() && delta != 0 && - !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, - static_cast(delta))) { + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) { return ERR_RESOURCE_LIMIT_EXCEEDED; } diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index 6cb59d0fc..d7a7a951c 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -34,7 +34,7 @@ public: PhysicalCore& operator=(const PhysicalCore&) = delete; PhysicalCore(PhysicalCore&&) = default; - PhysicalCore& 
operator=(PhysicalCore&&) = delete; + PhysicalCore& operator=(PhysicalCore&&) = default; void Idle(); /// Interrupt this physical core. diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 0b39f2955..ff9d9248b 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -137,10 +137,9 @@ std::shared_ptr Process::GetResourceLimit() const { } u64 Process::GetTotalPhysicalMemoryAvailable() const { - const u64 capacity{ - static_cast(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) + - page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + - main_thread_stack_size}; + const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) + + page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + + main_thread_stack_size}; if (capacity < memory_usage_capacity) { return capacity; @@ -280,12 +279,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, // Set initial resource limits resource_limit->SetLimitValue( ResourceType::PhysicalMemory, - static_cast(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application))); + kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)); resource_limit->SetLimitValue(ResourceType::Threads, 608); resource_limit->SetLimitValue(ResourceType::Events, 700); resource_limit->SetLimitValue(ResourceType::TransferMemory, 128); resource_limit->SetLimitValue(ResourceType::Sessions, 894); - ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast(code_size))); + ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size)); // Create TLS region tls_region_address = CreateTLSRegion(); @@ -301,9 +300,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { ChangeStatus(ProcessStatus::Running); - SetupMainThread(system, *this, static_cast(main_thread_priority), main_thread_stack_top); + SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); resource_limit->Reserve(ResourceType::Threads, 1); - resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast(main_thread_stack_size)); + resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); } void Process::PrepareForTermination() { @@ -364,7 +363,7 @@ VAddr Process::CreateTLSRegion() { ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize, Memory::MemoryState::ThreadLocal, Memory::MemoryPermission::ReadAndWrite, tls_map_addr) - .ValueOr(0U)}; + .ValueOr(0)}; ASSERT(tls_page_addr); diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index e94093f24..212e442f4 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) { void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) { const std::size_t index{ResourceTypeToIndex(resource)}; - current[index] -= static_cast(used_amount); - available[index] -= static_cast(available_amount); + current[index] -= used_amount; + available[index] -= available_amount; } std::shared_ptr ResourceLimit::Create(KernelCore& kernel) { diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 4a9a762f3..6b7db5372 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -89,11 +89,9 @@ u32 GlobalScheduler::SelectThreads() { while (iter != 
suggested_queue[core_id].end()) { suggested = *iter; iter++; - const s32 suggested_core_id = suggested->GetProcessorID(); - Thread* top_thread = suggested_core_id >= 0 - ? top_threads[static_cast(suggested_core_id)] - : nullptr; - + s32 suggested_core_id = suggested->GetProcessorID(); + Thread* top_thread = + suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr; if (top_thread != suggested) { if (top_thread != nullptr && top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) { @@ -104,19 +102,16 @@ u32 GlobalScheduler::SelectThreads() { TransferToCore(suggested->GetPriority(), static_cast(core_id), suggested); break; } - suggested = nullptr; migration_candidates[num_candidates++] = suggested_core_id; } - // Step 3: Select a suggested thread from another core if (suggested == nullptr) { for (std::size_t i = 0; i < num_candidates; i++) { - const auto candidate_core = static_cast(migration_candidates[i]); + s32 candidate_core = migration_candidates[i]; suggested = top_threads[candidate_core]; auto it = scheduled_queue[candidate_core].begin(); - ++it; - + it++; Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr; if (next != nullptr) { TransferToCore(suggested->GetPriority(), static_cast(core_id), @@ -133,8 +128,7 @@ u32 GlobalScheduler::SelectThreads() { idle_cores &= ~(1U << core_id); } - - u32 cores_needing_context_switch = 0; + u32 cores_needing_context_switch{}; for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { Scheduler& sched = kernel.Scheduler(core); ASSERT(top_threads[core] == nullptr || @@ -192,16 +186,13 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { for (auto& thread : suggested_queue[core_id]) { const s32 source_core = thread->GetProcessorID(); if (source_core >= 0) { - const auto sanitized_source_core = static_cast(source_core); - - if (current_threads[sanitized_source_core] != nullptr) { - if (thread == current_threads[sanitized_source_core] || - current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) { + if (current_threads[source_core] != nullptr) { + if (thread == current_threads[source_core] || + current_threads[source_core]->GetPriority() < min_regular_priority) { continue; } } } - if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || next_thread->GetPriority() < thread->GetPriority()) { if (thread->GetPriority() <= priority) { @@ -249,25 +240,17 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread for (std::size_t i = 0; i < current_threads.size(); i++) { current_threads[i] = scheduled_queue[i].empty() ? 
nullptr : scheduled_queue[i].front(); } - for (auto& thread : suggested_queue[core_id]) { const s32 source_core = thread->GetProcessorID(); - if (source_core < 0) { - continue; - } - - const auto sanitized_source_core = static_cast(source_core); - if (thread == current_threads[sanitized_source_core]) { + if (source_core < 0 || thread == current_threads[source_core]) { continue; } - - if (current_threads[sanitized_source_core] == nullptr || - current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) { + if (current_threads[source_core] == nullptr || + current_threads[source_core]->GetPriority() >= min_regular_priority) { winner = thread; } break; } - if (winner != nullptr) { if (winner != yielding_thread) { TransferToCore(winner->GetPriority(), static_cast(core_id), winner); @@ -309,22 +292,17 @@ void GlobalScheduler::PreemptThreads() { if (thread->GetPriority() != priority) { continue; } - if (source_core >= 0) { - const auto sanitized_source_core = static_cast(source_core); - Thread* next_thread = scheduled_queue[sanitized_source_core].empty() + Thread* next_thread = scheduled_queue[source_core].empty() ? nullptr - : scheduled_queue[sanitized_source_core].front(); - + : scheduled_queue[source_core].front(); if (next_thread != nullptr && next_thread->GetPriority() < 2) { break; } - if (next_thread == thread) { continue; } } - if (current_thread != nullptr && current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { winner = thread; @@ -344,22 +322,17 @@ void GlobalScheduler::PreemptThreads() { if (thread->GetPriority() < priority) { continue; } - if (source_core >= 0) { - const auto sanitized_source_core = static_cast(source_core); - Thread* next_thread = scheduled_queue[sanitized_source_core].empty() + Thread* next_thread = scheduled_queue[source_core].empty() ? 
nullptr - : scheduled_queue[sanitized_source_core].front(); - + : scheduled_queue[source_core].front(); if (next_thread != nullptr && next_thread->GetPriority() < 2) { break; } - if (next_thread == thread) { continue; } } - if (current_thread != nullptr && current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { winner = thread; @@ -379,11 +352,11 @@ void GlobalScheduler::PreemptThreads() { void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, Core::EmuThreadHandle global_thread) { - const u32 current_core = global_thread.host_handle; + u32 current_core = global_thread.host_handle; bool must_context_switch = global_thread.guest_handle != InvalidHandle && (current_core < Core::Hardware::NUM_CPU_CORES); while (cores_pending_reschedule != 0) { - const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); + u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); ASSERT(core < Core::Hardware::NUM_CPU_CORES); if (!must_context_switch || core != current_core) { auto& phys_core = kernel.PhysicalCore(core); @@ -393,7 +366,6 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, } cores_pending_reschedule &= ~(1U << core); } - if (must_context_switch) { auto& core_scheduler = kernel.CurrentScheduler(); kernel.ExitSVCProfile(); @@ -831,11 +803,9 @@ void Scheduler::Initialize() { std::string name = "Idle Thread Id:" + std::to_string(core_id); std::function init_func = Core::CpuManager::GetIdleThreadStartFunc(); void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - const auto type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); - auto thread_res = - Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast(core_id), 0, - nullptr, std::move(init_func), init_func_parameter); - + ThreadType type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); + auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast(core_id), 0, + nullptr, std::move(init_func), init_func_parameter); idle_thread = std::move(thread_res).Unwrap(); } diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index b8623e831..bafd1ced7 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -482,8 +482,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, s32 handle_count, u32 timeout_high, Handle* index) { const s64 nano_seconds{(static_cast(timeout_high) << 32) | static_cast(timeout_low)}; - return WaitSynchronization(system, index, handles_address, static_cast(handle_count), - nano_seconds); + return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); } /// Resumes a thread waiting on WaitSynchronization @@ -2003,7 +2002,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, return ERR_INVALID_HANDLE; } - *core = static_cast(thread->GetIdealCore()); + *core = thread->GetIdealCore(); *mask = thread->GetAffinityMask(); return RESULT_SUCCESS; @@ -2071,7 +2070,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, return ERR_INVALID_HANDLE; } - return thread->SetCoreAndAffinityMask(static_cast(core), affinity_mask); + return thread->SetCoreAndAffinityMask(core, affinity_mask); } static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, diff --git 
a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 9284a4c84..0b6dd9df0 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -11,11 +11,11 @@ namespace Kernel { -static inline u64 Param(const Core::System& system, std::size_t n) { +static inline u64 Param(const Core::System& system, int n) { return system.CurrentArmInterface().GetReg(n); } -static inline u32 Param32(const Core::System& system, std::size_t n) { +static inline u32 Param32(const Core::System& system, int n) { return static_cast(system.CurrentArmInterface().GetReg(n)); } @@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) { } static inline void FuncReturn32(Core::System& system, u32 result) { - system.CurrentArmInterface().SetReg(0, static_cast(result)); + system.CurrentArmInterface().SetReg(0, (u64)result); } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -386,10 +386,9 @@ template void SvcWrap32(Core::System& system) { Handle param_1 = 0; - const u32 retval = - func(system, ¶m_1, Param32(system, 0), Param32(system, 1), Param32(system, 2), - Param32(system, 3), static_cast(Param32(system, 4))) - .raw; + const u32 retval = func(system, ¶m_1, Param32(system, 0), Param32(system, 1), + Param32(system, 2), Param32(system, 3), Param32(system, 4)) + .raw; system.CurrentArmInterface().SetReg(1, param_1); FuncReturn(system, retval); @@ -543,8 +542,8 @@ void SvcWrap32(Core::System& system) { template void SvcWrap32(Core::System& system) { u32 param_1 = 0; - const u32 retval = func(system, Param32(system, 0), Param32(system, 1), - static_cast(Param32(system, 2)), Param32(system, 3), ¶m_1) + const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2), + Param32(system, 3), ¶m_1) .raw; system.CurrentArmInterface().SetReg(1, param_1); FuncReturn(system, retval); diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 653f722b3..8b875d853 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -51,7 +51,7 @@ std::pair Synchronization::WaitFor( // We found a ready object, acquire it and set the result value SynchronizationObject* object = itr->get(); object->Acquire(thread); - const auto index = static_cast(std::distance(sync_objects.begin(), itr)); + const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); lock.CancelSleep(); return {RESULT_SUCCESS, index}; } @@ -105,7 +105,7 @@ std::pair Synchronization::WaitFor( }); ASSERT(itr != sync_objects.end()); signaling_object->Acquire(thread); - const auto index = static_cast(std::distance(sync_objects.begin(), itr)); + const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); return {signaling_result, index}; } return {signaling_result, -1}; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 323e740e9..d132aba34 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { if (old_affinity_mask != new_affinity_mask) { const s32 old_core = processor_id; if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { - if (ideal_core < 0) { + if (static_cast(ideal_core) < 0) { processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); } else { processor_id = ideal_core; diff --git a/src/core/hle/kernel/thread.h 
b/src/core/hle/kernel/thread.h index 21b22ca45..8daf79fac 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -470,7 +470,7 @@ public: bool InvokeHLECallback(std::shared_ptr thread); - s32 GetIdealCore() const { + u32 GetIdealCore() const { return ideal_core; } @@ -654,8 +654,8 @@ private: Scheduler* scheduler = nullptr; - s32 ideal_core = -1; - u64 affinity_mask = 1; + u32 ideal_core{0xFFFFFFFF}; + u64 affinity_mask{0x1}; s32 ideal_core_override = -1; u64 affinity_mask_override = 0x1; -- cgit v1.2.3 From 8bd246032af3e93e05535d8cc2e4ad312a664050 Mon Sep 17 00:00:00 2001 From: lat9nq Date: Mon, 26 Oct 2020 21:42:11 -0400 Subject: kernel: Use the current time as the default RNG seed Use the current time, not zero, as the default RNG seed. --- src/core/hle/kernel/process.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index ff9d9248b..4cc77c635 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -123,7 +123,7 @@ std::shared_ptr Process::Create(Core::System& system, std::string name, : kernel.CreateNewUserProcessID(); process->capabilities.InitializeForMetadatalessProcess(); - std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(0)); + std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); std::uniform_int_distribution distribution; std::generate(process->random_entropy.begin(), process->random_entropy.end(), [&] { return distribution(rng); }); -- cgit v1.2.3 From ce69ff2890e0a3a34ba6b80af6b3d60811c5f7ea Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Tue, 27 Oct 2020 01:55:33 -0300 Subject: hle/kernel: Remove unused registered_core_threads to fix data races This member was only used on asserts and it triggered data races. Remove it to fix them. 
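For illustration, a minimal standalone sketch (hypothetical names, not the emulator's code) of why an assert-only bitset shared between host threads is a data race: std::bitset packs all of its bits into one underlying word, so even registering *different* cores from different threads performs concurrent, unsynchronized read-modify-writes on the same memory location.

    #include <bitset>
    #include <cstddef>
    #include <thread>

    // Plain bitset: set() is a read-modify-write on a shared word,
    // with no atomicity or ordering guarantees.
    static std::bitset<4> registered_cores;

    static void RegisterCoreThread(std::size_t core_id) {
        // ASSERT(!registered_cores[core_id]);  // unsynchronized read...
        registered_cores.set(core_id);          // ...then unsynchronized write
    }

    int main() {
        std::thread a(RegisterCoreThread, 0);
        std::thread b(RegisterCoreThread, 1); // different bit, same word: a race
        a.join();
        b.join();
        return 0;
    }

Since the bits were only consulted by asserts, deleting the member is simpler than making it atomic.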
--- src/core/hle/kernel/kernel.cpp | 5 ----- 1 file changed, 5 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index b2b5b8adf..bb3e312a7 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -86,8 +86,6 @@ struct KernelCore::Impl { } cores.clear(); - registered_core_threads.reset(); - process_list.clear(); current_process = nullptr; @@ -199,9 +197,7 @@ struct KernelCore::Impl { const auto it = std::find(register_host_thread_keys.begin(), end, this_id); ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); ASSERT(it == end); - ASSERT(!registered_core_threads[core_id]); InsertHostThread(static_cast(core_id)); - registered_core_threads.set(core_id); } void RegisterHostThread() { @@ -332,7 +328,6 @@ struct KernelCore::Impl { // 0-3 IDs represent core threads, >3 represent others std::atomic registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; - std::bitset registered_core_threads; // Number of host threads is a relatively high number to avoid overflowing static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 64; -- cgit v1.2.3 From 9cfc5fee2fe28f2d7f9da45fbe01cbebb03069f1 Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Thu, 29 Oct 2020 03:17:20 -0400 Subject: kernel/process: Add missing include Fixes compilation on MSVC --- src/core/hle/kernel/process.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 4cc77c635..b17529dee 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include "common/alignment.h" -- cgit v1.2.3 From fc6db97a09e2de5eff10131ddcab9cf8fb2f736c Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 3 Nov 2020 19:54:53 -0500 Subject: core: Remove usage of unicorn Unicorn long-since lost most of its use, due to dynarmic gaining support for handling most instructions. At this point any further issues encountered should be used to make dynarmic better. This also allows us to remove our dependency on Python. --- src/core/hle/kernel/physical_core.cpp | 16 ++-------------- src/core/hle/kernel/thread.cpp | 17 +++-------------- 2 files changed, 5 insertions(+), 28 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index c6bbdb080..6e04d025f 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp @@ -2,30 +2,18 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
-#include "common/assert.h" -#include "common/logging/log.h" #include "common/spin_lock.h" -#include "core/arm/arm_interface.h" -#ifdef ARCHITECTURE_x86_64 -#include "core/arm/dynarmic/arm_dynarmic_32.h" -#include "core/arm/dynarmic/arm_dynarmic_64.h" -#endif #include "core/arm/cpu_interrupt_handler.h" -#include "core/arm/exclusive_monitor.h" -#include "core/arm/unicorn/arm_unicorn.h" #include "core/core.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/scheduler.h" -#include "core/hle/kernel/thread.h" namespace Kernel { PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, Core::CPUInterruptHandler& interrupt_handler) - : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} { - - guard = std::make_unique(); -} + : interrupt_handler{interrupt_handler}, + core_index{id}, scheduler{scheduler}, guard{std::make_unique()} {} PhysicalCore::~PhysicalCore() = default; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index d132aba34..da0cb26b6 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -13,7 +13,6 @@ #include "common/logging/log.h" #include "common/thread_queue_list.h" #include "core/arm/arm_interface.h" -#include "core/arm/unicorn/arm_unicorn.h" #include "core/core.h" #include "core/cpu_manager.h" #include "core/hardware_properties.h" @@ -217,8 +216,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy } else { thread->tls_address = 0; } - // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used - // to initialize the context + thread->arm_interface.reset(); if ((type_flags & THREADTYPE_HLE) == 0) { #ifdef ARCHITECTURE_x86_64 @@ -231,19 +229,10 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(), processor_id); } - #else - if (owner_process && !owner_process->Is64BitProcess()) { - thread->arm_interface = std::make_shared( - system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32, - processor_id); - } else { - thread->arm_interface = std::make_shared( - system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64, - processor_id); - } - LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); +#error Platform not supported yet. #endif + ResetThreadContext32(thread->context_32, static_cast(stack_top), static_cast(entry_point), static_cast(arg)); ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); -- cgit v1.2.3 From da7be67dafc90c84529304cfef57dfa5f9291017 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Sun, 8 Nov 2020 15:49:45 -0500 Subject: ipc_helpers: Remove usage of the global system instance Resolves numerous deprecation warnings throughout the codebase due to inclusion of this header. Now building core should be significantly less noisy (and also relying on less global state). This also uncovered quite a few modules that were relying on indirect includes, which have also been fixed. 
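The hle_ipc.h hunk below uses a standard trick to grant access without pulling in another header: forward-declare the foreign class, then befriend it. A compressed sketch of the idiom (all names here are hypothetical):

    // Header side: no #include of the builder's header is required.
    namespace IPC {
    class ResponseBuilder; // forward declaration is enough for a friend
    }

    namespace Service {
    class RequestContext {
    public:
        explicit RequestContext(int id_) : id{id_} {}

    private:
        friend class IPC::ResponseBuilder; // access granted, include avoided
        int id;
    };
    } // namespace Service

    int main() {
        Service::RequestContext ctx{42};
        (void)ctx;
        return 0;
    }
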
--- src/core/hle/kernel/hle_ipc.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index f3277b766..c31a65476 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -24,6 +24,10 @@ namespace Core::Memory { class Memory; } +namespace IPC { +class ResponseBuilder; +} + namespace Service { class ServiceFrameworkBase; } @@ -287,6 +291,8 @@ public: } private: + friend class IPC::ResponseBuilder; + void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming); std::array cmd_buf; -- cgit v1.2.3 From 874be0e3e167626904d227a4ae55e9b38612b3ee Mon Sep 17 00:00:00 2001 From: Lioncash Date: Mon, 23 Nov 2020 10:17:18 -0500 Subject: svc: Remove unnecessary [[maybe_unused]] tag The parameter is used in this function, so this suppression isn't necessary. --- src/core/hle/kernel/svc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index bafd1ced7..e3b770d66 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -681,7 +681,7 @@ static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) { } /// Used to output a message on a debug hardware unit - does nothing on a retail unit -static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { +static void OutputDebugString(Core::System& system, VAddr address, u64 len) { if (len == 0) { return; } -- cgit v1.2.3 From 7b642c77811dc3887756f5abac5a9710564b098e Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 11:11:12 -0800 Subject: hle: kernel: multicore: Replace n-JITs impl. with 4 JITs. 
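The structural idea, before the diff: stop giving every guest thread its own JIT instance and keep exactly one per physical core, with the scheduler saving and restoring guest contexts into that fixed instance on every switch. A rough sketch of the ownership model (illustrative stand-in types, not the real interfaces):

    #include <array>
    #include <memory>

    struct ThreadContext {}; // stand-in for the saved guest registers

    struct Jit { // stand-in for a recompiler instance
        void SaveContext(ThreadContext&) {}
        void LoadContext(const ThreadContext&) {}
    };

    struct GuestThread {
        ThreadContext ctx; // the context travels with the thread...
    };

    struct PhysicalCore {
        std::unique_ptr<Jit> jit = std::make_unique<Jit>(); // ...the JIT stays with the core

        void Switch(GuestThread* prev, GuestThread* next) {
            if (prev != nullptr) {
                jit->SaveContext(prev->ctx); // park the outgoing thread's state
            }
            if (next != nullptr) {
                jit->LoadContext(next->ctx); // resume the incoming thread's state
            }
        }
    };

    int main() {
        std::array<PhysicalCore, 4> cores; // a fixed set of four JITs
        GuestThread a, b;
        cores[0].Switch(nullptr, &a);
        cores[0].Switch(&a, &b);
        return 0;
    }
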
--- src/core/hle/kernel/kernel.cpp | 23 +++++++++++++++------ src/core/hle/kernel/kernel.h | 3 +++ src/core/hle/kernel/physical_core.cpp | 38 +++++++++++++++++++++++++++-------- src/core/hle/kernel/physical_core.h | 29 +++++++++++++++++++++----- src/core/hle/kernel/scheduler.cpp | 20 ++++++++++-------- src/core/hle/kernel/svc.cpp | 9 +++++++++ src/core/hle/kernel/thread.cpp | 27 ++----------------------- src/core/hle/kernel/thread.h | 5 ----- 8 files changed, 97 insertions(+), 57 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index bb3e312a7..4cf9cee42 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -68,6 +68,12 @@ struct KernelCore::Impl { InitializeSuspendThreads(); } + void InitializeCores() { + for (auto& core : cores) { + core.Initialize(current_process->Is64BitProcess()); + } + } + void Shutdown() { next_object_id = 0; next_kernel_process_id = Process::InitialKIPIDMin; @@ -116,7 +122,7 @@ struct KernelCore::Impl { Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { schedulers[i] = std::make_unique(system, i); - cores.emplace_back(system, i, *schedulers[i], interrupts[i]); + cores.emplace_back(i, system, *schedulers[i], interrupts); } } @@ -181,6 +187,7 @@ struct KernelCore::Impl { if (process == nullptr) { return; } + const u32 core_id = GetCurrentHostThreadID(); if (core_id < Core::Hardware::NUM_CPU_CORES) { system.Memory().SetCurrentPageTable(*process, core_id); @@ -372,6 +379,10 @@ void KernelCore::Initialize() { impl->Initialize(*this); } +void KernelCore::InitializeCores() { + impl->InitializeCores(); +} + void KernelCore::Shutdown() { impl->Shutdown(); } @@ -486,12 +497,12 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { } void KernelCore::InvalidateAllInstructionCaches() { - auto& threads = GlobalScheduler().GetThreadList(); - for (auto& thread : threads) { - if (!thread->IsHLEThread()) { - auto& arm_interface = thread->ArmInterface(); - arm_interface.ClearInstructionCache(); + if (!IsMulticore()) { + for (auto& physical_core : impl->cores) { + physical_core.ArmInterface().ClearInstructionCache(); } + } else { + ASSERT_MSG(false, "UNIMPLEMENTED!!!!!!!!!!!"); } } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 16285c3f0..a9fdc5860 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -74,6 +74,9 @@ public: /// Resets the kernel to a clean slate for use. void Initialize(); + /// Initializes the CPU cores. + void InitializeCores(); + /// Clears all resources in use by the kernel instance. 
void Shutdown(); diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index 6e04d025f..50aca5752 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp @@ -4,21 +4,43 @@ #include "common/spin_lock.h" #include "core/arm/cpu_interrupt_handler.h" +#include "core/arm/dynarmic/arm_dynarmic_32.h" +#include "core/arm/dynarmic/arm_dynarmic_64.h" #include "core/core.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/scheduler.h" namespace Kernel { -PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, - Core::CPUInterruptHandler& interrupt_handler) - : interrupt_handler{interrupt_handler}, - core_index{id}, scheduler{scheduler}, guard{std::make_unique()} {} +PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, + Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts) + : core_index{core_index}, system{system}, scheduler{scheduler}, + interrupts{interrupts}, guard{std::make_unique()} {} PhysicalCore::~PhysicalCore() = default; +void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { +#ifdef ARCHITECTURE_x86_64 + auto& kernel = system.Kernel(); + if (is_64_bit) { + arm_interface = std::make_unique( + system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); + } else { + arm_interface = std::make_unique( + system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); + } +#else +#error Platform not supported yet. +#endif +} + +void PhysicalCore::Run() { + arm_interface->Run(); +} + void PhysicalCore::Idle() { - interrupt_handler.AwaitInterrupt(); + interrupts[core_index].AwaitInterrupt(); } void PhysicalCore::Shutdown() { @@ -26,18 +48,18 @@ void PhysicalCore::Shutdown() { } bool PhysicalCore::IsInterrupted() const { - return interrupt_handler.IsInterrupted(); + return interrupts[core_index].IsInterrupted(); } void PhysicalCore::Interrupt() { guard->lock(); - interrupt_handler.SetInterrupt(true); + interrupts[core_index].SetInterrupt(true); guard->unlock(); } void PhysicalCore::ClearInterrupt() { guard->lock(); - interrupt_handler.SetInterrupt(false); + interrupts[core_index].SetInterrupt(false); guard->unlock(); } diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index d7a7a951c..ace058a5a 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -4,9 +4,12 @@ #pragma once +#include #include #include +#include "core/arm/arm_interface.h" + namespace Common { class SpinLock; } @@ -16,7 +19,6 @@ class Scheduler; } // namespace Kernel namespace Core { -class ARM_Interface; class CPUInterruptHandler; class ExclusiveMonitor; class System; @@ -26,8 +28,8 @@ namespace Kernel { class PhysicalCore { public: - PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, - Core::CPUInterruptHandler& interrupt_handler); + PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler, + Core::CPUInterrupts& interrupts); ~PhysicalCore(); PhysicalCore(const PhysicalCore&) = delete; @@ -36,7 +38,14 @@ public: PhysicalCore(PhysicalCore&&) = default; PhysicalCore& operator=(PhysicalCore&&) = default; + /// Initialize the core for the specified parameters. + void Initialize(bool is_64_bit); + + /// Execute current jit state + void Run(); + void Idle(); + /// Interrupt this physical core. 
void Interrupt(); @@ -49,6 +58,14 @@ public: // Shutdown this physical core. void Shutdown(); + Core::ARM_Interface& ArmInterface() { + return *arm_interface; + } + + const Core::ARM_Interface& ArmInterface() const { + return *arm_interface; + } + bool IsMainCore() const { return core_index == 0; } @@ -70,10 +87,12 @@ public: } private: - Core::CPUInterruptHandler& interrupt_handler; - std::size_t core_index; + const std::size_t core_index; + Core::System& system; Kernel::Scheduler& scheduler; + Core::CPUInterrupts& interrupts; std::unique_ptr guard; + std::unique_ptr arm_interface; }; } // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 6b7db5372..0805e9914 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -621,11 +621,14 @@ void Scheduler::OnThreadStart() { void Scheduler::Unload() { Thread* thread = current_thread.get(); if (thread) { - thread->SetContinuousOnSVC(false); thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); thread->SetIsRunning(false); + if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) { + system.ArmInterface(core_id).ExceptionalExit(); + thread->SetContinuousOnSVC(false); + } if (!thread->IsHLEThread() && !thread->HasExited()) { - Core::ARM_Interface& cpu_core = thread->ArmInterface(); + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.SaveContext(thread->GetContext32()); cpu_core.SaveContext(thread->GetContext64()); // Save the TPIDR_EL0 system register in case it was modified. @@ -652,12 +655,11 @@ void Scheduler::Reload() { system.Kernel().MakeCurrentProcess(thread_owner_process); } if (!thread->IsHLEThread()) { - Core::ARM_Interface& cpu_core = thread->ArmInterface(); + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.LoadContext(thread->GetContext32()); cpu_core.LoadContext(thread->GetContext64()); cpu_core.SetTlsAddress(thread->GetTLSAddress()); cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); - cpu_core.ChangeProcessorID(this->core_id); cpu_core.ClearExclusiveState(); } } @@ -679,12 +681,11 @@ void Scheduler::SwitchContextStep2() { system.Kernel().MakeCurrentProcess(thread_owner_process); } if (!selected_thread->IsHLEThread()) { - Core::ARM_Interface& cpu_core = selected_thread->ArmInterface(); + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.LoadContext(selected_thread->GetContext32()); cpu_core.LoadContext(selected_thread->GetContext64()); cpu_core.SetTlsAddress(selected_thread->GetTLSAddress()); cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0()); - cpu_core.ChangeProcessorID(this->core_id); cpu_core.ClearExclusiveState(); } } @@ -715,11 +716,14 @@ void Scheduler::SwitchContext() { if (new_thread != nullptr && new_thread->IsSuspendThread()) { previous_thread->SetWasRunning(true); } - previous_thread->SetContinuousOnSVC(false); previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); previous_thread->SetIsRunning(false); + if (previous_thread->IsContinuousOnSVC() && !previous_thread->IsHLEThread()) { + system.ArmInterface(core_id).ExceptionalExit(); + previous_thread->SetContinuousOnSVC(false); + } if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) { - Core::ARM_Interface& cpu_core = previous_thread->ArmInterface(); + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.SaveContext(previous_thread->GetContext32()); cpu_core.SaveContext(previous_thread->GetContext64()); // Save the TPIDR_EL0 system register in case it was 
modified. diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index e3b770d66..95d6e2b4d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -2639,6 +2639,9 @@ void Call(Core::System& system, u32 immediate) { auto& kernel = system.Kernel(); kernel.EnterSVCProfile(); + auto* thread = system.CurrentScheduler().GetCurrentThread(); + thread->SetContinuousOnSVC(true); + const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) : GetSVCInfo32(immediate); if (info) { @@ -2652,6 +2655,12 @@ void Call(Core::System& system, u32 immediate) { } kernel.ExitSVCProfile(); + + if (!thread->IsContinuousOnSVC()) { + auto* host_context = thread->GetHostContext().get(); + host_context->Rewind(); + } + system.EnterDynarmicProfile(); } diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index da0cb26b6..3abe12810 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -12,7 +12,6 @@ #include "common/fiber.h" #include "common/logging/log.h" #include "common/thread_queue_list.h" -#include "core/arm/arm_interface.h" #include "core/core.h" #include "core/cpu_manager.h" #include "core/hardware_properties.h" @@ -62,7 +61,6 @@ void Thread::Stop() { // Mark the TLS slot in the thread's page as free. owner_process->FreeTLSRegion(tls_address); } - arm_interface.reset(); has_exited = true; } global_handle = 0; @@ -217,22 +215,9 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->tls_address = 0; } - thread->arm_interface.reset(); + // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used + // to initialize the context if ((type_flags & THREADTYPE_HLE) == 0) { -#ifdef ARCHITECTURE_x86_64 - if (owner_process && !owner_process->Is64BitProcess()) { - thread->arm_interface = std::make_unique( - system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(), - processor_id); - } else { - thread->arm_interface = std::make_unique( - system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(), - processor_id); - } -#else -#error Platform not supported yet. 
-#endif - ResetThreadContext32(thread->context_32, static_cast(stack_top), static_cast(entry_point), static_cast(arg)); ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); @@ -268,14 +253,6 @@ VAddr Thread::GetCommandBufferAddress() const { return GetTLSAddress() + command_header_offset; } -Core::ARM_Interface& Thread::ArmInterface() { - return *arm_interface; -} - -const Core::ARM_Interface& Thread::ArmInterface() const { - return *arm_interface; -} - void Thread::SetStatus(ThreadStatus new_status) { if (new_status == status) { return; diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 8daf79fac..20e86fb81 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -248,10 +248,6 @@ public: void SetSynchronizationResults(SynchronizationObject* object, ResultCode result); - Core::ARM_Interface& ArmInterface(); - - const Core::ARM_Interface& ArmInterface() const; - SynchronizationObject* GetSignalingObject() const { return signaling_object; } @@ -586,7 +582,6 @@ private: Common::SpinLock context_guard{}; ThreadContext32 context_32{}; ThreadContext64 context_64{}; - std::unique_ptr arm_interface{}; std::shared_ptr host_context{}; u64 thread_id = 0; -- cgit v1.2.3 From c042a89113617f75e81163f103ef82d6d714cd87 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 15:17:47 -0800 Subject: common: fiber: Use boost::context instead of native fibers on Windows. --- src/core/hle/kernel/kernel.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 4cf9cee42..c426b6378 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -502,7 +502,7 @@ void KernelCore::InvalidateAllInstructionCaches() { physical_core.ArmInterface().ClearInstructionCache(); } } else { - ASSERT_MSG(false, "UNIMPLEMENTED!!!!!!!!!!!"); + UNIMPLEMENTED(); } } -- cgit v1.2.3 From 9423347c1b7adb58d4881995a15c80d41faa5c74 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 21:23:04 -0800 Subject: hle: kernel: SynchronizationObject: Use atomic_bool for is_signaled. --- src/core/hle/kernel/synchronization_object.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h index f89b24204..7408ed51f 100644 --- a/src/core/hle/kernel/synchronization_object.h +++ b/src/core/hle/kernel/synchronization_object.h @@ -4,6 +4,7 @@ #pragma once +#include #include #include @@ -56,7 +57,7 @@ public: void ClearWaitingThreads(); protected: - bool is_signaled{}; // Tells if this sync object is signalled; + std::atomic_bool is_signaled{}; // Tells if this sync object is signaled private: /// Threads waiting for this object to become available -- cgit v1.2.3 From 9705f651b25ad622dfefd5c19ca147b93068cf47 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 21:28:12 -0800 Subject: hle: kernel: AddressArbiter: Remove unused code. 
--- src/core/hle/kernel/address_arbiter.cpp | 6 ------ src/core/hle/kernel/address_arbiter.h | 3 --- 2 files changed, 9 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index b882eaa0f..048acd30e 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -275,12 +275,6 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t return current_thread->GetSignalingResult(); } -void AddressArbiter::HandleWakeupThread(std::shared_ptr thread) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitArb); - RemoveThread(thread); - thread->SetArbiterWaitAddress(0); -} - void AddressArbiter::InsertThread(std::shared_ptr thread) { const VAddr arb_addr = thread->GetArbiterWaitAddress(); std::list>& thread_list = arb_threads[arb_addr]; diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h index 0b05d533c..b91edc67d 100644 --- a/src/core/hle/kernel/address_arbiter.h +++ b/src/core/hle/kernel/address_arbiter.h @@ -50,9 +50,6 @@ public: /// Waits on an address with a particular arbitration type. ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); - /// Removes a thread from the container and resets its address arbiter adress to 0 - void HandleWakeupThread(std::shared_ptr thread); - private: /// Signals an address being waited on. ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); -- cgit v1.2.3 From c0870315fd89dfeabdbe5833d52e753a5d8417ea Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 22:49:33 -0800 Subject: hle: kernel: time_manager: Avoid a crash on process exit. --- src/core/hle/kernel/time_manager.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 95f2446c9..ea9089ff8 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -24,7 +24,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { return; } auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); - thread->OnWakeUp(); + if (thread) { + // Thread can be null if process has exited + thread->OnWakeUp(); + } }); } -- cgit v1.2.3 From 63fd1bb50302867b233325f253b1e2abbc379875 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 13 Nov 2020 23:20:32 -0800 Subject: core: arm: Implement InvalidateCacheRange for CPU cache invalidation. 
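The motivation for the new range invalidation, in sketch form: once guest pages can flip between writable data and executable code, a recompiler that caches translated blocks must drop every block overlapping the touched range, or it will keep executing stale translations. A toy version of that bookkeeping (hypothetical interface, not the emulator's):

    #include <cstddef>
    #include <map>

    using VAddr = unsigned long long;

    // Toy translation cache: block start address -> block size.
    class InstructionCache {
    public:
        void Insert(VAddr addr, std::size_t size) {
            blocks[addr] = size;
        }

        // Drop every cached block overlapping [addr, addr + size).
        void InvalidateRange(VAddr addr, std::size_t size) {
            for (auto it = blocks.begin(); it != blocks.end();) {
                const VAddr begin = it->first;
                const VAddr end = begin + it->second;
                if (begin < addr + size && addr < end) {
                    it = blocks.erase(it); // overlapping: discard translation
                } else {
                    ++it;
                }
            }
        }

    private:
        std::map<VAddr, std::size_t> blocks;
    };

    int main() {
        InstructionCache cache;
        cache.Insert(0x1000, 0x100);
        cache.InvalidateRange(0x1080, 0x10); // overlaps, so the block is dropped
        return 0;
    }

The guard in the page_table.cpp hunk below only triggers this when the Execute permission actually changes, keeping ordinary permission updates cheap.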
--- src/core/hle/kernel/kernel.cpp | 15 ++++++++++----- src/core/hle/kernel/kernel.h | 2 ++ src/core/hle/kernel/memory/page_table.cpp | 5 +++++ src/core/hle/kernel/physical_core.h | 4 ++++ 4 files changed, 21 insertions(+), 5 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index c426b6378..929db696d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -497,12 +497,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { } void KernelCore::InvalidateAllInstructionCaches() { - if (!IsMulticore()) { - for (auto& physical_core : impl->cores) { - physical_core.ArmInterface().ClearInstructionCache(); + for (auto& physical_core : impl->cores) { + physical_core.ArmInterface().ClearInstructionCache(); + } +} + +void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { + for (auto& physical_core : impl->cores) { + if (!physical_core.IsInitialized()) { + continue; } - } else { - UNIMPLEMENTED(); + physical_core.ArmInterface().InvalidateCacheRange(addr, size); } } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index a9fdc5860..a73a93039 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -156,6 +156,8 @@ public: void InvalidateAllInstructionCaches(); + void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); + /// Adds a port to the named port table void AddNamedPort(std::string name, std::shared_ptr port); diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp index a3fadb533..f53a7be82 100644 --- a/src/core/hle/kernel/memory/page_table.cpp +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -670,6 +670,11 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo return RESULT_SUCCESS; } + if ((prev_perm & MemoryPermission::Execute) != (perm & MemoryPermission::Execute)) { + // Memory execution state is changing, invalidate CPU cache range + system.InvalidateCpuInstructionCacheRange(addr, size); + } + const std::size_t num_pages{size / PageSize}; const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None ? OperationType::ChangePermissionsAndRefresh diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index ace058a5a..37513130a 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -58,6 +58,10 @@ public: // Shutdown this physical core. void Shutdown(); + bool IsInitialized() const { + return arm_interface != nullptr; + } + Core::ARM_Interface& ArmInterface() { return *arm_interface; } -- cgit v1.2.3 From c2ad1243baaf25dcb6f9c80121c48ff6da1986cb Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 14 Nov 2020 22:37:45 -0800 Subject: hle: kernel: thread: Remove unused "Running" state. --- src/core/hle/kernel/thread.cpp | 5 ----- src/core/hle/kernel/thread.h | 1 - 2 files changed, 6 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3abe12810..7d1eb2c6e 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -88,10 +88,6 @@ void Thread::ResumeFromWait() { // before actually resuming. We can ignore subsequent wakeups if the thread status has // already been set to ThreadStatus::Ready. 
return; - - case ThreadStatus::Running: - DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId()); - return; case ThreadStatus::Dead: // This should never happen, as threads must complete before being stopped. DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.", @@ -260,7 +256,6 @@ void Thread::SetStatus(ThreadStatus new_status) { switch (new_status) { case ThreadStatus::Ready: - case ThreadStatus::Running: SetSchedulingStatus(ThreadSchedStatus::Runnable); break; case ThreadStatus::Dormant: diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 20e86fb81..a75071e9b 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -72,7 +72,6 @@ enum ThreadProcessorId : s32 { }; enum class ThreadStatus { - Running, ///< Currently running Ready, ///< Ready to run Paused, ///< Paused by SetThreadActivity or debug WaitHLEEvent, ///< Waiting for hle event to finish -- cgit v1.2.3 From b7ef581c6e19d0159206aa42e1e03de461a77d7e Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 18 Nov 2020 16:19:00 -0800 Subject: kernel: time_manager: Protect access with a mutex. --- src/core/hle/kernel/time_manager.cpp | 4 +++- src/core/hle/kernel/time_manager.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index ea9089ff8..caf329bfb 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -32,6 +32,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { } void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { + std::lock_guard lock{mutex}; event_handle = timetask->GetGlobalHandle(); if (nanoseconds > 0) { ASSERT(timetask); @@ -46,6 +47,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 } void TimeManager::UnscheduleTimeEvent(Handle event_handle) { + std::lock_guard lock{mutex}; if (event_handle == InvalidHandle) { return; } @@ -54,7 +56,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) { } void TimeManager::CancelTimeEvent(Thread* time_task) { - Handle event_handle = time_task->GetGlobalHandle(); + const Handle event_handle = time_task->GetGlobalHandle(); UnscheduleTimeEvent(event_handle); } diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h index 307a18765..f39df39a0 100644 --- a/src/core/hle/kernel/time_manager.h +++ b/src/core/hle/kernel/time_manager.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include "core/hle/kernel/object.h" @@ -42,6 +43,7 @@ private: Core::System& system; std::shared_ptr time_manager_event_type; std::unordered_map cancelled_events; + std::mutex mutex; }; } // namespace Kernel -- cgit v1.2.3 From 4b9e1b6586a8a4017b8e3e0fb52457d1e2568066 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 18 Nov 2020 16:52:47 -0800 Subject: kernel: scheduler: Minor cleanup to remove duplicated code. 
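The shape of the cleanup in the diff below: the duplicated save/restore bodies in Unload, Reload, SwitchContextStep2 and SwitchContext collapse into single functions that take the affected thread as a parameter, while the old zero-argument entry points stay behind as thin wrappers. Schematically (condensed, hypothetical names):

    struct Thread {};

    class Scheduler {
    public:
        // One copy of the logic, parameterized on the thread.
        void Unload(Thread* thread) {
            if (thread != nullptr) {
                SaveState(*thread);
            }
        }

        // The old entry point becomes a one-line wrapper.
        void Unload() {
            Unload(current_thread);
        }

    private:
        void SaveState(Thread&) {} // stand-in for the context save

        Thread* current_thread = nullptr;
    };

    int main() {
        Scheduler scheduler;
        Thread thread;
        scheduler.Unload(&thread); // e.g. SwitchContext can now reuse the body
        scheduler.Unload();
        return 0;
    }
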
--- src/core/hle/kernel/scheduler.cpp | 58 ++++++++------------------------------- src/core/hle/kernel/scheduler.h | 2 ++ 2 files changed, 14 insertions(+), 46 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 0805e9914..5c63b0b4a 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -618,8 +618,7 @@ void Scheduler::OnThreadStart() { SwitchContextStep2(); } -void Scheduler::Unload() { - Thread* thread = current_thread.get(); +void Scheduler::Unload(Thread* thread) { if (thread) { thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); thread->SetIsRunning(false); @@ -639,8 +638,11 @@ void Scheduler::Unload() { } } -void Scheduler::Reload() { - Thread* thread = current_thread.get(); +void Scheduler::Unload() { + Unload(current_thread.get()); +} + +void Scheduler::Reload(Thread* thread) { if (thread) { ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, "Thread must be runnable."); @@ -665,30 +667,13 @@ void Scheduler::Reload() { } } +void Scheduler::Reload() { + Reload(current_thread.get()); +} + void Scheduler::SwitchContextStep2() { // Load context of new thread - if (selected_thread) { - ASSERT_MSG(selected_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, - "Thread must be runnable."); - - // Cancel any outstanding wakeup events for this thread - selected_thread->SetIsRunning(true); - selected_thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - selected_thread->SetWasRunning(false); - - auto* const thread_owner_process = current_thread->GetOwnerProcess(); - if (thread_owner_process != nullptr) { - system.Kernel().MakeCurrentProcess(thread_owner_process); - } - if (!selected_thread->IsHLEThread()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.LoadContext(selected_thread->GetContext32()); - cpu_core.LoadContext(selected_thread->GetContext64()); - cpu_core.SetTlsAddress(selected_thread->GetTLSAddress()); - cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - } + Reload(selected_thread.get()); TryDoContextSwitch(); } @@ -712,26 +697,7 @@ void Scheduler::SwitchContext() { UpdateLastContextSwitchTime(previous_thread, previous_process); // Save context for previous thread - if (previous_thread) { - if (new_thread != nullptr && new_thread->IsSuspendThread()) { - previous_thread->SetWasRunning(true); - } - previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - previous_thread->SetIsRunning(false); - if (previous_thread->IsContinuousOnSVC() && !previous_thread->IsHLEThread()) { - system.ArmInterface(core_id).ExceptionalExit(); - previous_thread->SetContinuousOnSVC(false); - } - if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.SaveContext(previous_thread->GetContext32()); - cpu_core.SaveContext(previous_thread->GetContext64()); - // Save the TPIDR_EL0 system register in case it was modified. 
- previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - previous_thread->context_guard.unlock(); - } + Unload(previous_thread); std::shared_ptr* old_context; if (previous_thread != nullptr) { diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index b6f04dcea..68db4a5ef 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -212,8 +212,10 @@ public: /// The next two are for SingleCore Only. /// Unload current thread before preempting core. + void Unload(Thread* thread); void Unload(); /// Reload current thread after core preemption. + void Reload(Thread* thread); void Reload(); /// Gets the current running thread -- cgit v1.2.3 From f95602f15207851b849c57e2a2dd313a087b2493 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Sat, 5 Dec 2020 11:40:14 -0500 Subject: video_core: Resolve more variable shadowing scenarios pt.3 Cleans out the rest of the occurrences of variable shadowing and makes any further occurrences of shadowing compiler errors. --- src/core/hle/kernel/memory/memory_block.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h index 9d7839d08..37fe19916 100644 --- a/src/core/hle/kernel/memory/memory_block.h +++ b/src/core/hle/kernel/memory/memory_block.h @@ -222,9 +222,9 @@ public: public: constexpr MemoryBlock() = default; - constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state, - MemoryPermission perm, MemoryAttribute attribute) - : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {} + constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_, + MemoryPermission perm_, MemoryAttribute attribute_) + : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} constexpr VAddr GetAddress() const { return addr; -- cgit v1.2.3 From e18ee8d681bf05e8c1480dd1ad7133778ead773d Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 16 Nov 2020 21:02:45 -0800 Subject: hle: kernel: Port KAffinityMask from Mesosphere. --- src/core/hle/kernel/k_affinity_mask.h | 62 +++++++++++++++++++++++++++++++++++ src/core/hle/kernel/scheduler.cpp | 10 +++--- src/core/hle/kernel/svc.cpp | 2 +- src/core/hle/kernel/thread.cpp | 11 ++++--- src/core/hle/kernel/thread.h | 6 ++-- 5 files changed, 77 insertions(+), 14 deletions(-) create mode 100644 src/core/hle/kernel/k_affinity_mask.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h new file mode 100644 index 000000000..fa2a720a4 --- /dev/null +++ b/src/core/hle/kernel/k_affinity_mask.h @@ -0,0 +1,62 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. 
+ +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hardware_properties.h" + +namespace Kernel { + +class KAffinityMask { +private: + static constexpr u64 AllowedAffinityMask = (1ul << Core::Hardware::NUM_CPU_CORES) - 1; + +private: + u64 mask; + +private: + static constexpr u64 GetCoreBit(s32 core) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + return (1ull << core); + } + +public: + constexpr KAffinityMask() : mask(0) { + ASSERT(this); + } + + constexpr u64 GetAffinityMask() const { + return this->mask; + } + + constexpr void SetAffinityMask(u64 new_mask) { + ASSERT((new_mask & ~AllowedAffinityMask) == 0); + this->mask = new_mask; + } + + constexpr bool GetAffinity(s32 core) const { + return this->mask & GetCoreBit(core); + } + + constexpr void SetAffinity(s32 core, bool set) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + + if (set) { + this->mask |= GetCoreBit(core); + } else { + this->mask &= ~GetCoreBit(core); + } + } + + constexpr void SetAll() { + this->mask = AllowedAffinityMask; + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 5c63b0b4a..9a969fdb5 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -452,7 +452,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Unsuggest(thread->current_priority, core, thread); } } @@ -464,7 +464,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Suggest(thread->current_priority, core, thread); } } @@ -484,7 +484,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Unsuggest(old_priority, core, thread); } } @@ -500,7 +500,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Suggest(thread->current_priority, core, thread); } } @@ -527,7 +527,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit } for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (((thread->affinity_mask >> core) & 1) != 0) { + if (thread->affinity_mask.GetAffinity(core)) { if (core == static_cast(thread->processor_id)) { Schedule(thread->current_priority, core, thread); } else { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 95d6e2b4d..0cd712d09 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -2003,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, } *core = thread->GetIdealCore(); - *mask = thread->GetAffinityMask(); + *mask = 
thread->GetAffinityMask().GetAffinityMask(); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 7d1eb2c6e..38b4a0987 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -191,7 +191,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->last_running_ticks = 0; thread->processor_id = processor_id; thread->ideal_core = processor_id; - thread->affinity_mask = 1ULL << processor_id; + thread->affinity_mask.SetAffinity(processor_id, true); thread->wait_objects = nullptr; thread->mutex_wait_address = 0; thread->condvar_wait_address = 0; @@ -479,15 +479,16 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { } if (use_override) { ideal_core_override = new_core; - affinity_mask_override = new_affinity_mask; } else { - const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); + const auto old_affinity_mask = affinity_mask.GetAffinityMask(); + affinity_mask.SetAffinityMask(new_affinity_mask); ideal_core = new_core; if (old_affinity_mask != new_affinity_mask) { const s32 old_core = processor_id; - if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { + if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { if (static_cast(ideal_core) < 0) { - processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); + processor_id = HighestSetCore(affinity_mask.GetAffinityMask(), + Core::Hardware::NUM_CPU_CORES); } else { processor_id = ideal_core; } diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index a75071e9b..5192ecff1 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -12,6 +12,7 @@ #include "common/common_types.h" #include "common/spin_lock.h" #include "core/arm/arm_interface.h" +#include "core/hle/kernel/k_affinity_mask.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" @@ -469,7 +470,7 @@ public: return ideal_core; } - u64 GetAffinityMask() const { + constexpr const KAffinityMask& GetAffinityMask() const { return affinity_mask; } @@ -649,10 +650,9 @@ private: Scheduler* scheduler = nullptr; u32 ideal_core{0xFFFFFFFF}; - u64 affinity_mask{0x1}; + KAffinityMask affinity_mask{}; s32 ideal_core_override = -1; - u64 affinity_mask_override = 0x1; u32 affinity_override_count = 0; u32 scheduling_state = 0; -- cgit v1.2.3 From a3ccac3eb7f6feb87ca7026c89531a0afd5fabab Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 28 Nov 2020 11:23:36 -0800 Subject: common: Port KPriorityQueue from Mesosphere. --- src/core/hle/kernel/k_priority_queue.h | 443 +++++++++++++++++++++++++++++++++ 1 file changed, 443 insertions(+) create mode 100644 src/core/hle/kernel/k_priority_queue.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h new file mode 100644 index 000000000..88e4e1ed4 --- /dev/null +++ b/src/core/hle/kernel/k_priority_queue.h @@ -0,0 +1,443 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. 
+ +#pragma once + +#include "common/assert.h" +#include "common/bit_set.h" +#include "common/bit_util.h" +#include "common/common_types.h" + +namespace Kernel { + +class Thread; + +template +concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T & t) { + { t.GetAffinityMask() } + ->std::convertible_to; + {t.SetAffinityMask(std::declval())}; + + { t.GetAffinity(std::declval()) } + ->std::same_as; + {t.SetAffinity(std::declval(), std::declval())}; + {t.SetAll()}; +}; + +template +concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { + {typename T::QueueEntry()}; + {(typename T::QueueEntry()).Initialize()}; + {(typename T::QueueEntry()).SetPrev(std::addressof(t))}; + {(typename T::QueueEntry()).SetNext(std::addressof(t))}; + { (typename T::QueueEntry()).GetNext() } + ->std::same_as; + { (typename T::QueueEntry()).GetPrev() } + ->std::same_as; + { t.GetPriorityQueueEntry(std::declval()) } + ->std::same_as; + + {t.GetAffinityMask()}; + { typename std::remove_cvref::type() } + ->KPriorityQueueAffinityMask; + + { t.GetActiveCore() } + ->std::convertible_to; + { t.GetPriority() } + ->std::convertible_to; +}; + +template +requires KPriorityQueueMember class KPriorityQueue { +public: + using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + + static_assert(LowestPriority >= 0); + static_assert(HighestPriority >= 0); + static_assert(LowestPriority >= HighestPriority); + static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1; + static constexpr size_t NumCores = _NumCores; + + static constexpr bool IsValidCore(s32 core) { + return 0 <= core && core < static_cast(NumCores); + } + + static constexpr bool IsValidPriority(s32 priority) { + return HighestPriority <= priority && priority <= LowestPriority + 1; + } + +private: + using Entry = typename Member::QueueEntry; + +public: + class KPerCoreQueue { + private: + Entry root[NumCores]; + + public: + constexpr KPerCoreQueue() : root() { + for (size_t i = 0; i < NumCores; i++) { + this->root[i].Initialize(); + } + } + + constexpr bool PushBack(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the end of the queue. */ + Member* tail = this->root[core].GetPrev(); + Entry& tail_entry = + (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(tail); + member_entry.SetNext(nullptr); + tail_entry.SetNext(member); + this->root[core].SetPrev(member); + + return (tail == nullptr); + } + + constexpr bool PushFront(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the front of the queue. */ + Member* head = this->root[core].GetNext(); + Entry& head_entry = + (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(nullptr); + member_entry.SetNext(head); + head_entry.SetPrev(member); + this->root[core].SetNext(member); + + return (head == nullptr); + } + + constexpr bool Remove(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entries associated with next and prev. 
*/ + Member* prev = member_entry.GetPrev(); + Member* next = member_entry.GetNext(); + Entry& prev_entry = + (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; + Entry& next_entry = + (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; + + /* Unlink. */ + prev_entry.SetNext(next); + next_entry.SetPrev(prev); + + return (this->GetFront(core) == nullptr); + } + + constexpr Member* GetFront(s32 core) const { + return this->root[core].GetNext(); + } + }; + + class KPriorityQueueImpl { + private: + KPerCoreQueue queues[NumPriority]; + Common::BitSet64 available_priorities[NumCores]; + + public: + constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ + } + + constexpr void PushBack(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr void PushFront(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr void Remove(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].Remove(core, member)) { + this->available_priorities[core].ClearBit(priority); + } + } + } + + constexpr Member* GetFront(s32 core) const { + ASSERT(IsValidCore(core)); + + const s32 priority = + static_cast(this->available_priorities[core].CountLeadingZero()); + if (priority <= LowestPriority) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr Member* GetFront(s32 priority, s32 core) const { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr Member* GetNext(s32 core, const Member* member) const { + ASSERT(IsValidCore(core)); + + Member* next = member->GetPriorityQueueEntry(core).GetNext(); + if (next == nullptr) { + const s32 priority = static_cast( + this->available_priorities[core].GetNextSet(member->GetPriority())); + if (priority <= LowestPriority) { + next = this->queues[priority].GetFront(core); + } + } + return next; + } + + constexpr void MoveToFront(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushFront(core, member); + } + } + + constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushBack(core, member); + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + }; + +private: + KPriorityQueueImpl scheduled_queue; + KPriorityQueueImpl suggested_queue; + +private: + constexpr void ClearAffinityBit(u64& affinity, s32 core) { + affinity &= ~(u64(1ul) << core); + } + + constexpr s32 GetNextCore(u64& affinity) { + const s32 core = Common::CountTrailingZeroes64(affinity); + ClearAffinityBit(affinity, core); + 
return core; + } + + constexpr void PushBack(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushBack(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr void PushFront(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushFront(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr void Remove(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Remove from the scheduled queue for its core. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.Remove(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* Remove from the suggested queue for all other cores. */ + while (affinity) { + this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + } + } + +public: + constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ + } + + /* Getters. */ + constexpr Member* GetScheduledFront(s32 core) const { + return this->scheduled_queue.GetFront(core); + } + + constexpr Member* GetScheduledFront(s32 core, s32 priority) const { + return this->scheduled_queue.GetFront(priority, core); + } + + constexpr Member* GetSuggestedFront(s32 core) const { + return this->suggested_queue.GetFront(core); + } + + constexpr Member* GetSuggestedFront(s32 core, s32 priority) const { + return this->suggested_queue.GetFront(priority, core); + } + + constexpr Member* GetScheduledNext(s32 core, const Member* member) const { + return this->scheduled_queue.GetNext(core, member); + } + + constexpr Member* GetSuggestedNext(s32 core, const Member* member) const { + return this->suggested_queue.GetNext(core, member); + } + + constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const { + return member->GetPriorityQueueEntry(core).GetNext(); + } + + /* Mutators. */ + constexpr void PushBack(Member* member) { + this->PushBack(member->GetPriority(), member); + } + + constexpr void Remove(Member* member) { + this->Remove(member->GetPriority(), member); + } + + constexpr void MoveToScheduledFront(Member* member) { + this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + } + + constexpr Thread* MoveToScheduledBack(Member* member) { + return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), + member); + } + + /* First class fancy operations. */ + constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { + ASSERT(IsValidPriority(prev_priority)); + + /* Remove the member from the queues. 
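+         * (prev_priority names the queue the member currently occupies; the
+         * member itself already reports the new priority via GetPriority().)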
+         */
+        const s32 new_priority = member->GetPriority();
+        this->Remove(prev_priority, member);
+
+        /* And enqueue. If the member is running, we want to keep it running. */
+        if (is_running) {
+            this->PushFront(new_priority, member);
+        } else {
+            this->PushBack(new_priority, member);
+        }
+    }
+
+    constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
+                                      Member* member) {
+        /* Get the new information. */
+        const s32 priority = member->GetPriority();
+        const AffinityMaskType& new_affinity = member->GetAffinityMask();
+        const s32 new_core = member->GetActiveCore();
+
+        /* Remove the member from all queues it was in before. */
+        for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+            if (prev_affinity.GetAffinity(core)) {
+                if (core == prev_core) {
+                    this->scheduled_queue.Remove(priority, core, member);
+                } else {
+                    this->suggested_queue.Remove(priority, core, member);
+                }
+            }
+        }
+
+        /* And add the member to all queues it should be in now. */
+        for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+            if (new_affinity.GetAffinity(core)) {
+                if (core == new_core) {
+                    this->scheduled_queue.PushBack(priority, core, member);
+                } else {
+                    this->suggested_queue.PushBack(priority, core, member);
+                }
+            }
+        }
+    }
+
+    constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
+        /* Get the new information. */
+        const s32 new_core = member->GetActiveCore();
+        const s32 priority = member->GetPriority();
+
+        /* We don't need to do anything if the core is the same. */
+        if (prev_core != new_core) {
+            /* Remove from the scheduled queue for the previous core. */
+            if (prev_core >= 0) {
+                this->scheduled_queue.Remove(priority, prev_core, member);
+            }
+
+            /* Remove from the suggested queue and add to the scheduled queue for the new core. */
+            if (new_core >= 0) {
+                this->suggested_queue.Remove(priority, new_core, member);
+                if (to_front) {
+                    this->scheduled_queue.PushFront(priority, new_core, member);
+                } else {
+                    this->scheduled_queue.PushBack(priority, new_core, member);
+                }
+            }
+
+            /* Add to the suggested queue for the previous core. */
+            if (prev_core >= 0) {
+                this->suggested_queue.PushBack(priority, prev_core, member);
+            }
+        }
+    }
+};
+
+} // namespace Kernel
--
cgit v1.2.3

From 493263f4159b15f54c5683d92590cb20b60e4d7a Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 11:43:29 -0800
Subject: hle: kernel: svc: Remove unnecessary hack in svcSleep.

---
 src/core/hle/kernel/svc.cpp | 7 -------
 1 file changed, 7 deletions(-)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 0cd712d09..fcb864427 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1627,13 +1627,6 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
     } else {
         current_thread->Sleep(nanoseconds);
     }
-
-    if (is_redundant && !system.Kernel().IsMulticore()) {
-        system.Kernel().ExitSVCProfile();
-        system.CoreTiming().AddTicks(1000U);
-        system.GetCpuManager().PreemptSingleCore();
-        system.Kernel().EnterSVCProfile();
-    }
 }
 
 static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
--
cgit v1.2.3

From d58a609ae43d2a1db296e90cb87dcfa3e4d4e7f3 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 11:54:41 -0800
Subject: hle: kernel: process: Add schedule count tracking, to be used for yield impl.
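
The counter gives the scheduler a cheap way to detect redundant yields: a
thread records the process' schedule count when its yield turns out to be a
no-op, and later yields can bail out early while the count is unchanged. A
minimal sketch of the intended use, expressed with the thread-side accessors
introduced by the scheduler rewrite later in this series
(GetYieldScheduleCount/SetYieldScheduleCount):

    void YieldWithoutCoreMigration(Thread& thread, Process& process) {
        // Nothing has been scheduled since the last redundant yield, so
        // rotating the queue again cannot change the outcome.
        if (thread.GetYieldScheduleCount() == process.GetScheduledCount()) {
            return;
        }
        // ... move the thread to the back of its priority queue ...
        thread.SetYieldScheduleCount(process.GetScheduledCount());
    }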
---
 src/core/hle/kernel/process.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb5674..36d8547bd 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -216,6 +216,16 @@ public:
         total_process_running_time_ticks += ticks;
     }
 
+    /// Gets the process schedule count, used for thread yielding.
+    constexpr s64 GetScheduledCount() const {
+        return schedule_count;
+    }
+
+    /// Increments the process schedule count, used for thread yielding.
+    constexpr void IncrementScheduledCount() {
+        ++schedule_count;
+    }
+
     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
     u64 GetRandomEntropy(std::size_t index) const {
         return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
     /// Name of this process
    std::string name;
 
+    /// Schedule count of this process
+    s64 schedule_count{};
+
     /// System context
     Core::System& system;
 };
--
cgit v1.2.3

From 39d356782e5652a7e9bbdf7f0748be0b126a42f2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 11:55:30 -0800
Subject: hle: kernel: svc: Remove reschedule on svcBreak.

- This breaks things and is unnecessary, since emulation will already be
  finished by the time svcBreak is reached.

---
 src/core/hle/kernel/svc.cpp | 5 -----
 1 file changed, 5 deletions(-)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index fcb864427..9742aaf4c 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -658,7 +658,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
 
     if (!break_reason.signal_debugger) {
-        SchedulerLock lock(system.Kernel());
         LOG_CRITICAL(
             Debug_Emulated,
             "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -669,10 +668,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
         const auto thread_processor_id = current_thread->GetProcessorID();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
-
-        // Kill the current thread
-        system.Kernel().ExceptionalExit();
-        current_thread->Stop();
     }
 }
--
cgit v1.2.3

From 7e5d0f1fe311663329bc15b9e387f3ce2083dcf0 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 13:34:07 -0800
Subject: hle: kernel: Port KAbstractSchedulerLock from Mesosphere.

---
 src/core/hle/kernel/k_scheduler_lock.h | 76 ++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)
 create mode 100644 src/core/hle/kernel/k_scheduler_lock.h
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 000000000..d46f5fc04
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,76 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
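+
+// KAbstractSchedulerLock is a recursive lock: the owning emulated thread may
+// nest Lock() calls, and scheduling is only re-enabled once the outermost
+// Unlock() drops lock_count back to zero.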
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+template <typename SchedulerType>
+class KAbstractSchedulerLock {
+private:
+    KernelCore& kernel;
+    Common::SpinLock spin_lock;
+    s32 lock_count;
+    Core::EmuThreadHandle owner_thread;
+
+public:
+    KAbstractSchedulerLock(KernelCore& kernel)
+        : kernel{kernel}, spin_lock(), lock_count(0),
+          owner_thread(Core::EmuThreadHandle::InvalidHandle()) {}
+
+    bool IsLockedByCurrentThread() const {
+        return this->owner_thread == kernel.GetCurrentEmuThreadID();
+    }
+
+    void Lock() {
+        if (this->IsLockedByCurrentThread()) {
+            /* If we already own the lock, we can just increment the count. */
+            ASSERT(this->lock_count > 0);
+            this->lock_count++;
+        } else {
+            /* Otherwise, we want to disable scheduling and acquire the spinlock. */
+            SchedulerType::DisableScheduling(kernel);
+            this->spin_lock.lock();
+
+            /* For debug, ensure that our state is valid. */
+            ASSERT(this->lock_count == 0);
+            ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
+
+            /* Increment count, take ownership. */
+            this->lock_count = 1;
+            this->owner_thread = kernel.GetCurrentEmuThreadID();
+        }
+    }
+
+    void Unlock() {
+        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(this->lock_count > 0);
+
+        /* Release an instance of the lock. */
+        if ((--this->lock_count) == 0) {
+            /* We're no longer going to hold the lock. Take note of what cores need scheduling. */
+            const u64 cores_needing_scheduling =
+                SchedulerType::UpdateHighestPriorityThreads(kernel);
+            Core::EmuThreadHandle leaving_thread = owner_thread;
+
+            /* Note that we no longer hold the lock, and unlock the spinlock. */
+            this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
+            this->spin_lock.unlock();
+
+            /* Enable scheduling, and perform a rescheduling operation. */
+            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
+        }
+    }
+};
+
+} // namespace Kernel
--
cgit v1.2.3

From c10a37e5b6bdece089fc765e9843f81a097b08a2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 1 Dec 2020 20:53:51 -0800
Subject: hle: kernel: physical_core: Clear exclusive state after each run.

- This is closer to pre-multicore behavior, and works a bit better.

---
 src/core/hle/kernel/physical_core.cpp | 1 +
 1 file changed, 1 insertion(+)
(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 50aca5752..d6a5742bd 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -37,6 +37,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 
 void PhysicalCore::Run() {
     arm_interface->Run();
+    arm_interface->ClearExclusiveState();
 }
 
 void PhysicalCore::Idle() {
--
cgit v1.2.3

From 9e29e36a784496f7290c03b6a42e400a164a5b1e Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 2 Dec 2020 18:08:35 -0800
Subject: hle: kernel: Rewrite scheduler implementation based on Mesosphere.
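
The new KScheduler/GlobalSchedulerContext split follows Mesosphere: each core
gets its own KScheduler, while the global context owns the shared priority
queue and the scheduler lock ported in the previous commit. Every scheduling
mutation now happens under that lock; a minimal sketch of the pattern used
throughout this patch (names as introduced below):

    {
        SchedulerLock lock(kernel);  // DisableScheduling + spinlock acquire
        // ... mutate thread state; the KScheduler::OnThread* hooks keep the
        // shared priority queue in sync ...
    }   // ~SchedulerLock: UpdateHighestPriorityThreads, then EnableScheduling,
        // which interrupts every core whose top thread changed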
--- src/core/hle/kernel/address_arbiter.cpp | 6 +- src/core/hle/kernel/handle_table.cpp | 4 +- src/core/hle/kernel/hle_ipc.cpp | 2 +- src/core/hle/kernel/k_scheduler.cpp | 873 ++++++++++++++++++++++++++++++++ src/core/hle/kernel/k_scheduler.h | 297 +++++++++++ src/core/hle/kernel/kernel.cpp | 59 +-- src/core/hle/kernel/kernel.h | 17 +- src/core/hle/kernel/mutex.cpp | 6 +- src/core/hle/kernel/physical_core.cpp | 9 +- src/core/hle/kernel/physical_core.h | 13 +- src/core/hle/kernel/process.cpp | 6 +- src/core/hle/kernel/readable_event.cpp | 2 +- src/core/hle/kernel/scheduler.cpp | 819 ------------------------------ src/core/hle/kernel/scheduler.h | 320 ------------ src/core/hle/kernel/server_session.cpp | 2 +- src/core/hle/kernel/svc.cpp | 57 +-- src/core/hle/kernel/synchronization.cpp | 4 +- src/core/hle/kernel/thread.cpp | 50 +- src/core/hle/kernel/thread.h | 107 +++- src/core/hle/kernel/time_manager.cpp | 2 +- 20 files changed, 1344 insertions(+), 1311 deletions(-) create mode 100644 src/core/hle/kernel/k_scheduler.cpp create mode 100644 src/core/hle/kernel/k_scheduler.h delete mode 100644 src/core/hle/kernel/scheduler.cpp delete mode 100644 src/core/hle/kernel/scheduler.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 048acd30e..bc32be18b 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -12,8 +12,8 @@ #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -153,7 +153,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 bool should_decrement) { auto& memory = system.Memory(); auto& kernel = system.Kernel(); - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { @@ -223,7 +223,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { auto& memory = system.Memory(); auto& kernel = system.Kernel(); - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 3e745c18b..40988b0fd 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -8,9 +8,9 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" namespace Kernel { @@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const { std::shared_ptr HandleTable::GetGeneric(Handle handle) const { if (handle == CurrentThread) { - return SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); } else if (handle == CurrentProcess) { return SharedFrom(kernel.CurrentProcess()); } diff 
--git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 81f85643b..7eda89786 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -17,11 +17,11 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp new file mode 100644 index 000000000..7f7da610d --- /dev/null +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -0,0 +1,873 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#include +#include +#include +#include +#include + +#include "common/assert.h" +#include "common/bit_util.h" +#include "common/fiber.h" +#include "common/logging/log.h" +#include "core/arm/arm_interface.h" +#include "core/core.h" +#include "core/core_timing.h" +#include "core/cpu_manager.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/physical_core.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/time_manager.h" + +namespace Kernel { + +static void IncrementScheduledCount(Kernel::Thread* thread) { + if (auto process = thread->GetOwnerProcess(); process) { + process->IncrementScheduledCount(); + } +} + +GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) + : kernel{kernel}, scheduler_lock{kernel} {} + +GlobalSchedulerContext::~GlobalSchedulerContext() = default; + +/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, + Core::EmuThreadHandle global_thread) { + u32 current_core = global_thread.host_handle; + bool must_context_switch = global_thread.guest_handle != InvalidHandle && + (current_core < Core::Hardware::NUM_CPU_CORES); + + while (cores_pending_reschedule != 0) { + u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule); + ASSERT(core < Core::Hardware::NUM_CPU_CORES); + if (!must_context_switch || core != current_core) { + auto& phys_core = kernel.PhysicalCore(core); + phys_core.Interrupt(); + } else { + must_context_switch = true; + } + cores_pending_reschedule &= ~(1ULL << core); + } + if (must_context_switch) { + auto core_scheduler = kernel.CurrentScheduler(); + kernel.ExitSVCProfile(); + core_scheduler->RescheduleCurrentCore(); + kernel.EnterSVCProfile(); + } +} + +u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { + std::scoped_lock lock{guard}; + if (Thread* prev_highest_thread = this->state.highest_priority_thread; + prev_highest_thread != highest_thread) { + if (prev_highest_thread != nullptr) { + IncrementScheduledCount(prev_highest_thread); + prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); + } + if (this->state.should_count_idle) { + if (highest_thread != nullptr) { + // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { + // 
process->SetRunningThread(this->core_id, highest_thread, + // this->state.idle_count); + //} + } else { + this->state.idle_count++; + } + } + + this->state.highest_priority_thread = highest_thread; + this->state.needs_scheduling = true; + return (1ULL << this->core_id); + } else { + return 0; + } +} + +/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* Clear that we need to update. */ + ClearSchedulerUpdateNeeded(kernel); + + u64 cores_needing_scheduling = 0, idle_cores = 0; + Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; + auto& priority_queue = GetPriorityQueue(kernel); + + /* We want to go over all cores, finding the highest priority thread and determining if + * scheduling is needed for that core. */ + for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); + if (top_thread != nullptr) { + ///* If the thread has no waiters, we need to check if the process has a thread pinned. + ///*/ + // if (top_thread->GetNumKernelWaiters() == 0) { + // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { + // if (Thread* pinned = parent->GetPinnedThread(core_id); + // pinned != nullptr && pinned != top_thread) { + // /* We prefer our parent's pinned thread if possible. However, we also + // don't + // * want to schedule un-runnable threads. */ + // if (pinned->GetRawState() == Thread::ThreadState_Runnable) { + // top_thread = pinned; + // } else { + // top_thread = nullptr; + // } + // } + // } + //} + } else { + idle_cores |= (1ULL << core_id); + } + + top_threads[core_id] = top_thread; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); + } + + /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */ + while (idle_cores != 0) { + u32 core_id = Common::CountTrailingZeroes64(idle_cores); + if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { + s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; + size_t num_candidates = 0; + + /* While we have a suggested thread, try to migrate it! */ + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_thread = + (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; + top_thread != suggested) { + /* Make sure we're not dealing with threads too high priority for migration. */ + if (top_thread != nullptr && + top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { + break; + } + + /* The suggested thread isn't bound to its core, so we can migrate it! */ + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested); + + top_threads[core_id] = suggested; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); + break; + } + + /* Note this core as a candidate for migration. */ + ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); + migration_candidates[num_candidates++] = suggested_core; + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our + * candidate cores' top threads. 
*/ + if (suggested == nullptr) { + for (size_t i = 0; i < num_candidates; i++) { + /* Check if there's some other thread that can run on the candidate core. */ + const s32 candidate_core = migration_candidates[i]; + suggested = top_threads[candidate_core]; + if (Thread* next_on_candidate_core = + priority_queue.GetScheduledNext(candidate_core, suggested); + next_on_candidate_core != nullptr) { + /* The candidate core can run some other thread! We'll migrate its current + * top thread to us. */ + top_threads[candidate_core] = next_on_candidate_core; + cores_needing_scheduling |= + kernel.Scheduler(candidate_core) + .UpdateHighestPriorityThread(top_threads[candidate_core]); + + /* Perform the migration. */ + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(candidate_core, suggested); + + top_threads[core_id] = suggested; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread( + top_threads[core_id]); + break; + } + } + } + } + + idle_cores &= ~(1ULL << core_id); + } + + return cores_needing_scheduling; +} + +void GlobalSchedulerContext::AddThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.push_back(std::move(thread)); +} + +void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), + thread_list.end()); +} + +void GlobalSchedulerContext::PreemptThreads() { + // The priority levels at which the global scheduler preempts threads every 10 ms. They are + // ordered from Core 0 to Core 3. + std::array preemption_priorities = {59, 59, 59, 63}; + + ASSERT(IsLocked()); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + const u32 priority = preemption_priorities[core_id]; + kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); + } +} + +bool GlobalSchedulerContext::IsLocked() const { + return scheduler_lock.IsLockedByCurrentThread(); +} + +/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, + u32 old_state) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* Check if the state has changed, because if it hasn't there's nothing to do. */ + const auto cur_state = thread->scheduling_state; + if (cur_state == old_state) { + return; + } + + /* Update the priority queues. */ + if (old_state == static_cast(ThreadSchedStatus::Runnable)) { + /* If we were previously runnable, then we're not runnable now, and we should remove. */ + GetPriorityQueue(kernel).Remove(thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } else if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* If we're now runnable, then we weren't previously, and we should add. */ + GetPriorityQueue(kernel).PushBack(thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, + Thread* current_thread, u32 old_priority) { + + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* If the thread is runnable, we want to change its priority in the queue. 
*/ + if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + GetPriorityQueue(kernel).ChangePriority( + old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, + const KAffinityMask& old_affinity, + s32 old_core) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* If the thread is runnable, we want to change its affinity in the queue. */ + if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { + ASSERT(system.GlobalSchedulerContext().IsLocked()); + + /* Get a reference to the priority queue. */ + auto& kernel = system.Kernel(); + auto& priority_queue = GetPriorityQueue(kernel); + + /* Rotate the front of the queue to the end. */ + Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); + Thread* next_thread = nullptr; + if (top_thread != nullptr) { + next_thread = priority_queue.MoveToScheduledBack(top_thread); + if (next_thread != top_thread) { + IncrementScheduledCount(top_thread); + IncrementScheduledCount(next_thread); + } + } + + /* While we have a suggested thread, try to migrate it! */ + { + Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_on_suggested_core = + (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) + : nullptr; + top_on_suggested_core != suggested) { + /* If the next thread is a new thread that has been waiting longer than our + * suggestion, we prefer it to our suggestion. */ + if (top_thread != next_thread && next_thread != nullptr && + next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { + suggested = nullptr; + break; + } + + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion + * to the front of the queue. */ + if (top_on_suggested_core == nullptr || + top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSamePriorityNext(core_id, suggested); + } + } + + /* Now that we might have migrated a thread with the same priority, check if we can do better. + */ + { + Thread* best_thread = priority_queue.GetScheduledFront(core_id); + if (best_thread == GetCurrentThread()) { + best_thread = priority_queue.GetScheduledNext(core_id, best_thread); + } + + /* If the best thread we can choose has a priority the same or worse than ours, try to + * migrate a higher priority thread. */ + if (best_thread != nullptr && best_thread->GetPriority() >= static_cast(priority)) { + Thread* suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* If the suggestion's priority is the same as ours, don't bother. 
*/ + if (suggested->GetPriority() >= best_thread->GetPriority()) { + break; + } + + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_on_suggested_core = + (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) + : nullptr; + top_on_suggested_core != suggested) { + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + * suggestion to the front of the queue. */ + if (top_on_suggested_core == nullptr || + top_on_suggested_core->GetPriority() >= + HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + } + } + + /* After a rotation, we need a scheduler update. */ + SetSchedulerUpdateNeeded(kernel); +} + +/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) { + return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; +} + +/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { + return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); +} + +/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { + kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); +} + +/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { + kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); +} + +/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) { + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); + scheduler->GetCurrentThread()->DisableDispatch(); + } +} + +/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread) { + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + scheduler->GetCurrentThread()->EnableDispatch(); + } + RescheduleCores(kernel, cores_needing_scheduling, global_thread); +} + +/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { + if (IsSchedulerUpdateNeeded(kernel)) { + return UpdateHighestPriorityThreadsImpl(kernel); + } else { + return 0; + } +} + +/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { + return kernel.GlobalSchedulerContext().priority_queue; +} + +void KScheduler::YieldWithoutCoreMigration() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. */ + { + SchedulerLock lock(kernel); + + const auto cur_state = cur_thread.scheduling_state; + if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* Put the current thread at the back of the queue. 
*/ + Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* If the next thread is different, we have an update to perform. */ + if (next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(kernel); + } else { + /* Otherwise, set the thread's yield count so that we won't waste work until the + * process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } +} + +void KScheduler::YieldWithCoreMigration() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. */ + { + SchedulerLock lock(kernel); + + const auto cur_state = cur_thread.scheduling_state; + if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* Get the current active core. */ + const s32 core_id = cur_thread.GetActiveCore(); + + /* Put the current thread at the back of the queue. */ + Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* While we have a suggested thread, try to migrate it! */ + bool recheck = false; + Thread* suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* Check if the suggested thread is the thread running on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + + if (Thread* running_on_suggested_core = + (suggested_core >= 0) + ? kernel.Scheduler(suggested_core).state.highest_priority_thread + : nullptr; + running_on_suggested_core != suggested) { + /* If the current thread's priority is higher than our suggestion's we prefer + * the next thread to the suggestion. */ + /* We also prefer the next thread when the current thread's priority is equal to + * the suggestions, but the next thread has been waiting longer. */ + if ((suggested->GetPriority() > cur_thread.GetPriority()) || + (suggested->GetPriority() == cur_thread.GetPriority() && + next_thread != std::addressof(cur_thread) && + next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) { + suggested = nullptr; + break; + } + + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + * suggestion to the front of the queue. */ + if (running_on_suggested_core == nullptr || + running_on_suggested_core->GetPriority() >= + HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } else { + /* We couldn't perform a migration, but we should check again on a future + * yield. */ + recheck = true; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If we still have a suggestion or the next thread is different, we have an update to + * perform. 
*/ + if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(kernel); + } else if (!recheck) { + /* Otherwise if we don't need to re-check, set the thread's yield count so that we + * won't waste work until the process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } +} + +void KScheduler::YieldToAnyThread() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. */ + { + SchedulerLock lock(kernel); + + const auto cur_state = cur_thread.scheduling_state; + if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* Get the current active core. */ + const s32 core_id = cur_thread.GetActiveCore(); + + /* Migrate the current thread to core -1. */ + cur_thread.SetActiveCore(-1); + priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* If there's nothing scheduled, we can try to perform a migration. */ + if (priority_queue.GetScheduledFront(core_id) == nullptr) { + /* While we have a suggested thread, try to migrate it! */ + Thread* suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_on_suggested_core = + (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) + : nullptr; + top_on_suggested_core != suggested) { + /* If we're allowed to do a migration, do one. */ + if (top_on_suggested_core == nullptr || + top_on_suggested_core->GetPriority() >= + HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested); + IncrementScheduledCount(suggested); + } + + /* Regardless of whether we migrated, we had a candidate, so we're done. */ + break; + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If the suggestion is different from the current thread, we need to perform an + * update. */ + if (suggested != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(kernel); + } else { + /* Otherwise, set the thread's yield count so that we won't waste work until the + * process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } else { + /* Otherwise, we have an update to perform. 
*/ + SetSchedulerUpdateNeeded(kernel); + } + } + } +} + +void GlobalSchedulerContext::Lock() { + scheduler_lock.Lock(); +} + +void GlobalSchedulerContext::Unlock() { + scheduler_lock.Unlock(); +} + +KScheduler::KScheduler(Core::System& system, std::size_t core_id) + : system(system), core_id(core_id) { + switch_fiber = std::make_shared(std::function(OnSwitch), this); + this->state.needs_scheduling = true; + this->state.interrupt_task_thread_runnable = false; + this->state.should_count_idle = false; + this->state.idle_count = 0; + this->state.idle_thread_stack = nullptr; + this->state.highest_priority_thread = nullptr; +} + +KScheduler::~KScheduler() = default; + +Thread* KScheduler::GetCurrentThread() const { + if (current_thread) { + return current_thread; + } + return idle_thread; +} + +u64 KScheduler::GetLastContextSwitchTicks() const { + return last_context_switch_time; +} + +void KScheduler::RescheduleCurrentCore() { + ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1); + + auto& phys_core = system.Kernel().PhysicalCore(core_id); + if (phys_core.IsInterrupted()) { + phys_core.ClearInterrupt(); + } + guard.lock(); + if (this->state.needs_scheduling) { + Schedule(); + } else { + guard.unlock(); + } +} + +void KScheduler::OnThreadStart() { + SwitchContextStep2(); +} + +void KScheduler::Unload(Thread* thread) { + if (thread) { + thread->SetIsRunning(false); + if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) { + system.ArmInterface(core_id).ExceptionalExit(); + thread->SetContinuousOnSVC(false); + } + if (!thread->IsHLEThread() && !thread->HasExited()) { + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); + cpu_core.SaveContext(thread->GetContext32()); + cpu_core.SaveContext(thread->GetContext64()); + // Save the TPIDR_EL0 system register in case it was modified. 
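+            // (TPIDR_EL0 is the EL0 thread-pointer register and is writable by
+            // guest code, so it is written back separately from the register
+            // context saved above.)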
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); + cpu_core.ClearExclusiveState(); + } + thread->context_guard.unlock(); + } +} + +void KScheduler::Reload(Thread* thread) { + if (thread) { + ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, + "Thread must be runnable."); + + // Cancel any outstanding wakeup events for this thread + thread->SetIsRunning(true); + thread->SetWasRunning(false); + + auto* const thread_owner_process = thread->GetOwnerProcess(); + if (thread_owner_process != nullptr) { + system.Kernel().MakeCurrentProcess(thread_owner_process); + } + if (!thread->IsHLEThread()) { + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); + cpu_core.LoadContext(thread->GetContext32()); + cpu_core.LoadContext(thread->GetContext64()); + cpu_core.SetTlsAddress(thread->GetTLSAddress()); + cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); + cpu_core.ClearExclusiveState(); + } + } +} + +void KScheduler::SwitchContextStep2() { + // Load context of new thread + Reload(current_thread); + + RescheduleCurrentCore(); +} + +void KScheduler::ScheduleImpl() { + Thread* previous_thread = current_thread; + current_thread = state.highest_priority_thread; + + this->state.needs_scheduling = false; + + if (current_thread == previous_thread) { + guard.unlock(); + return; + } + + Process* const previous_process = system.Kernel().CurrentProcess(); + + UpdateLastContextSwitchTime(previous_thread, previous_process); + + // Save context for previous thread + Unload(previous_thread); + + std::shared_ptr* old_context; + if (previous_thread != nullptr) { + old_context = &previous_thread->GetHostContext(); + } else { + old_context = &idle_thread->GetHostContext(); + } + guard.unlock(); + + Common::Fiber::YieldTo(*old_context, switch_fiber); + /// When a thread wakes up, the scheduler may have changed to other in another core. 
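+    /// Requery the kernel for the scheduler of the core this host thread now
+    /// runs on, rather than using `this`, which may belong to the old core.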
+ auto& next_scheduler = *system.Kernel().CurrentScheduler(); + next_scheduler.SwitchContextStep2(); +} + +void KScheduler::OnSwitch(void* this_scheduler) { + KScheduler* sched = static_cast(this_scheduler); + sched->SwitchToCurrent(); +} + +void KScheduler::SwitchToCurrent() { + while (true) { + { + std::scoped_lock lock{guard}; + current_thread = state.highest_priority_thread; + this->state.needs_scheduling = false; + } + const auto is_switch_pending = [this] { + std::scoped_lock lock{guard}; + return !!this->state.needs_scheduling; + }; + do { + if (current_thread != nullptr && !current_thread->IsHLEThread()) { + current_thread->context_guard.lock(); + if (!current_thread->IsRunnable()) { + current_thread->context_guard.unlock(); + break; + } + if (static_cast(current_thread->GetProcessorID()) != core_id) { + current_thread->context_guard.unlock(); + break; + } + } + std::shared_ptr* next_context; + if (current_thread != nullptr) { + next_context = ¤t_thread->GetHostContext(); + } else { + next_context = &idle_thread->GetHostContext(); + } + Common::Fiber::YieldTo(switch_fiber, *next_context); + } while (!is_switch_pending()); + } +} + +void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { + const u64 prev_switch_ticks = last_context_switch_time; + const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); + const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; + + if (thread != nullptr) { + thread->UpdateCPUTimeTicks(update_ticks); + } + + if (process != nullptr) { + process->UpdateCPUTimeTicks(update_ticks); + } + + last_context_switch_time = most_recent_switch_ticks; +} + +void KScheduler::Initialize() { + std::string name = "Idle Thread Id:" + std::to_string(core_id); + std::function init_func = Core::CpuManager::GetIdleThreadStartFunc(); + void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); + ThreadType type = static_cast(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); + auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast(core_id), 0, + nullptr, std::move(init_func), init_func_parameter); + idle_thread = thread_res.Unwrap().get(); + + { + KScopedSchedulerLock lock{system.Kernel()}; + idle_thread->SetStatus(ThreadStatus::Ready); + } +} + +SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} { + kernel.GlobalSchedulerContext().Lock(); +} + +SchedulerLock::~SchedulerLock() { + kernel.GlobalSchedulerContext().Unlock(); +} + +SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, + Thread* time_task, s64 nanoseconds) + : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{ + nanoseconds} { + event_handle = InvalidHandle; +} + +SchedulerLockAndSleep::~SchedulerLockAndSleep() { + if (sleep_cancelled) { + return; + } + auto& time_manager = kernel.TimeManager(); + time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); +} + +void SchedulerLockAndSleep::Release() { + if (sleep_cancelled) { + return; + } + auto& time_manager = kernel.TimeManager(); + time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); + sleep_cancelled = true; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h new file mode 100644 index 000000000..535ee34b9 --- /dev/null +++ b/src/core/hle/kernel/k_scheduler.h @@ -0,0 +1,297 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file 
included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#pragma once + +#include +#include +#include +#include + +#include "common/common_types.h" +#include "common/multi_level_queue.h" +#include "common/scope_exit.h" +#include "common/spin_lock.h" +#include "core/core_timing.h" +#include "core/hardware_properties.h" +#include "core/hle/kernel/k_priority_queue.h" +#include "core/hle/kernel/k_scheduler_lock.h" +#include "core/hle/kernel/thread.h" + +namespace Common { +class Fiber; +} + +namespace Core { +class ARM_Interface; +class System; +} // namespace Core + +namespace Kernel { + +class KernelCore; +class Process; +class SchedulerLock; + +using KSchedulerPriorityQueue = + KPriorityQueue; +static constexpr s32 HighestCoreMigrationAllowedPriority = 2; + +class GlobalSchedulerContext final { + friend class KScheduler; + +public: + explicit GlobalSchedulerContext(KernelCore& kernel); + ~GlobalSchedulerContext(); + + /// Adds a new thread to the scheduler + void AddThread(std::shared_ptr thread); + + /// Removes a thread from the scheduler + void RemoveThread(std::shared_ptr thread); + + /// Returns a list of all threads managed by the scheduler + const std::vector>& GetThreadList() const { + return thread_list; + } + + /** + * Rotates the scheduling queues of threads at a preemption priority and then does + * some core rebalancing. Preemption priorities can be found in the array + * 'preemption_priorities'. + * + * @note This operation happens every 10ms. + */ + void PreemptThreads(); + + u32 CpuCoresCount() const { + return Core::Hardware::NUM_CPU_CORES; + } + + bool IsLocked() const; + +private: + friend class SchedulerLock; + + /// Lock the scheduler to the current thread. + void Lock(); + + /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling + /// and reschedules current core if needed. + void Unlock(); + + using LockType = KAbstractSchedulerLock; + + KernelCore& kernel; + + std::atomic_bool scheduler_update_needed{}; + KSchedulerPriorityQueue priority_queue; + LockType scheduler_lock; + + /// Lists all thread ids that aren't deleted/etc. + std::vector> thread_list; + Common::SpinLock global_list_guard{}; +}; + +class KScheduler final { +public: + explicit KScheduler(Core::System& system, std::size_t core_id); + ~KScheduler(); + + /// Reschedules to the next available thread (call after current thread is suspended) + void RescheduleCurrentCore(); + + /// Reschedules cores pending reschedule, to be called on EnableScheduling. + static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, + Core::EmuThreadHandle global_thread); + + /// The next two are for SingleCore Only. + /// Unload current thread before preempting core. + void Unload(Thread* thread); + + /// Reload current thread after core preemption. + void Reload(Thread* thread); + + /// Gets the current running thread + Thread* GetCurrentThread() const; + + /// Gets the timestamp for the last context switch in ticks. 
+ u64 GetLastContextSwitchTicks() const; + + bool ContextSwitchPending() const { + return this->state.needs_scheduling; + } + + void Initialize(); + + void OnThreadStart(); + + std::shared_ptr& ControlContext() { + return switch_fiber; + } + + const std::shared_ptr& ControlContext() const { + return switch_fiber; + } + + std::size_t CurrentCoreId() const { + return core_id; + } + + u64 UpdateHighestPriorityThread(Thread* highest_thread); + + /** + * Takes a thread and moves it to the back of the it's priority list. + * + * @note This operation can be redundant and no scheduling is changed if marked as so. + */ + void YieldWithoutCoreMigration(); + + /** + * Takes a thread and moves it to the back of the it's priority list. + * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or + * a better priority than the next thread in the core. + * + * @note This operation can be redundant and no scheduling is changed if marked as so. + */ + void YieldWithCoreMigration(); + + /** + * Takes a thread and moves it out of the scheduling queue. + * and into the suggested queue. If no thread can be scheduled afterwards in that core, + * a suggested thread is obtained instead. + * + * @note This operation can be redundant and no scheduling is changed if marked as so. + */ + void YieldToAnyThread(); + + /// Notify the scheduler a thread's status has changed. + static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state); + + /// Notify the scheduler a thread's priority has changed. + static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, + u32 old_priority); + + /// Notify the scheduler a thread's core and/or affinity mask has changed. + static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, + const KAffinityMask& old_affinity, s32 old_core); + +private: + /** + * Takes care of selecting the new scheduled threads in three steps: + * + * 1. First a thread is selected from the top of the priority queue. If no thread + * is obtained then we move to step two, else we are done. + * + * 2. Second we try to get a suggested thread that's not assigned to any core or + * that is not the top thread in that core. + * + * 3. Third is no suggested thread is found, we do a second pass and pick a running + * thread in another core and swap it with its current thread. + * + * returns the cores needing scheduling. 
+ */
+    static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+
+    void RotateScheduledQueue(s32 core_id, s32 priority);
+
+public:
+    static bool CanSchedule(KernelCore& kernel);
+    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+    static void DisableScheduling(KernelCore& kernel);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                 Core::EmuThreadHandle global_thread);
+    static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+    friend class GlobalSchedulerContext;
+
+    static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+
+    void Schedule() {
+        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+        this->ScheduleImpl();
+    }
+
+    /// Switches the CPU's active thread context to that of the specified thread
+    void ScheduleImpl();
+    void SwitchThread(Thread* next_thread);
+
+    /// When a thread wakes up, it must run this through its new scheduler
+    void SwitchContextStep2();
+
+    /**
+     * Called on every context switch to update the internal timestamp
+     * This also updates the running time ticks for the given thread and
+     * process using the following difference:
+     *
+     * ticks += most_recent_ticks - last_context_switch_ticks
+     *
+     * The internal tick timestamp for the scheduler is simply the
+     * most recent tick count retrieved. No special arithmetic is
+     * applied to it.
+     */
+    void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+
+    static void OnSwitch(void* this_scheduler);
+    void SwitchToCurrent();
+
+private:
+    Thread* current_thread{};
+    Thread* idle_thread{};
+
+    std::shared_ptr<Common::Fiber> switch_fiber{};
+
+    struct SchedulingState {
+        std::atomic<bool> needs_scheduling;
+        bool interrupt_task_thread_runnable{};
+        bool should_count_idle{};
+        u64 idle_count{};
+        Thread* highest_priority_thread{};
+        void* idle_thread_stack{};
+    };
+
+    SchedulingState state;
+
+    Core::System& system;
+    u64 last_context_switch_time{};
+    const std::size_t core_id;
+
+    Common::SpinLock guard{};
+};
+
+class SchedulerLock {
+public:
+    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
+    ~SchedulerLock();
+
+protected:
+    KernelCore& kernel;
+};
+
+class SchedulerLockAndSleep : public SchedulerLock {
+public:
+    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
+                                   s64 nanoseconds);
+    ~SchedulerLockAndSleep();
+
+    void CancelSleep() {
+        sleep_cancelled = true;
+    }
+
+    void Release();
+
+private:
+    Handle& event_handle;
+    Thread* time_task;
+    s64 nanoseconds;
+    bool sleep_cancelled{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 929db696d..b74e34c40 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -27,6 +27,7 @@
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/memory/memory_manager.h"
@@ -34,7 +35,6 @@
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
@@ -49,17 +49,18 @@ namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
-        : global_scheduler{kernel}, synchronization{system}, time_manager{system},
-          global_handle_table{kernel}, system{system} {}
+        : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
+                                                                                          system} {}
 
     void SetMulticore(bool is_multicore) {
         this->is_multicore = is_multicore;
     }
 
     void Initialize(KernelCore& kernel) {
-        Shutdown();
         RegisterHostThread();
 
+        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
@@ -86,29 +87,20 @@ struct KernelCore::Impl {
             }
         }
 
-        for (std::size_t i = 0; i < cores.size(); i++) {
-            cores[i].Shutdown();
-            schedulers[i].reset();
-        }
         cores.clear();
 
         process_list.clear();
+
         current_process = nullptr;
 
         system_resource_limit = nullptr;
 
         global_handle_table.Clear();
-        preemption_event = nullptr;
 
-        global_scheduler.Shutdown();
+        preemption_event = nullptr;
 
         named_ports.clear();
 
-        for (auto& core : cores) {
-            core.Shutdown();
-        }
-        cores.clear();
-
         exclusive_monitor.reset();
 
         num_host_threads = 0;
@@ -121,7 +113,7 @@ struct KernelCore::Impl {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
+            schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
             cores.emplace_back(i, system, *schedulers[i], interrupts);
         }
     }
@@ -155,7 +147,7 @@ struct KernelCore::Impl {
             "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
                     SchedulerLock lock(kernel);
-                    global_scheduler.PreemptThreads();
+                    global_scheduler_context->PreemptThreads();
                 }
                 const auto time_interval = std::chrono::nanoseconds{
                     Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -245,7 +237,7 @@ struct KernelCore::Impl {
         if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
            return result;
         }
-        const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
+        const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
         const Kernel::Thread* current = sched.GetCurrentThread();
         if (current != nullptr && !current->IsPhantomMode()) {
             result.guest_handle = current->GetGlobalHandle();
@@ -314,7 +306,7 @@ struct KernelCore::Impl {
     // Lists all processes that exist in the current session.
     std::vector<std::shared_ptr<Process>> process_list;
     Process* current_process = nullptr;
-    Kernel::GlobalScheduler global_scheduler;
+    std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
     Kernel::Synchronization synchronization;
    Kernel::TimeManager time_manager;
 
@@ -355,7 +347,7 @@ struct KernelCore::Impl {
     std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
-    std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
 
     bool is_multicore{};
     std::thread::id single_core_thread_id{};
@@ -415,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
     return impl->process_list;
 }
 
-Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
-    return impl->global_scheduler;
+Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
+    return *impl->global_scheduler_context;
 }
 
-const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
-    return impl->global_scheduler;
+const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
+    return *impl->global_scheduler_context;
 }
 
-Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
+Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
     return *impl->schedulers[id];
 }
 
-const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
+const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
     return *impl->schedulers[id];
 }
 
@@ -451,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
     return impl->cores[core_id];
 }
 
-Kernel::Scheduler& KernelCore::CurrentScheduler() {
+Kernel::KScheduler* KernelCore::CurrentScheduler() {
     u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return *impl->schedulers[core_id];
-}
-
-const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return *impl->schedulers[core_id];
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        // This is expected when not called from a guest thread
+        return {};
+    }
+    return impl->schedulers[core_id].get();
 }
 
 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a73a93039..5846c3f39 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,12 +35,12 @@ class SlabHeap;
 
 class AddressArbiter;
 class ClientPort;
-class GlobalScheduler;
+class GlobalSchedulerContext;
 class HandleTable;
 class PhysicalCore;
 class Process;
 class ResourceLimit;
-class Scheduler;
+class KScheduler;
 class SharedMemory;
 class Synchronization;
 class Thread;
@@ -102,16 +102,16 @@ public:
     const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
 
     /// Gets the sole instance of the global scheduler
-    Kernel::GlobalScheduler& GlobalScheduler();
+    Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
 
     /// Gets the sole instance of the global scheduler
-    const Kernel::GlobalScheduler& GlobalScheduler() const;
+    const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
 
     /// Gets the sole instance of the Scheduler associated with cpu core 'id'
-    Kernel::Scheduler& Scheduler(std::size_t id);
+    Kernel::KScheduler& Scheduler(std::size_t id);
 
     /// Gets the sole instance of the Scheduler associated with cpu core 'id'
-    const Kernel::Scheduler& Scheduler(std::size_t id) const;
+    const Kernel::KScheduler& Scheduler(std::size_t id) const;
 
     /// Gets an instance of the respective physical CPU core.
Kernel::PhysicalCore& PhysicalCore(std::size_t id); @@ -120,10 +120,7 @@ public: const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; /// Gets the sole instance of the Scheduler at the current running core. - Kernel::Scheduler& CurrentScheduler(); - - /// Gets the sole instance of the Scheduler at the current running core. - const Kernel::Scheduler& CurrentScheduler() const; + Kernel::KScheduler* CurrentScheduler(); /// Gets the an instance of the current physical CPU core. Kernel::PhysicalCore& CurrentPhysicalCore(); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 8f6c944d1..6299b1342 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -11,11 +11,11 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/result.h" #include "core/memory.h" @@ -73,7 +73,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, auto& kernel = system.Kernel(); std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); { SchedulerLock lock(kernel); // The mutex address must be 4-byte aligned @@ -156,7 +156,7 @@ ResultCode Mutex::Release(VAddr address) { SchedulerLock lock(kernel); std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); auto [result, new_owner] = Unlock(current_thread, address); diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index d6a5742bd..7fea45f96 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp @@ -7,14 +7,14 @@ #include "core/arm/dynarmic/arm_dynarmic_32.h" #include "core/arm/dynarmic/arm_dynarmic_64.h" #include "core/core.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/scheduler.h" namespace Kernel { PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, - Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts) + Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts) : core_index{core_index}, system{system}, scheduler{scheduler}, interrupts{interrupts}, guard{std::make_unique()} {} @@ -37,17 +37,12 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { void PhysicalCore::Run() { arm_interface->Run(); - arm_interface->ClearExclusiveState(); } void PhysicalCore::Idle() { interrupts[core_index].AwaitInterrupt(); } -void PhysicalCore::Shutdown() { - scheduler.Shutdown(); -} - bool PhysicalCore::IsInterrupted() const { return interrupts[core_index].IsInterrupted(); } diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index 37513130a..b4d3c15de 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -15,7 +15,7 @@ class SpinLock; } namespace Kernel { -class Scheduler; +class KScheduler; } // namespace Kernel namespace Core { @@ -28,7 +28,7 @@ namespace Kernel { class PhysicalCore { public: - PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler, + 
PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts); ~PhysicalCore(); @@ -55,9 +55,6 @@ public: /// Check if this core is interrupted bool IsInterrupted() const; - // Shutdown this physical core. - void Shutdown(); - bool IsInitialized() const { return arm_interface != nullptr; } @@ -82,18 +79,18 @@ public: return core_index; } - Kernel::Scheduler& Scheduler() { + Kernel::KScheduler& Scheduler() { return scheduler; } - const Kernel::Scheduler& Scheduler() const { + const Kernel::KScheduler& Scheduler() const { return scheduler; } private: const std::size_t core_index; Core::System& system; - Kernel::Scheduler& scheduler; + Kernel::KScheduler& scheduler; Core::CPUInterrupts& interrupts; std::unique_ptr guard; std::unique_ptr arm_interface; diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index b17529dee..238c03a13 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -15,13 +15,13 @@ #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/errors.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_block_manager.h" #include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/lock.h" #include "core/memory.h" @@ -314,7 +314,7 @@ void Process::PrepareForTermination() { if (thread->GetOwnerProcess() != this) continue; - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) + if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) continue; // TODO(Subv): When are the other running/ready threads terminated? @@ -325,7 +325,7 @@ void Process::PrepareForTermination() { } }; - stop_threads(system.GlobalScheduler().GetThreadList()); + stop_threads(system.GlobalSchedulerContext().GetThreadList()); FreeTLSRegion(tls_region_address); tls_region_address = 0; diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 6e286419e..927f88fed 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -6,10 +6,10 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/hle/kernel/errors.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" namespace Kernel { diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp deleted file mode 100644 index 9a969fdb5..000000000 --- a/src/core/hle/kernel/scheduler.cpp +++ /dev/null @@ -1,819 +0,0 @@ -// Copyright 2018 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. -// -// SelectThreads, Yield functions originally by TuxSH. -// licensed under GPLv2 or later under exception provided by the author. 
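// (Orientation note, not part of the deleted file: scheduler.cpp implemented the
// old two-level design that this commit removes -- a single GlobalScheduler owning
// the per-core scheduled/suggested multilevel queues, plus a per-core Scheduler
// performing the fiber-based context switches. The replacement is split between
// k_scheduler.cpp and the new global_scheduler_context.cpp later in this series.)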
- -#include -#include -#include -#include -#include - -#include "common/assert.h" -#include "common/bit_util.h" -#include "common/fiber.h" -#include "common/logging/log.h" -#include "core/arm/arm_interface.h" -#include "core/core.h" -#include "core/core_timing.h" -#include "core/cpu_manager.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" -#include "core/hle/kernel/time_manager.h" - -namespace Kernel { - -GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {} - -GlobalScheduler::~GlobalScheduler() = default; - -void GlobalScheduler::AddThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.push_back(std::move(thread)); -} - -void GlobalScheduler::RemoveThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); -} - -u32 GlobalScheduler::SelectThreads() { - ASSERT(is_locked); - const auto update_thread = [](Thread* thread, Scheduler& sched) { - std::scoped_lock lock{sched.guard}; - if (thread != sched.selected_thread_set.get()) { - if (thread == nullptr) { - ++sched.idle_selection_count; - } - sched.selected_thread_set = SharedFrom(thread); - } - const bool reschedule_pending = - sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread); - sched.is_context_switch_pending = reschedule_pending; - std::atomic_thread_fence(std::memory_order_seq_cst); - return reschedule_pending; - }; - if (!is_reselection_pending.load()) { - return 0; - } - std::array top_threads{}; - - u32 idle_cores{}; - - // Step 1: Get top thread in schedule queue. - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - Thread* top_thread = - scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); - if (top_thread != nullptr) { - // TODO(Blinkhawk): Implement Thread Pinning - } else { - idle_cores |= (1U << core); - } - top_threads[core] = top_thread; - } - - while (idle_cores != 0) { - u32 core_id = Common::CountTrailingZeroes32(idle_cores); - - if (!suggested_queue[core_id].empty()) { - std::array migration_candidates{}; - std::size_t num_candidates = 0; - auto iter = suggested_queue[core_id].begin(); - Thread* suggested = nullptr; - // Step 2: Try selecting a suggested thread. - while (iter != suggested_queue[core_id].end()) { - suggested = *iter; - iter++; - s32 suggested_core_id = suggested->GetProcessorID(); - Thread* top_thread = - suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr; - if (top_thread != suggested) { - if (top_thread != nullptr && - top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) { - suggested = nullptr; - break; - // There's a too high thread to do core migration, cancel - } - TransferToCore(suggested->GetPriority(), static_cast(core_id), suggested); - break; - } - suggested = nullptr; - migration_candidates[num_candidates++] = suggested_core_id; - } - // Step 3: Select a suggested thread from another core - if (suggested == nullptr) { - for (std::size_t i = 0; i < num_candidates; i++) { - s32 candidate_core = migration_candidates[i]; - suggested = top_threads[candidate_core]; - auto it = scheduled_queue[candidate_core].begin(); - it++; - Thread* next = it != scheduled_queue[candidate_core].end() ? 
*it : nullptr; - if (next != nullptr) { - TransferToCore(suggested->GetPriority(), static_cast(core_id), - suggested); - top_threads[candidate_core] = next; - break; - } else { - suggested = nullptr; - } - } - } - top_threads[core_id] = suggested; - } - - idle_cores &= ~(1U << core_id); - } - u32 cores_needing_context_switch{}; - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - Scheduler& sched = kernel.Scheduler(core); - ASSERT(top_threads[core] == nullptr || - static_cast(top_threads[core]->GetProcessorID()) == core); - if (update_thread(top_threads[core], sched)) { - cores_needing_context_switch |= (1U << core); - } - } - return cores_needing_context_switch; -} - -bool GlobalScheduler::YieldThread(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should use critical section, etc. - if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - const u32 priority = yielding_thread->GetPriority(); - - // Yield the thread - Reschedule(priority, core_id, yielding_thread); - const Thread* const winner = scheduled_queue[core_id].front(); - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, - // etc. - if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - const u32 priority = yielding_thread->GetPriority(); - - // Yield the thread - Reschedule(priority, core_id, yielding_thread); - - std::array current_threads; - for (std::size_t i = 0; i < current_threads.size(); i++) { - current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); - } - - Thread* next_thread = scheduled_queue[core_id].front(priority); - Thread* winner = nullptr; - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (source_core >= 0) { - if (current_threads[source_core] != nullptr) { - if (thread == current_threads[source_core] || - current_threads[source_core]->GetPriority() < min_regular_priority) { - continue; - } - } - } - if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || - next_thread->GetPriority() < thread->GetPriority()) { - if (thread->GetPriority() <= priority) { - winner = thread; - break; - } - } - } - - if (winner != nullptr) { - if (winner != yielding_thread) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - } - } else { - winner = next_thread; - } - - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, - // etc. 
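    // (Added context, not part of the original file: the three yield entry points
    // map to svcSleepThread's special timeout values -- YieldThread handles 0,
    // YieldThreadAndBalanceLoad handles -1, and this function handles -2; see the
    // SleepType enum in the svc.cpp hunk of this patch.)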
- if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - Thread* winner = nullptr; - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - - // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead - TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); - - // If the core is idle, perform load balancing, excluding the threads that have just used this - // function... - if (scheduled_queue[core_id].empty()) { - // Here, "current_threads" is calculated after the ""yield"", unlike yield -1 - std::array current_threads; - for (std::size_t i = 0; i < current_threads.size(); i++) { - current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); - } - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (source_core < 0 || thread == current_threads[source_core]) { - continue; - } - if (current_threads[source_core] == nullptr || - current_threads[source_core]->GetPriority() >= min_regular_priority) { - winner = thread; - } - break; - } - if (winner != nullptr) { - if (winner != yielding_thread) { - TransferToCore(winner->GetPriority(), static_cast(core_id), winner); - } - } else { - winner = yielding_thread; - } - } else { - winner = scheduled_queue[core_id].front(); - } - - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -void GlobalScheduler::PreemptThreads() { - ASSERT(is_locked); - for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - const u32 priority = preemption_priorities[core_id]; - - if (scheduled_queue[core_id].size(priority) > 0) { - if (scheduled_queue[core_id].size(priority) > 1) { - scheduled_queue[core_id].front(priority)->IncrementYieldCount(); - } - scheduled_queue[core_id].yield(priority); - if (scheduled_queue[core_id].size(priority) > 1) { - scheduled_queue[core_id].front(priority)->IncrementYieldCount(); - } - } - - Thread* current_thread = - scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front(); - Thread* winner = nullptr; - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (thread->GetPriority() != priority) { - continue; - } - if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() - ? nullptr - : scheduled_queue[source_core].front(); - if (next_thread != nullptr && next_thread->GetPriority() < 2) { - break; - } - if (next_thread == thread) { - continue; - } - } - if (current_thread != nullptr && - current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { - winner = thread; - break; - } - } - - if (winner != nullptr) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - current_thread = - winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; - } - - if (current_thread != nullptr && current_thread->GetPriority() > priority) { - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (thread->GetPriority() < priority) { - continue; - } - if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() - ? 
nullptr - : scheduled_queue[source_core].front(); - if (next_thread != nullptr && next_thread->GetPriority() < 2) { - break; - } - if (next_thread == thread) { - continue; - } - } - if (current_thread != nullptr && - current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { - winner = thread; - break; - } - } - - if (winner != nullptr) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - current_thread = winner; - } - } - - is_reselection_pending.store(true, std::memory_order_release); - } -} - -void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, - Core::EmuThreadHandle global_thread) { - u32 current_core = global_thread.host_handle; - bool must_context_switch = global_thread.guest_handle != InvalidHandle && - (current_core < Core::Hardware::NUM_CPU_CORES); - while (cores_pending_reschedule != 0) { - u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); - ASSERT(core < Core::Hardware::NUM_CPU_CORES); - if (!must_context_switch || core != current_core) { - auto& phys_core = kernel.PhysicalCore(core); - phys_core.Interrupt(); - } else { - must_context_switch = true; - } - cores_pending_reschedule &= ~(1U << core); - } - if (must_context_switch) { - auto& core_scheduler = kernel.CurrentScheduler(); - kernel.ExitSVCProfile(); - core_scheduler.TryDoContextSwitch(); - kernel.EnterSVCProfile(); - } -} - -void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - suggested_queue[core].add(thread, priority); -} - -void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - suggested_queue[core].remove(thread, priority); -} - -void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); - scheduled_queue[core].add(thread, priority); -} - -void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); - scheduled_queue[core].add(thread, priority, false); -} - -void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - scheduled_queue[core].remove(thread, priority); - scheduled_queue[core].add(thread, priority); -} - -void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - scheduled_queue[core].remove(thread, priority); -} - -void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { - ASSERT(is_locked); - const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; - const s32 source_core = thread->GetProcessorID(); - if (source_core == destination_core || !schedulable) { - return; - } - thread->SetProcessorID(destination_core); - if (source_core >= 0) { - Unschedule(priority, static_cast(source_core), thread); - } - if (destination_core >= 0) { - Unsuggest(priority, static_cast(destination_core), thread); - Schedule(priority, static_cast(destination_core), thread); - } - if (source_core >= 0) { - Suggest(priority, static_cast(source_core), thread); - } -} - -bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, - const Thread* winner) { - if (current_thread == winner) { - current_thread->IncrementYieldCount(); - return true; - } else { - is_reselection_pending.store(true, std::memory_order_release); - return false; 
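    // (Clarifying note, not part of the original file: the branch above treats the
    // yield as redundant -- the yielding thread is still the winner, so nothing was
    // rescheduled and only its yield count is incremented -- while reaching this
    // point means a different winner was selected, so a reselection pass is flagged.)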
- } -} - -void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { - if (old_flags == thread->scheduling_state) { - return; - } - ASSERT(is_locked); - - if (old_flags == static_cast(ThreadSchedStatus::Runnable)) { - // In this case the thread was running, now it's pausing/exitting - if (thread->processor_id >= 0) { - Unschedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Unsuggest(thread->current_priority, core, thread); - } - } - } else if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { - // The thread is now set to running from being stopped - if (thread->processor_id >= 0) { - Schedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Suggest(thread->current_priority, core, thread); - } - } - } - - SetReselectionPending(); -} - -void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) { - if (thread->scheduling_state != static_cast(ThreadSchedStatus::Runnable)) { - return; - } - ASSERT(is_locked); - if (thread->processor_id >= 0) { - Unschedule(old_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Unsuggest(old_priority, core, thread); - } - } - - if (thread->processor_id >= 0) { - if (thread == kernel.CurrentScheduler().GetCurrentThread()) { - SchedulePrepend(thread->current_priority, static_cast(thread->processor_id), - thread); - } else { - Schedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Suggest(thread->current_priority, core, thread); - } - } - thread->IncrementYieldCount(); - SetReselectionPending(); -} - -void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, - s32 old_core) { - if (thread->scheduling_state != static_cast(ThreadSchedStatus::Runnable) || - thread->current_priority >= THREADPRIO_COUNT) { - return; - } - ASSERT(is_locked); - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (((old_affinity_mask >> core) & 1) != 0) { - if (core == static_cast(old_core)) { - Unschedule(thread->current_priority, core, thread); - } else { - Unsuggest(thread->current_priority, core, thread); - } - } - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (thread->affinity_mask.GetAffinity(core)) { - if (core == static_cast(thread->processor_id)) { - Schedule(thread->current_priority, core, thread); - } else { - Suggest(thread->current_priority, core, thread); - } - } - } - - thread->IncrementYieldCount(); - SetReselectionPending(); -} - -void GlobalScheduler::Shutdown() { - for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - scheduled_queue[core].clear(); - suggested_queue[core].clear(); - } - thread_list.clear(); -} - -void GlobalScheduler::Lock() { - Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); - 
ASSERT(!current_thread.IsInvalid()); - if (current_thread == current_owner) { - ++scope_lock; - } else { - inner_lock.lock(); - is_locked = true; - current_owner = current_thread; - ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); - scope_lock = 1; - } -} - -void GlobalScheduler::Unlock() { - if (--scope_lock != 0) { - ASSERT(scope_lock > 0); - return; - } - u32 cores_pending_reschedule = SelectThreads(); - Core::EmuThreadHandle leaving_thread = current_owner; - current_owner = Core::EmuThreadHandle::InvalidHandle(); - scope_lock = 1; - is_locked = false; - inner_lock.unlock(); - EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread); -} - -Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { - switch_fiber = std::make_shared(std::function(OnSwitch), this); -} - -Scheduler::~Scheduler() = default; - -bool Scheduler::HaveReadyThreads() const { - return system.GlobalScheduler().HaveReadyThreads(core_id); -} - -Thread* Scheduler::GetCurrentThread() const { - if (current_thread) { - return current_thread.get(); - } - return idle_thread.get(); -} - -Thread* Scheduler::GetSelectedThread() const { - return selected_thread.get(); -} - -u64 Scheduler::GetLastContextSwitchTicks() const { - return last_context_switch_time; -} - -void Scheduler::TryDoContextSwitch() { - auto& phys_core = system.Kernel().CurrentPhysicalCore(); - if (phys_core.IsInterrupted()) { - phys_core.ClearInterrupt(); - } - guard.lock(); - if (is_context_switch_pending) { - SwitchContext(); - } else { - guard.unlock(); - } -} - -void Scheduler::OnThreadStart() { - SwitchContextStep2(); -} - -void Scheduler::Unload(Thread* thread) { - if (thread) { - thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - thread->SetIsRunning(false); - if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) { - system.ArmInterface(core_id).ExceptionalExit(); - thread->SetContinuousOnSVC(false); - } - if (!thread->IsHLEThread() && !thread->HasExited()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.SaveContext(thread->GetContext32()); - cpu_core.SaveContext(thread->GetContext64()); - // Save the TPIDR_EL0 system register in case it was modified. 
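    // (Added context, not part of the original file: TPIDR_EL0 is the AArch64 EL0
    // read/write software thread ID register, commonly used by guest code for TLS,
    // which is why it is saved per guest thread here and restored in Reload().)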
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - thread->context_guard.unlock(); - } -} - -void Scheduler::Unload() { - Unload(current_thread.get()); -} - -void Scheduler::Reload(Thread* thread) { - if (thread) { - ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, - "Thread must be runnable."); - - // Cancel any outstanding wakeup events for this thread - thread->SetIsRunning(true); - thread->SetWasRunning(false); - thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - - auto* const thread_owner_process = thread->GetOwnerProcess(); - if (thread_owner_process != nullptr) { - system.Kernel().MakeCurrentProcess(thread_owner_process); - } - if (!thread->IsHLEThread()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.LoadContext(thread->GetContext32()); - cpu_core.LoadContext(thread->GetContext64()); - cpu_core.SetTlsAddress(thread->GetTLSAddress()); - cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - } -} - -void Scheduler::Reload() { - Reload(current_thread.get()); -} - -void Scheduler::SwitchContextStep2() { - // Load context of new thread - Reload(selected_thread.get()); - - TryDoContextSwitch(); -} - -void Scheduler::SwitchContext() { - current_thread_prev = current_thread; - selected_thread = selected_thread_set; - Thread* previous_thread = current_thread_prev.get(); - Thread* new_thread = selected_thread.get(); - current_thread = selected_thread; - - is_context_switch_pending = false; - - if (new_thread == previous_thread) { - guard.unlock(); - return; - } - - Process* const previous_process = system.Kernel().CurrentProcess(); - - UpdateLastContextSwitchTime(previous_thread, previous_process); - - // Save context for previous thread - Unload(previous_thread); - - std::shared_ptr* old_context; - if (previous_thread != nullptr) { - old_context = &previous_thread->GetHostContext(); - } else { - old_context = &idle_thread->GetHostContext(); - } - guard.unlock(); - - Common::Fiber::YieldTo(*old_context, switch_fiber); - /// When a thread wakes up, the scheduler may have changed to other in another core. 
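    // (Added context, not part of the original file: YieldTo suspends this host
    // context into *old_context and resumes switch_fiber, which executes
    // OnSwitch()/SwitchToCurrent() for this core. When the saved context is later
    // resumed -- possibly on a different core -- execution continues below, so the
    // scheduler is re-fetched from the kernel instead of reusing `this`.)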
-    auto& next_scheduler = system.Kernel().CurrentScheduler();
-    next_scheduler.SwitchContextStep2();
-}
-
-void Scheduler::OnSwitch(void* this_scheduler) {
-    Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
-    sched->SwitchToCurrent();
-}
-
-void Scheduler::SwitchToCurrent() {
-    while (true) {
-        {
-            std::scoped_lock lock{guard};
-            selected_thread = selected_thread_set;
-            current_thread = selected_thread;
-            is_context_switch_pending = false;
-        }
-        const auto is_switch_pending = [this] {
-            std::scoped_lock lock{guard};
-            return is_context_switch_pending;
-        };
-        do {
-            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
-                current_thread->context_guard.lock();
-                if (!current_thread->IsRunnable()) {
-                    current_thread->context_guard.unlock();
-                    break;
-                }
-                if (static_cast<std::size_t>(current_thread->GetProcessorID()) != core_id) {
-                    current_thread->context_guard.unlock();
-                    break;
-                }
-            }
-            std::shared_ptr<Common::Fiber>* next_context;
-            if (current_thread != nullptr) {
-                next_context = &current_thread->GetHostContext();
-            } else {
-                next_context = &idle_thread->GetHostContext();
-            }
-            Common::Fiber::YieldTo(switch_fiber, *next_context);
-        } while (!is_switch_pending());
-    }
-}
-
-void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
-    const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
-    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
-    if (thread != nullptr) {
-        thread->UpdateCPUTimeTicks(update_ticks);
-    }
-
-    if (process != nullptr) {
-        process->UpdateCPUTimeTicks(update_ticks);
-    }
-
-    last_context_switch_time = most_recent_switch_ticks;
-}
-
-void Scheduler::Initialize() {
-    std::string name = "Idle Thread Id:" + std::to_string(core_id);
-    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
-    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<s32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
-    idle_thread = std::move(thread_res).Unwrap();
-}
-
-void Scheduler::Shutdown() {
-    current_thread = nullptr;
-    selected_thread = nullptr;
-}
-
-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
-    kernel.GlobalScheduler().Lock();
-}
-
-SchedulerLock::~SchedulerLock() {
-    kernel.GlobalScheduler().Unlock();
-}
-
-SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
-                                             Thread* time_task, s64 nanoseconds)
-    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
-                                                                                   nanoseconds} {
-    event_handle = InvalidHandle;
-}
-
-SchedulerLockAndSleep::~SchedulerLockAndSleep() {
-    if (sleep_cancelled) {
-        return;
-    }
-    auto& time_manager = kernel.TimeManager();
-    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-}
-
-void SchedulerLockAndSleep::Release() {
-    if (sleep_cancelled) {
-        return;
-    }
-    auto& time_manager = kernel.TimeManager();
-    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-    sleep_cancelled = true;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index 68db4a5ef..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
- -#pragma once - -#include -#include -#include -#include - -#include "common/common_types.h" -#include "common/multi_level_queue.h" -#include "common/spin_lock.h" -#include "core/hardware_properties.h" -#include "core/hle/kernel/thread.h" - -namespace Common { -class Fiber; -} - -namespace Core { -class ARM_Interface; -class System; -} // namespace Core - -namespace Kernel { - -class KernelCore; -class Process; -class SchedulerLock; - -class GlobalScheduler final { -public: - explicit GlobalScheduler(KernelCore& kernel); - ~GlobalScheduler(); - - /// Adds a new thread to the scheduler - void AddThread(std::shared_ptr thread); - - /// Removes a thread from the scheduler - void RemoveThread(std::shared_ptr thread); - - /// Returns a list of all threads managed by the scheduler - const std::vector>& GetThreadList() const { - return thread_list; - } - - /// Notify the scheduler a thread's status has changed. - void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags); - - /// Notify the scheduler a thread's priority has changed. - void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority); - - /// Notify the scheduler a thread's core and/or affinity mask has changed. - void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core); - - /** - * Takes care of selecting the new scheduled threads in three steps: - * - * 1. First a thread is selected from the top of the priority queue. If no thread - * is obtained then we move to step two, else we are done. - * - * 2. Second we try to get a suggested thread that's not assigned to any core or - * that is not the top thread in that core. - * - * 3. Third is no suggested thread is found, we do a second pass and pick a running - * thread in another core and swap it with its current thread. - * - * returns the cores needing scheduling. - */ - u32 SelectThreads(); - - bool HaveReadyThreads(std::size_t core_id) const { - return !scheduled_queue[core_id].empty(); - } - - /** - * Takes a thread and moves it to the back of the it's priority list. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThread(Thread* thread); - - /** - * Takes a thread and moves it to the back of the it's priority list. - * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or - * a better priority than the next thread in the core. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThreadAndBalanceLoad(Thread* thread); - - /** - * Takes a thread and moves it out of the scheduling queue. - * and into the suggested queue. If no thread can be scheduled afterwards in that core, - * a suggested thread is obtained instead. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThreadAndWaitForLoadBalancing(Thread* thread); - - /** - * Rotates the scheduling queues of threads at a preemption priority and then does - * some core rebalancing. Preemption priorities can be found in the array - * 'preemption_priorities'. - * - * @note This operation happens every 10ms. 
- */ - void PreemptThreads(); - - u32 CpuCoresCount() const { - return Core::Hardware::NUM_CPU_CORES; - } - - void SetReselectionPending() { - is_reselection_pending.store(true, std::memory_order_release); - } - - bool IsReselectionPending() const { - return is_reselection_pending.load(std::memory_order_acquire); - } - - void Shutdown(); - -private: - friend class SchedulerLock; - - /// Lock the scheduler to the current thread. - void Lock(); - - /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling - /// and reschedules current core if needed. - void Unlock(); - - void EnableInterruptAndSchedule(u32 cores_pending_reschedule, - Core::EmuThreadHandle global_thread); - - /** - * Add a thread to the suggested queue of a cpu core. Suggested threads may be - * picked if no thread is scheduled to run on the core. - */ - void Suggest(u32 priority, std::size_t core, Thread* thread); - - /** - * Remove a thread to the suggested queue of a cpu core. Suggested threads may be - * picked if no thread is scheduled to run on the core. - */ - void Unsuggest(u32 priority, std::size_t core, Thread* thread); - - /** - * Add a thread to the scheduling queue of a cpu core. The thread is added at the - * back the queue in its priority level. - */ - void Schedule(u32 priority, std::size_t core, Thread* thread); - - /** - * Add a thread to the scheduling queue of a cpu core. The thread is added at the - * front the queue in its priority level. - */ - void SchedulePrepend(u32 priority, std::size_t core, Thread* thread); - - /// Reschedule an already scheduled thread based on a new priority - void Reschedule(u32 priority, std::size_t core, Thread* thread); - - /// Unschedules a thread. - void Unschedule(u32 priority, std::size_t core, Thread* thread); - - /** - * Transfers a thread into an specific core. If the destination_core is -1 - * it will be unscheduled from its source code and added into its suggested - * queue. - */ - void TransferToCore(u32 priority, s32 destination_core, Thread* thread); - - bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner); - - static constexpr u32 min_regular_priority = 2; - std::array, Core::Hardware::NUM_CPU_CORES> - scheduled_queue; - std::array, Core::Hardware::NUM_CPU_CORES> - suggested_queue; - std::atomic is_reselection_pending{false}; - - // The priority levels at which the global scheduler preempts threads every 10 ms. They are - // ordered from Core 0 to Core 3. - std::array preemption_priorities = {59, 59, 59, 62}; - - /// Scheduler lock mechanisms. - bool is_locked{}; - std::mutex inner_lock; - std::atomic scope_lock{}; - Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; - - Common::SpinLock global_list_guard{}; - - /// Lists all thread ids that aren't deleted/etc. - std::vector> thread_list; - KernelCore& kernel; -}; - -class Scheduler final { -public: - explicit Scheduler(Core::System& system, std::size_t core_id); - ~Scheduler(); - - /// Returns whether there are any threads that are ready to run. - bool HaveReadyThreads() const; - - /// Reschedules to the next available thread (call after current thread is suspended) - void TryDoContextSwitch(); - - /// The next two are for SingleCore Only. - /// Unload current thread before preempting core. - void Unload(Thread* thread); - void Unload(); - /// Reload current thread after core preemption. 
- void Reload(Thread* thread); - void Reload(); - - /// Gets the current running thread - Thread* GetCurrentThread() const; - - /// Gets the currently selected thread from the top of the multilevel queue - Thread* GetSelectedThread() const; - - /// Gets the timestamp for the last context switch in ticks. - u64 GetLastContextSwitchTicks() const; - - bool ContextSwitchPending() const { - return is_context_switch_pending; - } - - void Initialize(); - - /// Shutdowns the scheduler. - void Shutdown(); - - void OnThreadStart(); - - std::shared_ptr& ControlContext() { - return switch_fiber; - } - - const std::shared_ptr& ControlContext() const { - return switch_fiber; - } - -private: - friend class GlobalScheduler; - - /// Switches the CPU's active thread context to that of the specified thread - void SwitchContext(); - - /// When a thread wakes up, it must run this through it's new scheduler - void SwitchContextStep2(); - - /** - * Called on every context switch to update the internal timestamp - * This also updates the running time ticks for the given thread and - * process using the following difference: - * - * ticks += most_recent_ticks - last_context_switch_ticks - * - * The internal tick timestamp for the scheduler is simply the - * most recent tick count retrieved. No special arithmetic is - * applied to it. - */ - void UpdateLastContextSwitchTime(Thread* thread, Process* process); - - static void OnSwitch(void* this_scheduler); - void SwitchToCurrent(); - - std::shared_ptr current_thread = nullptr; - std::shared_ptr selected_thread = nullptr; - std::shared_ptr current_thread_prev = nullptr; - std::shared_ptr selected_thread_set = nullptr; - std::shared_ptr idle_thread = nullptr; - - std::shared_ptr switch_fiber = nullptr; - - Core::System& system; - u64 last_context_switch_time = 0; - u64 idle_selection_count = 0; - const std::size_t core_id; - - Common::SpinLock guard{}; - - bool is_context_switch_pending = false; -}; - -class SchedulerLock { -public: - [[nodiscard]] explicit SchedulerLock(KernelCore& kernel); - ~SchedulerLock(); - -protected: - KernelCore& kernel; -}; - -class SchedulerLockAndSleep : public SchedulerLock { -public: - explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task, - s64 nanoseconds); - ~SchedulerLockAndSleep(); - - void CancelSleep() { - sleep_cancelled = true; - } - - void Release(); - -private: - Handle& event_handle; - Thread* time_task; - s64 nanoseconds; - bool sleep_cancelled{}; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 8c19f2534..bf2c90028 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -14,9 +14,9 @@ #include "core/hle/kernel/client_session.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/session.h" #include "core/hle/kernel/thread.h" diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 9742aaf4c..2612a6b0d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -24,6 +24,7 @@ #include "core/hle/kernel/client_session.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include 
"core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/memory/page_table.h" @@ -32,7 +33,6 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_types.h" @@ -332,7 +332,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle, /// Makes a blocking IPC call to an OS service. static ResultCode SendSyncRequest(Core::System& system, Handle handle) { - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); + auto& kernel = system.Kernel(); + const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); std::shared_ptr session = handle_table.Get(handle); if (!session) { LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); @@ -341,9 +342,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - auto thread = system.CurrentScheduler().GetCurrentThread(); + auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { - SchedulerLock lock(system.Kernel()); + SchedulerLock lock(kernel); thread->InvalidateHLECallback(); thread->SetStatus(ThreadStatus::WaitIPC); session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); @@ -352,12 +353,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { if (thread->HasHLECallback()) { Handle event_handle = thread->GetHLETimeEvent(); if (event_handle != InvalidHandle) { - auto& time_manager = system.Kernel().TimeManager(); + auto& time_manager = kernel.TimeManager(); time_manager.UnscheduleTimeEvent(event_handle); } { - SchedulerLock lock(system.Kernel()); + SchedulerLock lock(kernel); auto* sync_object = thread->GetHLESyncObject(); sync_object->RemoveWaitingThread(SharedFrom(thread)); } @@ -665,7 +666,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { handle_debug_buffer(info1, info2); - auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); + auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); const auto thread_processor_id = current_thread->GetProcessorID(); system.ArmInterface(static_cast(thread_processor_id)).LogBacktrace(); } @@ -917,7 +918,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha } const auto& core_timing = system.CoreTiming(); - const auto& scheduler = system.CurrentScheduler(); + const auto& scheduler = *system.Kernel().CurrentScheduler(); const auto* const current_thread = scheduler.GetCurrentThread(); const bool same_thread = current_thread == thread.get(); @@ -1085,7 +1086,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act return ERR_INVALID_HANDLE; } - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); return ERR_BUSY; } @@ -1118,7 +1119,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H return ERR_INVALID_HANDLE; } - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current 
running thread"); return ERR_BUSY; } @@ -1475,7 +1476,7 @@ static void ExitProcess(Core::System& system) { current_process->PrepareForTermination(); // Kill the current thread - system.CurrentScheduler().GetCurrentThread()->Stop(); + system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop(); } static void ExitProcess32(Core::System& system) { @@ -1576,8 +1577,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) { static void ExitThread(Core::System& system) { LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); - auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); - system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); + auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); + system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread)); current_thread->Stop(); } @@ -1590,37 +1591,31 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds); enum class SleepType : s64 { - YieldWithoutLoadBalancing = 0, - YieldWithLoadBalancing = -1, + YieldWithoutCoreMigration = 0, + YieldWithCoreMigration = -1, YieldAndWaitForLoadBalancing = -2, }; - auto& scheduler = system.CurrentScheduler(); - auto* const current_thread = scheduler.GetCurrentThread(); - bool is_redundant = false; - + auto& scheduler = *system.Kernel().CurrentScheduler(); if (nanoseconds <= 0) { switch (static_cast(nanoseconds)) { - case SleepType::YieldWithoutLoadBalancing: { - auto pair = current_thread->YieldSimple(); - is_redundant = pair.second; + case SleepType::YieldWithoutCoreMigration: { + scheduler.YieldWithoutCoreMigration(); break; } - case SleepType::YieldWithLoadBalancing: { - auto pair = current_thread->YieldAndBalanceLoad(); - is_redundant = pair.second; + case SleepType::YieldWithCoreMigration: { + scheduler.YieldWithCoreMigration(); break; } case SleepType::YieldAndWaitForLoadBalancing: { - auto pair = current_thread->YieldAndWaitForLoadBalancing(); - is_redundant = pair.second; + scheduler.YieldToAnyThread(); break; } default: UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); } } else { - current_thread->Sleep(nanoseconds); + scheduler.GetCurrentThread()->Sleep(nanoseconds); } } @@ -1656,8 +1651,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); auto& kernel = system.Kernel(); Handle event_handle; - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); - auto* const current_process = system.Kernel().CurrentProcess(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); + auto* const current_process = kernel.CurrentProcess(); { SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); const auto& handle_table = current_process->GetHandleTable(); @@ -2627,7 +2622,7 @@ void Call(Core::System& system, u32 immediate) { auto& kernel = system.Kernel(); kernel.EnterSVCProfile(); - auto* thread = system.CurrentScheduler().GetCurrentThread(); + auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); thread->SetContinuousOnSVC(true); const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? 
GetSVCInfo64(immediate) diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 8b875d853..342fb4516 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -5,8 +5,8 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/kernel/thread.h" @@ -37,7 +37,7 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const { std::pair<ResultCode, Handle> Synchronization::WaitFor( std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { auto& kernel = system.Kernel(); - auto* const thread = system.CurrentScheduler().GetCurrentThread(); + auto* const thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 38b4a0987..804e07f2b 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -17,10 +17,10 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -186,9 +186,11 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy thread->status = ThreadStatus::Dormant; thread->entry_point = entry_point; thread->stack_top = stack_top; + thread->disable_count = 1; thread->tpidr_el0 = 0; thread->nominal_priority = thread->current_priority = priority; - thread->last_running_ticks = 0; + thread->schedule_count = -1; + thread->last_scheduled_tick = 0; thread->processor_id = processor_id; thread->ideal_core = processor_id; thread->affinity_mask.SetAffinity(processor_id, true); @@ -201,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy thread->owner_process = owner_process; thread->type = type_flags; if ((type_flags & THREADTYPE_IDLE) == 0) { - auto& scheduler = kernel.GlobalScheduler(); + auto& scheduler = kernel.GlobalSchedulerContext(); scheduler.AddThread(thread); } if (owner_process) { @@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) { return RESULT_SUCCESS; } -std::pair<ResultCode, bool> Thread::YieldSimple() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThread(this); - } - return {RESULT_SUCCESS, is_redundant}; -} - -std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this); - } - return {RESULT_SUCCESS, is_redundant}; -} - -std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this); - } - return {RESULT_SUCCESS, is_redundant}; -} - void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { const u32 old_state = scheduling_state; pausing_state |= static_cast<u32>(flag); const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); scheduling_state = base_scheduling | pausing_state; - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { @@ -442,19 +417,20 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { pausing_state &= ~static_cast<u32>(flag); const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); scheduling_state = base_scheduling | pausing_state; - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { const u32 old_state = scheduling_state; scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | static_cast<u32>(new_status); - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::SetCurrentPriority(u32 new_priority) { const u32 old_priority = std::exchange(current_priority, new_priority); - kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); + KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(), + old_priority); } ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { @@ -480,10 +456,10 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { if (use_override) { ideal_core_override = new_core; } else { - const auto old_affinity_mask = affinity_mask.GetAffinityMask(); + const auto old_affinity_mask = affinity_mask; affinity_mask.SetAffinityMask(new_affinity_mask); ideal_core = new_core; - if (old_affinity_mask != new_affinity_mask) { + if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) { const s32 old_core = processor_id; if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { if (static_cast<s32>(ideal_core) < 0) { @@ -493,7 +469,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { processor_id = ideal_core; } } - kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); + KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core); } } return RESULT_SUCCESS;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 5192ecff1..f1aa358a4 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -28,10 +28,10 @@ class System; namespace Kernel { -class GlobalScheduler; +class GlobalSchedulerContext; class KernelCore; class Process; -class Scheduler; +class KScheduler; enum ThreadPriority : u32 { THREADPRIO_HIGHEST = 0, ///< Highest thread priority @@ -346,8 +346,11 @@ public: void SetStatus(ThreadStatus new_status); - u64 GetLastRunningTicks() const { - return last_running_ticks; + constexpr s64 GetLastScheduledTick() const { + return this->last_scheduled_tick; + } + constexpr void SetLastScheduledTick(s64 tick) { + this->last_scheduled_tick = tick; } u64 GetTotalCPUTimeTicks() const { @@ -362,10 +365,18 @@ public: return processor_id; } + s32 GetActiveCore() const { + return GetProcessorID(); + } + void SetProcessorID(s32 new_core) { processor_id = new_core; } + void SetActiveCore(s32 new_core) { + processor_id = new_core; + } + Process* GetOwnerProcess() { return owner_process; } @@ -479,21 +490,11 @@ public: /// Sleeps this thread for the given amount of nanoseconds. ResultCode Sleep(s64 nanoseconds); - /// Yields this thread without rebalancing loads. - std::pair<ResultCode, bool> YieldSimple(); - - /// Yields this thread and does a load rebalancing. - std::pair<ResultCode, bool> YieldAndBalanceLoad(); - - /// Yields this thread and if the core is left idle, loads are rebalanced - std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing(); - - void IncrementYieldCount() { - yield_count++; + constexpr s64 GetYieldScheduleCount() const { + return this->schedule_count; } - - u64 GetYieldCount() const { - return yield_count; + constexpr void SetYieldScheduleCount(s64 count) { + this->schedule_count = count; } ThreadSchedStatus GetSchedulingStatus() const { @@ -569,9 +570,62 @@ public: return has_exited; } + struct QueueEntry { + private: + Thread* prev; + Thread* next; + + public: + constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ + } + + constexpr void Initialize() { + this->prev = nullptr; + this->next = nullptr; + } + + constexpr Thread* GetPrev() const { + return this->prev; + } + constexpr Thread* GetNext() const { + return this->next; + } + constexpr void SetPrev(Thread* t) { + this->prev = t; + } + constexpr void SetNext(Thread* t) { + this->next = t; + } + }; + + constexpr QueueEntry& GetPriorityQueueEntry(s32 core) { + return this->per_core_priority_queue_entry[core]; + } + constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const { + return this->per_core_priority_queue_entry[core]; + } + + s32 GetDisableDispatchCount() const { + return disable_count; + } + + void DisableDispatch() { + ASSERT(GetDisableDispatchCount() >= 0); + disable_count++; + } + + void EnableDispatch() { + ASSERT(GetDisableDispatchCount() > 0); + disable_count--; + } + + ThreadStatus status = ThreadStatus::Dormant; + u32 scheduling_state = 0; + private: - friend class GlobalScheduler; - friend class Scheduler; + friend class GlobalSchedulerContext; + friend class KScheduler; + friend class Process; void SetSchedulingStatus(ThreadSchedStatus new_status); void AddSchedulingFlag(ThreadSchedFlags flag); @@ -586,10 +640,9 @@ private: u64 thread_id = 0; - ThreadStatus status = ThreadStatus::Dormant; - VAddr entry_point = 0; VAddr stack_top = 0; + std::atomic_int disable_count = 0; ThreadType type; @@ -603,9 +656,8 @@ private: u32 current_priority = 0; u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. - u64 last_running_ticks = 0; ///< CPU tick when thread was last running - u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
- ///< a redundant yield is one where no scheduling is changed + s64 schedule_count{}; + s64 last_scheduled_tick{}; s32 processor_id = 0; @@ -647,7 +699,9 @@ private: Handle hle_time_event; SynchronizationObject* hle_object; - Scheduler* scheduler = nullptr; + KScheduler* scheduler = nullptr; + + QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{}; u32 ideal_core{0xFFFFFFFF}; KAffinityMask affinity_mask{}; @@ -655,7 +709,6 @@ private: s32 ideal_core_override = -1; u32 affinity_override_count = 0; - u32 scheduling_state = 0; u32 pausing_state = 0; bool is_running = false; bool is_waiting_on_sync = false;
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index caf329bfb..8e4769694 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -7,8 +7,8 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h"
-- cgit v1.2.3 From 8d3e06349e12e7de17c334619f1f986792d1de4b Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 16:43:18 -0800 Subject: hle: kernel: Separate KScheduler from GlobalSchedulerContext class. --- src/core/hle/kernel/global_scheduler_context.cpp | 55 +++++++++++++++++ src/core/hle/kernel/global_scheduler_context.h | 79 ++++++++++++++++++++++++ src/core/hle/kernel/k_scheduler.cpp | 48 +------------- src/core/hle/kernel/k_scheduler.h | 74 +--------------------- 4 files changed, 138 insertions(+), 118 deletions(-) create mode 100644 src/core/hle/kernel/global_scheduler_context.cpp create mode 100644 src/core/hle/kernel/global_scheduler_context.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp new file mode 100644 index 000000000..40e9adf47 --- /dev/null +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -0,0 +1,55 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <mutex> + +#include "common/assert.h" +#include "core/core.h" +#include "core/hle/kernel/global_scheduler_context.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) + : kernel{kernel}, scheduler_lock{kernel} {} + +GlobalSchedulerContext::~GlobalSchedulerContext() = default; + +void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.push_back(std::move(thread)); +} + +void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), + thread_list.end()); +} + +void GlobalSchedulerContext::PreemptThreads() { + // The priority levels at which the global scheduler preempts threads every 10 ms. They are + // ordered from Core 0 to Core 3. + std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63}; + + ASSERT(IsLocked()); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + const u32 priority = preemption_priorities[core_id]; + kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); + } +} + +bool GlobalSchedulerContext::IsLocked() const { + return scheduler_lock.IsLockedByCurrentThread(); +} + +void GlobalSchedulerContext::Lock() { + scheduler_lock.Lock(); +} + +void GlobalSchedulerContext::Unlock() { + scheduler_lock.Unlock(); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h new file mode 100644 index 000000000..40fe44cc0 --- /dev/null +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -0,0 +1,79 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <atomic> +#include <vector> + +#include "common/common_types.h" +#include "common/spin_lock.h" +#include "core/hardware_properties.h" +#include "core/hle/kernel/k_priority_queue.h" +#include "core/hle/kernel/k_scheduler_lock.h" +#include "core/hle/kernel/thread.h" + +namespace Kernel { + +class KernelCore; +class SchedulerLock; + +using KSchedulerPriorityQueue = + KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>; +static constexpr s32 HighestCoreMigrationAllowedPriority = 2; + +class GlobalSchedulerContext final { + friend class KScheduler; + +public: + explicit GlobalSchedulerContext(KernelCore& kernel); + ~GlobalSchedulerContext(); + + /// Adds a new thread to the scheduler + void AddThread(std::shared_ptr<Thread> thread); + + /// Removes a thread from the scheduler + void RemoveThread(std::shared_ptr<Thread> thread); + + /// Returns a list of all threads managed by the scheduler + const std::vector<std::shared_ptr<Thread>>& GetThreadList() const { + return thread_list; + } + + /** + * Rotates the scheduling queues of threads at a preemption priority and then does + * some core rebalancing. Preemption priorities can be found in the array + * 'preemption_priorities'. + * + * @note This operation happens every 10ms. + */ + void PreemptThreads(); + + /// Returns true if the global scheduler lock is acquired + bool IsLocked() const; + +private: + friend class SchedulerLock; + + /// Lock the scheduler to the current thread. + void Lock(); + + /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling + /// and reschedules current core if needed. + void Unlock(); + + using LockType = KAbstractSchedulerLock<KScheduler>; + + KernelCore& kernel; + + std::atomic_bool scheduler_update_needed{}; + KSchedulerPriorityQueue priority_queue; + LockType scheduler_lock; + + /// Lists all thread ids that aren't deleted/etc. + std::vector<std::shared_ptr<Thread>> thread_list; + Common::SpinLock global_list_guard{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 7f7da610d..c7e2eabd4 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -5,12 +5,6 @@ // This file references various implementation details from Atmosphere, an open-source firmware for // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-#include -#include -#include -#include -#include - #include "common/assert.h" #include "common/bit_util.h" #include "common/fiber.h" @@ -19,10 +13,10 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -34,11 +28,6 @@ static void IncrementScheduledCount(Kernel::Thread* thread) { } } -GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) - : kernel{kernel}, scheduler_lock{kernel} {} - -GlobalSchedulerContext::~GlobalSchedulerContext() = default; - /*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, Core::EmuThreadHandle global_thread) { u32 current_core = global_thread.host_handle; @@ -205,33 +194,6 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { return cores_needing_scheduling; } -void GlobalSchedulerContext::AddThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.push_back(std::move(thread)); -} - -void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); -} - -void GlobalSchedulerContext::PreemptThreads() { - // The priority levels at which the global scheduler preempts threads every 10 ms. They are - // ordered from Core 0 to Core 3. - std::array preemption_priorities = {59, 59, 59, 63}; - - ASSERT(IsLocked()); - for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - const u32 priority = preemption_priorities[core_id]; - kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); - } -} - -bool GlobalSchedulerContext::IsLocked() const { - return scheduler_lock.IsLockedByCurrentThread(); -} - /*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -635,14 +597,6 @@ void KScheduler::YieldToAnyThread() { } } -void GlobalSchedulerContext::Lock() { - scheduler_lock.Lock(); -} - -void GlobalSchedulerContext::Unlock() { - scheduler_lock.Unlock(); -} - KScheduler::KScheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { switch_fiber = std::make_shared(std::function(OnSwitch), this); diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 535ee34b9..7f020d96e 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -8,94 +8,27 @@ #pragma once #include -#include -#include -#include #include "common/common_types.h" -#include "common/multi_level_queue.h" -#include "common/scope_exit.h" #include "common/spin_lock.h" -#include "core/core_timing.h" -#include "core/hardware_properties.h" +#include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_priority_queue.h" #include "core/hle/kernel/k_scheduler_lock.h" -#include "core/hle/kernel/thread.h" namespace Common { class Fiber; } namespace Core { -class ARM_Interface; class System; -} // namespace Core +} namespace Kernel { class KernelCore; class Process; class SchedulerLock; - -using KSchedulerPriorityQueue = - KPriorityQueue; -static constexpr s32 HighestCoreMigrationAllowedPriority = 2; - -class GlobalSchedulerContext 
final { - friend class KScheduler; - -public: - explicit GlobalSchedulerContext(KernelCore& kernel); - ~GlobalSchedulerContext(); - - /// Adds a new thread to the scheduler - void AddThread(std::shared_ptr thread); - - /// Removes a thread from the scheduler - void RemoveThread(std::shared_ptr thread); - - /// Returns a list of all threads managed by the scheduler - const std::vector>& GetThreadList() const { - return thread_list; - } - - /** - * Rotates the scheduling queues of threads at a preemption priority and then does - * some core rebalancing. Preemption priorities can be found in the array - * 'preemption_priorities'. - * - * @note This operation happens every 10ms. - */ - void PreemptThreads(); - - u32 CpuCoresCount() const { - return Core::Hardware::NUM_CPU_CORES; - } - - bool IsLocked() const; - -private: - friend class SchedulerLock; - - /// Lock the scheduler to the current thread. - void Lock(); - - /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling - /// and reschedules current core if needed. - void Unlock(); - - using LockType = KAbstractSchedulerLock; - - KernelCore& kernel; - - std::atomic_bool scheduler_update_needed{}; - KSchedulerPriorityQueue priority_queue; - LockType scheduler_lock; - - /// Lists all thread ids that aren't deleted/etc. - std::vector> thread_list; - Common::SpinLock global_list_guard{}; -}; +class Thread; class KScheduler final { public: @@ -221,7 +154,6 @@ private: /// Switches the CPU's active thread context to that of the specified thread void ScheduleImpl(); - void SwitchThread(Thread* next_thread); /// When a thread wakes up, it must run this through it's new scheduler void SwitchContextStep2(); -- cgit v1.2.3 From 4756cb203e8ef09377988eb1b49ca20ef45f4492 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 21:56:02 -0800 Subject: hle: kernel: Separate KScopedSchedulerLockAndSleep from k_scheduler. 
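
The class being split out implements the scoped "lock and sleep" idiom: the constructor acquires the global scheduler lock and records a timeout, and the destructor registers the wake-up time event (unless the sleep was cancelled) before releasing the lock. A minimal, self-contained sketch of the idiom follows; every name in it is an illustrative stand-in, not the kernel's actual API.

#include <iostream>
#include <mutex>

std::mutex g_scheduler_lock;            // stand-in for the global scheduler lock

void ScheduleWakeEvent(long long ns) {  // stand-in for the time manager's event registration
    std::cout << "wake event registered for +" << ns << " ns\n";
}

class ScopedLockAndSleep {
public:
    explicit ScopedLockAndSleep(long long timeout_ns) : timeout_ns_{timeout_ns} {
        g_scheduler_lock.lock();             // lock the scheduler on construction
    }
    ~ScopedLockAndSleep() {
        if (timeout_ns_ > 0) {
            ScheduleWakeEvent(timeout_ns_);  // register the sleep, unless cancelled
        }
        g_scheduler_lock.unlock();           // always release the scheduler lock
    }
    void CancelSleep() {
        timeout_ns_ = 0;                     // destructor will skip the wake event
    }

private:
    long long timeout_ns_;
};

int main() {
    {
        ScopedLockAndSleep guard{1'000'000}; // wake event registered at scope exit
    }
    {
        ScopedLockAndSleep guard{1'000'000};
        guard.CancelSleep();                 // condition already met: no wake event
    }
}

As in this sketch, the destructor of the class added below registers the time event before releasing the scheduler lock.
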
--- src/core/hle/kernel/address_arbiter.cpp | 5 +- src/core/hle/kernel/global_scheduler_context.cpp | 8 ---- src/core/hle/kernel/global_scheduler_context.h | 10 +--- src/core/hle/kernel/hle_ipc.cpp | 8 ++-- src/core/hle/kernel/k_scheduler.cpp | 25 +--------- src/core/hle/kernel/k_scheduler.h | 19 -------- .../hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 56 ++++++++++++++++++++++ src/core/hle/kernel/svc.cpp | 3 +- src/core/hle/kernel/synchronization.cpp | 3 +- src/core/hle/kernel/thread.cpp | 3 +- 10 files changed, 71 insertions(+), 69 deletions(-) create mode 100644 src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index bc32be18b..ac4913173 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -13,6 +13,7 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -157,7 +158,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 Handle event_handle = InvalidHandle; { - SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); if (current_thread->IsPendingTermination()) { lock.CancelSleep(); @@ -227,7 +228,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t Handle event_handle = InvalidHandle; { - SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); if (current_thread->IsPendingTermination()) { lock.CancelSleep(); diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index 40e9adf47..a9cb48b38 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -44,12 +44,4 @@ bool GlobalSchedulerContext::IsLocked() const { return scheduler_lock.IsLockedByCurrentThread(); } -void GlobalSchedulerContext::Lock() { - scheduler_lock.Lock(); -} - -void GlobalSchedulerContext::Unlock() { - scheduler_lock.Unlock(); -} - } // namespace Kernel diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index 40fe44cc0..39c383746 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -55,15 +55,7 @@ public: private: friend class SchedulerLock; - - /// Lock the scheduler to the current thread. - void Lock(); - - /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling - /// and reschedules current core if needed. 
- void Unlock(); - - using LockType = KAbstractSchedulerLock; + friend class KScopedSchedulerLockAndSleep; KernelCore& kernel; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 7eda89786..e75e80ad0 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -18,6 +18,7 @@ #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" @@ -56,9 +57,9 @@ std::shared_ptr HLERequestContext::SleepClientThread( writable_event = pair.writable; } + Handle event_handle = InvalidHandle; { - Handle event_handle = InvalidHandle; - SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); thread->SetHLECallback( [context = *this, callback](std::shared_ptr thread) mutable -> bool { ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT @@ -74,9 +75,8 @@ std::shared_ptr HLERequestContext::SleepClientThread( thread->SetStatus(ThreadStatus::WaitHLEEvent); thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); readable_event->AddWaitingThread(thread); - lock.Release(); - thread->SetHLETimeEvent(event_handle); } + thread->SetHLETimeEvent(event_handle); is_thread_waiting = true; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index c7e2eabd4..466147498 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -14,6 +14,7 @@ #include "core/core_timing.h" #include "core/cpu_manager.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" @@ -800,28 +801,4 @@ SchedulerLock::~SchedulerLock() { kernel.GlobalSchedulerContext().Unlock(); } -SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, - Thread* time_task, s64 nanoseconds) - : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{ - nanoseconds} { - event_handle = InvalidHandle; -} - -SchedulerLockAndSleep::~SchedulerLockAndSleep() { - if (sleep_cancelled) { - return; - } - auto& time_manager = kernel.TimeManager(); - time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); -} - -void SchedulerLockAndSleep::Release() { - if (sleep_cancelled) { - return; - } - auto& time_manager = kernel.TimeManager(); - time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); - sleep_cancelled = true; -} - } // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 7f020d96e..5ba0f3c32 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -207,23 +207,4 @@ protected: KernelCore& kernel; }; -class SchedulerLockAndSleep : public SchedulerLock { -public: - explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task, - s64 nanoseconds); - ~SchedulerLockAndSleep(); - - void CancelSleep() { - sleep_cancelled = true; - } - - void Release(); - -private: - Handle& event_handle; - Thread* time_task; - s64 nanoseconds; - bool sleep_cancelled{}; -}; - } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h 
b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h new file mode 100644 index 000000000..f11a62216 --- /dev/null +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -0,0 +1,56 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#pragma once + +#include "common/common_types.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/time_manager.h" + +namespace Kernel { + +class KScopedSchedulerLockAndSleep { +private: + KernelCore& kernel; + s64 timeout_tick{}; + Thread* thread{}; + Handle* event_handle{}; + +public: + explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Thread* t, s64 timeout) + : kernel(kernel), timeout_tick(timeout), thread(t) { + /* Lock the scheduler. */ + kernel.GlobalSchedulerContext().scheduler_lock.Lock(); + } + + explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, + s64 timeout) + : kernel(kernel), event_handle(&event_handle), timeout_tick(timeout), thread(t) { + /* Lock the scheduler. */ + kernel.GlobalSchedulerContext().scheduler_lock.Lock(); + } + + ~KScopedSchedulerLockAndSleep() { + /* Register the sleep. */ + if (this->timeout_tick > 0) { + auto& time_manager = kernel.TimeManager(); + Handle handle{}; + time_manager.ScheduleTimeEvent(event_handle ? *event_handle : handle, this->thread, + this->timeout_tick); + } + + /* Unlock the scheduler. */ + kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); + } + + void CancelSleep() { + this->timeout_tick = 0; + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 2612a6b0d..2760a307c 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -25,6 +25,7 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/memory/page_table.h" @@ -1654,7 +1655,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); auto* const current_process = kernel.CurrentProcess(); { - SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); const auto& handle_table = current_process->GetHandleTable(); std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); ASSERT(thread); diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 342fb4516..6651ad90c 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -6,6 +6,7 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/kernel/thread.h" @@ -40,7 +41,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor( auto* const thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle =
InvalidHandle; { - SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); const auto itr = std::find_if(sync_objects.begin(), sync_objects.end(), [thread](const std::shared_ptr& object) { diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 804e07f2b..6f89238ca 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -18,6 +18,7 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" @@ -393,7 +394,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) { ResultCode Thread::Sleep(s64 nanoseconds) { Handle event_handle{}; { - SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); SetStatus(ThreadStatus::WaitSleep); } -- cgit v1.2.3 From ccce6cb3be062fc7ae162bed32202538ebc2e3d9 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 22:26:42 -0800 Subject: hle: kernel: Migrate to KScopedSchedulerLock. --- src/core/hle/kernel/address_arbiter.cpp | 10 +++---- src/core/hle/kernel/global_scheduler_context.h | 12 +++++++- src/core/hle/kernel/k_scheduler.cpp | 15 ++++------ src/core/hle/kernel/k_scheduler.h | 10 +++---- src/core/hle/kernel/k_scoped_lock.h | 39 ++++++++++++++++++++++++++ src/core/hle/kernel/kernel.cpp | 4 +-- src/core/hle/kernel/mutex.cpp | 6 ++-- src/core/hle/kernel/process.cpp | 8 +++--- src/core/hle/kernel/readable_event.cpp | 2 +- src/core/hle/kernel/server_session.cpp | 2 +- src/core/hle/kernel/svc.cpp | 8 +++--- src/core/hle/kernel/synchronization.cpp | 4 +-- src/core/hle/kernel/thread.cpp | 17 ++++++----- src/core/hle/kernel/time_manager.cpp | 2 +- 14 files changed, 91 insertions(+), 48 deletions(-) create mode 100644 src/core/hle/kernel/k_scoped_lock.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index ac4913173..20ffa7d47 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -59,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v } ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); const std::vector> waiting_threads = GetThreadsWaitingOnAddress(address); WakeThreads(waiting_threads, num_to_wake); @@ -68,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); auto& memory = system.Memory(); // Ensure that we can write to the address. @@ -93,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); auto& memory = system.Memory(); // Ensure that we can write to the address. 
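
The KScopedSchedulerLock taken above is a thin RAII wrapper over the concept-constrained KScopedLock that this commit adds in k_scoped_lock.h (see further down in this patch). A minimal, self-contained sketch of that locking idiom, using illustrative stand-in names rather than the kernel's actual types:

// Requires C++20. Any type with void Lock()/Unlock() can be guarded.
#include <concepts>
#include <iostream>
#include <type_traits>

template <typename T>
concept Lockable = !std::is_reference_v<T> && requires(T& t) {
    { t.Lock() } -> std::same_as<void>;
    { t.Unlock() } -> std::same_as<void>;
};

class ToyLock {  // toy lock type satisfying the concept
public:
    void Lock() { std::cout << "Lock()\n"; }
    void Unlock() { std::cout << "Unlock()\n"; }
};

template <Lockable T>
class ScopedLock {
public:
    explicit ScopedLock(T& lock) : lock_ptr{&lock} { lock_ptr->Lock(); }
    ~ScopedLock() { lock_ptr->Unlock(); }
    ScopedLock(const ScopedLock&) = delete;
    ScopedLock& operator=(const ScopedLock&) = delete;

private:
    T* lock_ptr;
};

int main() {
    ToyLock lock;
    ScopedLock guard{lock};  // Lock() now; Unlock() when guard leaves scope
}

The kernel's version likewise rejects reference types in its KLockable concept, as seen in k_scoped_lock.h below.
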
@@ -211,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (current_thread->IsWaitingForArbitration()) { RemoveThread(SharedFrom(current_thread)); current_thread->WaitForArbitration(false); @@ -266,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (current_thread->IsWaitingForArbitration()) { RemoveThread(SharedFrom(current_thread)); current_thread->WaitForArbitration(false); diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index 39c383746..c4bc23eed 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -27,6 +27,8 @@ class GlobalSchedulerContext final { friend class KScheduler; public: + using LockType = KAbstractSchedulerLock; + explicit GlobalSchedulerContext(KernelCore& kernel); ~GlobalSchedulerContext(); @@ -53,8 +55,16 @@ public: /// Returns true if the global scheduler lock is acquired bool IsLocked() const; + LockType& SchedulerLock() { + return scheduler_lock; + } + + const LockType& SchedulerLock() const { + return scheduler_lock; + } + private: - friend class SchedulerLock; + friend class KScopedSchedulerLock; friend class KScopedSchedulerLockAndSleep; KernelCore& kernel; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 466147498..9645fee22 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -410,7 +410,7 @@ void KScheduler::YieldWithoutCoreMigration() { /* Perform the yield. */ { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { @@ -451,7 +451,7 @@ void KScheduler::YieldWithCoreMigration() { /* Perform the yield. */ { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { @@ -541,7 +541,7 @@ void KScheduler::YieldToAnyThread() { /* Perform the yield. 
*/ { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { @@ -793,12 +793,9 @@ void KScheduler::Initialize() { } } -SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} { - kernel.GlobalSchedulerContext().Lock(); -} +KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) + : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {} -SchedulerLock::~SchedulerLock() { - kernel.GlobalSchedulerContext().Unlock(); -} +KScopedSchedulerLock::~KScopedSchedulerLock() = default; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 5ba0f3c32..d52ecc0db 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -14,6 +14,7 @@ #include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_priority_queue.h" #include "core/hle/kernel/k_scheduler_lock.h" +#include "core/hle/kernel/k_scoped_lock.h" namespace Common { class Fiber; @@ -198,13 +199,10 @@ private: Common::SpinLock guard{}; }; -class SchedulerLock { +class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { public: - [[nodiscard]] explicit SchedulerLock(KernelCore& kernel); - ~SchedulerLock(); - -protected: - KernelCore& kernel; + explicit KScopedSchedulerLock(KernelCore& kernel); + ~KScopedSchedulerLock(); }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h new file mode 100644 index 000000000..03320859f --- /dev/null +++ b/src/core/hle/kernel/k_scoped_lock.h @@ -0,0 +1,39 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#pragma once + +#include "common/common_types.h" + +namespace Kernel { + +template <typename T> +concept KLockable = !std::is_reference<T>::value && requires(T & t) { + { t.Lock() } + ->std::same_as<void>; + { t.Unlock() } + ->std::same_as<void>; +}; + +template <typename T> +requires KLockable<T> class KScopedLock : NonCopyable { + +private: + T* lock_ptr; + +public: + explicit KScopedLock(T* l) : lock_ptr(l) { + this->lock_ptr->Lock(); + } + explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */ + } + ~KScopedLock() { + this->lock_ptr->Unlock(); + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index b74e34c40..04cae3a43 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -146,7 +146,7 @@ struct KernelCore::Impl { preemption_event = Core::Timing::CreateEvent( "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); global_scheduler_context->PreemptThreads(); } const auto time_interval = std::chrono::nanoseconds{ @@ -612,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const { void KernelCore::Suspend(bool in_suspention) { const bool should_suspend = exception_exited || in_suspention; { - SchedulerLock lock(*this); + KScopedSchedulerLock lock(*this); ThreadStatus status = should_suspend ?
ThreadStatus::Ready : ThreadStatus::WaitSleep; for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { impl->suspend_threads[i]->SetStatus(status); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 6299b1342..4f8075e0e 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -75,7 +75,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, std::shared_ptr current_thread = SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); // The mutex address must be 4-byte aligned if ((address % sizeof(u32)) != 0) { return ERR_INVALID_ADDRESS; @@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* owner = current_thread->GetLockOwner(); if (owner != nullptr) { owner->RemoveMutexWaiter(current_thread); @@ -153,7 +153,7 @@ std::pair> Mutex::Unlock(std::shared_ptr current_thread = SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 238c03a13..b905b486a 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, auto& kernel = system.Kernel(); // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires { - SchedulerLock lock{kernel}; + KScopedSchedulerLock lock{kernel}; thread->SetStatus(ThreadStatus::Ready); } } @@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) { } ResultCode Process::ClearSignalState() { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); if (status == ProcessStatus::Exited) { LOG_ERROR(Kernel, "called on a terminated process instance."); return ERR_INVALID_STATE; @@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector& tls_pages) { } VAddr Process::CreateTLSRegion() { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; tls_page_iter != tls_pages.cend()) { return *tls_page_iter->ReserveSlot(); @@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() { } void Process::FreeTLSRegion(VAddr tls_address) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); auto iter = std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 927f88fed..cea262ce0 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -39,7 +39,7 @@ void ReadableEvent::Clear() { } ResultCode ReadableEvent::Reset() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (!is_signaled) { LOG_TRACE(Kernel, "Handle is not signaled! 
object_id={}, object_type={}, object_name={}", GetObjectId(), GetTypeName(), GetName()); diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index bf2c90028..78e41b13e 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -171,7 +171,7 @@ ResultCode ServerSession::CompleteSyncRequest() { // Some service requests require the thread to block { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (!context.IsThreadWaiting()) { context.GetThread().ResumeFromWait(); context.GetThread().SetSynchronizationResults(nullptr, result); diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 2760a307c..30d60aeb6 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -345,7 +345,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); thread->InvalidateHLECallback(); thread->SetStatus(ThreadStatus::WaitIPC); session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); @@ -359,7 +359,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* sync_object = thread->GetHLESyncObject(); sync_object->RemoveWaitingThread(SharedFrom(thread)); } @@ -1691,7 +1691,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* owner = current_thread->GetLockOwner(); if (owner != nullptr) { @@ -1724,7 +1724,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ // Retrieve a list of all threads that are waiting for this condition variable. 
auto& kernel = system.Kernel(); - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* const current_process = kernel.CurrentProcess(); std::vector> waiting_threads = current_process->GetConditionVariableThreads(condition_variable_addr); diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 6651ad90c..d3f520ea2 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -19,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {} void Synchronization::SignalObject(SynchronizationObject& obj) const { auto& kernel = system.Kernel(); - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (obj.IsSignaled()) { for (auto thread : obj.GetWaitingThreads()) { if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { @@ -90,7 +90,7 @@ std::pair Synchronization::WaitFor( } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); ResultCode signaling_result = thread->GetSignalingResult(); SynchronizationObject* signaling_object = thread->GetSignalingObject(); thread->SetSynchronizationObjects(nullptr); diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 6f89238ca..a4f9e0d97 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -51,7 +51,7 @@ Thread::~Thread() = default; void Thread::Stop() { { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Dead); Signal(); kernel.GlobalHandleTable().Close(global_handle); @@ -68,7 +68,7 @@ void Thread::Stop() { } void Thread::ResumeFromWait() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); switch (status) { case ThreadStatus::Paused: case ThreadStatus::WaitSynch: @@ -100,19 +100,18 @@ void Thread::ResumeFromWait() { } void Thread::OnWakeUp() { - SchedulerLock lock(kernel); - + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Ready); } ResultCode Thread::Start() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Ready); return RESULT_SUCCESS; } void Thread::CancelWait() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { is_sync_cancelled = true; return; @@ -228,7 +227,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy } void Thread::SetPriority(u32 priority) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, "Invalid priority value."); nominal_priority = priority; @@ -365,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr thread) { } ResultCode Thread::SetActivity(ThreadActivity value) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto sched_status = GetSchedulingStatus(); @@ -435,7 +434,7 @@ void Thread::SetCurrentPriority(u32 new_priority) { } ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); const auto HighestSetCore = [](u64 mask, u32 max_cores) { for (s32 core = static_cast(max_cores - 1); core >= 0; core--) { if (((mask >> core) & 1) != 0) { diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 8e4769694..aea53cdb0 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -18,7 +18,7 @@ 
TimeManager::TimeManager(Core::System& system_) : system{system_} { time_manager_event_type = Core::Timing::CreateEvent( "Kernel::TimeManagerCallback", [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { - const SchedulerLock lock(system.Kernel()); + const KScopedSchedulerLock lock(system.Kernel()); const auto proper_handle = static_cast(thread_handle); if (cancelled_events[proper_handle]) { return; -- cgit v1.2.3 From b9b7e4f915ffc47836342a8741fe8e46a3bd619c Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 22:43:35 -0800 Subject: kernel: time_manager: Add missing lock guards. --- src/core/hle/kernel/time_manager.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index aea53cdb0..79628e2b4 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -20,10 +20,16 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { const KScopedSchedulerLock lock(system.Kernel()); const auto proper_handle = static_cast(thread_handle); - if (cancelled_events[proper_handle]) { - return; + + std::shared_ptr thread; + { + std::lock_guard lock{mutex}; + if (cancelled_events[proper_handle]) { + return; + } + thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); } - auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); + if (thread) { // Thread can be null if process has exited thread->OnWakeUp(); @@ -56,6 +62,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) { } void TimeManager::CancelTimeEvent(Thread* time_task) { + std::lock_guard lock{mutex}; const Handle event_handle = time_task->GetGlobalHandle(); UnscheduleTimeEvent(event_handle); } -- cgit v1.2.3 From bc59ca92b6540efbcf45baab79d0f498eb103f30 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 22:44:06 -0800 Subject: kernel: KScopedSchedulerLockAndSleep: Remove unused ctor. --- .../hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index f11a62216..16f470923 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -8,6 +8,7 @@ #pragma once #include "common/common_types.h" +#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -17,20 +18,16 @@ namespace Kernel { class KScopedSchedulerLockAndSleep { private: KernelCore& kernel; - s64 timeout_tick{}; + Handle& event_handle; Thread* thread{}; - Handle* event_handle{}; + s64 timeout_tick{}; public: - explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Thread* t, s64 timeout) - : kernel(kernel), timeout_tick(timeout), thread(t) { - /* Lock the scheduler. 
*/ - kernel.GlobalSchedulerContext().scheduler_lock.Lock(); - } - explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, s64 timeout) - : kernel(kernel), event_handle(&event_handle), timeout_tick(timeout), thread(t) { + : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) { + event_handle = InvalidHandle; + /* Lock the scheduler. */ kernel.GlobalSchedulerContext().scheduler_lock.Lock(); } @@ -38,10 +35,7 @@ public: ~KScopedSchedulerLockAndSleep() { /* Register the sleep. */ if (this->timeout_tick > 0) { - auto& time_manager = kernel.TimeManager(); - Handle handle{}; - time_manager.ScheduleTimeEvent(event_handle ? *event_handle : handle, this->thread, - this->timeout_tick); + kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); } /* Unlock the scheduler. */ -- cgit v1.2.3 From b1326d9230f7c1f33960d2816042b1a41d3b839f Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:37:35 -0800 Subject: hle: kernel: Use C++ style comments in KScheduler, etc. --- src/core/hle/kernel/k_priority_queue.h | 63 ++++--- src/core/hle/kernel/k_scheduler.cpp | 203 ++++++++++----------- src/core/hle/kernel/k_scheduler_lock.h | 16 +- .../hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 6 +- 4 files changed, 136 insertions(+), 152 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index 88e4e1ed4..d374f5c00 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -87,15 +87,15 @@ public: } constexpr bool PushBack(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entry associated with the end of the queue. */ + // Get the entry associated with the end of the queue. Member* tail = this->root[core].GetPrev(); Entry& tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; - /* Link the entries. */ + // Link the entries. member_entry.SetPrev(tail); member_entry.SetNext(nullptr); tail_entry.SetNext(member); @@ -105,15 +105,15 @@ public: } constexpr bool PushFront(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entry associated with the front of the queue. */ + // Get the entry associated with the front of the queue. Member* head = this->root[core].GetNext(); Entry& head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; - /* Link the entries. */ + // Link the entries. member_entry.SetPrev(nullptr); member_entry.SetNext(head); head_entry.SetPrev(member); @@ -123,10 +123,10 @@ public: } constexpr bool Remove(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entries associated with next and prev. */ + // Get the entries associated with next and prev. Member* prev = member_entry.GetPrev(); Member* next = member_entry.GetNext(); Entry& prev_entry = @@ -134,7 +134,7 @@ public: Entry& next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; - /* Unlink. */ + // Unlink. 
prev_entry.SetNext(next); next_entry.SetPrev(prev); @@ -152,8 +152,7 @@ public: Common::BitSet64 available_priorities[NumCores]; public: - constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ - } + constexpr KPriorityQueueImpl() : queues(), available_priorities() {} constexpr void PushBack(s32 priority, s32 core, Member* member) { ASSERT(IsValidCore(core)); @@ -267,14 +266,14 @@ private: constexpr void PushBack(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Push onto the scheduled queue for its core, if we can. */ + // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.PushBack(priority, core, member); ClearAffinityBit(affinity, core); } - /* And suggest the thread for all other cores. */ + // And suggest the thread for all other cores. while (affinity) { this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); } @@ -283,15 +282,15 @@ private: constexpr void PushFront(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Push onto the scheduled queue for its core, if we can. */ + // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.PushFront(priority, core, member); ClearAffinityBit(affinity, core); } - /* And suggest the thread for all other cores. */ - /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + // And suggest the thread for all other cores. + // Note: Nintendo pushes onto the back of the suggested queue, not the front. while (affinity) { this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); } @@ -300,24 +299,24 @@ private: constexpr void Remove(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Remove from the scheduled queue for its core. */ + // Remove from the scheduled queue for its core. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.Remove(priority, core, member); ClearAffinityBit(affinity, core); } - /* Remove from the suggested queue for all other cores. */ + // Remove from the suggested queue for all other cores. while (affinity) { this->suggested_queue.Remove(priority, GetNextCore(affinity), member); } } public: - constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ + constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ... } - /* Getters. */ + // Getters. constexpr Member* GetScheduledFront(s32 core) const { return this->scheduled_queue.GetFront(core); } @@ -346,7 +345,7 @@ public: return member->GetPriorityQueueEntry(core).GetNext(); } - /* Mutators. */ + // Mutators. constexpr void PushBack(Member* member) { this->PushBack(member->GetPriority(), member); } @@ -364,15 +363,15 @@ public: member); } - /* First class fancy operations. */ + // First class fancy operations. constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { ASSERT(IsValidPriority(prev_priority)); - /* Remove the member from the queues. */ + // Remove the member from the queues. const s32 new_priority = member->GetPriority(); this->Remove(prev_priority, member); - /* And enqueue. If the member is running, we want to keep it running. */ + // And enqueue. 
If the member is running, we want to keep it running. if (is_running) { this->PushFront(new_priority, member); } else { @@ -382,12 +381,12 @@ public: constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, Member* member) { - /* Get the new information. */ + // Get the new information. const s32 priority = member->GetPriority(); const AffinityMaskType& new_affinity = member->GetAffinityMask(); const s32 new_core = member->GetActiveCore(); - /* Remove the member from all queues it was in before. */ + // Remove the member from all queues it was in before. for (s32 core = 0; core < static_cast(NumCores); core++) { if (prev_affinity.GetAffinity(core)) { if (core == prev_core) { @@ -398,7 +397,7 @@ public: } } - /* And add the member to all queues it should be in now. */ + // And add the member to all queues it should be in now. for (s32 core = 0; core < static_cast(NumCores); core++) { if (new_affinity.GetAffinity(core)) { if (core == new_core) { @@ -411,18 +410,18 @@ public: } constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { - /* Get the new information. */ + // Get the new information. const s32 new_core = member->GetActiveCore(); const s32 priority = member->GetPriority(); - /* We don't need to do anything if the core is the same. */ + // We don't need to do anything if the core is the same. if (prev_core != new_core) { - /* Remove from the scheduled queue for the previous core. */ + // Remove from the scheduled queue for the previous core. if (prev_core >= 0) { this->scheduled_queue.Remove(priority, prev_core, member); } - /* Remove from the suggested queue and add to the scheduled queue for the new core. */ + // Remove from the suggested queue and add to the scheduled queue for the new core. if (new_core >= 0) { this->suggested_queue.Remove(priority, new_core, member); if (to_front) { @@ -432,7 +431,7 @@ public: } } - /* Add to the suggested queue for the previous core. */ + // Add to the suggested queue for the previous core. if (prev_core >= 0) { this->suggested_queue.PushBack(priority, prev_core, member); } diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 9645fee22..cc2f8ef0e 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -84,35 +84,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { /*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* Clear that we need to update. */ + // Clear that we need to update. ClearSchedulerUpdateNeeded(kernel); u64 cores_needing_scheduling = 0, idle_cores = 0; Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; auto& priority_queue = GetPriorityQueue(kernel); - /* We want to go over all cores, finding the highest priority thread and determining if - * scheduling is needed for that core. */ + /// We want to go over all cores, finding the highest priority thread and determining if + /// scheduling is needed for that core. for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); if (top_thread != nullptr) { - ///* If the thread has no waiters, we need to check if the process has a thread pinned. 
- ///*/ - // if (top_thread->GetNumKernelWaiters() == 0) { - // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { - // if (Thread* pinned = parent->GetPinnedThread(core_id); - // pinned != nullptr && pinned != top_thread) { - // /* We prefer our parent's pinned thread if possible. However, we also - // don't - // * want to schedule un-runnable threads. */ - // if (pinned->GetRawState() == Thread::ThreadState_Runnable) { - // top_thread = pinned; - // } else { - // top_thread = nullptr; - // } - // } - // } - //} + // If the thread has no waiters, we need to check if the process has a thread pinned. + // TODO(bunnei): Implement thread pinning } else { idle_cores |= (1ULL << core_id); } @@ -122,27 +107,27 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); } - /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */ + // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. while (idle_cores != 0) { u32 core_id = Common::CountTrailingZeroes64(idle_cores); if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; size_t num_candidates = 0; - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_thread = (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; top_thread != suggested) { - /* Make sure we're not dealing with threads too high priority for migration. */ + // Make sure we're not dealing with threads too high priority for migration. if (top_thread != nullptr && top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { break; } - /* The suggested thread isn't bound to its core, so we can migrate it! */ + // The suggested thread isn't bound to its core, so we can migrate it! suggested->SetActiveCore(core_id); priority_queue.ChangeCore(suggested_core, suggested); @@ -152,30 +137,30 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { break; } - /* Note this core as a candidate for migration. */ + // Note this core as a candidate for migration. ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); migration_candidates[num_candidates++] = suggested_core; suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our - * candidate cores' top threads. */ + // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our + // candidate cores' top threads. if (suggested == nullptr) { for (size_t i = 0; i < num_candidates; i++) { - /* Check if there's some other thread that can run on the candidate core. */ + // Check if there's some other thread that can run on the candidate core. const s32 candidate_core = migration_candidates[i]; suggested = top_threads[candidate_core]; if (Thread* next_on_candidate_core = priority_queue.GetScheduledNext(candidate_core, suggested); next_on_candidate_core != nullptr) { - /* The candidate core can run some other thread! We'll migrate its current - * top thread to us. */ + // The candidate core can run some other thread! 
We'll migrate its current + // top thread to us. top_threads[candidate_core] = next_on_candidate_core; cores_needing_scheduling |= kernel.Scheduler(candidate_core) .UpdateHighestPriorityThread(top_threads[candidate_core]); - /* Perform the migration. */ + // Perform the migration. suggested->SetActiveCore(core_id); priority_queue.ChangeCore(candidate_core, suggested); @@ -199,20 +184,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* Check if the state has changed, because if it hasn't there's nothing to do. */ + // Check if the state has changed, because if it hasn't there's nothing to do. const auto cur_state = thread->scheduling_state; if (cur_state == old_state) { return; } - /* Update the priority queues. */ + // Update the priority queues. if (old_state == static_cast(ThreadSchedStatus::Runnable)) { - /* If we were previously runnable, then we're not runnable now, and we should remove. */ + // If we were previously runnable, then we're not runnable now, and we should remove. GetPriorityQueue(kernel).Remove(thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); } else if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* If we're now runnable, then we weren't previously, and we should add. */ + // If we're now runnable, then we weren't previously, and we should add. GetPriorityQueue(kernel).PushBack(thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); @@ -224,7 +209,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* If the thread is runnable, we want to change its priority in the queue. */ + // If the thread is runnable, we want to change its priority in the queue. if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { GetPriorityQueue(kernel).ChangePriority( old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); @@ -238,7 +223,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { s32 old_core) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* If the thread is runnable, we want to change its affinity in the queue. */ + // If the thread is runnable, we want to change its affinity in the queue. if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); IncrementScheduledCount(thread); @@ -249,11 +234,11 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { ASSERT(system.GlobalSchedulerContext().IsLocked()); - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& kernel = system.Kernel(); auto& priority_queue = GetPriorityQueue(kernel); - /* Rotate the front of the queue to the end. */ + // Rotate the front of the queue to the end. Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); Thread* next_thread = nullptr; if (top_thread != nullptr) { @@ -264,27 +249,27 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! { Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. 
*/ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If the next thread is a new thread that has been waiting longer than our - * suggestion, we prefer it to our suggestion. */ + // If the next thread is a new thread that has been waiting longer than our + // suggestion, we prefer it to our suggestion. if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { suggested = nullptr; break; } - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion - * to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion + // to the front of the queue. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { suggested->SetActiveCore(core_id); @@ -294,38 +279,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSamePriorityNext(core_id, suggested); } } - /* Now that we might have migrated a thread with the same priority, check if we can do better. - */ + // Now that we might have migrated a thread with the same priority, check if we can do better. + { Thread* best_thread = priority_queue.GetScheduledFront(core_id); if (best_thread == GetCurrentThread()) { best_thread = priority_queue.GetScheduledNext(core_id, best_thread); } - /* If the best thread we can choose has a priority the same or worse than ours, try to - * migrate a higher priority thread. */ + // If the best thread we can choose has a priority the same or worse than ours, try to + // migrate a higher priority thread. if (best_thread != nullptr && best_thread->GetPriority() >= static_cast(priority)) { Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* If the suggestion's priority is the same as ours, don't bother. */ + // If the suggestion's priority is the same as ours, don't bother. if (suggested->GetPriority() >= best_thread->GetPriority()) { break; } - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the - * suggestion to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + // suggestion to the front of the queue. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -336,13 +321,13 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } } } - /* After a rotation, we need a scheduler update. 
*/ + // After a rotation, we need a scheduler update. SetSchedulerUpdateNeeded(kernel); } @@ -392,38 +377,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { void KScheduler::YieldWithoutCoreMigration() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Put the current thread at the back of the queue. */ + // Put the current thread at the back of the queue. Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* If the next thread is different, we have an update to perform. */ + // If the next thread is different, we have an update to perform. if (next_thread != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else { - /* Otherwise, set the thread's yield count so that we won't waste work until the - * process is scheduled again. */ + // Otherwise, set the thread's yield count so that we won't waste work until the + // process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } @@ -433,40 +418,40 @@ void KScheduler::YieldWithoutCoreMigration() { void KScheduler::YieldWithCoreMigration() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Get the current active core. */ + // Get the current active core. const s32 core_id = cur_thread.GetActiveCore(); - /* Put the current thread at the back of the queue. */ + // Put the current thread at the back of the queue. Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! 
bool recheck = false; Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* Check if the suggested thread is the thread running on its core. */ + // Check if the suggested thread is the thread running on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* running_on_suggested_core = @@ -474,10 +459,10 @@ void KScheduler::YieldWithCoreMigration() { ? kernel.Scheduler(suggested_core).state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) { - /* If the current thread's priority is higher than our suggestion's we prefer - * the next thread to the suggestion. */ - /* We also prefer the next thread when the current thread's priority is equal to - * the suggestions, but the next thread has been waiting longer. */ + // If the current thread's priority is higher than our suggestion's, we prefer + // the next thread to the suggestion. We also prefer the next thread when the + // current thread's priority is equal to the suggestion's, but the next thread + // has been waiting longer. if ((suggested->GetPriority() > cur_thread.GetPriority()) || (suggested->GetPriority() == cur_thread.GetPriority() && next_thread != std::addressof(cur_thread) && @@ -486,9 +471,9 @@ void KScheduler::YieldWithCoreMigration() { break; } - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the - * suggestion to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + // suggestion to the front of the queue. if (running_on_suggested_core == nullptr || running_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -497,23 +482,23 @@ void KScheduler::YieldWithCoreMigration() { IncrementScheduledCount(suggested); break; } else { - /* We couldn't perform a migration, but we should check again on a future - * yield. */ + // We couldn't perform a migration, but we should check again on a future + // yield. recheck = true; } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If we still have a suggestion or the next thread is different, we have an update to - * perform. */ + // If we still have a suggestion or the next thread is different, we have an update to + // perform. if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else if (!recheck) { - /* Otherwise if we don't need to re-check, set the thread's yield count so that we - * won't waste work until the process is scheduled again. */ + // Otherwise, if we don't need to re-check, set the thread's yield count so that we + // won't waste work until the process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } @@ -523,48 +508,48 @@ void KScheduler::YieldWithCoreMigration() { void KScheduler::YieldToAnyThread() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. 
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Get the current active core. */ + // Get the current active core. const s32 core_id = cur_thread.GetActiveCore(); - /* Migrate the current thread to core -1. */ + // Migrate the current thread to core -1. cur_thread.SetActiveCore(-1); priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* If there's nothing scheduled, we can try to perform a migration. */ + // If there's nothing scheduled, we can try to perform a migration. if (priority_queue.GetScheduledFront(core_id) == nullptr) { - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If we're allowed to do a migration, do one. */ + // If we're allowed to do a migration, do one. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -573,25 +558,25 @@ void KScheduler::YieldToAnyThread() { IncrementScheduledCount(suggested); } - /* Regardless of whether we migrated, we had a candidate, so we're done. */ + // Regardless of whether we migrated, we had a candidate, so we're done. break; } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If the suggestion is different from the current thread, we need to perform an - * update. */ + // If the suggestion is different from the current thread, we need to perform an + // update. if (suggested != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else { - /* Otherwise, set the thread's yield count so that we won't waste work until the - * process is scheduled again. */ + // Otherwise, set the thread's yield count so that we won't waste work until the + // process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } else { - /* Otherwise, we have an update to perform. */ + // Otherwise, we have an update to perform. SetSchedulerUpdateNeeded(kernel); } } diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index d46f5fc04..39a02af2a 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -34,19 +34,19 @@ public: void Lock() { if (this->IsLockedByCurrentThread()) { - /* If we already own the lock, we can just increment the count. */ + // If we already own the lock, we can just increment the count. ASSERT(this->lock_count > 0); this->lock_count++; } else { - /* Otherwise, we want to disable scheduling and acquire the spinlock. */ + // Otherwise, we want to disable scheduling and acquire the spinlock. 
SchedulerType::DisableScheduling(kernel); this->spin_lock.lock(); - /* For debug, ensure that our state is valid. */ + // For debug, ensure that our state is valid. ASSERT(this->lock_count == 0); ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle()); - /* Increment count, take ownership. */ + // Increment count, take ownership. this->lock_count = 1; this->owner_thread = kernel.GetCurrentEmuThreadID(); } @@ -56,18 +56,18 @@ public: ASSERT(this->IsLockedByCurrentThread()); ASSERT(this->lock_count > 0); - /* Release an instance of the lock. */ + // Release an instance of the lock. if ((--this->lock_count) == 0) { - /* We're no longer going to hold the lock. Take note of what cores need scheduling. */ + // We're no longer going to hold the lock. Take note of what cores need scheduling. const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads(kernel); Core::EmuThreadHandle leaving_thread = owner_thread; - /* Note that we no longer hold the lock, and unlock the spinlock. */ + // Note that we no longer hold the lock, and unlock the spinlock. this->owner_thread = Core::EmuThreadHandle::InvalidHandle(); this->spin_lock.unlock(); - /* Enable scheduling, and perform a rescheduling operation. */ + // Enable scheduling, and perform a rescheduling operation. SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); } } diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index 16f470923..d1cc5dda7 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -28,17 +28,17 @@ public: : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) { event_handle = InvalidHandle; - /* Lock the scheduler. */ + // Lock the scheduler. kernel.GlobalSchedulerContext().scheduler_lock.Lock(); } ~KScopedSchedulerLockAndSleep() { - /* Register the sleep. */ + // Register the sleep. if (this->timeout_tick > 0) { kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); } - /* Unlock the scheduler. */ + // Unlock the scheduler. kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); } -- cgit v1.2.3 From 357d79fb6e6ec0d373d8768f1ee32eebe02e55da Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:47:59 -0800 Subject: hle: kernel: GlobalSchedulerContext: Various style fixes based on code review feedback. --- src/core/hle/kernel/global_scheduler_context.cpp | 7 ++++++- src/core/hle/kernel/global_scheduler_context.h | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index a9cb48b38..a133e8ed0 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -31,7 +31,12 @@ void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { void GlobalSchedulerContext::PreemptThreads() { // The priority levels at which the global scheduler preempts threads every 10 ms. They are // ordered from Core 0 to Core 3. 
- std::array preemption_priorities = {59, 59, 59, 63}; + static constexpr std::array preemption_priorities{ + 59, + 59, + 59, + 63, + }; ASSERT(IsLocked()); for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index c4bc23eed..5c7b89290 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -21,7 +21,7 @@ class SchedulerLock; using KSchedulerPriorityQueue = KPriorityQueue; -static constexpr s32 HighestCoreMigrationAllowedPriority = 2; +constexpr s32 HighestCoreMigrationAllowedPriority = 2; class GlobalSchedulerContext final { friend class KScheduler; @@ -39,7 +39,7 @@ public: void RemoveThread(std::shared_ptr thread); /// Returns a list of all threads managed by the scheduler - const std::vector>& GetThreadList() const { + [[nodiscard]] const std::vector>& GetThreadList() const { return thread_list; } @@ -55,11 +55,11 @@ public: /// Returns true if the global scheduler lock is acquired bool IsLocked() const; - LockType& SchedulerLock() { + [[nodiscard]] LockType& SchedulerLock() { return scheduler_lock; } - const LockType& SchedulerLock() const { + [[nodiscard]] const LockType& SchedulerLock() const { return scheduler_lock; } -- cgit v1.2.3 From 4d3be1816c9f1ac5c49906c8466fa2618ea15e73 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:50:32 -0800 Subject: hle: kernel: KAffinityMask: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_affinity_mask.h | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h index fa2a720a4..dd73781cd 100644 --- a/src/core/hle/kernel/k_affinity_mask.h +++ b/src/core/hle/kernel/k_affinity_mask.h @@ -14,24 +14,10 @@ namespace Kernel { class KAffinityMask { -private: - static constexpr u64 AllowedAffinityMask = (1ul << Core::Hardware::NUM_CPU_CORES) - 1; - -private: - u64 mask; - -private: - static constexpr u64 GetCoreBit(s32 core) { - ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); - return (1ull << core); - } - public: - constexpr KAffinityMask() : mask(0) { - ASSERT(this); - } + constexpr KAffinityMask() = default; - constexpr u64 GetAffinityMask() const { + [[nodiscard]] constexpr u64 GetAffinityMask() const { return this->mask; } @@ -40,7 +26,7 @@ public: this->mask = new_mask; } - constexpr bool GetAffinity(s32 core) const { + [[nodiscard]] constexpr bool GetAffinity(s32 core) const { return this->mask & GetCoreBit(core); } @@ -57,6 +43,16 @@ public: constexpr void SetAll() { this->mask = AllowedAffinityMask; } + +private: + [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + return (1ULL << core); + } + + static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1; + + u64 mask{}; }; } // namespace Kernel -- cgit v1.2.3 From 8fd921557fc8e830ee8330fc1dcf6dcc9963c4c2 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:57:18 -0800 Subject: hle: kernel: KPriorityQueue: Various style fixes based on code review feedback. 
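The substance of this change is the cleanup of KPriorityQueueImpl shown below: C arrays become std::array, member data moves below the methods, and nested ifs become guard clauses. For readers skimming the diff, here is a rough, self-contained sketch of the guard-clause rewrite as applied to the queue's push path (illustrative only, not yuzu code; the LowestPriority value and counts_per_priority array are invented for the example):

#include <array>
#include <cstdint>
#include <iostream>

namespace {

// Hypothetical bound, chosen only so this sketch compiles and runs.
constexpr std::int32_t LowestPriority = 63;
std::array<int, LowestPriority + 1> counts_per_priority{};

// Guard clause: reject out-of-range input up front so the success path
// stays at one indentation level, mirroring PushBack/PushFront/Remove below.
void PushBack(std::int32_t priority) {
    if (priority < 0 || priority > LowestPriority) {
        return;
    }
    ++counts_per_priority[priority];
}

} // namespace

int main() {
    PushBack(59);
    PushBack(64); // Rejected by the guard clause; no out-of-bounds write.
    std::cout << counts_per_priority[59] << '\n'; // prints 1
}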
--- src/core/hle/kernel/k_priority_queue.h | 65 +++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 29 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index d374f5c00..01a577d0c 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -7,6 +7,8 @@ #pragma once +#include + #include "common/assert.h" #include "common/bit_set.h" #include "common/bit_util.h" @@ -17,7 +19,7 @@ namespace Kernel { class Thread; template -concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T & t) { +concept KPriorityQueueAffinityMask = !std::is_reference_v && requires(T & t) { { t.GetAffinityMask() } ->std::convertible_to; {t.SetAffinityMask(std::declval())}; @@ -29,7 +31,7 @@ concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T }; template -concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { +concept KPriorityQueueMember = !std::is_reference_v && requires(T & t) { {typename T::QueueEntry()}; {(typename T::QueueEntry()).Initialize()}; {(typename T::QueueEntry()).SetPrev(std::addressof(t))}; @@ -54,8 +56,8 @@ concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { template requires KPriorityQueueMember class KPriorityQueue { public: - using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + using AffinityMaskType = typename std::remove_cv_t< + typename std::remove_reference().GetAffinityMask())>::type>; static_assert(LowestPriority >= 0); static_assert(HighestPriority >= 0); @@ -77,12 +79,12 @@ private: public: class KPerCoreQueue { private: - Entry root[NumCores]; + std::array root{}; public: - constexpr KPerCoreQueue() : root() { - for (size_t i = 0; i < NumCores; i++) { - this->root[i].Initialize(); + constexpr KPerCoreQueue() { + for (auto& per_core_root : root) { + per_core_root.Initialize(); } } @@ -101,7 +103,7 @@ public: tail_entry.SetNext(member); this->root[core].SetPrev(member); - return (tail == nullptr); + return tail == nullptr; } constexpr bool PushFront(s32 core, Member* member) { @@ -147,21 +149,19 @@ public: }; class KPriorityQueueImpl { - private: - KPerCoreQueue queues[NumPriority]; - Common::BitSet64 available_priorities[NumCores]; - public: - constexpr KPriorityQueueImpl() : queues(), available_priorities() {} + constexpr KPriorityQueueImpl() = default; constexpr void PushBack(s32 priority, s32 core, Member* member) { ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].PushBack(core, member)) { - this->available_priorities[core].SetBit(priority); - } + if (priority > LowestPriority) { + return; + } + + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); } } @@ -169,10 +169,12 @@ public: ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].PushFront(core, member)) { - this->available_priorities[core].SetBit(priority); - } + if (priority > LowestPriority) { + return; + } + + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); } } @@ -180,10 +182,12 @@ public: ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].Remove(core, member)) { - this->available_priorities[core].ClearBit(priority); - } + if 
(priority > LowestPriority) { + return; + } + + if (this->queues[priority].Remove(core, member)) { + this->available_priorities[core].ClearBit(priority); } } @@ -246,6 +250,10 @@ public: return nullptr; } } + + private: + std::array queues{}; + std::array, NumCores> available_priorities{}; }; private: @@ -254,7 +262,7 @@ private: private: constexpr void ClearAffinityBit(u64& affinity, s32 core) { - affinity &= ~(u64(1ul) << core); + affinity &= ~(u64(1) << core); } constexpr s32 GetNextCore(u64& affinity) { @@ -313,8 +321,7 @@ private: } public: - constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ... - } + constexpr KPriorityQueue() = default; // Getters. constexpr Member* GetScheduledFront(s32 core) const { -- cgit v1.2.3 From 960500cfd2558c52597fff69c1bb0ea38d922b6a Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:02:30 -0800 Subject: hle: kernel: KScheduler: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_scheduler.cpp | 42 +++++++++++++++---------------- src/core/hle/kernel/k_scheduler.h | 49 ++++++++++++++++--------------------- 2 files changed, 41 insertions(+), 50 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index cc2f8ef0e..c5fd82a6b 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -29,8 +29,8 @@ static void IncrementScheduledCount(Kernel::Thread* thread) { } } -/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, - Core::EmuThreadHandle global_thread) { +void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, + Core::EmuThreadHandle global_thread) { u32 current_core = global_thread.host_handle; bool must_context_switch = global_thread.guest_handle != InvalidHandle && (current_core < Core::Hardware::NUM_CPU_CORES); @@ -81,7 +81,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { +u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Clear that we need to update. @@ -94,7 +94,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { /// We want to go over all cores, finding the highest priority thread and determining if /// scheduling is needed for that core. for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); + Thread* top_thread = priority_queue.GetScheduledFront(static_cast(core_id)); if (top_thread != nullptr) { // If the thread has no waiters, we need to check if the process has a thread pinned. // TODO(bunnei): Implement thread pinning @@ -180,8 +180,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { return cores_needing_scheduling; } -/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, - u32 old_state) { +void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Check if the state has changed, because if it hasn't there's nothing to do. 
@@ -204,8 +203,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, - Thread* current_thread, u32 old_priority) { +void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, + u32 old_priority) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -218,9 +217,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, - const KAffinityMask& old_affinity, - s32 old_core) { +void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, + const KAffinityMask& old_affinity, s32 old_core) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // If the thread is runnable, we want to change its affinity in the queue. @@ -331,38 +329,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { SetSchedulerUpdateNeeded(kernel); } -/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) { +bool KScheduler::CanSchedule(KernelCore& kernel) { return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; } -/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { +bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); } -/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { +void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); } -/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { +void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); } -/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) { +void KScheduler::DisableScheduling(KernelCore& kernel) { if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); scheduler->GetCurrentThread()->DisableDispatch(); } } -/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, - Core::EmuThreadHandle global_thread) { +void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread) { if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { scheduler->GetCurrentThread()->EnableDispatch(); } RescheduleCores(kernel, cores_needing_scheduling, global_thread); } -/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { +u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { if (IsSchedulerUpdateNeeded(kernel)) { return UpdateHighestPriorityThreadsImpl(kernel); } else { @@ -370,7 +368,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } -/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { +KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { return kernel.GlobalSchedulerContext().priority_queue; } @@ -585,7 +583,7 @@ void KScheduler::YieldToAnyThread() { KScheduler::KScheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { - switch_fiber = std::make_shared(std::function(OnSwitch), this); + switch_fiber = std::make_shared(OnSwitch, 
this); this->state.needs_scheduling = true; this->state.interrupt_task_thread_runnable = false; this->state.should_count_idle = false; @@ -722,7 +720,7 @@ void KScheduler::SwitchToCurrent() { } const auto is_switch_pending = [this] { std::scoped_lock lock{guard}; - return !!this->state.needs_scheduling; + return state.needs_scheduling.load(std::memory_order_relaxed); }; do { if (current_thread != nullptr && !current_thread->IsHLEThread()) { diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index d52ecc0db..e84abc84c 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -51,32 +51,28 @@ public: void Reload(Thread* thread); /// Gets the current running thread - Thread* GetCurrentThread() const; + [[nodiscard]] Thread* GetCurrentThread() const; /// Gets the timestamp for the last context switch in ticks. - u64 GetLastContextSwitchTicks() const; + [[nodiscard]] u64 GetLastContextSwitchTicks() const; - bool ContextSwitchPending() const { - return this->state.needs_scheduling; + [[nodiscard]] bool ContextSwitchPending() const { + return state.needs_scheduling.load(std::memory_order_relaxed); } void Initialize(); void OnThreadStart(); - std::shared_ptr& ControlContext() { + [[nodiscard]] std::shared_ptr& ControlContext() { return switch_fiber; } - const std::shared_ptr& ControlContext() const { + [[nodiscard]] const std::shared_ptr& ControlContext() const { return switch_fiber; } - std::size_t CurrentCoreId() const { - return core_id; - } - - u64 UpdateHighestPriorityThread(Thread* highest_thread); + [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread); /** * Takes a thread and moves it to the back of its priority list. @@ -114,7 +110,18 @@ public: static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, const KAffinityMask& old_affinity, s32 old_core); + static bool CanSchedule(KernelCore& kernel); + static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); + static void SetSchedulerUpdateNeeded(KernelCore& kernel); + static void ClearSchedulerUpdateNeeded(KernelCore& kernel); + static void DisableScheduling(KernelCore& kernel); + static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread); + [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); + private: + friend class GlobalSchedulerContext; + /** * Takes care of selecting the new scheduled threads in three steps: * * First, it must select threads with top priority, if they only have * one assigned core, it must be assigned to that core. * * Second, it must select suggested threads whose priority is higher than * any assigned thread in the core it's been suggested. * * Third, it must select high priority threads without picked cores. * * returns the cores needing scheduling. 
*/ - static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); + [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); - void RotateScheduledQueue(s32 core_id, s32 priority); + [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); -public: - static bool CanSchedule(KernelCore& kernel); - static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); - static void SetSchedulerUpdateNeeded(KernelCore& kernel); - static void ClearSchedulerUpdateNeeded(KernelCore& kernel); - static void DisableScheduling(KernelCore& kernel); - static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, - Core::EmuThreadHandle global_thread); - static u64 UpdateHighestPriorityThreads(KernelCore& kernel); - -private: - friend class GlobalSchedulerContext; - - static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); + void RotateScheduledQueue(s32 core_id, s32 priority); void Schedule() { ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1); @@ -175,7 +169,6 @@ private: static void OnSwitch(void* this_scheduler); void SwitchToCurrent(); -private: Thread* current_thread{}; Thread* idle_thread{}; -- cgit v1.2.3 From 165d8485f05569d47751e2f2f730fd3f417831bb Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:04:24 -0800 Subject: hle: kernel: KAbstractSchedulerLock: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_scheduler_lock.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 39a02af2a..2d675b39e 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -17,16 +17,8 @@ class KernelCore; template class KAbstractSchedulerLock { -private: - KernelCore& kernel; - Common::SpinLock spin_lock; - s32 lock_count; - Core::EmuThreadHandle owner_thread; - public: - KAbstractSchedulerLock(KernelCore& kernel) - : kernel{kernel}, spin_lock(), lock_count(0), - owner_thread(Core::EmuThreadHandle::InvalidHandle()) {} + explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {} bool IsLockedByCurrentThread() const { return this->owner_thread == kernel.GetCurrentEmuThreadID(); @@ -71,6 +63,12 @@ public: SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); } } + +private: + KernelCore& kernel; + Common::SpinLock spin_lock{}; + s32 lock_count{}; + Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()}; }; } // namespace Kernel -- cgit v1.2.3 From b1b4f2337e286e2c079634906224fc67ae3ff7c3 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:07:00 -0800 Subject: hle: kernel: KScopedLock: Various style fixes based on code review feedback. 
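The subject line is terse, so a sketch of the shape this change lands on may help: a RAII guard constrained by a KLockable-style concept, made non-copyable and non-movable by deleting the relevant constructors directly rather than inheriting from a NonCopyable helper. This is illustrative only (ScopedLock, Lockable, and DemoLock are invented names, not the project's types):

#include <concepts>
#include <iostream>
#include <type_traits>

// A concept in the spirit of KLockable: anything exposing Lock()/Unlock().
template <typename T>
concept Lockable = !std::is_reference_v<T> && requires(T& t) {
    { t.Lock() } -> std::same_as<void>;
    { t.Unlock() } -> std::same_as<void>;
};

template <Lockable T>
class ScopedLock {
public:
    explicit ScopedLock(T* l) : lock_ptr(l) {
        lock_ptr->Lock();
    }
    ~ScopedLock() {
        lock_ptr->Unlock();
    }

    // Deleted special members replace the old NonCopyable base class.
    ScopedLock(const ScopedLock&) = delete;
    ScopedLock(ScopedLock&&) = delete;

private:
    T* lock_ptr;
};

// Toy lock used only to exercise the guard.
struct DemoLock {
    void Lock() { std::cout << "locked\n"; }
    void Unlock() { std::cout << "unlocked\n"; }
};

int main() {
    DemoLock lock;
    ScopedLock guard(&lock); // Prints "locked" now, "unlocked" at scope exit.
}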
--- src/core/hle/kernel/k_scoped_lock.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h index 03320859f..d7cc557b2 100644 --- a/src/core/hle/kernel/k_scoped_lock.h +++ b/src/core/hle/kernel/k_scoped_lock.h @@ -12,7 +12,7 @@ namespace Kernel { template -concept KLockable = !std::is_reference::value && requires(T & t) { +concept KLockable = !std::is_reference_v && requires(T & t) { { t.Lock() } ->std::same_as; { t.Unlock() } @@ -20,11 +20,7 @@ concept KLockable = !std::is_reference::value && requires(T & t) { }; template -requires KLockable class KScopedLock : NonCopyable { - -private: - T* lock_ptr; - +requires KLockable class KScopedLock { public: explicit KScopedLock(T* l) : lock_ptr(l) { this->lock_ptr->Lock(); @@ -34,6 +30,12 @@ public: ~KScopedLock() { this->lock_ptr->Unlock(); } + + KScopedLock(const KScopedLock&) = delete; + KScopedLock(KScopedLock&&) = delete; + +private: + T* lock_ptr; }; } // namespace Kernel -- cgit v1.2.3 From ed4d1e2adefa4775dc1a950d7db84c7935ae7324 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:07:44 -0800 Subject: hle: kernel: KScopedSchedulerLockAndSleep: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index d1cc5dda7..2bb3817fa 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -16,12 +16,6 @@ namespace Kernel { class KScopedSchedulerLockAndSleep { -private: - KernelCore& kernel; - Handle& event_handle; - Thread* thread{}; - s64 timeout_tick{}; - public: explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, s64 timeout) @@ -45,6 +39,12 @@ public: void CancelSleep() { this->timeout_tick = 0; } + +private: + KernelCore& kernel; + Handle& event_handle; + Thread* thread{}; + s64 timeout_tick{}; }; } // namespace Kernel -- cgit v1.2.3 From 9b492430bb349b98ac4768ddde044a63976fe146 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:17:18 -0800 Subject: hle: kernel: Thread: Various style fixes based on code review feedback. --- src/core/hle/kernel/thread.h | 47 +++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index f1aa358a4..11ef29888 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -4,6 +4,7 @@ #pragma once +#include #include #include #include @@ -346,10 +347,11 @@ public: void SetStatus(ThreadStatus new_status); - constexpr s64 GetLastScheduledTick() const { + s64 GetLastScheduledTick() const { return this->last_scheduled_tick; } - constexpr void SetLastScheduledTick(s64 tick) { + + void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; } @@ -481,7 +483,7 @@ public: return ideal_core; } - constexpr const KAffinityMask& GetAffinityMask() const { + const KAffinityMask& GetAffinityMask() const { return affinity_mask; } @@ -490,10 +492,11 @@ public: /// Sleeps this thread for the given amount of nanoseconds. 
ResultCode Sleep(s64 nanoseconds); - constexpr s64 GetYieldScheduleCount() const { + s64 GetYieldScheduleCount() const { return this->schedule_count; } - constexpr void SetYieldScheduleCount(s64 count) { + + void SetYieldScheduleCount(s64 count) { this->schedule_count = count; } @@ -570,14 +573,9 @@ public: return has_exited; } - struct QueueEntry { - private: - Thread* prev; - Thread* next; - + class QueueEntry { public: - constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ - } + constexpr QueueEntry() = default; constexpr void Initialize() { this->prev = nullptr; @@ -590,18 +588,23 @@ public: constexpr Thread* GetNext() const { return this->next; } - constexpr void SetPrev(Thread* t) { - this->prev = t; + constexpr void SetPrev(Thread* thread) { + this->prev = thread; } - constexpr void SetNext(Thread* t) { - this->next = t; + constexpr void SetNext(Thread* thread) { + this->next = thread; } + + private: + Thread* prev{}; + Thread* next{}; }; - constexpr QueueEntry& GetPriorityQueueEntry(s32 core) { + QueueEntry& GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; } - - constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const { + + const QueueEntry& GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; } @@ -619,9 +622,6 @@ public: disable_count--; } - ThreadStatus status = ThreadStatus::Dormant; - u32 scheduling_state = 0; - private: friend class GlobalSchedulerContext; friend class KScheduler; @@ -638,6 +638,9 @@ private: ThreadContext64 context_64{}; std::shared_ptr host_context{}; + ThreadStatus status = ThreadStatus::Dormant; + u32 scheduling_state = 0; + u64 thread_id = 0; VAddr entry_point = 0; @@ -701,7 +704,7 @@ private: KScheduler* scheduler = nullptr; - QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{}; + std::array per_core_priority_queue_entry{}; u32 ideal_core{0xFFFFFFFF}; KAffinityMask affinity_mask{}; -- cgit v1.2.3 From 1bdb756d28a00f168094e490143dba89569fe35a Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 6 Dec 2020 00:25:58 -0800 Subject: hle: kernel: Process: Various style fixes based on code review feedback. --- src/core/hle/kernel/process.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 36d8547bd..e412e58aa 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -217,12 +217,12 @@ public: } /// Gets the process schedule count, used for thread yielding - constexpr s64 GetScheduledCount() const { + s64 GetScheduledCount() const { return schedule_count; } /// Increments the process schedule count, used for thread yielding. 
- constexpr void IncrementScheduledCount() { + void IncrementScheduledCount() { ++schedule_count; } -- cgit v1.2.3 From feac654ba04951e77e8642cfe188277a688ca779 Mon Sep 17 00:00:00 2001 From: comex Date: Sat, 14 Nov 2020 17:57:51 -0500 Subject: core: Mark unused fields as [[maybe_unused]] --- src/core/hle/kernel/memory/memory_block_manager.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h index 6e1d41075..f57d1bbcc 100644 --- a/src/core/hle/kernel/memory/memory_block_manager.h +++ b/src/core/hle/kernel/memory/memory_block_manager.h @@ -57,8 +57,8 @@ public: private: void MergeAdjacent(iterator it, iterator& next_it); - const VAddr start_addr; - const VAddr end_addr; + [[maybe_unused]] const VAddr start_addr; + [[maybe_unused]] const VAddr end_addr; MemoryBlockTree memory_block_tree; }; -- cgit v1.2.3 From e31cb50405daae5d7189a98da272ca6c08ca8e04 Mon Sep 17 00:00:00 2001 From: comex Date: Mon, 31 Aug 2020 10:38:58 -0400 Subject: Fix "explicitly defaulted but implicitly deleted" warning `PhysicalCore`'s move assignment operator was declared as `= default`, but was implicitly deleted because `PhysicalCore` has fields of reference type. Switch to explicitly deleting it to avoid a Clang warning. The move *constructor* is still defaulted, and is required to exist due to the use of `std::vector`. --- src/core/hle/kernel/physical_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index 37513130a..801d24c28 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -36,7 +36,7 @@ public: PhysicalCore& operator=(const PhysicalCore&) = delete; PhysicalCore(PhysicalCore&&) = default; - PhysicalCore& operator=(PhysicalCore&&) = default; + PhysicalCore& operator=(PhysicalCore&&) = delete; /// Initialize the core for the specified parameters. void Initialize(bool is_64_bit); -- cgit v1.2.3 From 6b7320add44bf3d933063d0b93296222fd522ef6 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Mon, 7 Dec 2020 22:00:34 -0500 Subject: core: Remove unnecessary enum casts in log calls Follows the video core PR. fmt doesn't require casts for enum classes anymore, so we can remove quite a few casts. --- src/core/hle/kernel/process_capability.cpp | 2 +- src/core/hle/kernel/resource_limit.cpp | 4 ++-- src/core/hle/kernel/server_session.cpp | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 63880f13d..0f128c586 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp @@ -199,7 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s break; } - LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast(type)); + LOG_ERROR(Kernel, "Invalid capability type! 
type={}", type); return ERR_INVALID_CAPABILITY_DESCRIPTOR; } diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index 212e442f4..7bf50339d 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -65,8 +65,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) { limit[index] = value; return RESULT_SUCCESS; } else { - LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", - static_cast(resource), value, index); + LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", resource, + value, index); return ERR_INVALID_STATE; } } diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 8c19f2534..ae088cf41 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -130,8 +130,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con } } - LOG_CRITICAL(IPC, "Unknown domain command={}", - static_cast(domain_message_header.command.Value())); + LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); ASSERT(false); return RESULT_SUCCESS; } -- cgit v1.2.3 From 2de124e2235f28cc25e2b213f8ba76c9f670f36c Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 8 Dec 2020 15:38:28 -0500 Subject: svc: Remove unnecessary casts Simplifies and removes some casts. In all cases, these were widening conversions from a 32-bit unsigned type to a 64-bit unsigned type, so no information is lost in the conversion. --- src/core/hle/kernel/svc.cpp | 60 +++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 35 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 95d6e2b4d..c8060f179 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -234,8 +234,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, u32 attribute) { - return SetMemoryAttribute(system, static_cast(address), static_cast(size), - mask, attribute); + return SetMemoryAttribute(system, address, size, mask, attribute); } /// Maps a memory range into a different range. 
@@ -255,8 +254,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr } static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { - return MapMemory(system, static_cast(dst_addr), static_cast(src_addr), - static_cast(size)); + return MapMemory(system, dst_addr, src_addr, size); } /// Unmaps a region that was previously mapped with svcMapMemory @@ -276,8 +274,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad } static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { - return UnmapMemory(system, static_cast(dst_addr), static_cast(src_addr), - static_cast(size)); + return UnmapMemory(system, dst_addr, src_addr, size); } /// Connect to an OS service given the port name, returns the handle to the port to out @@ -531,8 +528,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, u32 mutex_addr, Handle requesting_thread_handle) { - return ArbitrateLock(system, holding_thread_handle, static_cast(mutex_addr), - requesting_thread_handle); + return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle); } /// Unlock a mutex @@ -555,7 +551,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { } static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { - return ArbitrateUnlock(system, static_cast(mutex_addr)); + return ArbitrateUnlock(system, mutex_addr); } enum class BreakType : u32 { @@ -677,7 +673,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { } static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) { - Break(system, reason, static_cast(info1), static_cast(info2)); + Break(system, reason, info1, info2); } /// Used to output a message on a debug hardware unit - does nothing on a retail unit @@ -948,7 +944,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low, u32 info_id, u32 handle, u32 sub_id_high) { - const u64 sub_id{static_cast(sub_id_low | (static_cast(sub_id_high) << 32))}; + const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)}; u64 res_value{}; const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)}; @@ -1009,7 +1005,7 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) } static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { - return MapPhysicalMemory(system, static_cast(addr), static_cast(size)); + return MapPhysicalMemory(system, addr, size); } /// Unmaps memory previously mapped via MapPhysicalMemory @@ -1063,7 +1059,7 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size } static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { - return UnmapPhysicalMemory(system, static_cast(addr), static_cast(size)); + return UnmapPhysicalMemory(system, addr, size); } /// Sets the thread activity @@ -1144,7 +1140,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H } static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) { - return GetThreadContext(system, static_cast(thread_context), handle); + return GetThreadContext(system, thread_context, handle); } /// Gets the priority for the 
specified thread @@ -1281,8 +1277,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr, u32 size, u32 permissions) { - return MapSharedMemory(system, shared_memory_handle, static_cast(addr), - static_cast(size), permissions); + return MapSharedMemory(system, shared_memory_handle, addr, size, permissions); } static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, @@ -1552,8 +1547,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority, u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) { - return CreateThread(system, out_handle, static_cast(entry_point), static_cast(arg), - static_cast(stack_top), priority, processor_id); + return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id); } /// Starts the thread for the provided handle @@ -1637,8 +1631,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { } static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) { - const s64 nanoseconds = static_cast(static_cast(nanoseconds_low) | - (static_cast(nanoseconds_high) << 32)); + const auto nanoseconds = static_cast(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32)); SleepThread(system, nanoseconds); } @@ -1724,10 +1717,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, u32 condition_variable_addr, Handle thread_handle, u32 nanoseconds_low, u32 nanoseconds_high) { - const s64 nanoseconds = - static_cast(nanoseconds_low | (static_cast(nanoseconds_high) << 32)); - return WaitProcessWideKeyAtomic(system, static_cast(mutex_addr), - static_cast(condition_variable_addr), thread_handle, + const auto nanoseconds = static_cast(nanoseconds_low | (u64{nanoseconds_high} << 32)); + return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle, nanoseconds); } @@ -1833,8 +1824,8 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, u32 timeout_low, u32 timeout_high) { - s64 timeout = static_cast(timeout_low | (static_cast(timeout_high) << 32)); - return WaitForAddress(system, static_cast(address), type, value, timeout); + const auto timeout = static_cast(timeout_low | (u64{timeout_high} << 32)); + return WaitForAddress(system, address, type, value, timeout); } // Signals to an address (via Address Arbiter) @@ -1862,7 +1853,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, s32 num_to_wake) { - return SignalToAddress(system, static_cast(address), type, value, num_to_wake); + return SignalToAddress(system, address, type, value, num_to_wake); } static void KernelDebug([[maybe_unused]] Core::System& system, @@ -1893,7 +1884,7 @@ static u64 GetSystemTick(Core::System& system) { } static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) { - u64 time = GetSystemTick(system); + const auto time = GetSystemTick(system); *time_low = static_cast(time); *time_high = static_cast(time >> 32); } @@ -1984,8 +1975,7 @@ static ResultCode 
CreateTransferMemory(Core::System& system, Handle* handle, VAd static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size, u32 permissions) { - return CreateTransferMemory(system, handle, static_cast(addr), - static_cast(size), permissions); + return CreateTransferMemory(system, handle, addr, size, permissions); } static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, @@ -2075,8 +2065,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, u32 affinity_mask_low, u32 affinity_mask_high) { - const u64 affinity_mask = - static_cast(affinity_mask_low) | (static_cast(affinity_mask_high) << 32); + const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32); return SetThreadCoreMask(system, thread_handle, core, affinity_mask); } @@ -2341,9 +2330,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd return RESULT_SUCCESS; } -static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address, - u32 size) { - // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a nope +static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system, + [[maybe_unused]] Handle handle, + [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) { + // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op, // as all emulation is done in the same cache level in host architecture, thus data cache // does not need flushing. LOG_DEBUG(Kernel_SVC, "called"); -- cgit v1.2.3 From 28281ae2500a4af9c36c26de5ba07b80d440b335 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 9 Dec 2020 21:27:05 -0800 Subject: core: hle: server_session: Use separate threads for each service connection. 
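For readers unfamiliar with the pattern, the dispatch model this commit moves to boils down to a per-session worker thread draining a mutex/condition-variable request queue. The following is a self-contained sketch of that pattern only; the class and function names here are illustrative stand-ins, not the yuzu API (the actual implementation follows in the diff below):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

class SessionWorker {
public:
    SessionWorker() : thread{[this] { Loop(); }} {}

    ~SessionWorker() {
        {
            std::unique_lock lock{mutex};
            stop = true;
        }
        condition.notify_all();
        thread.join();
    }

    void Queue(std::function<void()> request) {
        {
            std::unique_lock lock{mutex};
            requests.push(std::move(request));
        }
        condition.notify_one();
    }

private:
    void Loop() {
        while (true) {
            std::function<void()> task;
            {
                std::unique_lock lock{mutex};
                condition.wait(lock, [this] { return stop || !requests.empty(); });
                if (stop && requests.empty()) {
                    return; // drain pending requests before exiting
                }
                task = std::move(requests.front());
                requests.pop();
            }
            task(); // runs on the service thread, not on an emulated core
        }
    }

    std::queue<std::function<void()>> requests;
    std::mutex mutex;
    std::condition_variable condition;
    bool stop{};
    std::thread thread; // declared last so the queue state exists before the thread starts
};

int main() {
    SessionWorker worker;
    worker.Queue([] { std::cout << "request completed on service thread\n"; });
} // destructor drains the queue, then stops and joins the thread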
--- src/core/hle/kernel/kernel.cpp | 2 +- src/core/hle/kernel/server_session.cpp | 20 ++----- src/core/hle/kernel/server_session.h | 12 ++-- src/core/hle/kernel/service_thread.cpp | 100 +++++++++++++++++++++++++++++++++ src/core/hle/kernel/service_thread.h | 27 +++++++++ 5 files changed, 138 insertions(+), 23 deletions(-) create mode 100644 src/core/hle/kernel/service_thread.cpp create mode 100644 src/core/hle/kernel/service_thread.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 04cae3a43..1bf4c3355 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -329,7 +329,7 @@ struct KernelCore::Impl { std::atomic registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; // Number of host threads is a relatively high number to avoid overflowing - static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 64; + static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 1024; std::atomic num_host_threads{0}; std::array, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_keys{}; diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index a35c8aa4b..079c3911a 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -32,12 +32,9 @@ ResultVal> ServerSession::Create(KernelCore& kern std::string name) { std::shared_ptr session{std::make_shared(kernel)}; - session->request_event = - Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) { - session->CompleteSyncRequest(); - }); session->name = std::move(name); session->parent = std::move(parent); + session->service_thread = std::make_unique(kernel); return MakeResult(std::move(session)); } @@ -142,16 +139,12 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr thread, std::make_shared(kernel, memory, SharedFrom(this), std::move(thread)); context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); - request_queue.Push(std::move(context)); + service_thread->QueueSyncRequest(*this, std::move(context)); return RESULT_SUCCESS; } -ResultCode ServerSession::CompleteSyncRequest() { - ASSERT(!request_queue.Empty()); - - auto& context = *request_queue.Front(); - +ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { ResultCode result = RESULT_SUCCESS; // If the session has been converted to a domain, handle the domain request if (IsDomain() && context.HasDomainMessageHeader()) { @@ -177,18 +170,13 @@ ResultCode ServerSession::CompleteSyncRequest() { } } - request_queue.Pop(); - return result; } ResultCode ServerSession::HandleSyncRequest(std::shared_ptr thread, Core::Memory::Memory& memory, Core::Timing::CoreTiming& core_timing) { - const ResultCode result = QueueSyncRequest(std::move(thread), memory); - const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000}; - core_timing.ScheduleEvent(delay, request_event, {}); - return result; + return QueueSyncRequest(std::move(thread), memory); } } // namespace Kernel diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index d23e9ec68..8466b03e6 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -10,6 +10,7 @@ #include #include "common/threadsafe_queue.h" +#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" @@ -43,6 +44,8 @@ class Thread; * TLS buffer and control is transferred back to it. 
*/ class ServerSession final : public SynchronizationObject { + friend class ServiceThread; + public: explicit ServerSession(KernelCore& kernel); ~ServerSession() override; @@ -132,7 +135,7 @@ private: ResultCode QueueSyncRequest(std::shared_ptr thread, Core::Memory::Memory& memory); /// Completes a sync request from the emulated application. - ResultCode CompleteSyncRequest(); + ResultCode CompleteSyncRequest(HLERequestContext& context); /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an /// object handle. @@ -163,11 +166,8 @@ private: /// The name of this session (optional) std::string name; - /// Core timing event used to schedule the service request at some point in the future - std::shared_ptr request_event; - - /// Queue of scheduled service requests - Common::MPSCQueue> request_queue; + /// Thread to dispatch service requests + std::unique_ptr service_thread; }; } // namespace Kernel diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp new file mode 100644 index 000000000..59a6045df --- /dev/null +++ b/src/core/hle/kernel/service_thread.cpp @@ -0,0 +1,100 @@ +// Copyright 2020 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include +#include +#include +#include +#include +#include + +#include "common/assert.h" +#include "common/scope_exit.h" +#include "core/core.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/server_session.h" +#include "core/hle/kernel/service_thread.h" +#include "core/hle/lock.h" +#include "video_core/renderer_base.h" + +namespace Kernel { + +class ServiceThread::Impl final { +public: + explicit Impl(KernelCore& kernel); + ~Impl(); + + void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); + +private: + std::vector threads; + std::queue> requests; + std::mutex queue_mutex; + std::condition_variable condition; + bool stop{}; +}; + +ServiceThread::Impl::Impl(KernelCore& kernel) { + constexpr std::size_t SizeOfPool{1}; + for (std::size_t i = 0; i < SizeOfPool; ++i) + threads.emplace_back([&] { + // Wait for first request before trying to acquire a render context + { + std::unique_lock lock{queue_mutex}; + condition.wait(lock, [this] { return stop || !requests.empty(); }); + } + + kernel.RegisterHostThread(); + + while (true) { + std::function task; + + { + std::unique_lock lock{queue_mutex}; + condition.wait(lock, [this] { return stop || !requests.empty(); }); + if (stop && requests.empty()) { + return; + } + task = std::move(requests.front()); + requests.pop(); + } + + task(); + } + }); +} + +void ServiceThread::Impl::QueueSyncRequest(ServerSession& session, + std::shared_ptr&& context) { + { + std::unique_lock lock{queue_mutex}; + requests.emplace([session{SharedFrom(&session)}, context{std::move(context)}]() { + session->CompleteSyncRequest(*context); + return; + }); + } + condition.notify_one(); +} + +ServiceThread::Impl::~Impl() { + { + std::unique_lock lock{queue_mutex}; + stop = true; + } + condition.notify_all(); + for (std::thread& thread : threads) { + thread.join(); + } +} + +ServiceThread::ServiceThread(KernelCore& kernel) : impl{std::make_unique(kernel)} {} + +ServiceThread::~ServiceThread() = default; + +void ServiceThread::QueueSyncRequest(ServerSession& session, + std::shared_ptr&& context) { + impl->QueueSyncRequest(session, std::move(context)); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h new 
file mode 100644 index 000000000..d252490bb --- /dev/null +++ b/src/core/hle/kernel/service_thread.h @@ -0,0 +1,27 @@ +// Copyright 2020 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include + +namespace Kernel { + +class HLERequestContext; +class KernelCore; +class ServerSession; + +class ServiceThread final { +public: + explicit ServiceThread(KernelCore& kernel); + ~ServiceThread(); + + void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); + +private: + class Impl; + std::unique_ptr impl; +}; + +} // namespace Kernel -- cgit v1.2.3 From 8bc3d66354972c8a288a56c9b75c9705597778f8 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 10 Dec 2020 16:03:35 -0800 Subject: hle: kernel: service_thread: Add parameter for thread pool size. --- src/core/hle/kernel/server_session.cpp | 2 +- src/core/hle/kernel/service_thread.cpp | 10 +++++----- src/core/hle/kernel/service_thread.h | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 079c3911a..ed42452ff 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -34,7 +34,7 @@ ResultVal> ServerSession::Create(KernelCore& kern session->name = std::move(name); session->parent = std::move(parent); - session->service_thread = std::make_unique(kernel); + session->service_thread = std::make_unique(kernel, 1); return MakeResult(std::move(session)); } diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 59a6045df..4ceb7e56a 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -22,7 +22,7 @@ namespace Kernel { class ServiceThread::Impl final { public: - explicit Impl(KernelCore& kernel); + explicit Impl(KernelCore& kernel, std::size_t num_threads); ~Impl(); void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); @@ -35,9 +35,8 @@ private: bool stop{}; }; -ServiceThread::Impl::Impl(KernelCore& kernel) { - constexpr std::size_t SizeOfPool{1}; - for (std::size_t i = 0; i < SizeOfPool; ++i) +ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads) { + for (std::size_t i = 0; i < num_threads; ++i) threads.emplace_back([&] { // Wait for first request before trying to acquire a render context { @@ -88,7 +87,8 @@ ServiceThread::Impl::~Impl() { } } -ServiceThread::ServiceThread(KernelCore& kernel) : impl{std::make_unique(kernel)} {} +ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads) + : impl{std::make_unique(kernel, num_threads)} {} ServiceThread::~ServiceThread() = default; diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h index d252490bb..91ad7ae85 100644 --- a/src/core/hle/kernel/service_thread.h +++ b/src/core/hle/kernel/service_thread.h @@ -14,7 +14,7 @@ class ServerSession; class ServiceThread final { public: - explicit ServiceThread(KernelCore& kernel); + explicit ServiceThread(KernelCore& kernel, std::size_t num_threads); ~ServiceThread(); void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); -- cgit v1.2.3 From 5d4715cc6af424e8529de5ac1a11d1cca3b3f7cf Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 11 Dec 2020 16:45:27 -0800 Subject: hle: kernel: hle_ipc: Remove SleepClientThread. - This was kind of hacky, and no longer is necessary with service threads. 
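To see why the helper becomes unnecessary, compare the shapes: SleepClientThread suspended the guest thread and rewrote the command response from a wakeup callback, whereas a handler running on its own service thread can simply block on a host primitive and write its response when the result is ready. A loose, hypothetical sketch of the latter shape; the std::future stand-in and all names here are assumptions for illustration, not yuzu code:

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

// Hypothetical handler body: it runs on a dedicated service thread, so it
// may block until the backend result is ready and then respond directly,
// with no guest-thread sleep/wakeup-callback machinery.
int HandleRequest(std::future<int> backend_result) {
    return backend_result.get();
}

int main() {
    std::promise<int> promise;
    auto response = std::async(std::launch::async, HandleRequest, promise.get_future());

    std::this_thread::sleep_for(std::chrono::milliseconds(10)); // simulate a slow backend
    promise.set_value(42);

    std::cout << "response: " << response.get() << '\n';
}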
--- src/core/hle/kernel/hle_ipc.cpp | 37 ------------------------------------- src/core/hle/kernel/hle_ipc.h | 17 ----------------- 2 files changed, 54 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index e75e80ad0..83decf6cf 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -46,43 +46,6 @@ void SessionRequestHandler::ClientDisconnected( boost::range::remove_erase(connected_sessions, server_session); } -std::shared_ptr HLERequestContext::SleepClientThread( - const std::string& reason, u64 timeout, WakeupCallback&& callback, - std::shared_ptr writable_event) { - // Put the client thread to sleep until the wait event is signaled or the timeout expires. - - if (!writable_event) { - // Create event if not provided - const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason); - writable_event = pair.writable; - } - - Handle event_handle = InvalidHandle; - { - KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); - thread->SetHLECallback( - [context = *this, callback](std::shared_ptr thread) mutable -> bool { - ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT - ? ThreadWakeupReason::Timeout - : ThreadWakeupReason::Signal; - callback(thread, context, reason); - context.WriteToOutgoingCommandBuffer(*thread); - return true; - }); - const auto readable_event{writable_event->GetReadableEvent()}; - writable_event->Clear(); - thread->SetHLESyncObject(readable_event.get()); - thread->SetStatus(ThreadStatus::WaitHLEEvent); - thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); - readable_event->AddWaitingThread(thread); - } - thread->SetHLETimeEvent(event_handle); - - is_thread_waiting = true; - - return writable_event; -} - HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, std::shared_ptr server_session, std::shared_ptr thread) diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index c31a65476..b112e1ebd 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -129,23 +129,6 @@ public: using WakeupCallback = std::function thread, HLERequestContext& context, ThreadWakeupReason reason)>; - /** - * Puts the specified guest thread to sleep until the returned event is signaled or until the - * specified timeout expires. - * @param reason Reason for pausing the thread, to be used for debugging purposes. - * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback - * invoked with a Timeout reason. - * @param callback Callback to be invoked when the thread is resumed. This callback must write - * the entire command response once again, regardless of the state of it before this function - * was called. - * @param writable_event Event to use to wake up the thread. If unspecified, an event will be - * created. - * @returns Event that when signaled will resume the thread and call the callback function. - */ - std::shared_ptr SleepClientThread( - const std::string& reason, u64 timeout, WakeupCallback&& callback, - std::shared_ptr writable_event = nullptr); - /// Populates this context with data from the requesting process/thread. 
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf); -- cgit v1.2.3 From 6d2f9428c5387abaae03478c9204d164a718ffe5 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 14 Dec 2020 17:57:40 -0800 Subject: core: kernel: Clear process list earlier. --- src/core/hle/kernel/kernel.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 1bf4c3355..b3661e4c1 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -76,6 +76,8 @@ struct KernelCore::Impl { } void Shutdown() { + process_list.clear(); + next_object_id = 0; next_kernel_process_id = Process::InitialKIPIDMin; next_user_process_id = Process::ProcessIDMin; @@ -89,8 +91,6 @@ struct KernelCore::Impl { cores.clear(); - process_list.clear(); - current_process = nullptr; system_resource_limit = nullptr; -- cgit v1.2.3 From d0649d0971fa0e4486b7febe9f24b892c7864548 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 24 Dec 2020 23:29:14 -0800 Subject: core: hle: kernel: Clear process list on boot. --- src/core/hle/kernel/kernel.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index b3661e4c1..312c64c17 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -57,6 +57,8 @@ struct KernelCore::Impl { } void Initialize(KernelCore& kernel) { + process_list.clear(); + RegisterHostThread(); global_scheduler_context = std::make_unique(kernel); @@ -76,8 +78,6 @@ struct KernelCore::Impl { } void Shutdown() { - process_list.clear(); - next_object_id = 0; next_kernel_process_id = Process::InitialKIPIDMin; next_user_process_id = Process::ProcessIDMin; -- cgit v1.2.3 From f57be2e62603ddd99fbdb1dea1bfc91ee2d7fc04 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 29 Dec 2020 01:06:39 -0800 Subject: hle: kernel: service_thread: Add thread name and take weak_ptr of ServerSession. 
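In isolation, the weak_ptr capture this commit introduces looks like the minimal sketch below (Session and the queue are hypothetical stand-ins, not yuzu types). The point of the idiom: a task queued against a session that dies before the task runs degrades to a safe no-op instead of a use-after-free, and the queued task never extends the session's lifetime:

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

struct Session {
    void CompleteSyncRequest() { std::cout << "request completed\n"; }
};

int main() {
    std::queue<std::function<void()>> requests;
    auto session = std::make_shared<Session>();

    // Capture a weak_ptr: the task neither keeps the session alive
    // nor touches it after destruction.
    std::weak_ptr<Session> weak{session};
    requests.emplace([weak] {
        if (auto strong = weak.lock()) {
            strong->CompleteSyncRequest();
        } else {
            std::cout << "session already destroyed; request dropped\n";
        }
    });

    session.reset();    // session dies before the worker runs the task
    requests.front()(); // lock() fails, so this is a safe no-op
    requests.pop();
}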
--- src/core/hle/kernel/server_session.cpp | 2 +- src/core/hle/kernel/service_thread.cpp | 28 +++++++++++++++++++--------- src/core/hle/kernel/service_thread.h | 3 ++- 3 files changed, 22 insertions(+), 11 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index ed42452ff..947f4a133 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -34,7 +34,7 @@ ResultVal> ServerSession::Create(KernelCore& kern session->name = std::move(name); session->parent = std::move(parent); - session->service_thread = std::make_unique(kernel, 1); + session->service_thread = std::make_unique(kernel, 1, session->name); return MakeResult(std::move(session)); } diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 4ceb7e56a..1c134777f 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -11,6 +11,7 @@ #include "common/assert.h" #include "common/scope_exit.h" +#include "common/thread.h" #include "core/core.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/server_session.h" @@ -22,7 +23,7 @@ namespace Kernel { class ServiceThread::Impl final { public: - explicit Impl(KernelCore& kernel, std::size_t num_threads); + explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name); ~Impl(); void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); @@ -32,12 +33,16 @@ private: std::queue> requests; std::mutex queue_mutex; std::condition_variable condition; + const std::string service_name; bool stop{}; }; -ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads) { +ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name) + : service_name{name} { for (std::size_t i = 0; i < num_threads; ++i) - threads.emplace_back([&] { + threads.emplace_back([this, &kernel] { + Common::SetCurrentThreadName(std::string{"Hle_" + service_name}.c_str()); + // Wait for first request before trying to acquire a render context { std::unique_lock lock{queue_mutex}; @@ -52,7 +57,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads) { { std::unique_lock lock{queue_mutex}; condition.wait(lock, [this] { return stop || !requests.empty(); }); - if (stop && requests.empty()) { + if (stop || requests.empty()) { return; } task = std::move(requests.front()); @@ -68,9 +73,14 @@ void ServiceThread::Impl::QueueSyncRequest(ServerSession& session, std::shared_ptr&& context) { { std::unique_lock lock{queue_mutex}; - requests.emplace([session{SharedFrom(&session)}, context{std::move(context)}]() { - session->CompleteSyncRequest(*context); - return; + + // ServerSession owns the service thread, so we cannot capture a strong pointer here in the + // event that the ServerSession is terminated. 
+ std::weak_ptr weak_ptr{SharedFrom(&session)}; + requests.emplace([weak_ptr, context{std::move(context)}]() { + if (auto strong_ptr = weak_ptr.lock()) { + strong_ptr->CompleteSyncRequest(*context); + } }); } condition.notify_one(); @@ -87,8 +97,8 @@ ServiceThread::Impl::~Impl() { } } -ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads) - : impl{std::make_unique(kernel, num_threads)} {} +ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name) + : impl{std::make_unique(kernel, num_threads, name)} {} ServiceThread::~ServiceThread() = default; diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h index 91ad7ae85..025ab8fb5 100644 --- a/src/core/hle/kernel/service_thread.h +++ b/src/core/hle/kernel/service_thread.h @@ -5,6 +5,7 @@ #pragma once #include +#include namespace Kernel { @@ -14,7 +15,7 @@ class ServerSession; class ServiceThread final { public: - explicit ServiceThread(KernelCore& kernel, std::size_t num_threads); + explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name); ~ServiceThread(); void QueueSyncRequest(ServerSession& session, std::shared_ptr&& context); -- cgit v1.2.3 From dfdac7d38af170683812f3b474ef9d686dfa9ef8 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 15 Dec 2020 00:41:48 -0800 Subject: hle: kernel: Move ServiceThread ownership to KernelCore. - Fixes a circular dependency which prevented threads from being released on shutdown. --- src/core/hle/kernel/kernel.cpp | 21 ++++++++++++++++++++- src/core/hle/kernel/kernel.h | 17 +++++++++++++++++ src/core/hle/kernel/server_session.cpp | 13 ++++++++++--- src/core/hle/kernel/server_session.h | 2 +- 4 files changed, 48 insertions(+), 5 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 312c64c17..5f917686f 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include "common/assert.h" @@ -35,6 +35,7 @@ #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" +#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" @@ -107,6 +108,9 @@ struct KernelCore::Impl { std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(), std::thread::id{}); std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0); + + // Ensures all service threads gracefully shutdown + service_threads.clear(); } void InitializePhysicalCores() { @@ -345,6 +349,9 @@ struct KernelCore::Impl { std::shared_ptr irs_shared_mem; std::shared_ptr time_shared_mem; + // Threads used for services + std::unordered_set> service_threads; + std::array, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; std::array interrupts{}; std::array, Core::Hardware::NUM_CPU_CORES> schedulers{}; @@ -639,4 +646,16 @@ void KernelCore::ExitSVCProfile() { MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); } +std::weak_ptr KernelCore::CreateServiceThread(const std::string& name) { + auto service_thread = std::make_shared(*this, 1, name); + impl->service_threads.emplace(service_thread); + return service_thread; +} + +void KernelCore::ReleaseServiceThread(std::weak_ptr service_thread) { + if (auto strong_ptr = service_thread.lock()) { + 
impl->service_threads.erase(strong_ptr); + } +} + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 5846c3f39..e3169f5a7 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -42,6 +42,7 @@ class Process; class ResourceLimit; class KScheduler; class SharedMemory; +class ServiceThread; class Synchronization; class Thread; class TimeManager; @@ -227,6 +228,22 @@ public: void ExitSVCProfile(); + /** + * Creates an HLE service thread, which is used to execute service routines asynchronously. + * While these are allocated per ServerSession, they need to be owned and managed outside of + * ServerSession to avoid a circular dependency. + * @param name String name for the ServerSession creating this thread, used for debug purposes. + * @returns A weak pointer to the newly created service thread. + */ + std::weak_ptr CreateServiceThread(const std::string& name); + + /** + * Releases an HLE service thread, instructing KernelCore to free it. This should be called when + * the ServerSession associated with the thread is destroyed. + * @param service_thread Service thread to release. + */ + void ReleaseServiceThread(std::weak_ptr service_thread); + private: friend class Object; friend class Process; diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 947f4a133..b40fe3916 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -25,7 +25,10 @@ namespace Kernel { ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} -ServerSession::~ServerSession() = default; + +ServerSession::~ServerSession() { + kernel.ReleaseServiceThread(service_thread); +} ResultVal> ServerSession::Create(KernelCore& kernel, std::shared_ptr parent, @@ -34,7 +37,7 @@ ResultVal> ServerSession::Create(KernelCore& kern session->name = std::move(name); session->parent = std::move(parent); - session->service_thread = std::make_unique(kernel, 1, session->name); + session->service_thread = kernel.CreateServiceThread(session->name); return MakeResult(std::move(session)); } @@ -139,7 +142,11 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr thread, std::make_shared(kernel, memory, SharedFrom(this), std::move(thread)); context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); - service_thread->QueueSyncRequest(*this, std::move(context)); + + if (auto strong_ptr = service_thread.lock()) { + strong_ptr->QueueSyncRequest(*this, std::move(context)); + return RESULT_SUCCESS; + } return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index 8466b03e6..e8d1d99ea 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -167,7 +167,7 @@ private: std::string name; /// Thread to dispatch service requests - std::unique_ptr service_thread; + std::weak_ptr service_thread; }; } // namespace Kernel -- cgit v1.2.3 From 0383363a8f5d0781b710f0b06a06299b56816ce7 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Tue, 29 Dec 2020 14:01:56 -0500 Subject: svc: demote SleepThread log to LOG_TRACE This log is called often, and introduces a lot of noise when debug logging is enabled, making it difficult to see other debug logs. 
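As a generic illustration of the motivation (this is a sketch of threshold-based log levels in general, not yuzu's logging backend): TRACE sits below DEBUG, so a hot call site demoted to TRACE stays silent during an ordinary debug session and only appears when trace logging is explicitly requested:

#include <cstdio>

enum class Level { Trace, Debug, Info };

constexpr Level g_threshold = Level::Debug; // typical debugging setup

void Log(Level level, const char* msg) {
    if (level >= g_threshold) { // messages below the threshold are dropped
        std::printf("%s\n", msg);
    }
}

int main() {
    for (int i = 0; i < 1000; ++i) {
        Log(Level::Trace, "SleepThread called"); // filtered out: no noise
    }
    Log(Level::Debug, "something actually interesting"); // still visible
}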
--- src/core/hle/kernel/svc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 2d225392f..de3ed25da 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -1583,7 +1583,7 @@ static void ExitThread32(Core::System& system) { /// Sleep the current thread static void SleepThread(Core::System& system, s64 nanoseconds) { - LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds); + LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds); enum class SleepType : s64 { YieldWithoutCoreMigration = 0, -- cgit v1.2.3 From b36896b90e825bdc89a61c99ece3def6c15c012a Mon Sep 17 00:00:00 2001 From: comex Date: Tue, 29 Dec 2020 14:22:35 -0500 Subject: Add missing include of "core/hle/kernel/kernel.h" This is needed as the header invokes methods on KernelCore. --- src/core/hle/kernel/k_scheduler_lock.h | 1 + 1 file changed, 1 insertion(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 2d675b39e..2f1c1f691 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -10,6 +10,7 @@ #include "common/assert.h" #include "common/spin_lock.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/kernel.h" namespace Kernel { -- cgit v1.2.3 From 388cf58b31d6480007901785851e6369b0efe64b Mon Sep 17 00:00:00 2001 From: comex Date: Tue, 29 Dec 2020 14:26:16 -0500 Subject: k_priority_queue: Fix concepts use - For `std::same_as`, add missing include of ``. - For `std::convertible_to`, create a replacement in `common/concepts.h` and use that instead. This would also be found in ``, but unlike `std::same_as`, `std::convertible_to` is not yet implemented in libc++, LLVM's STL implementation - not even in master. (In fact, `std::same_as` is the *only* concept currently implemented. For some reason.) --- src/core/hle/kernel/k_priority_queue.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index 01a577d0c..99fb8fe93 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -8,11 +8,13 @@ #pragma once #include +#include #include "common/assert.h" #include "common/bit_set.h" #include "common/bit_util.h" #include "common/common_types.h" +#include "common/concepts.h" namespace Kernel { @@ -21,7 +23,7 @@ class Thread; template concept KPriorityQueueAffinityMask = !std::is_reference_v && requires(T & t) { { t.GetAffinityMask() } - ->std::convertible_to; + ->Common::ConvertibleTo; {t.SetAffinityMask(std::declval())}; { t.GetAffinity(std::declval()) } @@ -48,9 +50,9 @@ concept KPriorityQueueMember = !std::is_reference_v && requires(T & t) { ->KPriorityQueueAffinityMask; { t.GetActiveCore() } - ->std::convertible_to; + ->Common::ConvertibleTo; { t.GetPriority() } - ->std::convertible_to; + ->Common::ConvertibleTo; }; template -- cgit v1.2.3 From c192da3f82ea3b2563ce43a340c88ac9a6602f26 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 29 Dec 2020 15:55:30 -0800 Subject: hle: kernel: Manage host thread IDs using TLS. - Avoids the need to have a large map of host to guest thread IDs. 
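The mechanism behind this change is that a function-local thread_local runs its initializer exactly once per host thread, so after the first call each thread returns its cached ID with no map lookup and no lock. A standalone sketch of that mechanism under hypothetical names:

#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<unsigned> next_host_thread_id{4}; // 0-3 reserved for CPU core threads

unsigned GetHostThreadId() {
    // The thread_local initializer runs once per thread; subsequent calls
    // from the same thread return the cached value without touching the atomic.
    thread_local const unsigned id = next_host_thread_id++;
    return id;
}

int main() {
    auto print_id = [] {
        std::printf("allocated id: %u\n", GetHostThreadId());
        std::printf("cached id:    %u\n", GetHostThreadId()); // same value, no allocation
    };
    std::thread a{print_id};
    std::thread b{print_id};
    a.join();
    b.join();
}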
--- src/core/hle/kernel/kernel.cpp | 77 +++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 46 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 5f917686f..022cd413d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -104,10 +104,8 @@ struct KernelCore::Impl { exclusive_monitor.reset(); - num_host_threads = 0; - std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(), - std::thread::id{}); - std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0); + // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others + next_host_thread_id = Core::Hardware::NUM_CPU_CORES; // Ensures all service threads gracefully shutdown service_threads.clear(); @@ -190,52 +188,46 @@ struct KernelCore::Impl { } } + /// Creates a new host thread ID, should only be called by GetHostThreadId + u32 AllocateHostThreadId(std::optional core_id) { + if (core_id) { + // The first four slots are reserved for CPU core threads + ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES); + return static_cast(*core_id); + } else { + return next_host_thread_id++; + } + } + + /// Gets the host thread ID for the caller, allocating a new one if this is the first time + u32 GetHostThreadId(std::optional core_id = std::nullopt) { + const thread_local auto host_thread_id{AllocateHostThreadId(core_id)}; + return host_thread_id; + } + + /// Registers a CPU core thread by allocating a host thread ID for it void RegisterCoreThread(std::size_t core_id) { - const std::thread::id this_id = std::this_thread::get_id(); + ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); + const auto this_id = GetHostThreadId(core_id); if (!is_multicore) { single_core_thread_id = this_id; } - const auto end = - register_host_thread_keys.begin() + static_cast(num_host_threads); - const auto it = std::find(register_host_thread_keys.begin(), end, this_id); - ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - ASSERT(it == end); - InsertHostThread(static_cast(core_id)); } + /// Registers a new host thread by allocating a host thread ID for it void RegisterHostThread() { - const std::thread::id this_id = std::this_thread::get_id(); - const auto end = - register_host_thread_keys.begin() + static_cast(num_host_threads); - const auto it = std::find(register_host_thread_keys.begin(), end, this_id); - if (it == end) { - InsertHostThread(registered_thread_ids++); - } - } - - void InsertHostThread(u32 value) { - const size_t index = num_host_threads++; - ASSERT_MSG(index < NUM_REGISTRABLE_HOST_THREADS, "Too many host threads"); - register_host_thread_values[index] = value; - register_host_thread_keys[index] = std::this_thread::get_id(); + [[maybe_unused]] const auto this_id = GetHostThreadId(); } - [[nodiscard]] u32 GetCurrentHostThreadID() const { - const std::thread::id this_id = std::this_thread::get_id(); + [[nodiscard]] u32 GetCurrentHostThreadID() { + const auto this_id = GetHostThreadId(); if (!is_multicore && single_core_thread_id == this_id) { return static_cast(system.GetCpuManager().CurrentCore()); } - const auto end = - register_host_thread_keys.begin() + static_cast(num_host_threads); - const auto it = std::find(register_host_thread_keys.begin(), end, this_id); - if (it == end) { - return Core::INVALID_HOST_THREAD_ID; - } - return register_host_thread_values[static_cast( - std::distance(register_host_thread_keys.begin(), it))]; + return this_id; } - 
Core::EmuThreadHandle GetCurrentEmuThreadID() const { + [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() { Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle(); result.host_handle = GetCurrentHostThreadID(); if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { @@ -329,15 +321,8 @@ struct KernelCore::Impl { std::unique_ptr exclusive_monitor; std::vector cores; - // 0-3 IDs represent core threads, >3 represent others - std::atomic registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; - - // Number of host threads is a relatively high number to avoid overflowing - static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 1024; - std::atomic num_host_threads{0}; - std::array, NUM_REGISTRABLE_HOST_THREADS> - register_host_thread_keys{}; - std::array, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_values{}; + // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others + std::atomic next_host_thread_id{Core::Hardware::NUM_CPU_CORES}; // Kernel memory management std::unique_ptr memory_manager; @@ -357,7 +342,7 @@ struct KernelCore::Impl { std::array, Core::Hardware::NUM_CPU_CORES> schedulers{}; bool is_multicore{}; - std::thread::id single_core_thread_id{}; + u32 single_core_thread_id{}; std::array svc_ticks{}; -- cgit v1.2.3 From a2a0f5318dba2c87578703a5bb69b9fbf7ec526d Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 29 Dec 2020 16:39:04 -0800 Subject: hle: kernel: Manage service threads on another thread. - This is to allow service threads to defer destruction of themselves. --- src/core/hle/kernel/kernel.cpp | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 022cd413d..e8ece8164 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -15,6 +15,7 @@ #include "common/logging/log.h" #include "common/microprofile.h" #include "common/thread.h" +#include "common/thread_worker.h" #include "core/arm/arm_interface.h" #include "core/arm/cpu_interrupt_handler.h" #include "core/arm/exclusive_monitor.h" @@ -58,11 +59,11 @@ struct KernelCore::Impl { } void Initialize(KernelCore& kernel) { - process_list.clear(); - RegisterHostThread(); global_scheduler_context = std::make_unique(kernel); + service_thread_manager = + std::make_unique(1, "yuzu:ServiceThreadManager"); InitializePhysicalCores(); InitializeSystemResourceLimit(kernel); @@ -79,6 +80,12 @@ struct KernelCore::Impl { } void Shutdown() { + process_list.clear(); + + // Ensures all service threads gracefully shutdown + service_thread_manager.reset(); + service_threads.clear(); + next_object_id = 0; next_kernel_process_id = Process::InitialKIPIDMin; next_user_process_id = Process::ProcessIDMin; @@ -106,9 +113,6 @@ struct KernelCore::Impl { // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others next_host_thread_id = Core::Hardware::NUM_CPU_CORES; - - // Ensures all service threads gracefully shutdown - service_threads.clear(); } void InitializePhysicalCores() { @@ -337,6 +341,10 @@ struct KernelCore::Impl { // Threads used for services std::unordered_set> service_threads; + // Service threads are managed by a worker thread, so that a calling service thread can queue up + // the release of itself + std::unique_ptr service_thread_manager; + std::array, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; std::array interrupts{}; std::array, Core::Hardware::NUM_CPU_CORES> schedulers{}; @@ -633,14 
+641,17 @@ void KernelCore::ExitSVCProfile() { std::weak_ptr KernelCore::CreateServiceThread(const std::string& name) { auto service_thread = std::make_shared(*this, 1, name); - impl->service_threads.emplace(service_thread); + impl->service_thread_manager->QueueWork( + [this, service_thread] { impl->service_threads.emplace(service_thread); }); return service_thread; } void KernelCore::ReleaseServiceThread(std::weak_ptr service_thread) { - if (auto strong_ptr = service_thread.lock()) { - impl->service_threads.erase(strong_ptr); - } + impl->service_thread_manager->QueueWork([this, service_thread] { + if (auto strong_ptr = service_thread.lock()) { + impl->service_threads.erase(strong_ptr); + } + }); } } // namespace Kernel -- cgit v1.2.3 From 82e0eeed21d34accb5f69f7436b2d525b701e68e Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 29 Dec 2020 16:40:47 -0800 Subject: hle: kernel: service_thread: Make thread naming more consistent. --- src/core/hle/kernel/service_thread.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 1c134777f..ee46f3e21 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -41,7 +41,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std : service_name{name} { for (std::size_t i = 0; i < num_threads; ++i) threads.emplace_back([this, &kernel] { - Common::SetCurrentThreadName(std::string{"Hle_" + service_name}.c_str()); + Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str()); // Wait for first request before trying to acquire a render context { -- cgit v1.2.3 From b3587102d160fb74a12935a79f06ee8a12712f12 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Tue, 29 Dec 2020 21:16:57 -0300 Subject: core/memory: Read and write page table atomically Squash attributes into the pointer's integer, making them an uintptr_t pair containing 2 bits at the bottom and then the pointer. These bits are currently unused thanks to alignment requirements. Configure Dynarmic to mask out these bits on pointer reads. While we are at it, remove some unused attributes carried over from Citra. 
Read/Write and other hot functions use a two step unpacking process that is less readable to stop MSVC from emitting an extra AND instruction in the hot path: mov rdi,rcx shr rdx,0Ch mov r8,qword ptr [rax+8] mov rax,qword ptr [r8+rdx*8] mov rdx,rax -and al,3 and rdx,0FFFFFFFFFFFFFFFCh je Core::Memory::Memory::Impl::Read mov rax,qword ptr [vaddr] movzx eax,byte ptr [rdx+rax] --- src/core/hle/kernel/memory/page_table.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp index f53a7be82..f3e8bc333 100644 --- a/src/core/hle/kernel/memory/page_table.cpp +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t physical_memory_usage = 0; memory_pool = pool; - page_table_impl.Resize(address_space_width, PageBits, true); + page_table_impl.Resize(address_space_width, PageBits); return InitializeMemoryLayout(start, end); } -- cgit v1.2.3 From a745d87971b2c9795e1b2c587bfe30b849b522fa Mon Sep 17 00:00:00 2001 From: Morph <39850852+Morph1984@users.noreply.github.com> Date: Sat, 2 Jan 2021 09:00:05 -0500 Subject: general: Fix various spelling errors --- src/core/hle/kernel/memory/memory_block.h | 14 +++++++------- src/core/hle/kernel/memory/page_table.cpp | 12 ++++++------ src/core/hle/kernel/svc_types.h | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h index 37fe19916..83acece1e 100644 --- a/src/core/hle/kernel/memory/memory_block.h +++ b/src/core/hle/kernel/memory/memory_block.h @@ -73,12 +73,12 @@ enum class MemoryState : u32 { ThreadLocal = static_cast(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, - Transfered = static_cast(Svc::MemoryState::Transfered) | FlagsMisc | - FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | - FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + Transferred = static_cast(Svc::MemoryState::Transferred) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | + FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, - SharedTransfered = static_cast(Svc::MemoryState::SharedTransfered) | FlagsMisc | - FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + SharedTransferred = static_cast(Svc::MemoryState::SharedTransferred) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, SharedCode = static_cast(Svc::MemoryState::SharedCode) | FlagMapped | FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, @@ -111,8 +111,8 @@ static_assert(static_cast(MemoryState::AliasCodeData) == 0x03FFBD09); static_assert(static_cast(MemoryState::Ipc) == 0x005C3C0A); static_assert(static_cast(MemoryState::Stack) == 0x005C3C0B); static_assert(static_cast(MemoryState::ThreadLocal) == 0x0040200C); -static_assert(static_cast(MemoryState::Transfered) == 0x015C3C0D); -static_assert(static_cast(MemoryState::SharedTransfered) == 0x005C380E); +static_assert(static_cast(MemoryState::Transferred) == 0x015C3C0D); +static_assert(static_cast(MemoryState::SharedTransferred) == 0x005C380E); static_assert(static_cast(MemoryState::SharedCode) == 0x0040380F); static_assert(static_cast(MemoryState::Inaccessible) == 0x00000010); static_assert(static_cast(MemoryState::NonSecureIpc) == 0x005C3811); diff --git 
a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp index f3e8bc333..080886554 100644 --- a/src/core/hle/kernel/memory/page_table.cpp +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -1007,8 +1007,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const { case MemoryState::Shared: case MemoryState::AliasCode: case MemoryState::AliasCodeData: - case MemoryState::Transfered: - case MemoryState::SharedTransfered: + case MemoryState::Transferred: + case MemoryState::SharedTransferred: case MemoryState::SharedCode: case MemoryState::GeneratedCode: case MemoryState::CodeOut: @@ -1042,8 +1042,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const { case MemoryState::Shared: case MemoryState::AliasCode: case MemoryState::AliasCodeData: - case MemoryState::Transfered: - case MemoryState::SharedTransfered: + case MemoryState::Transferred: + case MemoryState::SharedTransferred: case MemoryState::SharedCode: case MemoryState::GeneratedCode: case MemoryState::CodeOut: @@ -1080,8 +1080,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s case MemoryState::AliasCodeData: case MemoryState::Stack: case MemoryState::ThreadLocal: - case MemoryState::Transfered: - case MemoryState::SharedTransfered: + case MemoryState::Transferred: + case MemoryState::SharedTransferred: case MemoryState::SharedCode: case MemoryState::GeneratedCode: case MemoryState::CodeOut: diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 986724beb..11e1d8e2d 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -23,8 +23,8 @@ enum class MemoryState : u32 { Ipc = 0x0A, Stack = 0x0B, ThreadLocal = 0x0C, - Transfered = 0x0D, - SharedTransfered = 0x0E, + Transferred = 0x0D, + SharedTransferred = 0x0E, SharedCode = 0x0F, Inaccessible = 0x10, NonSecureIpc = 0x11, -- cgit v1.2.3 From 4f13e270c8c63f86a135426e381eef7d4837e5a4 Mon Sep 17 00:00:00 2001 From: ReinUsesLisp Date: Tue, 5 Jan 2021 04:18:16 -0300 Subject: core: Silence warnings when compiling without asserts --- src/core/hle/kernel/memory/address_space_info.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp index e4288cab4..6cf43ba24 100644 --- a/src/core/hle/kernel/memory/address_space_info.cpp +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) { return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; } UNREACHABLE(); + return 0; } std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { @@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; } UNREACHABLE(); + return 0; } } // namespace Kernel::Memory -- cgit v1.2.3
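For context on the pattern in this last commit: when assertions compile away, UNREACHABLE() no longer terminates the control path, so a value-returning function can appear to fall off its end and the compiler warns (-Wreturn-type). The dummy returns added above silence that warning. A reduced illustration with deliberately simplified macros; this is not yuzu's actual common/assert.h:

#include <cstdlib>

#ifdef NDEBUG
#define UNREACHABLE() ((void)0) // asserts compiled out: execution can fall through
#else
#define UNREACHABLE() std::abort()
#endif

int Lookup(int key) {
    switch (key) {
    case 0:
        return 10;
    case 1:
        return 20;
    }
    UNREACHABLE();
    return 0; // keeps -Wreturn-type quiet when UNREACHABLE() is a no-op
}

int main() {
    return Lookup(1) == 20 ? 0 : 1;
}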