Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp              8
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp             40
-rw-r--r--  src/core/hle/kernel/k_process.cpp                     23
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp              36
-rw-r--r--  src/core/hle/kernel/k_thread.h                        4
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp           4
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp             4
-rw-r--r--  src/core/hle/kernel/kernel.cpp                        69
-rw-r--r--  src/core/hle/kernel/kernel.h                          4
-rw-r--r--  src/core/hle/kernel/svc/svc_code_memory.cpp           4
-rw-r--r--  src/core/hle/kernel/svc/svc_device_address_space.cpp  4
-rw-r--r--  src/core/hle/kernel/svc/svc_event.cpp                 4
-rw-r--r--  src/core/hle/kernel/svc/svc_ipc.cpp                   8
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp                  8
-rw-r--r--  src/core/hle/kernel/svc/svc_resource_limit.cpp        4
-rw-r--r--  src/core/hle/kernel/svc/svc_session.cpp               4
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp       4
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp                4
-rw-r--r--  src/core/hle/kernel/svc/svc_transfer_memory.cpp       4
19 files changed, 153 insertions(+), 87 deletions(-)
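The bulk of this change migrates scope guards from the old expression form, SCOPE_EXIT({ ... });, to the statement form, SCOPE_EXIT { ... };. A minimal, self-contained sketch of how such a statement-style macro can be built (an illustration only, not the contents of common/scope_exit.h): a tag object plus operator+ bind the trailing lambda body to a guard whose destructor runs it.

    #include <utility>

    namespace detail {
    template <typename F>
    struct ScopeExitGuard {
        F f;
        ~ScopeExitGuard() { f(); }  // run the captured body when the scope ends
    };

    struct ScopeExitTag {};

    template <typename F>
    ScopeExitGuard<F> operator+(ScopeExitTag, F&& f) {
        return ScopeExitGuard<F>{std::forward<F>(f)};
    }
    } // namespace detail

    #define SCOPE_EXIT_CONCAT2(a, b) a##b
    #define SCOPE_EXIT_CONCAT(a, b) SCOPE_EXIT_CONCAT2(a, b)
    // SCOPE_EXIT { body }; expands to: auto scope_exit_guard_N = Tag{} + [&]() { body };
    #define SCOPE_EXIT \
        auto SCOPE_EXIT_CONCAT(scope_exit_guard_, __LINE__) = detail::ScopeExitTag{} + [&]()

SCOPE_GUARD in the hunks below is presumably the same construction, except the guard object is handed back to the caller so the cleanup can be cancelled on the success path; the actual definitions live in common/scope_exit.h.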
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 472e8571c..3e01e3b67 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -24,7 +24,9 @@ Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) {
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(m_kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// Initialize the request.
request->Initialize(nullptr, address, size);
@@ -37,7 +39,9 @@ Result KClientSession::SendAsyncRequest(KEvent* event, uintptr_t address, size_t
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(m_kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// Initialize the request.
request->Initialize(event, address, size);
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 1dd86fb3c..19cdf4f3a 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -1305,11 +1305,11 @@ Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddr
// Ensure that we maintain the instruction cache.
bool reprotected_pages = false;
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (reprotected_pages && any_code_pages) {
InvalidateInstructionCache(m_kernel, this, dst_address, size);
}
- });
+ };
// Unmap.
{
@@ -1397,7 +1397,9 @@ Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
// Close the opened pages when we're done with them.
// If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
// automatically.
- SCOPE_EXIT({ pg.Close(); });
+ SCOPE_EXIT {
+ pg.Close();
+ };
// Clear all the newly allocated pages.
for (const auto& it : pg) {
@@ -1603,7 +1605,9 @@ Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProce
m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
// Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
+ SCOPE_EXIT {
+ pg.Close();
+ };
// Clear all pages.
for (const auto& it : pg) {
@@ -2191,7 +2195,9 @@ Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
// Close the opened pages when we're done with them.
// If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
// automatically.
- SCOPE_EXIT({ pg.Close(); });
+ SCOPE_EXIT {
+ pg.Close();
+ };
// Clear all the newly allocated pages.
for (const auto& it : pg) {
@@ -2592,7 +2598,9 @@ Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddre
// Temporarily unlock ourselves, so that other operations can occur while we flush the
// region.
m_general_lock.Unlock();
- SCOPE_EXIT({ m_general_lock.Lock(); });
+ SCOPE_EXIT {
+ m_general_lock.Lock();
+ };
// Flush the region.
R_ASSERT(FlushDataCache(dst_address, size));
@@ -3311,10 +3319,10 @@ Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddre
// Ensure we unmap the io memory when we're done with it.
const KPageProperties unmap_properties =
KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
- SCOPE_EXIT({
+ SCOPE_EXIT {
R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
unmap_properties, OperationType::Unmap, true));
- });
+ };
// Read the memory.
const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -3347,10 +3355,10 @@ Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAdd
// Ensure we unmap the io memory when we're done with it.
const KPageProperties unmap_properties =
KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
- SCOPE_EXIT({
+ SCOPE_EXIT {
R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
unmap_properties, OperationType::Unmap, true));
- });
+ };
// Write the memory.
const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -4491,14 +4499,14 @@ Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
// If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
// free on scope exit.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (start_partial_page != 0) {
m_kernel.MemoryManager().Close(start_partial_page, 1);
}
if (end_partial_page != 0) {
m_kernel.MemoryManager().Close(end_partial_page, 1);
}
- });
+ };
ON_RESULT_FAILURE {
if (cur_mapped_addr != dst_addr) {
@@ -5166,10 +5174,10 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
// If we fail in the next bit (or retry), we need to cleanup the pages.
- auto pg_guard = SCOPE_GUARD({
+ auto pg_guard = SCOPE_GUARD {
pg.OpenFirst();
pg.Close();
- });
+ };
// Map the memory.
{
@@ -5694,7 +5702,9 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
// Ensure that any pages we track are closed on exit.
KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
- SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+ SCOPE_EXIT {
+ pages_to_close.CloseAndReset();
+ };
// Make a page group representing the region to unmap.
this->MakePageGroup(pages_to_close, virt_addr, num_pages);
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 0b08e877e..cb9a11a63 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -4,8 +4,9 @@
#include <random>
#include "common/scope_exit.h"
#include "common/settings.h"
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h"
#include "core/core.h"
-#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
@@ -76,7 +77,9 @@ Result TerminateChildren(KernelCore& kernel, KProcess* process,
}
// Terminate and close the thread.
- SCOPE_EXIT({ cur_child->Close(); });
+ SCOPE_EXIT {
+ cur_child->Close();
+ };
if (const Result terminate_result = cur_child->Terminate();
ResultTerminationRequested == terminate_result) {
@@ -465,11 +468,11 @@ void KProcess::DoWorkerTaskImpl() {
Result KProcess::StartTermination() {
// Finalize the handle table when we're done, if the process isn't immortal.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (!m_is_immortal) {
this->FinalizeHandleTable();
}
- });
+ };
// Terminate child threads other than the current one.
R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
@@ -963,7 +966,9 @@ Result KProcess::Run(s32 priority, size_t stack_size) {
// Create a new thread for the process.
KThread* main_thread = KThread::Create(m_kernel);
R_UNLESS(main_thread != nullptr, ResultOutOfResource);
- SCOPE_EXIT({ main_thread->Close(); });
+ SCOPE_EXIT {
+ main_thread->Close();
+ };
// Initialize the thread.
R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
@@ -1154,7 +1159,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
// Ensure we maintain a clean state on exit.
- SCOPE_EXIT({ res_limit->Close(); });
+ SCOPE_EXIT {
+ res_limit->Close();
+ };
// Declare flags and code address.
Svc::CreateProcessFlag flag{};
@@ -1258,6 +1265,10 @@ void KProcess::InitializeInterfaces() {
#ifdef HAS_NCE
if (this->IsApplication() && Settings::IsNceEnabled()) {
+ // Register the scoped JIT handler before creating any NCE instances
+ // so that its signal handler will appear first in the signal chain.
+ Core::ScopedJitExecution::RegisterHandler();
+
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
}
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index adaabdd6d..40c3323ef 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -651,11 +651,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
// Process any special data.
if (src_header.GetHasSpecialHeader()) {
// After we process, make sure we track whether the receive list is broken.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (offset > dst_recv_list_idx) {
recv_list_broken = true;
}
- });
+ };
// Process special data.
R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread,
@@ -665,11 +665,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
// Process any pointer buffers.
for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
// After we process, make sure we track whether the receive list is broken.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (offset > dst_recv_list_idx) {
recv_list_broken = true;
}
- });
+ };
R_TRY(ProcessReceiveMessagePointerDescriptors(
offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list,
@@ -680,11 +680,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
// Process any map alias buffers.
for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
// After we process, make sure we track whether the receive list is broken.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (offset > dst_recv_list_idx) {
recv_list_broken = true;
}
- });
+ };
// We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite.
const KMemoryPermission perm = (i >= src_header.GetSendCount())
@@ -702,11 +702,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
// Process any raw data.
if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
// After we process, make sure we track whether the receive list is broken.
- SCOPE_EXIT({
+ SCOPE_EXIT {
if (offset + raw_count > dst_recv_list_idx) {
recv_list_broken = true;
}
- });
+ };
// Get the offset and size.
const size_t offset_words = offset * sizeof(u32);
@@ -1124,7 +1124,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
client_thread->Open();
}
- SCOPE_EXIT({ client_thread->Close(); });
+ SCOPE_EXIT {
+ client_thread->Close();
+ };
// Set the request as our current.
m_current_request = request;
@@ -1174,7 +1176,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
// Reply to the client.
{
// After we reply, close our reference to the request.
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// Get the event to check whether the request is async.
if (KEvent* event = request->GetEvent(); event != nullptr) {
@@ -1236,7 +1240,9 @@ Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buff
}
// Close reference to the request once we're done processing it.
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// Extract relevant information from the request.
const uint64_t client_message = request->GetAddress();
@@ -1394,7 +1400,9 @@ void KServerSession::CleanupRequests() {
}
// Close a reference to the request once it's cleaned up.
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// Extract relevant information from the request.
const uint64_t client_message = request->GetAddress();
@@ -1491,7 +1499,9 @@ void KServerSession::OnClientClosed() {
ASSERT(thread != nullptr);
// Ensure that we close the request when done.
- SCOPE_EXIT({ request->Close(); });
+ SCOPE_EXIT {
+ request->Close();
+ };
// If we're terminating, close a reference to the thread and event.
if (terminate) {
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f13e232b2..e928cfebc 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -66,6 +66,7 @@ enum class SuspendType : u32 {
Debug = 2,
Backtrace = 3,
Init = 4,
+ System = 5,
Count,
};
@@ -84,8 +85,9 @@ enum class ThreadState : u16 {
DebugSuspended = (1 << (2 + SuspendShift)),
BacktraceSuspended = (1 << (3 + SuspendShift)),
InitSuspended = (1 << (4 + SuspendShift)),
+ SystemSuspended = (1 << (5 + SuspendShift)),
- SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
+ SuspendFlagMask = ((1 << 6) - 1) << SuspendShift,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
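The new SuspendType::System adds a sixth suspend reason, so the suspend flag mask in ThreadState widens from 5 to 6 bits. A minimal, self-contained illustration of the bit layout (the SuspendShift value is assumed for the example and these are not the project's real definitions):

    #include <cstdint>

    constexpr std::uint32_t SuspendShift = 4;  // assumed shift, for illustration only
    constexpr std::uint32_t SystemSuspended = 1u << (5 + SuspendShift);
    constexpr std::uint32_t SuspendFlagMask = ((1u << 6) - 1) << SuspendShift;  // six suspend bits
    static_assert((SystemSuspended & SuspendFlagMask) != 0,
                  "the System suspend bit must be covered by the widened mask");
    static_assert((SystemSuspended & (((1u << 5) - 1) << SuspendShift)) == 0,
                  "the old 5-bit mask would have missed it");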
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index a632d1634..1952c0083 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -21,7 +21,9 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Allocate a new page.
KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
- auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+ auto page_buf_guard = SCOPE_GUARD {
+ KPageBuffer::Free(kernel, page_buf);
+ };
// Map the address in.
const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index cbb1b02bb..09295e8ad 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -24,7 +24,9 @@ Result KTransferMemory::Initialize(KProcessAddress addr, std::size_t size,
// Construct the page group, guarding to make sure our state is valid on exit.
m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
- auto pg_guard = SCOPE_GUARD({ m_page_group.reset(); });
+ auto pg_guard = SCOPE_GUARD {
+ m_page_group.reset();
+ };
// Lock the memory.
R_TRY(page_table.LockForTransferMemory(std::addressof(*m_page_group), addr, size,
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 34b25be66..9e5eaeec4 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -109,7 +109,9 @@ struct KernelCore::Impl {
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
- SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
+ SCOPE_EXIT {
+ is_shutting_down.store(false, std::memory_order_relaxed);
+ };
CloseServices();
@@ -1080,7 +1082,9 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
- SCOPE_EXIT({ process->Close(); });
+ SCOPE_EXIT {
+ process->Close();
+ };
// Register the new process.
KProcess::Register(*this, process);
@@ -1108,7 +1112,9 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
- SCOPE_EXIT({ process->Close(); });
+ SCOPE_EXIT {
+ process->Close();
+ };
// Register the new process.
KProcess::Register(*this, process);
@@ -1204,39 +1210,48 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
return *impl->hidbus_shared_mem;
}
-void KernelCore::SuspendApplication(bool suspended) {
+void KernelCore::SuspendEmulation(bool suspended) {
const bool should_suspend{exception_exited || suspended};
- const auto activity =
- should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
+ auto processes = GetProcessList();
- // Get the application process.
- KScopedAutoObject<KProcess> process = ApplicationProcess();
- if (process.IsNull()) {
- return;
+ for (auto& process : processes) {
+ KScopedLightLock ll{process->GetListLock()};
+
+ for (auto& thread : process->GetThreadList()) {
+ if (should_suspend) {
+ thread.RequestSuspend(SuspendType::System);
+ } else {
+ thread.Resume(SuspendType::System);
+ }
+ }
}
- // Set the new activity.
- process->SetActivity(activity);
+ if (!should_suspend) {
+ return;
+ }
// Wait for process execution to stop.
- bool must_wait{should_suspend};
-
- // KernelCore::SuspendApplication must be called from locked context,
- // or we could race another call to SetActivity, interfering with waiting.
- while (must_wait) {
+ // KernelCore::SuspendEmulation must be called from locked context,
+ // or we could race another call, interfering with waiting.
+ const auto TryWait = [&]() {
KScopedSchedulerLock sl{*this};
- // Assume that all threads have finished running.
- must_wait = false;
-
- for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
- process.GetPointerUnsafe()) {
- // A thread has not finished running yet.
- // Continue waiting.
- must_wait = true;
+ for (auto& process : processes) {
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+ process.GetPointerUnsafe()) {
+ // A thread has not finished running yet.
+ // Continue waiting.
+ return false;
+ }
}
}
+
+ return true;
+ };
+
+ while (!TryWait()) {
+ // ...
}
}
@@ -1260,7 +1275,7 @@ bool KernelCore::IsShuttingDown() const {
void KernelCore::ExceptionalExitApplication() {
exception_exited = true;
- SuspendApplication(true);
+ SuspendEmulation(true);
}
void KernelCore::EnterSVCProfile() {
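SuspendEmulation replaces the application-only SuspendApplication: instead of toggling the application process's activity, it requests the new System suspend on every thread of every registered process, then spins on the scheduler lock until no core is still executing a thread owned by one of those processes. A self-contained sketch of that wait-for-idle idiom (Process, Core, and the lock type here are hypothetical stand-ins, not yuzu's types):

    #include <array>
    #include <mutex>
    #include <vector>

    struct Process {};
    struct Core {
        const Process* current_owner = nullptr;  // owner of the thread currently scheduled on this core
    };

    bool AllSuspendedProcessesIdle(std::mutex& scheduler_lock, const std::array<Core, 4>& cores,
                                   const std::vector<const Process*>& suspended) {
        std::scoped_lock lk{scheduler_lock};  // take the lock once per poll, as the kernel does
        for (const Core& core : cores) {
            for (const Process* process : suspended) {
                if (core.current_owner == process) {
                    return false;  // a suspended process is still running on this core
                }
            }
        }
        return true;
    }

    // The caller retries until every core has scheduled away from the suspended processes:
    //   while (!AllSuspendedProcessesIdle(lock, cores, suspended)) { /* retry */ }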
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 8ea5bed1c..57182c0c8 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -258,8 +258,8 @@ public:
/// Gets the shared memory object for HIDBus services.
const Kernel::KSharedMemory& GetHidBusSharedMem() const;
- /// Suspend/unsuspend application process.
- void SuspendApplication(bool suspend);
+ /// Suspend/unsuspend emulated processes.
+ void SuspendEmulation(bool suspend);
/// Exceptional exit application process.
void ExceptionalExitApplication();
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
index bae4cb0cd..7be2802f0 100644
--- a/src/core/hle/kernel/svc/svc_code_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -45,7 +45,9 @@ Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t
KCodeMemory* code_mem = KCodeMemory::Create(kernel);
R_UNLESS(code_mem != nullptr, ResultOutOfResource);
- SCOPE_EXIT({ code_mem->Close(); });
+ SCOPE_EXIT {
+ code_mem->Close();
+ };
// Verify that the region is in range.
R_UNLESS(GetCurrentProcess(system.Kernel()).GetPageTable().Contains(address, size),
diff --git a/src/core/hle/kernel/svc/svc_device_address_space.cpp b/src/core/hle/kernel/svc/svc_device_address_space.cpp
index 42add9473..ac828320f 100644
--- a/src/core/hle/kernel/svc/svc_device_address_space.cpp
+++ b/src/core/hle/kernel/svc/svc_device_address_space.cpp
@@ -28,7 +28,9 @@ Result CreateDeviceAddressSpace(Core::System& system, Handle* out, uint64_t das_
// Create the device address space.
KDeviceAddressSpace* das = KDeviceAddressSpace::Create(system.Kernel());
R_UNLESS(das != nullptr, ResultOutOfResource);
- SCOPE_EXIT({ das->Close(); });
+ SCOPE_EXIT {
+ das->Close();
+ };
// Initialize the device address space.
R_TRY(das->Initialize(das_address, das_size));
diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp
index 901202e6a..8e4beb396 100644
--- a/src/core/hle/kernel/svc/svc_event.cpp
+++ b/src/core/hle/kernel/svc/svc_event.cpp
@@ -72,10 +72,10 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
event_reservation.Commit();
// Ensure that we clean up the event (and its only references are handle table) on function end.
- SCOPE_EXIT({
+ SCOPE_EXIT {
event->GetReadableEvent().Close();
event->Close();
- });
+ };
// Register the event.
KEvent::Register(kernel, event);
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index 85cc4f561..b619bd70a 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -129,11 +129,11 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
}
// Ensure handles are closed when we're done.
- SCOPE_EXIT({
+ SCOPE_EXIT {
for (auto i = 0; i < num_handles; ++i) {
objs[i]->Close();
}
- });
+ };
R_RETURN(ReplyAndReceiveImpl(kernel, out_index, message, buffer_size, message_paddr, objs,
num_handles, reply_target, timeout_ns));
@@ -208,10 +208,10 @@ Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_ha
event_reservation.Commit();
// At end of scope, kill the standing references to the sub events.
- SCOPE_EXIT({
+ SCOPE_EXIT {
event->GetReadableEvent().Close();
event->Close();
- });
+ };
// Register the event.
KEvent::Register(system.Kernel(), event);
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 737749f7d..9a22dadaf 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -68,10 +68,10 @@ Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
port->Initialize(max_sessions, is_light, name);
// Ensure that we clean up the port (and its only references are handle table) on function end.
- SCOPE_EXIT({
+ SCOPE_EXIT {
port->GetServerPort().Close();
port->GetClientPort().Close();
- });
+ };
// Register the port.
KPort::Register(kernel, port);
@@ -150,10 +150,10 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t
KPort::Register(system.Kernel(), port);
// Ensure that our only reference to the port is in the handle table when we're done.
- SCOPE_EXIT({
+ SCOPE_EXIT {
port->GetClientPort().Close();
port->GetServerPort().Close();
- });
+ };
// Register the handle in the table.
R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
index c8e820b6a..6f3972482 100644
--- a/src/core/hle/kernel/svc/svc_resource_limit.cpp
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -18,7 +18,9 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
// Ensure we don't leak a reference to the limit.
- SCOPE_EXIT({ resource_limit->Close(); });
+ SCOPE_EXIT {
+ resource_limit->Close();
+ };
// Initialize the resource limit.
resource_limit->Initialize();
diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp
index 2f5905f32..b034d21d1 100644
--- a/src/core/hle/kernel/svc/svc_session.cpp
+++ b/src/core/hle/kernel/svc/svc_session.cpp
@@ -69,10 +69,10 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
// Ensure that we clean up the session (and its only references are handle table) on function
// end.
- SCOPE_EXIT({
+ SCOPE_EXIT {
session->GetClientSession().Close();
session->GetServerSession().Close();
- });
+ };
// Register the session.
T::Register(system.Kernel(), session);
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 6c79cfd8d..fb03908d7 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -78,11 +78,11 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_ha
}
// Ensure handles are closed when we're done.
- SCOPE_EXIT({
+ SCOPE_EXIT {
for (auto i = 0; i < num_handles; ++i) {
objs[i]->Close();
}
- });
+ };
// Convert the timeout from nanoseconds to ticks.
s64 timeout;
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 7681afa33..7517bb9d3 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -51,7 +51,9 @@ Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u
// Create the thread.
KThread* thread = KThread::Create(kernel);
R_UNLESS(thread != nullptr, ResultOutOfResource)
- SCOPE_EXIT({ thread->Close(); });
+ SCOPE_EXIT {
+ thread->Close();
+ };
// Initialize the thread.
{
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
index 671bca23f..2ea0d4421 100644
--- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -52,7 +52,9 @@ Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64
R_UNLESS(trmem != nullptr, ResultOutOfResource);
// Ensure the only reference is in the handle table when we're done.
- SCOPE_EXIT({ trmem->Close(); });
+ SCOPE_EXIT {
+ trmem->Close();
+ };
// Ensure that the region is in range.
R_UNLESS(process.GetPageTable().Contains(address, size), ResultInvalidCurrentMemory);