Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp |  25
-rw-r--r--  src/core/hle/kernel/errors.h            |   2
-rw-r--r--  src/core/hle/kernel/handle_table.cpp    |   2
-rw-r--r--  src/core/hle/kernel/handle_table.h      |   2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp         |  27
-rw-r--r--  src/core/hle/kernel/hle_ipc.h           |  20
-rw-r--r--  src/core/hle/kernel/process.cpp         |   6
-rw-r--r--  src/core/hle/kernel/process.h           |   4
-rw-r--r--  src/core/hle/kernel/shared_memory.h     |   2
-rw-r--r--  src/core/hle/kernel/svc.cpp             | 120
-rw-r--r--  src/core/hle/kernel/thread.cpp          |   2
-rw-r--r--  src/core/hle/kernel/thread.h            |  14
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp      |   2
-rw-r--r--  src/core/hle/kernel/vm_manager.h        |   4
-rw-r--r--  src/core/hle/kernel/wait_object.cpp     |   2
15 files changed, 146 insertions(+), 88 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 6657accd5..93577591f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {
// Gets the threads waiting on an address.
static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
- const auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
-
- for (auto& thread : thread_list) {
- if (thread->arb_wait_address == arb_addr)
- waiting_threads.push_back(thread);
- }
- };
+ const auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr arb_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
+
+ for (auto& thread : thread_list) {
+ if (thread->arb_wait_address == arb_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
@@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (num_to_wake > 0)
last = num_to_wake;
// Signal the waiting threads.
- for (size_t i = 0; i < last; i++) {
+ for (std::size_t i = 0; i < last; i++) {
ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->arb_wait_address = 0;
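
Note: aside from the new SVC validations further below, most of this change is a mechanical size_t -> std::size_t rename. The qualified spelling is the portable one: <cstddef> is only required to declare std::size_t, and whether an unqualified ::size_t also exists in the global namespace is implementation-defined. A minimal sketch of the preferred usage (illustrative, not part of the patch):

    #include <cstddef>

    // Only std::size_t is guaranteed by <cstddef>; the unqualified global
    // name may or may not be available depending on the standard library.
    std::size_t count = 0;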
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 6c9fc9bfe..8c2be2681 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -17,6 +17,7 @@ enum {
// Confirmed Switch OS error codes
MaxConnectionsReached = 7,
+ InvalidSize = 101,
InvalidAddress = 102,
HandleTableFull = 105,
InvalidMemoryState = 106,
@@ -56,6 +57,7 @@ constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS(ErrorModule::Kernel,
ErrCodes::InvalidMemoryPermissions);
constexpr ResultCode ERR_INVALID_HANDLE(ErrorModule::Kernel, ErrCodes::InvalidHandle);
constexpr ResultCode ERR_INVALID_PROCESSOR_ID(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
+constexpr ResultCode ERR_INVALID_SIZE(ErrorModule::Kernel, ErrCodes::InvalidSize);
constexpr ResultCode ERR_INVALID_STATE(ErrorModule::Kernel, ErrCodes::InvalidState);
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY(ErrorModule::Kernel,
ErrCodes::InvalidThreadPriority);
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3a079b9a9..5ee5c05e3 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
}
bool HandleTable::IsValid(Handle handle) const {
- size_t slot = GetSlot(handle);
+ std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index cac928adb..9e2f33e8a 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -93,7 +93,7 @@ private:
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
- static const size_t MAX_COUNT = 4096;
+ static const std::size_t MAX_COUNT = 4096;
static u16 GetSlot(Handle handle) {
return handle >> 15;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 7264be906..72fb9d250 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->wakeup_callback =
- [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) mutable -> bool {
+ thread->wakeup_callback = [context = *this, callback](
+ ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
@@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
}
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ParseCommandBuffer(cmd_buf.data(), false);
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
@@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
"Handle descriptor bit set but no handles to translate");
// We write the translated handles at a specific offset in the command buffer, this space
// was already reserved when writing the header.
- size_t current_offset =
+ std::size_t current_offset =
(sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");
@@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ASSERT(domain_message_header->num_objects == domain_objects.size());
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
- size_t domain_offset = size - domain_message_header->num_objects;
+ std::size_t domain_offset = size - domain_message_header->num_objects;
auto& request_handlers = server_session->domain_request_handlers;
for (auto& object : domain_objects) {
@@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
return buffer;
}
-size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
+std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
+ int buffer_index) const {
if (size == 0) {
LOG_WARNING(Core, "skip empty buffer write");
return 0;
}
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
- const size_t buffer_size{GetWriteBufferSize(buffer_index)};
+ const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
@@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
return size;
}
-size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
: BufferDescriptorX()[buffer_index].Size();
}
-size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
: BufferDescriptorC()[buffer_index].Size();
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f0d07f1b6..894479ee0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -170,7 +170,7 @@ public:
std::vector<u8> ReadBuffer(int buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
- size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
+ std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;
/* Helper function to write a buffer using the appropriate buffer descriptor
*
@@ -182,7 +182,7 @@ public:
*/
template <typename ContiguousContainer,
typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
- size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
+ std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
using ContiguousType = typename ContiguousContainer::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
@@ -193,19 +193,19 @@ public:
}
/// Helper function to get the size of the input buffer
- size_t GetReadBufferSize(int buffer_index = 0) const;
+ std::size_t GetReadBufferSize(int buffer_index = 0) const;
/// Helper function to get the size of the output buffer
- size_t GetWriteBufferSize(int buffer_index = 0) const;
+ std::size_t GetWriteBufferSize(int buffer_index = 0) const;
template <typename T>
- SharedPtr<T> GetCopyObject(size_t index) {
+ SharedPtr<T> GetCopyObject(std::size_t index) {
ASSERT(index < copy_objects.size());
return DynamicObjectCast<T>(copy_objects[index]);
}
template <typename T>
- SharedPtr<T> GetMoveObject(size_t index) {
+ SharedPtr<T> GetMoveObject(std::size_t index) {
ASSERT(index < move_objects.size());
return DynamicObjectCast<T>(move_objects[index]);
}
@@ -223,7 +223,7 @@ public:
}
template <typename T>
- std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
+ std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers[index]);
}
@@ -240,15 +240,15 @@ public:
domain_objects.clear();
}
- size_t NumMoveObjects() const {
+ std::size_t NumMoveObjects() const {
return move_objects.size();
}
- size_t NumCopyObjects() const {
+ std::size_t NumCopyObjects() const {
return copy_objects.size();
}
- size_t NumDomainObjects() const {
+ std::size_t NumDomainObjects() const {
return domain_objects.size();
}
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b025e323f..7a272d031 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
return process;
}
-void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
- for (size_t i = 0; i < len; ++i) {
+void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
+ for (std::size_t i = 0; i < len; ++i) {
u32 descriptor = kernel_caps[i];
u32 type = descriptor >> 20;
@@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
"Shared memory exceeds bounds of mapped block");
const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
- size_t backing_block_offset = vma->second.offset + vma_offset;
+ std::size_t backing_block_offset = vma->second.offset + vma_offset;
CASCADE_RESULT(auto new_vma,
vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1587d40c1..81538f70c 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -59,7 +59,7 @@ class ResourceLimit;
struct CodeSet final : public Object {
struct Segment {
- size_t offset = 0;
+ std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
@@ -164,7 +164,7 @@ public:
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
*/
- void ParseKernelCaps(const u32* kernel_caps, size_t len);
+ void ParseKernelCaps(const u32* kernel_caps, std::size_t len);
/**
* Applies address space changes and launches the process main thread.
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 2c729afe3..2c06bb7ce 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -119,7 +119,7 @@ public:
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
- size_t backing_block_offset;
+ std::size_t backing_block_offset;
/// Size of the memory block. Page-aligned.
u64 size;
/// Permission restrictions applied to the process which created the block.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f500fd2e7..c5c1697ee 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -35,10 +35,21 @@
#include "core/hle/service/service.h"
namespace Kernel {
+namespace {
+constexpr bool Is4KBAligned(VAddr address) {
+ return (address & 0xFFF) == 0;
+}
+} // Anonymous namespace
/// Set the process heap to a given Size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
+
+ // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB.
+ if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& process = *Core::CurrentProcess();
CASCADE_RESULT(*heap_addr,
process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
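
Note: the 0xFFFFFFFE001FFFFF mask encodes both halves of the comment in one test: any set bit below bit 21 means the size is not a multiple of 2MB, and any set bit above bit 32 means it is far too large. A standalone sketch of the same check with boundary values (IsValidHeapSize is a hypothetical name, not part of the patch):

    #include <cstdint>

    constexpr bool IsValidHeapSize(std::uint64_t heap_size) {
        // Mirrors the SVC check: a valid heap size may only set bits 21..32.
        return (heap_size & 0xFFFFFFFE001FFFFFULL) == 0;
    }

    static_assert(IsValidHeapSize(0x200000));     // 2MB, the smallest granularity
    static_assert(IsValidHeapSize(0x100000000));  // 4GB, the documented upper bound
    static_assert(!IsValidHeapSize(0x300000));    // 3MB: not a multiple of 2MB
    static_assert(!IsValidHeapSize(0x1000));      // 4KB: below the granularity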
@@ -56,6 +67,15 @@ static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}
@@ -63,6 +83,15 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}
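
Note: MapMemory and UnmapMemory now validate their arguments before touching the address space: both addresses must be page-aligned, and the size must be page-aligned and non-zero (a zero size is rejected even though it is trivially aligned). The same predicate in isolation, with example values (illustrative only):

    #include <cstdint>

    constexpr bool Is4KBAligned(std::uint64_t value) {
        return (value & 0xFFF) == 0;
    }

    static_assert(Is4KBAligned(0x10000));   // page-aligned address or size
    static_assert(!Is4KBAligned(0x10800));  // not page-aligned -> ERR_INVALID_ADDRESS / ERR_INVALID_SIZE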
@@ -146,7 +175,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) {
+ SharedPtr<WaitObject> object, std::size_t index) {
ASSERT(thread->status == ThreadStatus::WaitSynchAny);
if (reason == ThreadWakeupReason::Timeout) {
@@ -415,35 +444,43 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shared_memory_handle, addr, size, permissions);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto permissions_type = static_cast<MemoryPermission>(permissions);
+ if (permissions_type != MemoryPermission::Read &&
+ permissions_type != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
if (!shared_memory) {
return ERR_INVALID_HANDLE;
}
- MemoryPermission permissions_type = static_cast<MemoryPermission>(permissions);
- switch (permissions_type) {
- case MemoryPermission::Read:
- case MemoryPermission::Write:
- case MemoryPermission::ReadWrite:
- case MemoryPermission::Execute:
- case MemoryPermission::ReadExecute:
- case MemoryPermission::WriteExecute:
- case MemoryPermission::ReadWriteExecute:
- case MemoryPermission::DontCare:
- return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
- MemoryPermission::DontCare);
- default:
- LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions);
- }
-
- return RESULT_SUCCESS;
+ return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
+ MemoryPermission::DontCare);
}
static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
shared_memory_handle, addr, size);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
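
Note: MapSharedMemory previously accepted every MemoryPermission enumerator (including the Execute combinations) and, for unknown values, only logged an error before returning RESULT_SUCCESS without mapping anything. It now rejects everything except Read and ReadWrite, and does so before the handle lookup. A minimal sketch of the tightened predicate using a stand-in enum (both names are illustrative, not the kernel's actual definitions):

    enum class MemoryPermission : unsigned { None = 0, Read = 1, Write = 2, ReadWrite = 3 };

    constexpr bool IsValidSharedMemoryPermission(MemoryPermission perm) {
        return perm == MemoryPermission::Read || perm == MemoryPermission::ReadWrite;
    }

    static_assert(IsValidSharedMemoryPermission(MemoryPermission::Read));
    static_assert(!IsValidSharedMemoryPermission(MemoryPermission::Write));  // write-only is now rejected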
@@ -524,7 +561,7 @@ static void ExitProcess() {
/// Creates a new thread
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
u32 priority, s32 processor_id) {
- std::string name = fmt::format("unknown-{:X}", entry_point);
+ std::string name = fmt::format("thread-{:X}", entry_point);
if (priority > THREADPRIO_LOWEST) {
return ERR_INVALID_THREAD_PRIORITY;
@@ -647,16 +684,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);
- auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr condvar_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
- for (auto& thread : thread_list) {
- if (thread->condvar_wait_address == condvar_addr)
- waiting_threads.push_back(thread);
- }
- };
+ for (auto& thread : thread_list) {
+ if (thread->condvar_wait_address == condvar_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
@@ -672,7 +710,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (target != -1)
last = target;
@@ -680,12 +718,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
if (last > waiting_threads.size())
return RESULT_SUCCESS;
- for (size_t index = 0; index < last; ++index) {
+ for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];
ASSERT(thread->condvar_wait_address == condition_variable_addr);
- size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
+ std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -898,12 +936,28 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
local_permissions, remote_permissions);
+ // Size must be a multiple of 4KB and be less than or equal to
+ // approx. 8 GB (actually (1GB - 512B) * 8)
+ if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto local_perms = static_cast<MemoryPermission>(local_permissions);
+ if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
+ const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
+ if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
+ remote_perms != MemoryPermission::DontCare) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto& handle_table = kernel.HandleTable();
auto shared_mem_handle =
SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
- static_cast<MemoryPermission>(local_permissions),
- static_cast<MemoryPermission>(remote_permissions));
+ local_perms, remote_perms);
CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
return RESULT_SUCCESS;
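
Note: as with SetHeapSize, the 0xFFFFFFFE00000FFF mask folds both limits into one test: bits 0..11 catch sizes that are not multiples of 4KB, and bits 33 and above catch sizes beyond the (1GB - 512B) * 8 = 0x1FFFFF000 ceiling from the comment. A standalone sketch with the boundary values (IsValidSharedMemSize is a hypothetical name, not part of the patch):

    #include <cstdint>

    constexpr bool IsValidSharedMemSize(std::uint64_t size) {
        // A valid size may only set bits 12..32 and must be non-zero.
        return size != 0 && (size & 0xFFFFFFFE00000FFFULL) == 0;
    }

    static_assert(IsValidSharedMemSize(0x1000));        // one 4KB page
    static_assert(IsValidSharedMemSize(0x1FFFFF000));   // (1GB - 512B) * 8, the maximum
    static_assert(!IsValidSharedMemSize(0x1FFFFF200));  // not a multiple of 4KB
    static_assert(!IsValidSharedMemSize(0x200000000));  // 8GB: bit 33 set, too large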
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3f12a84dc..89cd5f401 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
available_slot = 0; // Use the first slot in the new page
// Allocate some memory from the end of the linear heap for this region.
- const size_t offset = thread->tls_memory->size();
+ const std::size_t offset = thread->tls_memory->size();
thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
auto& vm_manager = owner_process->vm_manager;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 20f50458b..df4748942 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -15,6 +15,12 @@
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
+namespace Kernel {
+
+class KernelCore;
+class Process;
+class Scheduler;
+
enum ThreadPriority : u32 {
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
@@ -54,12 +60,6 @@ enum class ThreadWakeupReason {
Timeout // The thread was woken up due to a wait timeout.
};
-namespace Kernel {
-
-class KernelCore;
-class Process;
-class Scheduler;
-
class Thread final : public WaitObject {
public:
/**
@@ -254,7 +254,7 @@ public:
Handle callback_handle;
using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index);
+ SharedPtr<WaitObject> object, std::size_t index);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
// was waiting via WaitSynchronizationN then the object will be the last object that became
// available. In case of a timeout, the object will be nullptr.
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 479cacb62..608cbd57b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size,
+ std::size_t offset, u64 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 98bd04bea..de75036c0 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -81,7 +81,7 @@ struct VirtualMemoryArea {
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
- size_t offset = 0;
+ std::size_t offset = 0;
// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@@ -147,7 +147,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size, MemoryState state);
+ std::size_t offset, u64 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp
index eef00b729..b190ceb98 100644
--- a/src/core/hle/kernel/wait_object.cpp
+++ b/src/core/hle/kernel/wait_object.cpp
@@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
}
- size_t index = thread->GetWaitObjectIndex(this);
+ std::size_t index = thread->GetWaitObjectIndex(this);
for (auto& object : thread->wait_objects)
object->RemoveWaitingThread(thread.get());