 src/core/arm/arm_interface.h               |   3
 src/core/arm/dynarmic/arm_dynarmic_32.cpp  |  21
 src/core/arm/dynarmic/arm_dynarmic_32.h    |   7
 src/core/arm/dynarmic/arm_dynarmic_64.cpp  |  21
 src/core/arm/dynarmic/arm_dynarmic_64.h    |   7
 src/core/hle/kernel/k_scheduler.cpp        |   1
 src/core/hle/kernel/k_slab_heap.h          | 199
 src/core/hle/kernel/k_thread.cpp           |  10
 src/core/hle/kernel/kernel.cpp             |   5
 src/core/hle/service/hid/hid.cpp           |  11
 src/video_core/buffer_cache/buffer_cache.h |   4
 11 files changed, 251 insertions(+), 38 deletions(-)
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 77094b48f..689e3ceb5 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -65,9 +65,6 @@ public:
/// Step CPU by one instruction
virtual void Step() = 0;
- /// Exits execution from a callback, the callback must rewind the stack
- virtual void ExceptionalExit() = 0;
-
/// Clear all instruction cache
virtual void ClearInstructionCache() = 0;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index e401fa825..50dc82382 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -78,7 +78,9 @@ public:
}
void CallSVC(u32 swi) override {
- Kernel::Svc::Call(parent.system, swi);
+ parent.svc_called = true;
+ parent.svc_swi = swi;
+ parent.jit->HaltExecution();
}
void AddTicks(u64 ticks) override {
@@ -187,11 +189,17 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
}
void ARM_Dynarmic_32::Run() {
- jit->Run();
-}
-
-void ARM_Dynarmic_32::ExceptionalExit() {
- jit->ExceptionalExit();
+ while (true) {
+ jit->Run();
+ if (!svc_called) {
+ break;
+ }
+ svc_called = false;
+ Kernel::Svc::Call(system, svc_swi);
+ if (shutdown) {
+ break;
+ }
+ }
}
void ARM_Dynarmic_32::Step() {
@@ -275,6 +283,7 @@ void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
void ARM_Dynarmic_32::PrepareReschedule() {
jit->HaltExecution();
+ shutdown = true;
}
void ARM_Dynarmic_32::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index b882b0c59..fa6f4f430 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -42,7 +42,6 @@ public:
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
void Run() override;
- void ExceptionalExit() override;
void Step() override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
@@ -82,6 +81,12 @@ private:
std::size_t core_index;
DynarmicExclusiveMonitor& exclusive_monitor;
std::shared_ptr<Dynarmic::A32::Jit> jit;
+
+ // SVC callback
+ u32 svc_swi{};
+ bool svc_called{};
+
+ bool shutdown{};
};
} // namespace Core
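Note: the rewritten `Run()` above replaces re-entrant SVC handling (which required `ExceptionalExit` to rewind the JIT's stack) with a halt-and-dispatch loop: the `CallSVC` callback only records the SVC number and halts, and the host loop services the call on its own stack before resuming. The 64-bit core below mirrors the same change, and the `ExceptionalExit` call in `KScheduler::Unload` becomes unnecessary. A minimal self-contained sketch of the pattern, with the JIT and SVC dispatcher reduced to hypothetical stand-ins:

```cpp
#include <cstdint>

// Hypothetical stand-ins for the Dynarmic JIT and the kernel SVC dispatcher.
struct Jit {
    bool halted{};
    void Run() { /* execute guest code until HaltExecution() is observed */ }
    void HaltExecution() { halted = true; }
};

void DispatchSvc(std::uint32_t swi) { /* service the supervisor call */ (void)swi; }

class Core {
public:
    // JIT callback: record the SVC and leave generated code instead of
    // calling into the kernel from inside the JIT's stack frame.
    void CallSVC(std::uint32_t swi) {
        svc_swi = swi;
        svc_called = true;
        jit.HaltExecution();
    }

    // Host loop: run, service any pending SVC on the host stack, resume,
    // until a reschedule (shutdown) is requested.
    void Run() {
        while (true) {
            jit.Run();
            if (!svc_called) {
                break; // halted for some reason other than an SVC
            }
            svc_called = false;
            DispatchSvc(svc_swi);
            if (shutdown) {
                break;
            }
        }
    }

    void PrepareReschedule() {
        jit.HaltExecution();
        shutdown = true;
    }

private:
    Jit jit;
    std::uint32_t svc_swi{};
    bool svc_called{};
    bool shutdown{};
};

int main() {
    Core core;
    core.Run(); // returns immediately here: the stub JIT halts without an SVC
}
```

Dispatching outside the JIT callback means a reschedule during an SVC no longer has to unwind generated-code frames, which is what allows `ExceptionalExit` to be deleted from the interface.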
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 157051d69..4f5a58b38 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -102,7 +102,9 @@ public:
}
void CallSVC(u32 swi) override {
- Kernel::Svc::Call(parent.system, swi);
+ parent.svc_called = true;
+ parent.svc_swi = swi;
+ parent.jit->HaltExecution();
}
void AddTicks(u64 ticks) override {
@@ -227,11 +229,17 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
}
void ARM_Dynarmic_64::Run() {
- jit->Run();
-}
-
-void ARM_Dynarmic_64::ExceptionalExit() {
- jit->ExceptionalExit();
+ while (true) {
+ jit->Run();
+ if (!svc_called) {
+ break;
+ }
+ svc_called = false;
+ Kernel::Svc::Call(system, svc_swi);
+ if (shutdown) {
+ break;
+ }
+ }
}
void ARM_Dynarmic_64::Step() {
@@ -320,6 +328,7 @@ void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
void ARM_Dynarmic_64::PrepareReschedule() {
jit->HaltExecution();
+ shutdown = true;
}
void ARM_Dynarmic_64::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 92f715f19..5214a8147 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -40,7 +40,6 @@ public:
void SetPSTATE(u32 pstate) override;
void Run() override;
void Step() override;
- void ExceptionalExit() override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
@@ -75,6 +74,12 @@ private:
DynarmicExclusiveMonitor& exclusive_monitor;
std::shared_ptr<Dynarmic::A64::Jit> jit;
+
+ // SVC callback
+ u32 svc_swi{};
+ bool svc_called{};
+
+ bool shutdown{};
};
} // namespace Core
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 2f82fbcd6..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -659,7 +659,6 @@ void KScheduler::Unload(KThread* thread) {
if (thread) {
if (thread->IsCallingSvc()) {
- system.ArmInterface(core_id).ExceptionalExit();
thread->ClearIsCallingSvc();
}
if (!thread->IsTerminationRequested()) {
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 81d472a3e..0ad74b0a0 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -4,34 +4,213 @@
#pragma once
+#include <atomic>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+
namespace Kernel {
class KernelCore;
-/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
-/// these with new/delete, but this can be re-implemented later to allocate these in emulated
-/// memory.
+namespace impl {
+
+class KSlabHeapImpl final : NonCopyable {
+public:
+ struct Node {
+ Node* next{};
+ };
+
+ constexpr KSlabHeapImpl() = default;
+
+ void Initialize(std::size_t size) {
+ ASSERT(head == nullptr);
+ obj_size = size;
+ }
+
+ constexpr std::size_t GetObjectSize() const {
+ return obj_size;
+ }
+
+ Node* GetHead() const {
+ return head;
+ }
+
+ void* Allocate() {
+ Node* ret = head.load();
+
+ do {
+ if (ret == nullptr) {
+ break;
+ }
+ } while (!head.compare_exchange_weak(ret, ret->next));
+
+ return ret;
+ }
+
+ void Free(void* obj) {
+ Node* node = static_cast<Node*>(obj);
+
+ Node* cur_head = head.load();
+ do {
+ node->next = cur_head;
+ } while (!head.compare_exchange_weak(cur_head, node));
+ }
+
+private:
+ std::atomic<Node*> head{};
+ std::size_t obj_size{};
+};
+
+} // namespace impl
+
+class KSlabHeapBase : NonCopyable {
+public:
+ constexpr KSlabHeapBase() = default;
+
+ constexpr bool Contains(uintptr_t addr) const {
+ return start <= addr && addr < end;
+ }
+
+ constexpr std::size_t GetSlabHeapSize() const {
+ return (end - start) / GetObjectSize();
+ }
+
+ constexpr std::size_t GetObjectSize() const {
+ return impl.GetObjectSize();
+ }
+
+ constexpr uintptr_t GetSlabHeapAddress() const {
+ return start;
+ }
+
+ std::size_t GetObjectIndexImpl(const void* obj) const {
+ return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
+ }
+
+ std::size_t GetPeakIndex() const {
+ return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
+ }
+
+ void* AllocateImpl() {
+ return impl.Allocate();
+ }
+
+ void FreeImpl(void* obj) {
+ // Don't allow freeing an object that wasn't allocated from this heap
+ ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
+
+ impl.Free(obj);
+ }
+
+ void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
+ // Ensure we don't initialize a slab using null memory
+ ASSERT(memory != nullptr);
+
+ // Initialize the base allocator
+ impl.Initialize(obj_size);
+
+ // Set our tracking variables
+ const std::size_t num_obj = (memory_size / obj_size);
+ start = reinterpret_cast<uintptr_t>(memory);
+ end = start + num_obj * obj_size;
+ peak = start;
+
+ // Free the objects
+ u8* cur = reinterpret_cast<u8*>(end);
+
+ for (std::size_t i{}; i < num_obj; i++) {
+ cur -= obj_size;
+ impl.Free(cur);
+ }
+ }
+
+private:
+ using Impl = impl::KSlabHeapImpl;
+
+ Impl impl;
+ uintptr_t peak{};
+ uintptr_t start{};
+ uintptr_t end{};
+};
template <typename T>
-class KSlabHeap final : NonCopyable {
+class KSlabHeap final : public KSlabHeapBase {
public:
- KSlabHeap() = default;
+ enum class AllocationType {
+ Host,
+ Guest,
+ };
- void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
- // Placeholder that should initialize the backing slab heap implementation.
+ explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
+ : KSlabHeapBase(), allocation_type{allocation_type_} {}
+
+ void Initialize(void* memory, std::size_t memory_size) {
+ if (allocation_type == AllocationType::Guest) {
+ InitializeImpl(sizeof(T), memory, memory_size);
+ }
}
T* Allocate() {
- return new T();
+ switch (allocation_type) {
+ case AllocationType::Host:
+ // Fallback for cases where we do not yet support allocating guest memory from the slab
+ // heap, such as for kernel memory regions.
+ return new T;
+
+ case AllocationType::Guest:
+ T* obj = static_cast<T*>(AllocateImpl());
+ if (obj != nullptr) {
+ new (obj) T();
+ }
+ return obj;
+ }
+
+ UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+ return nullptr;
}
T* AllocateWithKernel(KernelCore& kernel) {
- return new T(kernel);
+ switch (allocation_type) {
+ case AllocationType::Host:
+ // Fallback for cases where we do not yet support allocating guest memory from the slab
+ // heap, such as for kernel memory regions.
+ return new T(kernel);
+
+ case AllocationType::Guest:
+ T* obj = static_cast<T*>(AllocateImpl());
+ if (obj != nullptr) {
+ new (obj) T(kernel);
+ }
+ return obj;
+ }
+
+ UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+ return nullptr;
}
void Free(T* obj) {
- delete obj;
+ switch (allocation_type) {
+ case AllocationType::Host:
+ // Fallback for cases where we do not yet support allocating guest memory from the slab
+ // heap, such as for kernel memory regions.
+ delete obj;
+ return;
+
+ case AllocationType::Guest:
+ FreeImpl(obj);
+ return;
+ }
+
+ UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
}
+
+ constexpr std::size_t GetObjectIndex(const T* obj) const {
+ return GetObjectIndexImpl(obj);
+ }
+
+private:
+ const AllocationType allocation_type;
};
} // namespace Kernel
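Note: the new `KSlabHeapImpl` is an intrusive lock-free free list (a Treiber stack): `Allocate` pops the head with `compare_exchange_weak`, and `Free` pushes a node back. A self-contained sketch of the same technique; the arena slot size and count are arbitrary illustrative values, and the seeding loop mirrors how `InitializeImpl` populates the list by freeing each carved-out slot:

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>
#include <new>

// Standalone sketch of the intrusive lock-free free list used above.
class FreeList {
public:
    struct Node {
        Node* next{};
    };

    // Pop the head node, or return nullptr if the list is empty.
    void* Allocate() {
        Node* ret = head.load();
        do {
            if (ret == nullptr) {
                break;
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    // Push an object back as the new head.
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* cur = head.load();
        do {
            node->next = cur;
        } while (!head.compare_exchange_weak(cur, node));
    }

private:
    std::atomic<Node*> head{};
};

int main() {
    // Carve a small arena into fixed-size slots and seed the list.
    constexpr std::size_t slot = 64, count = 16;
    alignas(std::max_align_t) static unsigned char arena[slot * count];

    FreeList list;
    for (std::size_t i = 0; i < count; ++i) {
        list.Free(::new (arena + i * slot) FreeList::Node{});
    }

    void* obj = list.Allocate();
    assert(obj != nullptr);
    list.Free(obj);
}
```

Like the kernel version, this sketch carries no ABA mitigation on the pop path; under heavy contention a tagged-pointer or generation-counter variant would be more robust.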
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index e3f08f256..3cf43d290 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -168,13 +168,13 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
sizeof(StackParameters));
- // Setup the TLS, if needed.
- if (type == ThreadType::User) {
- tls_address = owner->CreateTLSRegion();
- }
-
// Set parent, if relevant.
if (owner != nullptr) {
+ // Setup the TLS, if needed.
+ if (type == ThreadType::User) {
+ tls_address = owner->CreateTLSRegion();
+ }
+
parent = owner;
parent->Open();
parent->IncrementThreadCount();
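Note: the reorder above guards the TLS setup behind the existing `owner != nullptr` check, so a user-type thread created without an owning process can no longer dereference a null `owner`. A hypothetical reduction of the hazard:

```cpp
#include <cstdint>

// Hypothetical stand-in: 'owner' may legitimately be null for
// kernel-internal threads.
struct Process {
    std::uintptr_t CreateTLSRegion() { return 0x1000; }
};

void Initialize(Process* owner, bool is_user_thread, std::uintptr_t& tls_address) {
    // Before the fix this ran unconditionally:
    //     if (is_user_thread) tls_address = owner->CreateTLSRegion(); // null deref!
    if (owner != nullptr) {
        if (is_user_thread) {
            tls_address = owner->CreateTLSRegion(); // safe: owner checked first
        }
        // ... remaining parent bookkeeping (Open(), IncrementThreadCount(), ...)
    }
}
```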
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8b55df82e..0ffb78d51 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -258,7 +258,7 @@ struct KernelCore::Impl {
KAutoObject::Create(thread.get());
ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
- return std::move(thread);
+ return thread;
};
thread_local auto thread = make_thread();
@@ -620,7 +620,8 @@ struct KernelCore::Impl {
void InitializePageSlab() {
// Allocate slab heaps
- user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
+ user_slab_heap_pages =
+ std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);
// TODO(ameerj): This should be derived, not hardcoded within the kernel
constexpr u64 user_slab_heap_size{0x3de000};
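Note: the `return std::move(thread)` → `return thread` change above is not purely cosmetic. When a function returns a local whose type matches the return type, `return local;` permits copy elision (NRVO) and otherwise degrades to a move, whereas wrapping the local in `std::move` forces the move and suppresses elision; clang diagnoses this with `-Wpessimizing-move` / `-Wredundant-move`. A hypothetical illustration (`Widget` and the factory names are invented for the sketch):

```cpp
#include <memory>
#include <utility>

struct Widget {};

// Pessimizing: std::move() forces a move and suppresses NRVO.
std::shared_ptr<Widget> MakePessimized() {
    auto w = std::make_shared<Widget>();
    return std::move(w);
}

// Preferred: a returned local is already treated as an rvalue, so this
// moves at worst and is elided entirely at best.
std::shared_ptr<Widget> MakeElidable() {
    auto w = std::make_shared<Widget>();
    return w;
}

int main() {
    auto a = MakePessimized();
    auto b = MakeElidable();
    (void)a;
    (void)b;
}
```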
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 49c17fd14..df0fe1c8e 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1770,7 +1770,7 @@ public:
{232, nullptr, "GetIrSensorState"},
{233, nullptr, "GetXcdHandleForNpadWithIrSensor"},
{301, nullptr, "ActivateNpadSystem"},
- {303, nullptr, "ApplyNpadSystemCommonPolicy"},
+ {303, &HidSys::ApplyNpadSystemCommonPolicy, "ApplyNpadSystemCommonPolicy"},
{304, nullptr, "EnableAssigningSingleOnSlSrPress"},
{305, nullptr, "DisableAssigningSingleOnSlSrPress"},
{306, nullptr, "GetLastActiveNpad"},
@@ -1949,6 +1949,15 @@ public:
RegisterHandlers(functions);
}
+
+private:
+ void ApplyNpadSystemCommonPolicy(Kernel::HLERequestContext& ctx) {
+ // We already do this for homebrew so we can just stub it out
+ LOG_WARNING(Service_HID, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+ }
};
class HidTmp final : public ServiceFramework<HidTmp> {
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index de971041f..9e6b87960 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -596,7 +596,7 @@ void BufferCache<P>::PopAsyncFlushes() {
runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies);
}
runtime.Finish();
- for (const auto [copy, buffer_id] : downloads) {
+ for (const auto& [copy, buffer_id] : downloads) {
const Buffer& buffer = slot_buffers[buffer_id];
const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
// Undo the modified offset
@@ -606,7 +606,7 @@ void BufferCache<P>::PopAsyncFlushes() {
}
} else {
const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
- for (const auto [copy, buffer_id] : downloads) {
+ for (const auto& [copy, buffer_id] : downloads) {
Buffer& buffer = slot_buffers[buffer_id];
buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
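Note: the `const auto [copy, buffer_id]` → `const auto& [copy, buffer_id]` change avoids materializing a copy of each element of `downloads` per iteration: a by-value structured binding copies the whole pair out of the container, while the reference form binds the names into the stored object. A hypothetical illustration (the `BufferCopy` field layout here is assumed for the sketch):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

struct BufferCopy {
    std::uint64_t src_offset;
    std::uint64_t size;
};
using BufferId = std::uint32_t;

std::uint64_t SumOffsets(const std::vector<std::pair<BufferCopy, BufferId>>& downloads) {
    std::uint64_t total = 0;
    // By value, 'const auto [copy, buffer_id]' would copy each pair out of
    // the vector on every iteration; the reference form binds in place.
    for (const auto& [copy, buffer_id] : downloads) {
        total += copy.src_offset + buffer_id;
    }
    return total;
}
```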