Diffstat:
 src/core/core_cpu.cpp          |  3 +++
 src/core/core_cpu.h            |  2 +-
 src/core/core_timing.cpp       |  2 --
 src/core/hle/kernel/thread.cpp | 11 +++++++++--
 4 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 46a522fcd..9b306faf6 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -14,6 +14,7 @@
#include "core/core_timing.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/lock.h"
#include "core/settings.h"
namespace Core {
@@ -125,6 +126,8 @@ void Cpu::Reschedule() {
}
reschedule_pending = false;
+ // Lock the global kernel mutex when we manipulate the HLE state
+ std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
scheduler->Reschedule();
}
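The Reschedule() change above serializes scheduler manipulation behind the global HLE mutex. A minimal sketch of the pattern, assuming HLE::g_hle_lock is a std::recursive_mutex declared in core/hle/lock.h (simplified here as a local definition):

    #include <mutex>

    namespace HLE {
    std::recursive_mutex g_hle_lock; // stand-in for the real declaration in core/hle/lock.h
    } // namespace HLE

    void Reschedule() {
        // A recursive mutex lets this succeed even if the calling thread
        // already holds g_hle_lock while servicing an SVC.
        std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
        // ... manipulate shared kernel state (scheduler, thread lists) ...
    }
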
diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h
index 976952903..56cdae194 100644
--- a/src/core/core_cpu.h
+++ b/src/core/core_cpu.h
@@ -79,7 +79,7 @@ private:
std::shared_ptr<CpuBarrier> cpu_barrier;
std::shared_ptr<Kernel::Scheduler> scheduler;
- bool reschedule_pending{};
+ std::atomic<bool> reschedule_pending = false;
size_t core_index;
};
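reschedule_pending is promoted to std::atomic<bool> because the flag can be set from other host threads while the core's own thread reads and clears it; a plain bool here would be a data race. A sketch of the intended set/consume pattern (method bodies are illustrative, not copied from the codebase):

    #include <atomic>

    class Cpu {
    public:
        // May be called from any host thread to request a scheduler run.
        void PrepareReschedule() {
            reschedule_pending = true;
        }

        // Runs on this core's own host thread.
        void Reschedule() {
            // exchange() reads and clears the flag atomically, so a request
            // arriving concurrently is neither lost nor handled twice.
            if (!reschedule_pending.exchange(false))
                return;
            // ... pick the next thread to run ...
        }

    private:
        std::atomic<bool> reschedule_pending = false;
    };
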
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index d3bb6f818..f977d1b32 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -135,11 +135,9 @@ void ClearPendingEvents() {
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
ASSERT(event_type != nullptr);
s64 timeout = GetTicks() + cycles_into_future;
-
// If this event needs to be scheduled before the next advance(), force one early
if (!is_global_timer_sane)
ForceExceptionCheck(cycles_into_future);
-
event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
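ScheduleEvent() touches the event heap directly and therefore assumes it runs on the emulation thread; that is why the thread.cpp change below switches Thread::WakeAfterDelay() to ScheduleEventThreadsafe(). A simplified sketch of how such a thread-safe variant can hand events off, assuming a mutex-guarded staging queue that the emulation thread drains before advancing (names and details here are illustrative, not the actual CoreTiming implementation):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct Event {
        std::int64_t time;
        std::uint64_t userdata;
    };

    namespace {
    std::mutex ts_queue_mutex;
    std::vector<Event> ts_queue;    // written by arbitrary host threads
    std::vector<Event> event_queue; // owned by the emulation thread
    } // namespace

    void ScheduleEventThreadsafe(std::int64_t cycles_into_future, std::uint64_t userdata) {
        std::lock_guard<std::mutex> lock(ts_queue_mutex);
        ts_queue.push_back(Event{cycles_into_future, userdata});
    }

    // Called on the emulation thread, e.g. at the start of Advance().
    void MoveEvents() {
        std::lock_guard<std::mutex> lock(ts_queue_mutex);
        event_queue.insert(event_queue.end(), ts_queue.begin(), ts_queue.end());
        ts_queue.clear();
    }
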
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 40918ca81..a1a7867ce 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -23,6 +23,7 @@
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -104,6 +105,10 @@ void ExitCurrentThread() {
*/
static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
const auto proper_handle = static_cast<Handle>(thread_handle);
+
+ // Lock the global kernel mutex when we enter the kernel HLE.
+ std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+
SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>(proper_handle);
if (thread == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
@@ -155,8 +160,10 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
if (nanoseconds == -1)
return;
- CoreTiming::ScheduleEvent(CoreTiming::nsToCycles(nanoseconds), ThreadWakeupEventType,
- callback_handle);
+ // This function might be called from any thread so we have to be cautious and use the
+ // thread-safe version of ScheduleEvent.
+ CoreTiming::ScheduleEventThreadsafe(CoreTiming::nsToCycles(nanoseconds), ThreadWakeupEventType,
+ callback_handle);
}
void Thread::CancelWakeupTimer() {
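ThreadWakeupCallback() is invoked from the timing subsystem rather than from an SVC, so it now takes HLE::g_hle_lock before touching the wakeup handle table. The lock being a std::recursive_mutex matters: the same thread may re-acquire it if it already holds the lock higher up the call stack, which would deadlock with a plain std::mutex. A minimal sketch of that re-entrancy property (function names are illustrative):

    #include <mutex>

    namespace {
    std::recursive_mutex g_hle_lock;
    } // namespace

    void WakeupCallback() {
        // Re-acquiring the recursive mutex on the same thread is permitted.
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        // ... look up the thread handle and resume it ...
    }

    void ServiceCall() {
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        WakeupCallback(); // would deadlock here if g_hle_lock were a plain std::mutex
    }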