Diffstat
-rw-r--r--  src/core/core_timing.cpp            6
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  21
-rw-r--r--  src/core/hle/service/am/am.cpp     20
-rw-r--r--  src/core/hle/service/am/am.h        1
4 files changed, 32 insertions(+), 16 deletions(-)
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 5c83c41a4..a63e60461 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -172,7 +172,7 @@ void CoreTiming::ClearPendingEvents() {
}
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
- basic_lock.lock();
+ std::scoped_lock lock{basic_lock};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type.lock().get() == event_type.get();
@@ -183,12 +183,10 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
event_queue.erase(itr, event_queue.end());
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
- basic_lock.unlock();
}
std::optional<s64> CoreTiming::Advance() {
- std::scoped_lock advance_scope{advance_lock};
- std::scoped_lock basic_scope{basic_lock};
+ std::scoped_lock lock{advance_lock, basic_lock};
global_timer = GetGlobalTimeNs().count();
while (!event_queue.empty() && event_queue.front().time <= global_timer) {
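
Both core_timing.cpp hunks replace manual lock()/unlock() pairs with std::scoped_lock, and Advance() now acquires its two mutexes through a single scoped_lock. A minimal, self-contained sketch of the same RAII pattern (the names below are illustrative, not yuzu's actual types):

    // Sketch only: scoped_lock releases its mutexes when the scope ends, even on
    // early return or exception, and locking several mutexes in one scoped_lock
    // uses a deadlock-avoidance algorithm (std::lock) under the hood.
    #include <algorithm>
    #include <mutex>
    #include <vector>

    class TimingQueue {
    public:
        void Remove(int id) {
            std::scoped_lock lock{basic_lock};  // replaces lock()/unlock()
            events.erase(std::remove(events.begin(), events.end(), id), events.end());
        }                                       // basic_lock released here

        void Advance() {
            // Both mutexes are acquired together; the combined scoped_lock
            // avoids the deadlock that inconsistent lock ordering could cause.
            std::scoped_lock lock{advance_lock, basic_lock};
            // ... pop and dispatch due events ...
        }

    private:
        std::mutex advance_lock;
        std::mutex basic_lock;
        std::vector<int> events;
    };

The practical benefit over the previous manual unlock is that an early return or an exception thrown inside the critical section can no longer leave the mutex locked.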
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 2b12c0dbf..7b929781c 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -6,6 +6,7 @@
// licensed under GPLv2 or later under exception provided by the author.
#include <algorithm>
+#include <mutex>
#include <set>
#include <unordered_set>
#include <utility>
@@ -31,22 +32,20 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
GlobalScheduler::~GlobalScheduler() = default;
void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
- global_list_guard.lock();
+ std::scoped_lock lock{global_list_guard};
thread_list.push_back(std::move(thread));
- global_list_guard.unlock();
}
void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
- global_list_guard.lock();
+ std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
- global_list_guard.unlock();
}
u32 GlobalScheduler::SelectThreads() {
ASSERT(is_locked);
const auto update_thread = [](Thread* thread, Scheduler& sched) {
- sched.guard.lock();
+ std::scoped_lock lock{sched.guard};
if (thread != sched.selected_thread_set.get()) {
if (thread == nullptr) {
++sched.idle_selection_count;
@@ -57,7 +56,6 @@ u32 GlobalScheduler::SelectThreads() {
sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
sched.is_context_switch_pending = reschedule_pending;
std::atomic_thread_fence(std::memory_order_seq_cst);
- sched.guard.unlock();
return reschedule_pending;
};
if (!is_reselection_pending.load()) {
@@ -757,11 +755,12 @@ void Scheduler::OnSwitch(void* this_scheduler) {
void Scheduler::SwitchToCurrent() {
while (true) {
- guard.lock();
- selected_thread = selected_thread_set;
- current_thread = selected_thread;
- is_context_switch_pending = false;
- guard.unlock();
+ {
+ std::scoped_lock lock{guard};
+ selected_thread = selected_thread_set;
+ current_thread = selected_thread;
+ is_context_switch_pending = false;
+ }
while (!is_context_switch_pending) {
if (current_thread != nullptr && !current_thread->IsHLEThread()) {
current_thread->context_guard.lock();
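
The SwitchToCurrent() hunk shows the other idiom used throughout this change: when the lock must not be held for the rest of the function, the scoped_lock is wrapped in an extra block so it is released before the wait loop. A standalone sketch of that scope-bounding pattern (illustrative names, not the scheduler's real state):

    #include <mutex>

    std::mutex guard;
    int selected_thread = 1;
    int current_thread = 0;
    bool is_context_switch_pending = true;

    void SwitchToCurrent() {
        {
            std::scoped_lock lock{guard};    // critical section begins
            current_thread = selected_thread;
            is_context_switch_pending = false;
        }                                    // guard released at this brace
        // The longer-running part (the scheduler's wait/run loop) happens
        // outside the critical section, so other threads can take 'guard'.
    }

    int main() {
        SwitchToCurrent();
        return current_thread;               // 1
    }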
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 24cfb370b..c688d6d98 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -272,7 +272,7 @@ ISelfController::ISelfController(Core::System& system,
{41, nullptr, "IsSystemBufferSharingEnabled"},
{42, nullptr, "GetSystemSharedLayerHandle"},
{43, nullptr, "GetSystemSharedBufferHandle"},
- {44, nullptr, "CreateManagedDisplaySeparableLayer"},
+ {44, &ISelfController::CreateManagedDisplaySeparableLayer, "CreateManagedDisplaySeparableLayer"},
{45, nullptr, "SetManagedDisplayLayerSeparationMode"},
{50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
{51, nullptr, "ApproveToDisplay"},
@@ -462,6 +462,24 @@ void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx)
rb.Push(*layer_id);
}
+void ISelfController::CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_AM, "(STUBBED) called");
+
+ // TODO(Subv): Find out how AM determines the display to use; for now, just
+ // create the layer in the Default display.
+ // This calls nn::vi::CreateRecordingLayer(), which creates another layer.
+ // Currently we do not support more than one layer per display, so output one layer id for now.
+ // Outputting 1 layer id instead of the expected 2 has not been observed to cause any adverse
+ // side effects.
+ // TODO: Support multiple layers
+ const auto display_id = nvflinger->OpenDisplay("Default");
+ const auto layer_id = nvflinger->CreateLayer(*display_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(*layer_id);
+}
+
void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
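
The am.cpp change registers command 44 in the service's handler table and implements it as a stub that always reports one layer; the matching declaration is added to am.h below. A generic sketch of that command-id-to-handler dispatch pattern (yuzu's real IPC framework differs; everything here is illustrative):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <unordered_map>

    class SelfController {
    public:
        SelfController() {
            // Command id -> member handler, mirroring the new {44, ...} table entry.
            handlers[44] = [this] { CreateManagedDisplaySeparableLayer(); };
        }

        void Dispatch(std::uint32_t command_id) {
            const auto it = handlers.find(command_id);
            if (it != handlers.end()) {
                it->second();
            } else {
                std::cout << "unknown command " << command_id << '\n';
            }
        }

    private:
        void CreateManagedDisplaySeparableLayer() {
            // Stubbed handler: log, then reply with success and a single layer id.
            std::cout << "(STUBBED) returning layer id 1\n";
        }

        std::unordered_map<std::uint32_t, std::function<void()>> handlers;
    };

    int main() {
        SelfController controller;
        controller.Dispatch(44);
    }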
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 2f69466ec..6cfb11b48 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -140,6 +140,7 @@ private:
void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
void SetAlbumImageOrientation(Kernel::HLERequestContext& ctx);
void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
+ void CreateManagedDisplaySeparableLayer(Kernel::HLERequestContext& ctx);
void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);