Diffstat:
 src/audio_core/in/audio_in_system.cpp | 6
 src/audio_core/out/audio_out_system.cpp | 6
 src/audio_core/renderer/system.cpp | 89
 src/audio_core/renderer/system.h | 16
 src/audio_core/renderer/voice/voice_context.cpp | 4
 src/audio_core/sink/cubeb_sink.cpp | 31
 src/audio_core/sink/cubeb_sink.h | 7
 src/audio_core/sink/sdl2_sink.cpp | 14
 src/audio_core/sink/sdl2_sink.h | 7
 src/audio_core/sink/sink_details.cpp | 66
 src/audio_core/sink/sink_details.h | 2
 src/common/CMakeLists.txt | 6
 src/common/concepts.h | 8
 src/common/fixed_point.h | 274
 src/common/settings.h | 2
 src/core/CMakeLists.txt | 7
 src/core/arm/arm_interface.cpp | 8
 src/core/arm/dynarmic/arm_dynarmic_64.cpp | 1
 src/core/core.cpp | 92
 src/core/core.h | 10
 src/core/core_timing.cpp | 38
 src/core/core_timing.h | 14
 src/core/device_memory.h | 10
 src/core/file_sys/savedata_factory.cpp | 58
 src/core/file_sys/savedata_factory.h | 4
 src/core/frontend/framebuffer_layout.cpp | 2
 src/core/frontend/framebuffer_layout.h | 1
 src/core/hid/irs_types.h | 20
 src/core/hle/ipc_helpers.h | 14
 src/core/hle/kernel/hle_ipc.cpp | 110
 src/core/hle/kernel/hle_ipc.h | 11
 src/core/hle/kernel/init/init_slab_setup.cpp | 8
 src/core/hle/kernel/k_class_token.cpp | 12
 src/core/hle/kernel/k_class_token.h | 1
 src/core/hle/kernel/k_client_session.cpp | 18
 src/core/hle/kernel/k_client_session.h | 3
 src/core/hle/kernel/k_code_memory.cpp | 2
 src/core/hle/kernel/k_dynamic_page_manager.h | 136
 src/core/hle/kernel/k_dynamic_resource_manager.h | 58
 src/core/hle/kernel/k_dynamic_slab_heap.h | 122
 src/core/hle/kernel/k_event.cpp | 44
 src/core/hle/kernel/k_event.h | 31
 src/core/hle/kernel/k_interrupt_manager.cpp | 29
 src/core/hle/kernel/k_interrupt_manager.h | 4
 src/core/hle/kernel/k_linked_list.h | 1
 src/core/hle/kernel/k_memory_block.h | 506
 src/core/hle/kernel/k_memory_block_manager.cpp | 409
 src/core/hle/kernel/k_memory_block_manager.h | 145
 src/core/hle/kernel/k_memory_manager.cpp | 2
 src/core/hle/kernel/k_page_buffer.cpp | 2
 src/core/hle/kernel/k_page_buffer.h | 1
 src/core/hle/kernel/k_page_table.cpp | 1302
 src/core/hle/kernel/k_page_table.h | 319
 src/core/hle/kernel/k_process.cpp | 112
 src/core/hle/kernel/k_process.h | 83
 src/core/hle/kernel/k_readable_event.cpp | 33
 src/core/hle/kernel/k_readable_event.h | 17
 src/core/hle/kernel/k_server_session.cpp | 343
 src/core/hle/kernel/k_server_session.h | 62
 src/core/hle/kernel/k_session_request.cpp | 61
 src/core/hle/kernel/k_session_request.h | 306
 src/core/hle/kernel/k_shared_memory.cpp | 2
 src/core/hle/kernel/k_shared_memory.h | 4
 src/core/hle/kernel/k_shared_memory_info.h | 3
 src/core/hle/kernel/k_slab_heap.h | 27
 src/core/hle/kernel/k_thread.cpp | 115
 src/core/hle/kernel/k_thread.h | 4
 src/core/hle/kernel/k_thread_local_page.h | 2
 src/core/hle/kernel/k_writable_event.cpp | 35
 src/core/hle/kernel/k_writable_event.h | 39
 src/core/hle/kernel/kernel.cpp | 68
 src/core/hle/kernel/kernel.h | 18
 src/core/hle/kernel/slab_helpers.h | 2
 src/core/hle/kernel/svc.cpp | 201
 src/core/hle/kernel/svc_common.h | 7
 src/core/hle/kernel/svc_types.h | 13
 src/core/hle/kernel/svc_wrap.h | 32
 src/core/hle/result.h | 130
 src/core/hle/service/acc/async_context.cpp | 2
 src/core/hle/service/am/am.cpp | 12
 src/core/hle/service/am/applets/applets.cpp | 10
 src/core/hle/service/audio/audctl.cpp | 16
 src/core/hle/service/audio/audren_u.cpp | 30
 src/core/hle/service/bcat/backend/backend.cpp | 2
 src/core/hle/service/friend/friend.cpp | 13
 src/core/hle/service/hid/controllers/npad.cpp | 10
 src/core/hle/service/hid/controllers/palma.cpp | 16
 src/core/hle/service/hid/hid.cpp | 2
 src/core/hle/service/hid/hidbus/ringcon.cpp | 8
 src/core/hle/service/hid/irsensor/pointing_processor.h | 4
 src/core/hle/service/kernel_helpers.cpp | 5
 src/core/hle/service/ldn/ldn.cpp | 2
 src/core/hle/service/ldr/ldr.cpp | 4
 src/core/hle/service/nfp/nfp_device.cpp | 6
 src/core/hle/service/nim/nim.cpp | 4
 src/core/hle/service/ns/ns.cpp | 30
 src/core/hle/service/ns/ns.h | 3
 src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 12
 src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 5
 src/core/hle/service/nvdrv/devices/nvmap.cpp | 3
 src/core/hle/service/nvdrv/nvdrv.cpp | 1
 src/core/hle/service/nvdrv/nvdrv_interface.h | 4
 src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 9
 src/core/hle/service/nvflinger/buffer_queue_producer.h | 1
 src/core/hle/service/nvflinger/nvflinger.h | 1
 src/core/hle/service/ptm/psm.cpp | 6
 src/core/hle/service/ptm/ts.cpp | 15
 src/core/hle/service/ptm/ts.h | 1
 src/core/hle/service/set/set_sys.cpp | 79
 src/core/hle/service/set/set_sys.h | 2
 src/core/hle/service/sm/sm.cpp | 3
 src/core/hle/service/sm/sm_controller.cpp | 5
 src/core/hle/service/time/system_clock_context_update_callback.cpp | 10
 src/core/hle/service/time/system_clock_context_update_callback.h | 6
 src/core/hle/service/vi/display/vi_display.cpp | 3
 src/core/hle/service/vi/vi_results.h | 2
 src/core/memory.cpp | 6
 src/tests/core/core_timing.cpp | 3
 src/video_core/CMakeLists.txt | 1
 src/video_core/dirty_flags.cpp | 2
 src/video_core/engines/maxwell_3d.cpp | 8
 src/video_core/engines/maxwell_3d.h | 26
 src/video_core/engines/maxwell_dma.cpp | 127
 src/video_core/engines/maxwell_dma.h | 2
 src/video_core/host1x/syncpoint_manager.cpp | 12
 src/video_core/memory_manager.cpp | 61
 src/video_core/memory_manager.h | 21
 src/video_core/pte_kind.h | 264
 src/video_core/renderer_base.cpp | 8
 src/video_core/renderer_opengl/gl_rasterizer.cpp | 17
 src/video_core/renderer_opengl/gl_shader_cache.cpp | 4
 src/video_core/renderer_opengl/gl_state_tracker.cpp | 14
 src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 10
 src/video_core/renderer_vulkan/vk_query_cache.cpp | 7
 src/video_core/renderer_vulkan/vk_rasterizer.cpp | 14
 src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 84
 src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 3
 src/video_core/renderer_vulkan/vk_state_tracker.cpp | 12
 src/video_core/texture_cache/descriptor_table.h | 2
 src/video_core/texture_cache/format_lookup_table.cpp | 2
 src/video_core/texture_cache/texture_cache.h | 8
 src/yuzu/applets/qt_controller.ui | 2
 src/yuzu/bootmanager.cpp | 4
 src/yuzu/configuration/configure_audio.cpp | 4
 src/yuzu/configuration/configure_graphics.ui | 5
 src/yuzu/main.cpp | 13
 src/yuzu_cmd/yuzu.cpp | 4
 147 files changed, 4789 insertions(+), 2120 deletions(-)
diff --git a/src/audio_core/in/audio_in_system.cpp b/src/audio_core/in/audio_in_system.cpp
index e7f918a47..6b7e6715c 100644
--- a/src/audio_core/in/audio_in_system.cpp
+++ b/src/audio_core/in/audio_in_system.cpp
@@ -23,7 +23,7 @@ System::~System() {
void System::Finalize() {
Stop();
session->Finalize();
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
void System::StartSession() {
@@ -142,7 +142,7 @@ void System::ReleaseBuffers() {
if (signal) {
// Signal if any buffer was released, or if none are registered, we need more.
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
}
@@ -159,7 +159,7 @@ bool System::FlushAudioInBuffers() {
buffers.FlushBuffers(buffers_released);
if (buffers_released > 0) {
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
return true;
}
diff --git a/src/audio_core/out/audio_out_system.cpp b/src/audio_core/out/audio_out_system.cpp
index 8b907590a..48a801923 100644
--- a/src/audio_core/out/audio_out_system.cpp
+++ b/src/audio_core/out/audio_out_system.cpp
@@ -24,7 +24,7 @@ System::~System() {
void System::Finalize() {
Stop();
session->Finalize();
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
std::string_view System::GetDefaultOutputDeviceName() const {
@@ -141,7 +141,7 @@ void System::ReleaseBuffers() {
bool signal{buffers.ReleaseBuffers(system.CoreTiming(), *session)};
if (signal) {
// Signal if any buffer was released, or if none are registered, we need more.
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
}
@@ -158,7 +158,7 @@ bool System::FlushAudioOutBuffers() {
buffers.FlushBuffers(buffers_released);
if (buffers_released > 0) {
- buffer_event->GetWritableEvent().Signal();
+ buffer_event->Signal();
}
return true;
}
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp
index 7a217969e..4fac30c7c 100644
--- a/src/audio_core/renderer/system.cpp
+++ b/src/audio_core/renderer/system.cpp
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
: core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
Result System::Initialize(const AudioRendererParameterInternal& params,
- Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
- const u32 process_handle_, const u64 applet_resource_user_id_,
- const s32 session_id_) {
+ Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+ u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
if (!CheckValidRevision(params.revision)) {
return Service::Audio::ERR_INVALID_REVISION;
}
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
render_time_limit_percent = 100;
drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+ drop_voice_param = 1.0f;
+ num_voices_dropped = 0;
allocator.Align(0x40);
command_workbuffer_size = allocator.GetRemainingSize();
@@ -534,7 +535,7 @@ Result System::Update(std::span<const u8> input, std::span<u8> performance, std:
return result;
}
- adsp_rendered_event->GetWritableEvent().Clear();
+ adsp_rendered_event->Clear();
num_times_updated++;
const auto end_time{core.CoreTiming().GetClockTicks()};
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
return render_time_limit_percent;
}
-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
render_time_limit_percent = limit;
}
@@ -625,7 +626,7 @@ void System::SendCommandToDsp() {
reset_command_buffers = false;
command_buffer_size = command_size;
if (remaining_command_count == 0) {
- adsp_rendered_event->GetWritableEvent().Signal();
+ adsp_rendered_event->Signal();
}
} else {
adsp.ClearRemainCount(session_id);
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
}
u64 System::GenerateCommand(std::span<u8> in_command_buffer,
- [[maybe_unused]] const u64 command_buffer_size_) {
+ [[maybe_unused]] u64 command_buffer_size_) {
PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
const auto start_time{core.CoreTiming().GetClockTicks()};
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
voice_context.SortInfo();
- const auto start_estimated_time{command_buffer.estimated_process_time};
+ const auto start_estimated_time{drop_voice_param *
+ static_cast<f32>(command_buffer.estimated_process_time)};
command_generator.GenerateVoiceCommands();
command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
time_limit_percent = 70.0f;
}
+
+ const auto end_estimated_time{drop_voice_param *
+ static_cast<f32>(command_buffer.estimated_process_time)};
+ const auto estimated_time{start_estimated_time - end_estimated_time};
+
const auto time_limit{static_cast<u32>(
- static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
- (((time_limit_percent / 100.0f) * 2'880'000.0) *
- (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
- num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+ estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+ (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+ num_voices_dropped =
+ DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
}
command_list_header->buffer_size = command_buffer.size;
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
return command_buffer.size;
}
-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
- const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+ return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+ drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
u32 i{0};
auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
- ICommand* cmd{};
+ ICommand* cmd{nullptr};
- for (; i < command_buffer.count; i++) {
+ // Find the first valid voice to drop
+ while (i < command_buffer.count) {
cmd = reinterpret_cast<ICommand*>(command_list);
- if (cmd->type != CommandId::Performance &&
- cmd->type != CommandId::DataSourcePcmInt16Version1 &&
- cmd->type != CommandId::DataSourcePcmInt16Version2 &&
- cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
- cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
- cmd->type != CommandId::DataSourceAdpcmVersion1 &&
- cmd->type != CommandId::DataSourceAdpcmVersion2) {
+ if (cmd->type == CommandId::Performance ||
+ cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+ cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+ cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+ cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+ cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+ cmd->type == CommandId::DataSourceAdpcmVersion2) {
break;
}
command_list += cmd->size;
+ i++;
}
if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
const auto node_id_type{cmd->node_id >> 28};
const auto node_id_base{cmd->node_id & 0xFFF};
+ // If the new estimated process time falls below the limit, we're done dropping.
if (estimated_process_time <= time_limit) {
break;
}
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
break;
}
+ // Don't drop voices marked with the highest priority.
auto& voice_info{voice_context.GetInfo(node_id_base)};
if (voice_info.priority == HighestVoicePriority) {
break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
voices_dropped++;
voice_info.voice_dropped = true;
- if (i < command_buffer.count) {
- while (cmd->node_id == node_id) {
- if (cmd->type == CommandId::DepopPrepare) {
- cmd->enabled = true;
- } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
- cmd->enabled = false;
- }
- i++;
- command_list += cmd->size;
- cmd = reinterpret_cast<ICommand*>(command_list);
+ // First iteration should drop the voice, and then iterate through all of the commands tied
+ // to the voice. We don't need reverb on a voice which we've just removed, for example.
+ // Depops can't be removed, otherwise we'd introduce audio popping, and we don't
+ // remove perf commands. Lower the estimated time for each command dropped.
+ while (i < command_buffer.count && cmd->node_id == node_id) {
+ if (cmd->type == CommandId::DepopPrepare) {
+ cmd->enabled = true;
+ } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+ cmd->enabled = false;
+ estimated_process_time -= static_cast<u32>(
+ drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
}
+ command_list += cmd->size;
+ cmd = reinterpret_cast<ICommand*>(command_list);
+ i++;
}
+ i++;
}
return voices_dropped;
}
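
To make the reworked budget arithmetic concrete, here is a standalone sketch of the time-limit computation GenerateCommand() now performs. The constants and names mirror the diff; the tick values are invented for illustration.

    #include <cstdint>
    #include <iostream>

    int main() {
        const float drop_voice_param = 1.0f;            // System::drop_voice_param
        const float time_limit_percent = 70.0f;         // 70% or 80%, behavior-dependent
        const float render_time_limit_percent = 100.0f; // SetRenderingTimeLimit()

        // estimated_process_time sampled before and after command generation,
        // both scaled by the voice-drop parameter as the new code does.
        const float start_estimated_time = drop_voice_param * 250'000.0f;
        const float end_estimated_time = drop_voice_param * 1'000'000.0f;
        const float estimated_time = start_estimated_time - end_estimated_time;

        // The negative estimated_time shrinks the budget by the cost of the
        // commands that were just generated.
        const auto time_limit = static_cast<std::uint32_t>(
            estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0f) *
                              (render_time_limit_percent / 100.0f)));
        std::cout << "DropVoices() budget: " << time_limit << " ticks\n"; // 1266000
    }
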
diff --git a/src/audio_core/renderer/system.h b/src/audio_core/renderer/system.h
index bcbe65b07..429196e41 100644
--- a/src/audio_core/renderer/system.h
+++ b/src/audio_core/renderer/system.h
@@ -196,6 +196,20 @@ public:
*/
u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
+ /**
+ * Get the current voice drop parameter.
+ *
+ * @return The current voice drop parameter.
+ */
+ f32 GetVoiceDropParameter() const;
+
+ /**
+ * Set the voice drop parameter.
+ *
+ * @param voice_drop The new voice drop parameter.
+ */
+ void SetVoiceDropParameter(f32 voice_drop);
+
private:
/// Core system
Core::System& core;
@@ -301,6 +315,8 @@ private:
u32 num_voices_dropped{};
/// Tick that rendering started
u64 render_start_tick{};
+ /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+ f32 drop_voice_param{1.0f};
};
} // namespace AudioRenderer
diff --git a/src/audio_core/renderer/voice/voice_context.cpp b/src/audio_core/renderer/voice/voice_context.cpp
index eafb51b01..a501a677d 100644
--- a/src/audio_core/renderer/voice/voice_context.cpp
+++ b/src/audio_core/renderer/voice/voice_context.cpp
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
}
std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
- return a->priority != b->priority ? a->priority < b->priority
- : a->sort_order < b->sort_order;
+ return a->priority != b->priority ? a->priority > b->priority
+ : a->sort_order > b->sort_order;
});
}
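
The comparator flip above reverses the sorted order: voices are now arranged by descending priority value and, within equal priorities, descending sort order. A minimal sketch with sample data only:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct VoiceInfo {
        int priority;
        int sort_order;
    };

    int main() {
        VoiceInfo a{2, 0}, b{5, 1}, c{5, 0};
        std::vector<VoiceInfo*> sorted_voice_info{&a, &b, &c};

        // Same comparator as the fixed SortInfo().
        std::ranges::sort(sorted_voice_info, [](const VoiceInfo* x, const VoiceInfo* y) {
            return x->priority != y->priority ? x->priority > y->priority
                                              : x->sort_order > y->sort_order;
        });

        for (const auto* v : sorted_voice_info) {
            std::cout << v->priority << '/' << v->sort_order << ' '; // 5/1 5/0 2/0
        }
        std::cout << '\n';
    }
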
diff --git a/src/audio_core/sink/cubeb_sink.cpp b/src/audio_core/sink/cubeb_sink.cpp
index 36b115ad6..32c1b1cb3 100644
--- a/src/audio_core/sink/cubeb_sink.cpp
+++ b/src/audio_core/sink/cubeb_sink.cpp
@@ -66,10 +66,10 @@ public:
const auto latency_error = cubeb_get_min_latency(ctx, &params, &minimum_latency);
if (latency_error != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error);
- minimum_latency = 256U;
+ minimum_latency = TargetSampleCount * 2;
}
- minimum_latency = std::max(minimum_latency, 256u);
+ minimum_latency = std::max(minimum_latency, TargetSampleCount * 2);
LOG_INFO(Service_Audio,
"Opening cubeb stream {} type {} with: rate {} channels {} (system channels {}) "
@@ -326,4 +326,31 @@ std::vector<std::string> ListCubebSinkDevices(bool capture) {
return device_list;
}
+u32 GetCubebLatency() {
+ cubeb* ctx;
+
+ if (cubeb_init(&ctx, "yuzu Latency Getter", nullptr) != CUBEB_OK) {
+ LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
+ // Return a large latency so we choose SDL instead.
+ return 10000u;
+ }
+
+ cubeb_stream_params params{};
+ params.rate = TargetSampleRate;
+ params.channels = 2;
+ params.format = CUBEB_SAMPLE_S16LE;
+ params.prefs = CUBEB_STREAM_PREF_NONE;
+ params.layout = CUBEB_LAYOUT_STEREO;
+
+ u32 latency{0};
+ const auto latency_error = cubeb_get_min_latency(ctx, &params, &latency);
+ if (latency_error != CUBEB_OK) {
+ LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error);
+ latency = TargetSampleCount * 2;
+ }
+ latency = std::max(latency, TargetSampleCount * 2);
+ cubeb_destroy(ctx);
+ return latency;
+}
+
} // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/cubeb_sink.h b/src/audio_core/sink/cubeb_sink.h
index 4b0cb160d..3302cb98d 100644
--- a/src/audio_core/sink/cubeb_sink.h
+++ b/src/audio_core/sink/cubeb_sink.h
@@ -96,4 +96,11 @@ private:
*/
std::vector<std::string> ListCubebSinkDevices(bool capture);
+/**
+ * Get the reported latency for this sink.
+ *
+ * @return Minimum latency for this sink.
+ */
+u32 GetCubebLatency();
+
} // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sdl2_sink.cpp b/src/audio_core/sink/sdl2_sink.cpp
index 1bd001b94..c138dc628 100644
--- a/src/audio_core/sink/sdl2_sink.cpp
+++ b/src/audio_core/sink/sdl2_sink.cpp
@@ -47,11 +47,7 @@ public:
spec.freq = TargetSampleRate;
spec.channels = static_cast<u8>(device_channels);
spec.format = AUDIO_S16SYS;
- if (type == StreamType::Render) {
- spec.samples = TargetSampleCount;
- } else {
- spec.samples = 1024;
- }
+ spec.samples = TargetSampleCount * 2;
spec.callback = &SDLSinkStream::DataCallback;
spec.userdata = this;
@@ -234,10 +230,16 @@ std::vector<std::string> ListSDLSinkDevices(bool capture) {
const int device_count = SDL_GetNumAudioDevices(capture);
for (int i = 0; i < device_count; ++i) {
- device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
+ if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
+ device_list.emplace_back(name);
+ }
}
return device_list;
}
+u32 GetSDLLatency() {
+ return TargetSampleCount * 2;
+}
+
} // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sdl2_sink.h b/src/audio_core/sink/sdl2_sink.h
index f01eddc1b..27ed1ab94 100644
--- a/src/audio_core/sink/sdl2_sink.h
+++ b/src/audio_core/sink/sdl2_sink.h
@@ -87,4 +87,11 @@ private:
*/
std::vector<std::string> ListSDLSinkDevices(bool capture);
+/**
+ * Get the reported latency for this sink.
+ *
+ * @return Minimum latency for this sink.
+ */
+u32 GetSDLLatency();
+
} // namespace AudioCore::Sink
diff --git a/src/audio_core/sink/sink_details.cpp b/src/audio_core/sink/sink_details.cpp
index 67bdab779..39ea6d91b 100644
--- a/src/audio_core/sink/sink_details.cpp
+++ b/src/audio_core/sink/sink_details.cpp
@@ -21,58 +21,80 @@ namespace {
struct SinkDetails {
using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view);
using ListDevicesFn = std::vector<std::string> (*)(bool);
+ using LatencyFn = u32 (*)();
/// Name for this sink.
- const char* id;
+ std::string_view id;
/// A method to call to construct an instance of this type of sink.
FactoryFn factory;
/// A method to call to list available devices.
ListDevicesFn list_devices;
+ /// Method to get the latency of this backend.
+ LatencyFn latency;
};
// sink_details is ordered in terms of desirability, with the best choice at the top.
constexpr SinkDetails sink_details[] = {
#ifdef HAVE_CUBEB
- SinkDetails{"cubeb",
- [](std::string_view device_id) -> std::unique_ptr<Sink> {
- return std::make_unique<CubebSink>(device_id);
- },
- &ListCubebSinkDevices},
+ SinkDetails{
+ "cubeb",
+ [](std::string_view device_id) -> std::unique_ptr<Sink> {
+ return std::make_unique<CubebSink>(device_id);
+ },
+ &ListCubebSinkDevices,
+ &GetCubebLatency,
+ },
#endif
#ifdef HAVE_SDL2
- SinkDetails{"sdl2",
- [](std::string_view device_id) -> std::unique_ptr<Sink> {
- return std::make_unique<SDLSink>(device_id);
- },
- &ListSDLSinkDevices},
+ SinkDetails{
+ "sdl2",
+ [](std::string_view device_id) -> std::unique_ptr<Sink> {
+ return std::make_unique<SDLSink>(device_id);
+ },
+ &ListSDLSinkDevices,
+ &GetSDLLatency,
+ },
#endif
SinkDetails{"null",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<NullSink>(device_id);
},
- [](bool capture) { return std::vector<std::string>{"null"}; }},
+ [](bool capture) { return std::vector<std::string>{"null"}; }, []() { return 0u; }},
};
const SinkDetails& GetOutputSinkDetails(std::string_view sink_id) {
- auto iter =
- std::find_if(std::begin(sink_details), std::end(sink_details),
- [sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; });
+ const auto find_backend{[](std::string_view id) {
+ return std::find_if(std::begin(sink_details), std::end(sink_details),
+ [&id](const auto& sink_detail) { return sink_detail.id == id; });
+ }};
- if (sink_id == "auto" || iter == std::end(sink_details)) {
- if (sink_id != "auto") {
- LOG_ERROR(Audio, "Invalid sink_id {}", sink_id);
+ auto iter = find_backend(sink_id);
+
+ if (sink_id == "auto") {
+ // Auto-select a backend. Prefer cubeb, but it may report a large minimum latency which
+ // causes audio issues; in that case, go with SDL.
+#if defined(HAVE_CUBEB) && defined(HAVE_SDL2)
+ iter = find_backend("cubeb");
+ if (iter->latency() > TargetSampleCount * 3) {
+ iter = find_backend("sdl2");
}
- // Auto-select.
- // sink_details is ordered in terms of desirability, with the best choice at the front.
+#else
iter = std::begin(sink_details);
+#endif
+ LOG_INFO(Service_Audio, "Auto-selecting the {} backend", iter->id);
+ }
+
+ if (iter == std::end(sink_details)) {
+ LOG_ERROR(Audio, "Invalid sink_id {}", sink_id);
+ iter = find_backend("null");
}
return *iter;
}
} // Anonymous namespace
-std::vector<const char*> GetSinkIDs() {
- std::vector<const char*> sink_ids(std::size(sink_details));
+std::vector<std::string_view> GetSinkIDs() {
+ std::vector<std::string_view> sink_ids(std::size(sink_details));
std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids),
[](const auto& sink) { return sink.id; });
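
The new auto-selection reduces to a single latency threshold. A condensed restatement, assuming audio_core's usual TargetSampleCount of 240; the backend names are real, the sample latencies illustrative:

    #include <cstdint>
    #include <iostream>
    #include <string_view>

    constexpr std::uint32_t TargetSampleCount = 240;

    std::string_view AutoSelectBackend(std::uint32_t cubeb_latency) {
        // Prefer cubeb unless its minimum latency exceeds three render
        // periods, in which case sdl2 is the safer choice.
        return cubeb_latency > TargetSampleCount * 3 ? "sdl2" : "cubeb";
    }

    int main() {
        std::cout << AutoSelectBackend(480) << '\n';   // cubeb
        std::cout << AutoSelectBackend(10000) << '\n'; // sdl2 (cubeb_init failure path)
    }
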
diff --git a/src/audio_core/sink/sink_details.h b/src/audio_core/sink/sink_details.h
index 3ebdb1e30..e75932898 100644
--- a/src/audio_core/sink/sink_details.h
+++ b/src/audio_core/sink/sink_details.h
@@ -19,7 +19,7 @@ class Sink;
*
* @return Vector of available sink names.
*/
-std::vector<const char*> GetSinkIDs();
+std::vector<std::string_view> GetSinkIDs();
/**
* Gets the list of devices for a particular sink identified by the given ID.
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index a02696873..46cf75fde 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -169,7 +169,11 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+ target_link_libraries(common PRIVATE lz4::lz4)
+else()
+ target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
if (TARGET zstd::zstd)
target_link_libraries(common PRIVATE zstd::zstd)
else()
diff --git a/src/common/concepts.h b/src/common/concepts.h
index a97555f6a..e8ce30dfe 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -34,4 +34,12 @@ concept DerivedFrom = requires {
template <typename From, typename To>
concept ConvertibleTo = std::is_convertible_v<From, To>;
+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
} // namespace Common
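
A quick usage sketch for the two new concepts; the Scale and ShiftLeft helpers are hypothetical. Scale() accepts any arithmetic type, while ShiftLeft() compiles only for integral ones:

    #include <type_traits>

    // Mirrors the definitions added to common/concepts.h.
    template <typename T>
    concept IsArithmetic = std::is_arithmetic_v<T>;

    template <typename T>
    concept IsIntegral = std::is_integral_v<T>;

    template <IsArithmetic Number>
    constexpr double Scale(Number n) {
        return static_cast<double>(n) * 2.0;
    }

    template <IsIntegral Integer>
    constexpr Integer ShiftLeft(Integer n) {
        return static_cast<Integer>(n << 1);
    }

    static_assert(Scale(1.5) == 3.0);
    static_assert(ShiftLeft(4) == 8);
    // ShiftLeft(1.5); // error: constraints not satisfied

    int main() {}
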
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
index 4a0f72cc9..f899b0d54 100644
--- a/src/common/fixed_point.h
+++ b/src/common/fixed_point.h
@@ -4,14 +4,7 @@
// From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
// See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once
#include <cstddef> // for size_t
#include <cstdint>
@@ -19,6 +12,8 @@
#include <ostream>
#include <type_traits>
+#include <common/concepts.h>
+
namespace Common {
template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
static constexpr size_t size = 64;
using value_type = int64_t;
- using unsigned_type = std::make_unsigned<value_type>::type;
- using signed_type = std::make_signed<value_type>::type;
+ using unsigned_type = std::make_unsigned_t<value_type>;
+ using signed_type = std::make_signed_t<value_type>;
using next_size = type_from_size<128>;
};
@@ -68,8 +63,8 @@ struct type_from_size<32> {
static constexpr size_t size = 32;
using value_type = int32_t;
- using unsigned_type = std::make_unsigned<value_type>::type;
- using signed_type = std::make_signed<value_type>::type;
+ using unsigned_type = std::make_unsigned_t<value_type>;
+ using signed_type = std::make_signed_t<value_type>;
using next_size = type_from_size<64>;
};
@@ -79,8 +74,8 @@ struct type_from_size<16> {
static constexpr size_t size = 16;
using value_type = int16_t;
- using unsigned_type = std::make_unsigned<value_type>::type;
- using signed_type = std::make_signed<value_type>::type;
+ using unsigned_type = std::make_unsigned_t<value_type>;
+ using signed_type = std::make_signed_t<value_type>;
using next_size = type_from_size<32>;
};
@@ -90,8 +85,8 @@ struct type_from_size<8> {
static constexpr size_t size = 8;
using value_type = int8_t;
- using unsigned_type = std::make_unsigned<value_type>::type;
- using signed_type = std::make_signed<value_type>::type;
+ using unsigned_type = std::make_unsigned_t<value_type>;
+ using signed_type = std::make_signed_t<value_type>;
using next_size = type_from_size<16>;
};
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
struct divide_by_zero : std::exception {};
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
- typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+ std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
using next_type = typename FixedPoint<I, F>::next_type;
using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
}
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
- typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+ std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
// this is the usual implementation of multiplication
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
- typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+ std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
using next_type = typename FixedPoint<I, F>::next_type;
using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
// it is slightly slower, but is more robust since it doesn't
// require an upgraded type
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
- typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+ std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
using base_type = typename FixedPoint<I, F>::base_type;
@@ -272,19 +267,20 @@ public:
static constexpr base_type one = base_type(1) << fractional_bits;
public: // constructors
- FixedPoint() = default;
- FixedPoint(const FixedPoint&) = default;
- FixedPoint(FixedPoint&&) = default;
- FixedPoint& operator=(const FixedPoint&) = default;
+ constexpr FixedPoint() = default;
+
+ constexpr FixedPoint(const FixedPoint&) = default;
+ constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+ constexpr FixedPoint(FixedPoint&&) noexcept = default;
+ constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
- template <class Number>
- constexpr FixedPoint(
- Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
- : data_(static_cast<base_type>(n * one)) {}
+ template <IsArithmetic Number>
+ constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
public: // conversion
template <size_t I2, size_t F2>
- CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+ constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
using T = FixedPoint<I2, F2>;
@@ -308,36 +304,14 @@ public:
}
public: // comparison operators
- constexpr bool operator==(FixedPoint rhs) const {
- return data_ == rhs.data_;
- }
-
- constexpr bool operator!=(FixedPoint rhs) const {
- return data_ != rhs.data_;
- }
-
- constexpr bool operator<(FixedPoint rhs) const {
- return data_ < rhs.data_;
- }
-
- constexpr bool operator>(FixedPoint rhs) const {
- return data_ > rhs.data_;
- }
-
- constexpr bool operator<=(FixedPoint rhs) const {
- return data_ <= rhs.data_;
- }
-
- constexpr bool operator>=(FixedPoint rhs) const {
- return data_ >= rhs.data_;
- }
+ friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
public: // unary operators
- constexpr bool operator!() const {
+ [[nodiscard]] constexpr bool operator!() const {
return !data_;
}
- constexpr FixedPoint operator~() const {
+ [[nodiscard]] constexpr FixedPoint operator~() const {
// NOTE(eteran): this will often appear to "just negate" the value
// that is not an error, it is because -x == (~x+1)
// and that "+1" is adding an infinitesimally small fraction to the
@@ -345,89 +319,87 @@ public: // unary operators
return FixedPoint::from_base(~data_);
}
- constexpr FixedPoint operator-() const {
+ [[nodiscard]] constexpr FixedPoint operator-() const {
return FixedPoint::from_base(-data_);
}
- constexpr FixedPoint operator+() const {
+ [[nodiscard]] constexpr FixedPoint operator+() const {
return FixedPoint::from_base(+data_);
}
- CONSTEXPR14 FixedPoint& operator++() {
+ constexpr FixedPoint& operator++() {
data_ += one;
return *this;
}
- CONSTEXPR14 FixedPoint& operator--() {
+ constexpr FixedPoint& operator--() {
data_ -= one;
return *this;
}
- CONSTEXPR14 FixedPoint operator++(int) {
+ constexpr FixedPoint operator++(int) {
FixedPoint tmp(*this);
data_ += one;
return tmp;
}
- CONSTEXPR14 FixedPoint operator--(int) {
+ constexpr FixedPoint operator--(int) {
FixedPoint tmp(*this);
data_ -= one;
return tmp;
}
public: // basic math operators
- CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+ constexpr FixedPoint& operator+=(FixedPoint n) {
data_ += n.data_;
return *this;
}
- CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+ constexpr FixedPoint& operator-=(FixedPoint n) {
data_ -= n.data_;
return *this;
}
- CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+ constexpr FixedPoint& operator*=(FixedPoint n) {
return assign(detail::multiply(*this, n));
}
- CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+ constexpr FixedPoint& operator/=(FixedPoint n) {
FixedPoint temp;
return assign(detail::divide(*this, n, temp));
}
private:
- CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+ constexpr FixedPoint& assign(FixedPoint rhs) {
data_ = rhs.data_;
return *this;
}
public: // binary math operators; these affect the underlying bit pattern since they
// don't typically make sense for non-integer values
- CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+ constexpr FixedPoint& operator&=(FixedPoint n) {
data_ &= n.data_;
return *this;
}
- CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+ constexpr FixedPoint& operator|=(FixedPoint n) {
data_ |= n.data_;
return *this;
}
- CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+ constexpr FixedPoint& operator^=(FixedPoint n) {
data_ ^= n.data_;
return *this;
}
- template <class Integer,
- class = typename std::enable_if<std::is_integral<Integer>::value>::type>
- CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+ template <IsIntegral Integer>
+ constexpr FixedPoint& operator>>=(Integer n) {
data_ >>= n;
return *this;
}
- template <class Integer,
- class = typename std::enable_if<std::is_integral<Integer>::value>::type>
- CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+ template <IsIntegral Integer>
+ constexpr FixedPoint& operator<<=(Integer n) {
data_ <<= n;
return *this;
}
@@ -437,42 +409,42 @@ public: // conversion to basic types
data_ += (data_ & fractional_mask) >> 1;
}
- constexpr int to_int() {
+ [[nodiscard]] constexpr int to_int() {
round_up();
return static_cast<int>((data_ & integer_mask) >> fractional_bits);
}
- constexpr unsigned int to_uint() const {
+ [[nodiscard]] constexpr unsigned int to_uint() {
round_up();
return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
}
- constexpr int64_t to_long() {
+ [[nodiscard]] constexpr int64_t to_long() {
round_up();
return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
}
- constexpr int to_int_floor() const {
+ [[nodiscard]] constexpr int to_int_floor() const {
return static_cast<int>((data_ & integer_mask) >> fractional_bits);
}
- constexpr int64_t to_long_floor() {
+ [[nodiscard]] constexpr int64_t to_long_floor() const {
return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
}
- constexpr unsigned int to_uint_floor() const {
+ [[nodiscard]] constexpr unsigned int to_uint_floor() const {
return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
}
- constexpr float to_float() const {
+ [[nodiscard]] constexpr float to_float() const {
return static_cast<float>(data_) / FixedPoint::one;
}
- constexpr double to_double() const {
+ [[nodiscard]] constexpr double to_double() const {
return static_cast<double>(data_) / FixedPoint::one;
}
- constexpr base_type to_raw() const {
+ [[nodiscard]] constexpr base_type to_raw() const {
return data_;
}
@@ -480,27 +452,27 @@ public: // conversion to basic types
data_ &= fractional_mask;
}
- constexpr base_type get_frac() const {
+ [[nodiscard]] constexpr base_type get_frac() const {
return data_ & fractional_mask;
}
public:
- CONSTEXPR14 void swap(FixedPoint& rhs) {
+ constexpr void swap(FixedPoint& rhs) noexcept {
using std::swap;
swap(data_, rhs.data_);
}
public:
- base_type data_;
+ base_type data_{};
};
// if we have the same fractional portion, but differing integer portions, we trivially upgrade the
// smaller type
template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+ FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
- using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+ using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
}
template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+ FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
- using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+ using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
}
template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+ FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
- using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+ using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
}
template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+ FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
- using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+ using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
// basic math operators
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs += rhs;
return lhs;
}
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs -= rhs;
return lhs;
}
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs *= rhs;
return lhs;
}
template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs /= rhs;
return lhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
lhs += FixedPoint<I, F>(rhs);
return lhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
lhs -= FixedPoint<I, F>(rhs);
return lhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
lhs *= FixedPoint<I, F>(rhs);
return lhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
lhs /= FixedPoint<I, F>(rhs);
return lhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp += rhs;
return tmp;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp -= rhs;
return tmp;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp *= rhs;
return tmp;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp /= rhs;
return tmp;
}
// shift operators
-template <size_t I, size_t F, class Integer,
- class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
lhs <<= rhs;
return lhs;
}
-template <size_t I, size_t F, class Integer,
- class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
lhs >>= rhs;
return lhs;
}
// comparison operators
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
return lhs > FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
return lhs < FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
return lhs >= FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
return lhs <= FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
return lhs == FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
return lhs != FixedPoint<I, F>(rhs);
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) > rhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) < rhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) >= rhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) <= rhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) == rhs;
}
-template <size_t I, size_t F, class Number,
- class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) != rhs;
}
} // namespace Common
-
-#undef CONSTEXPR14
-
-#endif
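
With CONSTEXPR14 gone and the constructors concept-constrained, typical usage looks like the following hypothetical snippet; the Q50.14 layout is arbitrary:

    #include <cassert>

    #include "common/fixed_point.h"

    int main() {
        Common::FixedPoint<50, 14> rate{1.5}; // IsArithmetic converting constructor
        rate *= 2;                            // 2 converts implicitly, then *= runs
        assert(rate.to_int_floor() == 3);
        assert(rate == 3);                    // heterogeneous comparison operator

        // operator<=> is defaulted, so same-type comparisons use the raw data.
        Common::FixedPoint<50, 14> other{2.75};
        assert(other < rate);
    }
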
diff --git a/src/common/settings.h b/src/common/settings.h
index d2452c93b..0eb98939c 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -431,7 +431,7 @@ struct Values {
FullscreenMode::Exclusive,
#endif
FullscreenMode::Borderless, FullscreenMode::Exclusive, "fullscreen_mode"};
- SwitchableSetting<int, true> aspect_ratio{0, 0, 3, "aspect_ratio"};
+ SwitchableSetting<int, true> aspect_ratio{0, 0, 4, "aspect_ratio"};
SwitchableSetting<int, true> max_anisotropy{0, 0, 5, "max_anisotropy"};
SwitchableSetting<bool> use_speed_limit{true, "use_speed_limit"};
SwitchableSetting<u16, true> speed_limit{100, 0, 9999, "speed_limit"};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 95302c419..055bea641 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,9 @@ add_library(core STATIC
hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
+ hle/kernel/k_dynamic_page_manager.h
+ hle/kernel/k_dynamic_resource_manager.h
+ hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp
@@ -240,6 +243,8 @@ add_library(core STATIC
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
+ hle/kernel/k_session_request.cpp
+ hle/kernel/k_session_request.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_shared_memory_info.h
@@ -261,8 +266,6 @@ add_library(core STATIC
hle/kernel/k_worker_task.h
hle/kernel/k_worker_task_manager.cpp
hle/kernel/k_worker_task_manager.h
- hle/kernel/k_writable_event.cpp
- hle/kernel/k_writable_event.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory_types.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 953d96439..29ba562dc 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
}
system.ExitDynarmicProfile();
+ // If the thread is scheduled for termination, exit the thread.
+ if (current_thread->HasDpc()) {
+ if (current_thread->IsTerminationRequested()) {
+ current_thread->Exit();
+ UNREACHABLE();
+ }
+ }
+
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (Has(hr, breakpoint) || Has(hr, no_execute)) {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 1d46f6d40..22b5d5656 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -111,6 +111,7 @@ public:
LOG_ERROR(Core_ARM,
"Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, memory.Read32(pc));
+ ReturnException(pc, ARM_Interface::no_execute);
}
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 1deeee154..7fb8bc019 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -133,6 +133,50 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
+ void Initialize(System& system) {
+ device_memory = std::make_unique<Core::DeviceMemory>();
+
+ is_multicore = Settings::values.use_multi_core.GetValue();
+
+ core_timing.SetMulticore(is_multicore);
+ core_timing.Initialize([&system]() { system.RegisterHostThread(); });
+
+ const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
+ const auto current_time =
+ std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
+ Settings::values.custom_rtc_differential =
+ Settings::values.custom_rtc.value_or(current_time) - current_time;
+
+ // Create a default fs if one doesn't already exist.
+ if (virtual_filesystem == nullptr) {
+ virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
+ }
+ if (content_provider == nullptr) {
+ content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+ }
+
+ // Create default implementations of applets if one is not provided.
+ applet_manager.SetDefaultAppletsIfMissing();
+
+ is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
+
+ kernel.SetMulticore(is_multicore);
+ cpu_manager.SetMulticore(is_multicore);
+ cpu_manager.SetAsyncGpu(is_async_gpu);
+ }
+
+ void ReinitializeIfNecessary(System& system) {
+ if (is_multicore == Settings::values.use_multi_core.GetValue()) {
+ return;
+ }
+
+ LOG_DEBUG(Kernel, "Re-initializing");
+
+ is_multicore = Settings::values.use_multi_core.GetValue();
+
+ Initialize(system);
+ }
+
SystemResultStatus Run() {
std::unique_lock<std::mutex> lk(suspend_guard);
status = SystemResultStatus::Success;
@@ -178,37 +222,14 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port);
}
- SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
+ SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK");
- device_memory = std::make_unique<Core::DeviceMemory>();
-
- is_multicore = Settings::values.use_multi_core.GetValue();
- is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-
- kernel.SetMulticore(is_multicore);
- cpu_manager.SetMulticore(is_multicore);
- cpu_manager.SetAsyncGpu(is_async_gpu);
- core_timing.SetMulticore(is_multicore);
+ // Setting changes may require a full system reinitialization (e.g., disabling multicore).
+ ReinitializeIfNecessary(system);
kernel.Initialize();
cpu_manager.Initialize();
- core_timing.Initialize([&system]() { system.RegisterHostThread(); });
-
- const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
- const auto current_time =
- std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
- Settings::values.custom_rtc_differential =
- Settings::values.custom_rtc.value_or(current_time) - current_time;
-
- // Create a default fs if one doesn't already exist.
- if (virtual_filesystem == nullptr)
- virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
- if (content_provider == nullptr)
- content_provider = std::make_unique<FileSys::ContentProviderUnion>();
-
- /// Create default implementations of applets if one is not provided.
- applet_manager.SetDefaultAppletsIfMissing();
/// Reset all glue registrations
arp_manager.ResetAll();
@@ -253,11 +274,11 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader;
}
- SystemResultStatus init_result{Init(system, emu_window)};
+ SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
- Shutdown();
+ ShutdownMainProcess();
return init_result;
}
@@ -276,7 +297,7 @@ struct System::Impl {
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
- Shutdown();
+ ShutdownMainProcess();
return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +356,7 @@ struct System::Impl {
return status;
}
- void Shutdown() {
+ void ShutdownMainProcess() {
SetShuttingDown(true);
// Log last frame performance stats if game was loaded
@@ -369,7 +390,7 @@ struct System::Impl {
cheat_engine.reset();
telemetry_session.reset();
time_manager.Shutdown();
- core_timing.Shutdown();
+ core_timing.ClearPendingEvents();
app_loader.reset();
audio_core.reset();
gpu_core.reset();
@@ -377,7 +398,6 @@ struct System::Impl {
perf_stats.reset();
kernel.Shutdown();
memory.Reset();
- applet_manager.ClearAll();
if (auto room_member = room_network.GetRoomMember().lock()) {
Network::GameInfo game_info{};
@@ -520,6 +540,10 @@ const CpuManager& System::GetCpuManager() const {
return impl->cpu_manager;
}
+void System::Initialize() {
+ impl->Initialize(*this);
+}
+
SystemResultStatus System::Run() {
return impl->Run();
}
@@ -540,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}
-void System::Shutdown() {
- impl->Shutdown();
+void System::ShutdownMainProcess() {
+ impl->ShutdownMainProcess();
}
bool System::IsShuttingDown() const {
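
Taken together, the core.cpp changes split the old Init/Shutdown pair into a one-time Initialize() and a per-title SetupForMainProcess()/ShutdownMainProcess() cycle, so a single Core::System can outlive the title it runs. A minimal sketch of the intended frontend call order, assuming hypothetical frontend glue (only Initialize, Load, Run and ShutdownMainProcess come from this diff; emu_window and game_path are placeholders):

    Core::System system;
    system.Initialize();  // one-time setup, independent of any title

    // Load() internally runs SetupForMainProcess() and calls
    // ShutdownMainProcess() itself on failure.
    if (system.Load(emu_window, game_path) == Core::SystemResultStatus::Success) {
        system.Run();                    // start emulation
        // ... emulate ...
        system.ShutdownMainProcess();    // tear down the title; System stays reusable
    }
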
diff --git a/src/core/core.h b/src/core/core.h
index 7843cc8ad..4ebedffd9 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -143,6 +143,12 @@ public:
System& operator=(System&&) = delete;
/**
+ * Initializes the system
+ * This function will initialize core functionality used for system emulation
+ */
+ void Initialize();
+
+ /**
* Run the OS and Application
* This function will start emulation and run the relevant devices
*/
@@ -166,8 +172,8 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
- /// Shutdown the emulated system.
- void Shutdown();
+ /// Shutdown the main emulated process.
+ void ShutdownMainProcess();
/// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 6c0fcb7b5..0e7b5f943 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -40,7 +40,9 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming()
: clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
-CoreTiming::~CoreTiming() = default;
+CoreTiming::~CoreTiming() {
+ Reset();
+}
void CoreTiming::ThreadEntry(CoreTiming& instance) {
constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
+ Reset();
on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0;
shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
}
}
-void CoreTiming::Shutdown() {
- paused = true;
- shutting_down = true;
- pause_event.Set();
- event.Set();
- if (timer_thread) {
- timer_thread->join();
- }
- ClearPendingEvents();
- timer_thread.reset();
- has_started = false;
+void CoreTiming::ClearPendingEvents() {
+ event_queue.clear();
}
void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
return CpuCyclesToClockCycles(ticks);
}
-void CoreTiming::ClearPendingEvents() {
- event_queue.clear();
-}
-
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
std::scoped_lock lock{basic_lock};
@@ -270,6 +260,7 @@ void CoreTiming::ThreadLoop() {
// There are more events left in the queue, wait until the next event.
const auto wait_time = *next_time - GetGlobalTimeNs().count();
if (wait_time > 0) {
+#ifdef _WIN32
// Assume a timer resolution of 1ms.
static constexpr s64 TimerResolutionNS = 1000000;
@@ -287,6 +278,9 @@ void CoreTiming::ThreadLoop() {
if (event.IsSet()) {
event.Reset();
}
+#else
+ event.WaitFor(std::chrono::nanoseconds(wait_time));
+#endif
}
} else {
// Queue is empty, wait until another event is scheduled and signals us to continue.
@@ -303,6 +297,18 @@ void CoreTiming::ThreadLoop() {
}
}
+void CoreTiming::Reset() {
+ paused = true;
+ shutting_down = true;
+ pause_event.Set();
+ event.Set();
+ if (timer_thread) {
+ timer_thread->join();
+ }
+ timer_thread.reset();
+ has_started = false;
+}
+
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
if (is_multicore) {
return clock->GetTimeNS();
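
The effect of moving the teardown logic into Reset() is that CoreTiming is now re-initializable: Initialize() first joins and discards any previous timer thread, and the destructor performs the same cleanup. A sketch of the resulting lifecycle (the loop and callback body are illustrative only):

    Core::Timing::CoreTiming core_timing;
    for (int boot = 0; boot < 2; ++boot) {
        core_timing.Initialize([] { /* per-boot host thread registration */ });
        // ... schedule and dispatch events while the title runs ...
        core_timing.ClearPendingEvents();  // on exit only, per the new header comment
    }
    // Destruction (or the next Initialize) joins the timer thread via Reset().
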
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 3259397b2..b5925193c 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -61,19 +61,14 @@ public:
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
void Initialize(std::function<void()>&& on_thread_init_);
- /// Tears down all timing related functionality.
- void Shutdown();
+ /// Clear all pending events. This should ONLY be done on exit.
+ void ClearPendingEvents();
/// Sets if emulation is multicore or single core, must be set before Initialize
void SetMulticore(bool is_multicore_) {
is_multicore = is_multicore_;
}
- /// Check if it's using host timing.
- bool IsHostTiming() const {
- return is_multicore;
- }
-
/// Pauses/Unpauses the execution of the timer thread.
void Pause(bool is_paused);
@@ -136,12 +131,11 @@ public:
private:
struct Event;
- /// Clear all pending events. This should ONLY be done on exit.
- void ClearPendingEvents();
-
static void ThreadEntry(CoreTiming& instance);
void ThreadLoop();
+ void Reset();
+
std::unique_ptr<Common::WallClock> clock;
s64 global_timer = 0;
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index df61b0c0b..90510733c 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -31,12 +31,14 @@ public:
DramMemoryMap::Base;
}
- u8* GetPointer(PAddr addr) {
- return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+ template <typename T>
+ T* GetPointer(PAddr addr) {
+ return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}
- const u8* GetPointer(PAddr addr) const {
- return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
+ template <typename T>
+ const T* GetPointer(PAddr addr) const {
+        return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}
Common::HostMemory buffer;
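
The templated accessor moves the reinterpret_cast into DeviceMemory itself, so call sites state the type they want once. A before/after sketch, assuming a valid physical address addr:

    // Before: every caller cast the raw u8* at the call site.
    u64* old_ptr = reinterpret_cast<u64*>(device_memory.GetPointer(addr));

    // After: the cast is centralized in the accessor.
    u64* new_ptr = device_memory.GetPointer<u64>(addr);
    void* raw = device_memory.GetPointer<void>(addr);  // as used in init_slab_setup.cpp below
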
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index 8c1b2523c..1567da231 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
+#include "common/uuid.h"
#include "core/core.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs.h"
@@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
attr.title_id == 0 && attr.save_id == 0);
}
+std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
+ u128 user_id) {
+ // Only detect nand user saves.
+ const auto space_id_path = [space_id]() -> std::string_view {
+ switch (space_id) {
+ case SaveDataSpaceId::NandUser:
+ return "/user/save";
+ default:
+ return "";
+ }
+ }();
+
+ if (space_id_path.empty()) {
+ return "";
+ }
+
+ Common::UUID uuid;
+ std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));
+
+ // Only detect account/device saves from the future location.
+ switch (type) {
+ case SaveDataType::SaveData:
+ return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
+ case SaveDataType::DeviceSaveData:
+ return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
+ default:
+ return "";
+ }
+}
+
} // Anonymous namespace
std::string SaveDataAttribute::DebugInfo() const {
@@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
PrintSaveDataAttributeWarnings(meta);
const auto save_directory =
- GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
+ GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->CreateDirectoryRelative(save_directory);
@@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
const SaveDataAttribute& meta) const {
const auto save_directory =
- GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
+ GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->GetDirectoryRelative(save_directory);
@@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
}
}
-std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
- SaveDataType type, u64 title_id, u128 user_id,
- u64 save_id) {
+std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
+ SaveDataSpaceId space, SaveDataType type, u64 title_id,
+ u128 user_id, u64 save_id) {
// According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
// be interpreted as the title id of the current process.
if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
}
}
+ // For compat with a future impl.
+ if (std::string future_path =
+ GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
+ !future_path.empty()) {
+ // Check if this location exists, and prefer it over the old.
+ if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
+ LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
+ return future_path;
+ }
+ }
+
std::string out = GetSaveDataSpaceIdPath(space);
switch (type) {
@@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
u128 user_id) const {
- const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
+ const auto path =
+ GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
SaveDataSize new_value) const {
- const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
+ const auto path =
+ GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);
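
For illustration of the new lookup: with a hypothetical title id of 0x0100000000001000 and an all-zero user UUID (values made up; only the layout comes from GetFutureSaveDataPath above), the candidate paths are as follows, and GetFullPath() prefers them over the legacy layout only when the directory already exists:

    /user/save/account/00000000000000000000000000000000/0100000000001000/1   (SaveData)
    /user/save/device/0100000000001000/1                                     (DeviceSaveData)
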
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index a763b94c8..d3633ef03 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -95,8 +95,8 @@ public:
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
- static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
- u64 title_id, u128 user_id, u64 save_id);
+ static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
+ SaveDataType type, u64 title_id, u128 user_id, u64 save_id);
SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp
index 90dd68ff1..b4081fc39 100644
--- a/src/core/frontend/framebuffer_layout.cpp
+++ b/src/core/frontend/framebuffer_layout.cpp
@@ -67,6 +67,8 @@ float EmulationAspectRatio(AspectRatio aspect, float window_aspect_ratio) {
return 3.0f / 4.0f;
case AspectRatio::R21_9:
return 9.0f / 21.0f;
+ case AspectRatio::R16_10:
+ return 10.0f / 16.0f;
case AspectRatio::StretchToWindow:
return window_aspect_ratio;
default:
diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h
index 1561d994e..94683b30f 100644
--- a/src/core/frontend/framebuffer_layout.h
+++ b/src/core/frontend/framebuffer_layout.h
@@ -27,6 +27,7 @@ enum class AspectRatio {
Default,
R4_3,
R21_9,
+ R16_10,
StretchToWindow,
};
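
Note that EmulationAspectRatio() returns height over width, which is why the new 16:10 entry yields 10/16. A worked check, not part of the diff:

    // 16:10 as height/width; 0.625f is exactly representable, so the comparison is exact.
    static_assert(10.0f / 16.0f == 0.625f);
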
diff --git a/src/core/hid/irs_types.h b/src/core/hid/irs_types.h
index 88c5b016d..0d1bfe53f 100644
--- a/src/core/hid/irs_types.h
+++ b/src/core/hid/irs_types.h
@@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 {
Low,
Medium,
High,
- Unkown3, // This level can't be reached
+ Unknown3, // This level can't be reached
};
// This is nn::irsensor::CameraLightTarget
@@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 {
enum class IrCameraInternalStatus : u32 {
Stopped,
FirmwareUpdateNeeded,
- Unkown2,
- Unkown3,
- Unkown4,
+ Unknown2,
+ Unknown3,
+ Unknown4,
FirmwareVersionRequested,
FirmwareVersionIsInvalid,
Ready,
@@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 {
// This is nn::irsensor::MomentProcessorPreprocess
enum class MomentProcessorPreprocess : u32 {
- Unkown0,
- Unkown1,
+ Unknown0,
+ Unknown1,
};
// This is nn::irsensor::PackedMomentProcessorPreprocess
enum class PackedMomentProcessorPreprocess : u8 {
- Unkown0,
- Unkown1,
+ Unknown0,
+ Unknown1,
};
// This is nn::irsensor::PointingStatus
enum class PointingStatus : u32 {
- Unkown0,
- Unkown1,
+ Unknown0,
+ Unknown1,
};
struct IrsRect {
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index d631c0357..aa27be767 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -86,13 +86,13 @@ public:
u32 num_domain_objects{};
const bool always_move_handles{
(static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
- if (!ctx.Session()->IsDomain() || always_move_handles) {
+ if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
num_handles_to_move = num_objects_to_move;
} else {
num_domain_objects = num_objects_to_move;
}
- if (ctx.Session()->IsDomain()) {
+ if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
raw_data_size +=
static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
ctx.write_size += num_domain_objects;
@@ -125,7 +125,8 @@ public:
if (!ctx.IsTipc()) {
AlignWithPadding();
- if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) {
+ if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
+ ctx.HasDomainMessageHeader()) {
IPC::DomainMessageHeader domain_header{};
domain_header.num_objects = num_domain_objects;
PushRaw(domain_header);
@@ -145,14 +146,15 @@ public:
template <class T>
void PushIpcInterface(std::shared_ptr<T> iface) {
- if (context->Session()->IsDomain()) {
+ if (context->Session()->GetSessionRequestManager()->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
kernel.CurrentProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::Sessions, 1);
auto* session = Kernel::KSession::Create(kernel);
- session->Initialize(nullptr, iface->GetServiceName());
+ session->Initialize(nullptr, iface->GetServiceName(),
+ std::make_shared<Kernel::SessionRequestManager>(kernel));
context->AddMoveObject(&session->GetClientSession());
iface->ClientConnected(&session->GetServerSession());
@@ -385,7 +387,7 @@ public:
template <class T>
std::weak_ptr<T> PopIpcInterface() {
- ASSERT(context->Session()->IsDomain());
+ ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainHandler<T>(Pop<u32>() - 1);
}
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 5b3feec66..e4f43a053 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -19,6 +19,7 @@
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"
namespace Kernel {
@@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
}
}
+Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
+ HLERequestContext& context) {
+ Result result = ResultSuccess;
+
+ // If the session has been converted to a domain, handle the domain request
+ if (this->HasSessionRequestHandler(context)) {
+ if (IsDomain() && context.HasDomainMessageHeader()) {
+ result = HandleDomainSyncRequest(server_session, context);
+ // If there is no domain header, the regular session handler is used
+ } else if (this->HasSessionHandler()) {
+ // If this manager has an associated HLE handler, forward the request to it.
+ result = this->SessionHandler().HandleSyncRequest(*server_session, context);
+ }
+ } else {
+ ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
+ IPC::ResponseBuilder rb(context, 2);
+ rb.Push(ResultSuccess);
+ }
+
+ if (convert_to_domain) {
+ ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
+ this->ConvertToDomain();
+ convert_to_domain = false;
+ }
+
+ return result;
+}
+
+Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
+ HLERequestContext& context) {
+ if (!context.HasDomainMessageHeader()) {
+ return ResultSuccess;
+ }
+
+ // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
+ context.SetSessionRequestManager(server_session->GetSessionRequestManager());
+
+ // If there is a DomainMessageHeader, then this is CommandType "Request"
+ const auto& domain_message_header = context.GetDomainMessageHeader();
+ const u32 object_id{domain_message_header.object_id};
+ switch (domain_message_header.command) {
+ case IPC::DomainMessageHeader::CommandType::SendMessage:
+ if (object_id > this->DomainHandlerCount()) {
+ LOG_CRITICAL(IPC,
+ "object_id {} is too big! This probably means a recent service call "
+ "needed to return a new interface!",
+ object_id);
+ ASSERT(false);
+ return ResultSuccess; // Ignore error if asserts are off
+ }
+ if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
+ return strong_ptr->HandleSyncRequest(*server_session, context);
+ } else {
+ ASSERT(false);
+ return ResultSuccess;
+ }
+
+ case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
+ LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
+
+ this->CloseDomainHandler(object_id - 1);
+
+ IPC::ResponseBuilder rb{context, 2};
+ rb.Push(ResultSuccess);
+ return ResultSuccess;
+ }
+ }
+
+ LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
+ ASSERT(false);
+ return ResultSuccess;
+}
+
+Result SessionRequestManager::QueueSyncRequest(KSession* parent,
+ std::shared_ptr<HLERequestContext>&& context) {
+ // Ensure we have a session request handler
+ if (this->HasSessionRequestHandler(*context)) {
+ if (auto strong_ptr = this->GetServiceThread().lock()) {
+ strong_ptr->QueueSyncRequest(*parent, std::move(context));
+ } else {
+ ASSERT_MSG(false, "strong_ptr is nullptr!");
+ }
+ } else {
+ ASSERT_MSG(false, "handler is invalid!");
+ }
+
+ return ResultSuccess;
+}
+
void SessionRequestHandler::ClientConnected(KServerSession* session) {
- session->ClientConnected(shared_from_this());
+ session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());
// Ensure our server session is tracked globally.
kernel.RegisterServerObject(session);
}
-void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
- session->ClientDisconnected();
-}
+void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}
HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
KServerSession* server_session_, KThread* thread_)
@@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
// Padding to align to 16 bytes
rp.AlignWithPadding();
- if (Session()->IsDomain() &&
+ if (Session()->GetSessionRequestManager()->IsDomain() &&
((command_header->type == IPC::CommandType::Request ||
command_header->type == IPC::CommandType::RequestWithContext) ||
!incoming)) {
@@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
if (incoming || domain_message_header) {
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
- if (Session()->IsDomain()) {
+ if (Session()->GetSessionRequestManager()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
}
}
@@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
- if (Session()->IsDomain()) {
+ if (server_session->GetSessionRequestManager()->IsDomain()) {
current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
- for (const auto& object : outgoing_domain_objects) {
- server_session->AppendDomainHandler(object);
- cmd_buf[current_offset++] =
- static_cast<u32_le>(server_session->NumDomainRequestHandlers());
+ for (auto& object : outgoing_domain_objects) {
+ server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
+ cmd_buf[current_offset++] = static_cast<u32_le>(
+ server_session->GetSessionRequestManager()->DomainHandlerCount());
}
}
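
With the domain state now owned by SessionRequestManager, HLE code reaches it through the session's manager rather than through KServerSession itself. A sketch of how a service handler might flag a session for domain conversion under the new API (the handler is hypothetical; the accessors are the ones introduced in this diff):

    void IHypotheticalService::ConvertToDomain(Kernel::HLERequestContext& ctx) {
        // Defer the conversion until the current request completes,
        // mirroring the convert_to_domain flag in CompleteSyncRequest().
        ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultSuccess);
    }
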
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 99265ce90..a0522bca0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -43,13 +43,13 @@ class Domain;
class HLERequestContext;
class KAutoObject;
class KernelCore;
+class KEvent;
class KHandleTable;
class KProcess;
class KServerSession;
class KThread;
class KReadableEvent;
class KSession;
-class KWritableEvent;
class ServiceThread;
enum class ThreadWakeupReason;
@@ -121,6 +121,10 @@ public:
is_domain = true;
}
+ void ConvertToDomainOnRequestEnd() {
+ convert_to_domain = true;
+ }
+
std::size_t DomainHandlerCount() const {
return domain_handlers.size();
}
@@ -164,7 +168,12 @@ public:
bool HasSessionRequestHandler(const HLERequestContext& context) const;
+ Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
+ Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
+ Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context);
+
private:
+ bool convert_to_domain{};
bool is_domain{};
SessionRequestHandlerPtr session_handler;
std::vector<SessionRequestHandlerPtr> domain_handlers;
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 9b6b284d0..477e4e407 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -18,6 +18,7 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
@@ -34,6 +35,7 @@ namespace Kernel::Init {
HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
+ HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.
if (size > 0) {
- void* backing_kernel_memory{
- system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+ void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
+ TranslateSlabAddrToPhysical(memory_layout, start))};
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
@@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
ASSERT(slab_address != 0);
// Initialize the slabheap.
- KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+ KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index cc2a0f7ca..10265c23c 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -18,7 +18,6 @@
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/kernel/k_writable_event.h"
namespace Kernel {
@@ -42,13 +41,12 @@ static_assert(ClassToken<KPort> == 0b10000101'00000000);
static_assert(ClassToken<KSession> == 0b00011001'00000000);
static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000);
static_assert(ClassToken<KEvent> == 0b01001001'00000000);
-static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000);
// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000);
-static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
+static_assert(ClassToken<KTransferMemory> == 0b01010001'00000000);
// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
-static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
+static_assert(ClassToken<KCodeMemory> == 0b10100001'00000000);
// Ensure that the token hierarchy is correct.
@@ -73,13 +71,12 @@ static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>)
static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>));
-static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
-static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
+static_assert(ClassToken<KTransferMemory> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
-static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
+static_assert(ClassToken<KCodeMemory> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
// Ensure that the token hierarchy reflects the class hierarchy.
@@ -110,7 +107,6 @@ static_assert(std::is_final_v<KPort> && std::is_base_of_v<KAutoObject, KPort>);
static_assert(std::is_final_v<KSession> && std::is_base_of_v<KAutoObject, KSession>);
static_assert(std::is_final_v<KSharedMemory> && std::is_base_of_v<KAutoObject, KSharedMemory>);
static_assert(std::is_final_v<KEvent> && std::is_base_of_v<KAutoObject, KEvent>);
-static_assert(std::is_final_v<KWritableEvent> && std::is_base_of_v<KAutoObject, KWritableEvent>);
// static_assert(std::is_final_v<KLightClientSession> &&
// std::is_base_of_v<KAutoObject, KLightClientSession>);
// static_assert(std::is_final_v<KLightServerSession> &&
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index c9001ae3d..ab20e00ff 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -101,7 +101,6 @@ public:
KSession,
KSharedMemory,
KEvent,
- KWritableEvent,
KLightClientSession,
KLightServerSession,
KTransferMemory,
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index b2a887b14..b4197a8d5 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
@@ -10,6 +11,8 @@
namespace Kernel {
+static constexpr u32 MessageBufferSize = 0x100;
+
KClientSession::KClientSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::~KClientSession() = default;
@@ -21,10 +24,17 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}
-Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
- // Signal the server session that new data is available
- return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
+Result KClientSession::SendSyncRequest() {
+ // Create a session request.
+ KSessionRequest* request = KSessionRequest::Create(kernel);
+ R_UNLESS(request != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ request->Close(); });
+
+ // Initialize the request.
+ request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
+
+ // Send the request.
+ return parent->GetServerSession().OnRequest(request);
}
} // namespace Kernel
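
SendSyncRequest() no longer runs the request inline; it packages the caller's TLS buffer into a KSessionRequest and hands it to the server side via OnRequest(). A sketch of the calling (svc) side under the new flow, loosely following yuzu's svc wrapper shape (exact names in the svc layer may differ):

    Result SendSyncRequest(Core::System& system, Handle handle) {
        // Look up the client session from the caller's handle table.
        KScopedAutoObject session = system.Kernel()
                                        .CurrentProcess()
                                        ->GetHandleTable()
                                        .GetObject<KClientSession>(handle);
        R_UNLESS(session.IsNotNull(), ResultInvalidHandle);

        // The session builds the KSessionRequest and enqueues it internally.
        return session->SendSyncRequest();
    }
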
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index 0c750d756..b4a19c546 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -46,8 +46,7 @@ public:
return parent;
}
- Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result SendSyncRequest();
void OnServerClosed();
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index da57ceb21..4b1c134d4 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
// Clear the memory.
for (const auto& block : m_page_group.Nodes()) {
- std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+ std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 000000000..9076c8fa3
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+class KDynamicPageManager {
+public:
+ class PageBuffer {
+ private:
+ u8 m_buffer[PageSize];
+ };
+ static_assert(sizeof(PageBuffer) == PageSize);
+
+public:
+ KDynamicPageManager() = default;
+
+ template <typename T>
+ T* GetPointer(VAddr addr) {
+ return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
+ }
+
+ template <typename T>
+ const T* GetPointer(VAddr addr) const {
+        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
+ }
+
+ Result Initialize(VAddr addr, size_t sz) {
+ // We need to have positive size.
+ R_UNLESS(sz > 0, ResultOutOfMemory);
+ m_backing_memory.resize(sz);
+
+ // Calculate management overhead.
+ const size_t management_size =
+ KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
+ const size_t allocatable_size = sz - management_size;
+
+ // Set tracking fields.
+ m_address = addr;
+ m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
+ m_count = allocatable_size / sizeof(PageBuffer);
+ R_UNLESS(m_count > 0, ResultOutOfMemory);
+
+ // Clear the management region.
+ u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
+ std::memset(management_ptr, 0, management_size);
+
+ // Initialize the bitmap.
+ m_page_bitmap.Initialize(management_ptr, m_count);
+
+ // Free the pages to the bitmap.
+ for (size_t i = 0; i < m_count; i++) {
+ // Ensure the freed page is all-zero.
+ std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
+
+ // Set the bit for the free page.
+ m_page_bitmap.SetBit(i);
+ }
+
+ R_SUCCEED();
+ }
+
+ VAddr GetAddress() const {
+ return m_address;
+ }
+ size_t GetSize() const {
+ return m_size;
+ }
+ size_t GetUsed() const {
+ return m_used;
+ }
+ size_t GetPeak() const {
+ return m_peak;
+ }
+ size_t GetCount() const {
+ return m_count;
+ }
+
+ PageBuffer* Allocate() {
+ // Take the lock.
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ KScopedSpinLock lk(m_lock);
+
+ // Find a random free block.
+ s64 soffset = m_page_bitmap.FindFreeBlock(true);
+ if (soffset < 0) [[unlikely]] {
+ return nullptr;
+ }
+
+ const size_t offset = static_cast<size_t>(soffset);
+
+ // Update our tracking.
+ m_page_bitmap.ClearBit(offset);
+ m_peak = std::max(m_peak, (++m_used));
+
+ return GetPointer<PageBuffer>(m_address) + offset;
+ }
+
+ void Free(PageBuffer* pb) {
+ // Ensure all pages in the heap are zero.
+ std::memset(pb, 0, PageSize);
+
+ // Take the lock.
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ KScopedSpinLock lk(m_lock);
+
+ // Set the bit for the free page.
+        const size_t offset = static_cast<size_t>(pb - GetPointer<PageBuffer>(m_address));
+ m_page_bitmap.SetBit(offset);
+
+ // Decrement our used count.
+ --m_used;
+ }
+
+private:
+ KSpinLock m_lock;
+ KPageBitmap m_page_bitmap;
+ size_t m_used{};
+ size_t m_peak{};
+ size_t m_count{};
+ VAddr m_address{};
+ size_t m_size{};
+
+ // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
+ std::vector<u8> m_backing_memory;
+};
+
+} // namespace Kernel
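
A usage sketch for the new allocator, with a made-up base address and size (in the kernel these come from the memory layout):

    // Carve page-sized buffers out of a dynamically managed region.
    Kernel::KDynamicPageManager page_manager;
    ASSERT(R_SUCCEEDED(page_manager.Initialize(0x80000000, 64 * Kernel::PageSize)));

    Kernel::KDynamicPageManager::PageBuffer* page = page_manager.Allocate();
    if (page != nullptr) {
        // ... use the zeroed 4 KiB buffer as slab backing ...
        page_manager.Free(page);  // re-zeroed, and its bitmap bit is set again
    }
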
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
new file mode 100644
index 000000000..1ce517e8e
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/k_memory_block.h"
+
+namespace Kernel {
+
+template <typename T, bool ClearNode = false>
+class KDynamicResourceManager {
+ YUZU_NON_COPYABLE(KDynamicResourceManager);
+ YUZU_NON_MOVEABLE(KDynamicResourceManager);
+
+public:
+ using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
+
+public:
+ constexpr KDynamicResourceManager() = default;
+
+ constexpr size_t GetSize() const {
+ return m_slab_heap->GetSize();
+ }
+ constexpr size_t GetUsed() const {
+ return m_slab_heap->GetUsed();
+ }
+ constexpr size_t GetPeak() const {
+ return m_slab_heap->GetPeak();
+ }
+ constexpr size_t GetCount() const {
+ return m_slab_heap->GetCount();
+ }
+
+ void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
+ m_page_allocator = page_allocator;
+ m_slab_heap = slab_heap;
+ }
+
+ T* Allocate() const {
+ return m_slab_heap->Allocate(m_page_allocator);
+ }
+
+ void Free(T* t) const {
+ m_slab_heap->Free(t);
+ }
+
+private:
+ KDynamicPageManager* m_page_allocator{};
+ DynamicSlabType* m_slab_heap{};
+};
+
+class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
+
+using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
+
+} // namespace Kernel
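
The manager is a thin facade binding a page-level allocator to a typed slab heap. A wiring sketch for memory blocks (addresses and counts are illustrative; the real values come from kernel initialization):

    Kernel::KDynamicPageManager page_manager;
    Kernel::KMemoryBlockSlabHeap block_heap;
    Kernel::KMemoryBlockSlabManager block_manager;

    page_manager.Initialize(0x80000000, 64 * Kernel::PageSize);
    block_heap.Initialize(&page_manager, 512);            // pre-seed 512 KMemoryBlocks
    block_manager.Initialize(&page_manager, &block_heap); // Allocate()/Free() now usable

    Kernel::KMemoryBlock* block = block_manager.Allocate();
    // ... hand the block to a KMemoryBlockManager ...
    block_manager.Free(block);
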
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
new file mode 100644
index 000000000..3a0ddd050
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_funcs.h"
+#include "core/hle/kernel/k_dynamic_page_manager.h"
+#include "core/hle/kernel/k_slab_heap.h"
+
+namespace Kernel {
+
+template <typename T, bool ClearNode = false>
+class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
+ YUZU_NON_COPYABLE(KDynamicSlabHeap);
+ YUZU_NON_MOVEABLE(KDynamicSlabHeap);
+
+public:
+ constexpr KDynamicSlabHeap() = default;
+
+ constexpr VAddr GetAddress() const {
+ return m_address;
+ }
+ constexpr size_t GetSize() const {
+ return m_size;
+ }
+ constexpr size_t GetUsed() const {
+ return m_used.load();
+ }
+ constexpr size_t GetPeak() const {
+ return m_peak.load();
+ }
+ constexpr size_t GetCount() const {
+ return m_count.load();
+ }
+
+ constexpr bool IsInRange(VAddr addr) const {
+ return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
+ }
+
+ void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
+ ASSERT(page_allocator != nullptr);
+
+ // Initialize members.
+ m_address = page_allocator->GetAddress();
+ m_size = page_allocator->GetSize();
+
+ // Initialize the base allocator.
+ KSlabHeapImpl::Initialize();
+
+ // Allocate until we have the correct number of objects.
+ while (m_count.load() < num_objects) {
+ auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+ ASSERT(allocated != nullptr);
+
+ for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
+ KSlabHeapImpl::Free(allocated + i);
+ }
+
+ m_count += sizeof(PageBuffer) / sizeof(T);
+ }
+ }
+
+ T* Allocate(KDynamicPageManager* page_allocator) {
+ T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
+
+ // If we successfully allocated and we should clear the node, do so.
+ if constexpr (ClearNode) {
+ if (allocated != nullptr) [[likely]] {
+ reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
+ }
+ }
+
+ // If we fail to allocate, try to get a new page from our next allocator.
+ if (allocated == nullptr) [[unlikely]] {
+ if (page_allocator != nullptr) {
+ allocated = reinterpret_cast<T*>(page_allocator->Allocate());
+ if (allocated != nullptr) {
+ // If we succeeded in getting a page, free the rest to our slab.
+ for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
+ KSlabHeapImpl::Free(allocated + i);
+ }
+ m_count += sizeof(PageBuffer) / sizeof(T);
+ }
+ }
+ }
+
+ if (allocated != nullptr) [[likely]] {
+ // Construct the object.
+ std::construct_at(allocated);
+
+ // Update our tracking.
+ const size_t used = ++m_used;
+ size_t peak = m_peak.load();
+ while (peak < used) {
+ if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
+ break;
+ }
+ }
+ }
+
+ return allocated;
+ }
+
+ void Free(T* t) {
+ KSlabHeapImpl::Free(t);
+ --m_used;
+ }
+
+private:
+ using PageBuffer = KDynamicPageManager::PageBuffer;
+
+private:
+ std::atomic<size_t> m_used{};
+ std::atomic<size_t> m_peak{};
+ std::atomic<size_t> m_count{};
+ VAddr m_address{};
+ size_t m_size{};
+};
+
+} // namespace Kernel
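
The peak tracking in Allocate() is the standard lock-free maximum idiom: loop on compare_exchange_weak until the stored peak is at least the observed usage. A self-contained illustration of the same pattern:

    #include <atomic>
    #include <cstddef>

    // Relaxed "atomic max", as used for m_peak above.
    void UpdatePeak(std::atomic<std::size_t>& peak, std::size_t used) {
        std::size_t observed = peak.load();
        while (observed < used) {
            // On failure, observed is refreshed with the current value and
            // the loop re-checks whether an update is still needed.
            if (peak.compare_exchange_weak(observed, used, std::memory_order_relaxed)) {
                break;
            }
        }
    }
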
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index e52fafbc7..78ca59463 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -8,39 +8,45 @@
namespace Kernel {
KEvent::KEvent(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, readable_event{kernel_}, writable_event{
- kernel_} {}
+ : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
KEvent::~KEvent() = default;
-void KEvent::Initialize(std::string&& name_, KProcess* owner_) {
- // Increment reference count.
- // Because reference count is one on creation, this will result
- // in a reference count of two. Thus, when both readable and
- // writable events are closed this object will be destroyed.
- Open();
+void KEvent::Initialize(KProcess* owner) {
+ // Create our readable event.
+ KAutoObject::Create(std::addressof(m_readable_event));
- // Create our sub events.
- KAutoObject::Create(std::addressof(readable_event));
- KAutoObject::Create(std::addressof(writable_event));
-
- // Initialize our sub sessions.
- readable_event.Initialize(this, name_ + ":Readable");
- writable_event.Initialize(this, name_ + ":Writable");
+ // Initialize our readable event.
+ m_readable_event.Initialize(this);
// Set our owner process.
- owner = owner_;
- owner->Open();
+ m_owner = owner;
+ m_owner->Open();
// Mark initialized.
- name = std::move(name_);
- initialized = true;
+ m_initialized = true;
}
void KEvent::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
}
+Result KEvent::Signal() {
+ KScopedSchedulerLock sl{kernel};
+
+ R_SUCCEED_IF(m_readable_event_destroyed);
+
+ return m_readable_event.Signal();
+}
+
+Result KEvent::Clear() {
+ KScopedSchedulerLock sl{kernel};
+
+ R_SUCCEED_IF(m_readable_event_destroyed);
+
+ return m_readable_event.Clear();
+}
+
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 2ff828feb..48ce7d9a0 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -4,14 +4,12 @@
#pragma once
#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KernelCore;
class KReadableEvent;
-class KWritableEvent;
class KProcess;
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
@@ -21,37 +19,40 @@ public:
explicit KEvent(KernelCore& kernel_);
~KEvent() override;
- void Initialize(std::string&& name, KProcess* owner_);
+ void Initialize(KProcess* owner);
void Finalize() override;
bool IsInitialized() const override {
- return initialized;
+ return m_initialized;
}
uintptr_t GetPostDestroyArgument() const override {
- return reinterpret_cast<uintptr_t>(owner);
+ return reinterpret_cast<uintptr_t>(m_owner);
}
KProcess* GetOwner() const override {
- return owner;
+ return m_owner;
}
KReadableEvent& GetReadableEvent() {
- return readable_event;
- }
-
- KWritableEvent& GetWritableEvent() {
- return writable_event;
+ return m_readable_event;
}
static void PostDestroy(uintptr_t arg);
+ Result Signal();
+ Result Clear();
+
+ void OnReadableEventDestroyed() {
+ m_readable_event_destroyed = true;
+ }
+
private:
- KReadableEvent readable_event;
- KWritableEvent writable_event;
- KProcess* owner{};
- bool initialized{};
+ KReadableEvent m_readable_event;
+ KProcess* m_owner{};
+ bool m_initialized{};
+ bool m_readable_event_destroyed{};
};
} // namespace Kernel
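
The writable half is gone: code signals the KEvent directly, and only the readable side is handed out. A sketch of the new usage (creation glue is simplified and hypothetical; in yuzu, services typically go through a helper context rather than calling Create directly):

    Kernel::KEvent* event = Kernel::KEvent::Create(kernel);
    event->Initialize(/*owner=*/kernel.CurrentProcess());

    event->Signal();  // was event->GetWritableEvent().Signal()
    Kernel::KReadableEvent& readable = event->GetReadableEvent();  // shared with the guest
    event->Clear();
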
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index 1b577a5b3..4a6b60d26 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -11,29 +11,34 @@
namespace Kernel::KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id) {
- auto* process = kernel.CurrentProcess();
- if (!process) {
- return;
- }
-
// Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt();
auto& current_thread = GetCurrentThread(kernel);
- // If the user disable count is set, we may need to pin the current thread.
- if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
- KScopedSchedulerLock sl{kernel};
+ if (auto* process = kernel.CurrentProcess(); process) {
+ // If the user disable count is set, we may need to pin the current thread.
+ if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+ KScopedSchedulerLock sl{kernel};
- // Pin the current thread.
- process->PinCurrentThread(core_id);
+ // Pin the current thread.
+ process->PinCurrentThread(core_id);
- // Set the interrupt flag for the thread.
- GetCurrentThread(kernel).SetInterruptFlag();
+ // Set the interrupt flag for the thread.
+ GetCurrentThread(kernel).SetInterruptFlag();
+ }
}
// Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
+void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
+ for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
+ if (core_mask & (1ULL << core_id)) {
+ kernel.PhysicalCore(core_id).Interrupt();
+ }
+ }
+}
+
} // namespace Kernel::KInterruptManager
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
index f103dfe3f..803dc9211 100644
--- a/src/core/hle/kernel/k_interrupt_manager.h
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -11,6 +11,8 @@ class KernelCore;
namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id);
-}
+void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
+
+} // namespace KInterruptManager
} // namespace Kernel
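
SendInterProcessorInterrupt() treats the mask as one bit per core, bounded by Core::Hardware::NUM_CPU_CORES. For example:

    // Interrupt cores 0 and 2 only.
    constexpr u64 core_mask = (1ULL << 0) | (1ULL << 2);
    Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
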
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
index 78859ced3..29ebd16b7 100644
--- a/src/core/hle/kernel/k_linked_list.h
+++ b/src/core/hle/kernel/k_linked_list.h
@@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
public KSlabAllocated<KLinkedListNode> {
public:
+ explicit KLinkedListNode(KernelCore&) {}
KLinkedListNode() = default;
void Initialize(void* it) {
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 18df1f836..9444f6bd2 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -6,6 +6,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
enum class KMemoryAttribute : u8 {
None = 0x00,
- Mask = 0x7F,
- All = Mask,
- DontCareMask = 0x80,
+ All = 0xFF,
+ UserMask = All,
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
SetMask = Uncached,
-
- IpcAndDeviceMapped = IpcLocked | DeviceShared,
- LockedAndIpcLocked = Locked | IpcLocked,
- DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
-static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
- static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
+enum class KMemoryBlockDisableMergeAttribute : u8 {
+ None = 0,
+ Normal = (1u << 0),
+ DeviceLeft = (1u << 1),
+ IpcLeft = (1u << 2),
+ Locked = (1u << 3),
+ DeviceRight = (1u << 4),
+
+ AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
+ AllRight = DeviceRight,
+};
+DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
struct KMemoryInfo {
- VAddr addr{};
- std::size_t size{};
- KMemoryState state{};
- KMemoryPermission perm{};
- KMemoryAttribute attribute{};
- KMemoryPermission original_perm{};
- u16 ipc_lock_count{};
- u16 device_use_count{};
+ uintptr_t m_address;
+ size_t m_size;
+ KMemoryState m_state;
+ u16 m_device_disable_merge_left_count;
+ u16 m_device_disable_merge_right_count;
+ u16 m_ipc_lock_count;
+ u16 m_device_use_count;
+ u16 m_ipc_disable_merge_count;
+ KMemoryPermission m_permission;
+ KMemoryAttribute m_attribute;
+ KMemoryPermission m_original_permission;
+ KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
return {
- addr,
- size,
- static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
- static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
- static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
- ipc_lock_count,
- device_use_count,
+ .addr = m_address,
+ .size = m_size,
+ .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
+ .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
+ .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
+ .ipc_refcount = m_ipc_lock_count,
+ .device_refcount = m_device_use_count,
+ .padding = {},
};
}
- constexpr VAddr GetAddress() const {
- return addr;
+ constexpr uintptr_t GetAddress() const {
+ return m_address;
+ }
+
+ constexpr size_t GetSize() const {
+ return m_size;
}
- constexpr std::size_t GetSize() const {
- return size;
+
+ constexpr size_t GetNumPages() const {
+ return this->GetSize() / PageSize;
}
- constexpr std::size_t GetNumPages() const {
- return GetSize() / PageSize;
+
+ constexpr uintptr_t GetEndAddress() const {
+ return this->GetAddress() + this->GetSize();
}
- constexpr VAddr GetEndAddress() const {
- return GetAddress() + GetSize();
+
+ constexpr uintptr_t GetLastAddress() const {
+ return this->GetEndAddress() - 1;
}
- constexpr VAddr GetLastAddress() const {
- return GetEndAddress() - 1;
+
+ constexpr u16 GetIpcLockCount() const {
+ return m_ipc_lock_count;
}
+
+ constexpr u16 GetIpcDisableMergeCount() const {
+ return m_ipc_disable_merge_count;
+ }
+
constexpr KMemoryState GetState() const {
- return state;
+ return m_state;
+ }
+
+ constexpr KMemoryPermission GetPermission() const {
+ return m_permission;
}
+
+ constexpr KMemoryPermission GetOriginalPermission() const {
+ return m_original_permission;
+ }
+
constexpr KMemoryAttribute GetAttribute() const {
- return attribute;
+ return m_attribute;
}
- constexpr KMemoryPermission GetPermission() const {
- return perm;
+
+ constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+ return m_disable_merge_attribute;
}
};
-class KMemoryBlock final {
- friend class KMemoryBlockManager;
-
+class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
- VAddr addr{};
- std::size_t num_pages{};
- KMemoryState state{KMemoryState::None};
- u16 ipc_lock_count{};
- u16 device_use_count{};
- KMemoryPermission perm{KMemoryPermission::None};
- KMemoryPermission original_perm{KMemoryPermission::None};
- KMemoryAttribute attribute{KMemoryAttribute::None};
+ u16 m_device_disable_merge_left_count;
+ u16 m_device_disable_merge_right_count;
+ VAddr m_address;
+ size_t m_num_pages;
+ KMemoryState m_memory_state;
+ u16 m_ipc_lock_count;
+ u16 m_device_use_count;
+ u16 m_ipc_disable_merge_count;
+ KMemoryPermission m_permission;
+ KMemoryPermission m_original_permission;
+ KMemoryAttribute m_attribute;
+ KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
public:
static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
}
public:
- constexpr KMemoryBlock() = default;
- constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
- KMemoryPermission perm_, KMemoryAttribute attribute_)
- : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
-
constexpr VAddr GetAddress() const {
- return addr;
+ return m_address;
}
- constexpr std::size_t GetNumPages() const {
- return num_pages;
+ constexpr size_t GetNumPages() const {
+ return m_num_pages;
}
- constexpr std::size_t GetSize() const {
- return GetNumPages() * PageSize;
+ constexpr size_t GetSize() const {
+ return this->GetNumPages() * PageSize;
}
constexpr VAddr GetEndAddress() const {
- return GetAddress() + GetSize();
+ return this->GetAddress() + this->GetSize();
}
constexpr VAddr GetLastAddress() const {
- return GetEndAddress() - 1;
+ return this->GetEndAddress() - 1;
+ }
+
+ constexpr u16 GetIpcLockCount() const {
+ return m_ipc_lock_count;
+ }
+
+ constexpr u16 GetIpcDisableMergeCount() const {
+ return m_ipc_disable_merge_count;
+ }
+
+ constexpr KMemoryPermission GetPermission() const {
+ return m_permission;
+ }
+
+ constexpr KMemoryPermission GetOriginalPermission() const {
+ return m_original_permission;
+ }
+
+ constexpr KMemoryAttribute GetAttribute() const {
+ return m_attribute;
}
constexpr KMemoryInfo GetMemoryInfo() const {
return {
- GetAddress(), GetSize(), state, perm,
- attribute, original_perm, ipc_lock_count, device_use_count,
+ .m_address = this->GetAddress(),
+ .m_size = this->GetSize(),
+ .m_state = m_memory_state,
+ .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
+ .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
+ .m_ipc_lock_count = m_ipc_lock_count,
+ .m_device_use_count = m_device_use_count,
+ .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
+ .m_permission = m_permission,
+ .m_attribute = m_attribute,
+ .m_original_permission = m_original_permission,
+ .m_disable_merge_attribute = m_disable_merge_attribute,
};
}
- void ShareToDevice(KMemoryPermission /*new_perm*/) {
- ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
- device_use_count == 0);
- attribute |= KMemoryAttribute::DeviceShared;
- const u16 new_use_count{++device_use_count};
- ASSERT(new_use_count > 0);
+public:
+ explicit KMemoryBlock() = default;
+
+ constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ KMemoryAttribute attr)
+ : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
+ m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
+ m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
+ m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
+ m_original_permission(KMemoryPermission::None), m_attribute(attr),
+ m_disable_merge_attribute() {}
+
+ constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ KMemoryAttribute attr) {
+ m_device_disable_merge_left_count = 0;
+ m_device_disable_merge_right_count = 0;
+ m_address = addr;
+ m_num_pages = np;
+ m_memory_state = ms;
+ m_ipc_lock_count = 0;
+ m_device_use_count = 0;
+ m_permission = p;
+ m_original_permission = KMemoryPermission::None;
+ m_attribute = attr;
+ m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
+ }
+
+ constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
+ constexpr auto AttributeIgnoreMask =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+ return m_memory_state == s && m_permission == p &&
+ (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+ }
+
+ constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
+ return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
+ m_original_permission == rhs.m_original_permission &&
+ m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
+ m_device_use_count == rhs.m_device_use_count;
+ }
+
+ constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
+ return this->HasSameProperties(rhs) &&
+ (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
+ KMemoryBlockDisableMergeAttribute::None &&
+ (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
+ KMemoryBlockDisableMergeAttribute::None;
}
- void UnshareToDevice(KMemoryPermission /*new_perm*/) {
- ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
- const u16 prev_use_count{device_use_count--};
- ASSERT(prev_use_count > 0);
- if (prev_use_count == 1) {
- attribute &= ~KMemoryAttribute::DeviceShared;
+ constexpr bool Contains(VAddr addr) const {
+ return this->GetAddress() <= addr && addr <= this->GetEndAddress();
+ }
+
+ constexpr void Add(const KMemoryBlock& added_block) {
+ ASSERT(added_block.GetNumPages() > 0);
+ ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
+ this->GetEndAddress() + added_block.GetSize() - 1);
+
+ m_num_pages += added_block.GetNumPages();
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute | added_block.m_disable_merge_attribute);
+ m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
+ }
+
+ constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
+ bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
+ ASSERT(m_original_permission == KMemoryPermission::None);
+ ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
+
+ m_memory_state = s;
+ m_permission = p;
+ m_attribute = static_cast<KMemoryAttribute>(
+ a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+
+ if (set_disable_merge_attr && set_mask != 0) {
+ m_disable_merge_attribute = m_disable_merge_attribute |
+ static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
+ }
+ if (clear_mask != 0) {
+ m_disable_merge_attribute = m_disable_merge_attribute &
+ static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
}
}
-private:
- constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
- constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
- KMemoryAttribute::IpcLocked |
- KMemoryAttribute::DeviceShared};
- return state == s && perm == p &&
- (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+ constexpr void Split(KMemoryBlock* block, VAddr addr) {
+ ASSERT(this->GetAddress() < addr);
+ ASSERT(this->Contains(addr));
+ ASSERT(Common::IsAligned(addr, PageSize));
+
+ block->m_address = m_address;
+ block->m_num_pages = (addr - this->GetAddress()) / PageSize;
+ block->m_memory_state = m_memory_state;
+ block->m_ipc_lock_count = m_ipc_lock_count;
+ block->m_device_use_count = m_device_use_count;
+ block->m_permission = m_permission;
+ block->m_original_permission = m_original_permission;
+ block->m_attribute = m_attribute;
+ block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
+ block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
+ block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
+ block->m_device_disable_merge_right_count = 0;
+
+ m_address = addr;
+ m_num_pages -= block->m_num_pages;
+
+ m_ipc_disable_merge_count = 0;
+ m_device_disable_merge_left_count = 0;
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}
- constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
- return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
- attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
- device_use_count == rhs.device_use_count;
+ constexpr void UpdateDeviceDisableMergeStateForShareLeft(
+ [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ if (left) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
+ const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
+ ASSERT(new_device_disable_merge_left_count > 0);
+ }
}
- constexpr bool Contains(VAddr start) const {
- return GetAddress() <= start && start <= GetEndAddress();
+ constexpr void UpdateDeviceDisableMergeStateForShareRight(
+ [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ if (right) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
+ const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
+ ASSERT(new_device_disable_merge_right_count > 0);
+ }
+ }
+
+ constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
+ bool right) {
+ this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
+ this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
- constexpr void Add(std::size_t count) {
- ASSERT(count > 0);
- ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
+ constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
+ // We must either be shared or have a zero lock count.
+ ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
+ m_device_use_count == 0);
- num_pages += count;
+ // Share.
+ const u16 new_count = ++m_device_use_count;
+ ASSERT(new_count > 0);
+
+ m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
+
+ this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
- constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
- KMemoryAttribute new_attribute) {
- ASSERT(original_perm == KMemoryPermission::None);
- ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
+ constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
+ [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
- state = new_state;
- perm = new_perm;
+ if (left) {
+ if (!m_device_disable_merge_left_count) {
+ return;
+ }
+ --m_device_disable_merge_left_count;
+ }
- attribute = static_cast<KMemoryAttribute>(
- new_attribute |
- (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+ m_device_disable_merge_left_count =
+ std::min(m_device_disable_merge_left_count, m_device_use_count);
+
+ if (m_device_disable_merge_left_count == 0) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
+ }
}
- constexpr KMemoryBlock Split(VAddr split_addr) {
- ASSERT(GetAddress() < split_addr);
- ASSERT(Contains(split_addr));
- ASSERT(Common::IsAligned(split_addr, PageSize));
+ constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
+ [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ if (right) {
+ const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
+ ASSERT(old_device_disable_merge_right_count > 0);
+ if (old_device_disable_merge_right_count == 1) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
+ }
+ }
+ }
- KMemoryBlock block;
- block.addr = addr;
- block.num_pages = (split_addr - GetAddress()) / PageSize;
- block.state = state;
- block.ipc_lock_count = ipc_lock_count;
- block.device_use_count = device_use_count;
- block.perm = perm;
- block.original_perm = original_perm;
- block.attribute = attribute;
+ constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
+ bool right) {
+ this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
+ this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+ }
- addr = split_addr;
- num_pages -= block.num_pages;
+ constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
+ // We must be shared.
+ ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+ // Unshare.
+ const u16 old_count = m_device_use_count--;
+ ASSERT(old_count > 0);
+
+ if (old_count == 1) {
+ m_attribute =
+ static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+ }
+
+ this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
+ }
+
+ constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
+ // We must be shared.
+ ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+
+ // Unshare.
+ const u16 old_count = m_device_use_count--;
+ ASSERT(old_count > 0);
+
+ if (old_count == 1) {
+ m_attribute =
+ static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
+ }
+
+ this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
+ }
+
+ constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ // We must either be locked or have a zero lock count.
+ ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
+ m_ipc_lock_count == 0);
+
+ // Lock.
+ const u16 new_lock_count = ++m_ipc_lock_count;
+ ASSERT(new_lock_count > 0);
+
+ // If this is our first lock, update our permissions.
+ if (new_lock_count == 1) {
+ ASSERT(m_original_permission == KMemoryPermission::None);
+ ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
+ (m_permission | KMemoryPermission::NotMapped));
+ ASSERT((m_permission & KMemoryPermission::UserExecute) !=
+ KMemoryPermission::UserExecute ||
+ (new_perm == KMemoryPermission::UserRead));
+ m_original_permission = m_permission;
+ m_permission = static_cast<KMemoryPermission>(
+ (new_perm & KMemoryPermission::IpcLockChangeMask) |
+ (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
+ }
+ m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
+
+ if (left) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
+ const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
+ ASSERT(new_ipc_disable_merge_count > 0);
+ }
+ }
+
+ constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
+ [[maybe_unused]] bool right) {
+ // We must be locked.
+ ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
+
+ // Unlock.
+ const u16 old_lock_count = m_ipc_lock_count--;
+ ASSERT(old_lock_count > 0);
+
+ // If this is our last unlock, update our permissions.
+ if (old_lock_count == 1) {
+ ASSERT(m_original_permission != KMemoryPermission::None);
+ m_permission = m_original_permission;
+ m_original_permission = KMemoryPermission::None;
+ m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
+ }
+
+ if (left) {
+ const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
+ ASSERT(old_ipc_disable_merge_count > 0);
+ if (old_ipc_disable_merge_count == 1) {
+ m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
+ m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
+ }
+ }
+ }
- return block;
+ constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
+ return m_disable_merge_attribute;
}
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
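For orientation, a minimal standalone model of the Split()/CanMergeWith() edge bookkeeping above, with the kernel types reduced to two flag bits and the state/permission/count comparison elided (all names below are illustrative, not yuzu API):

#include <cassert>
#include <cstdint>

// Stand-ins for the AllLeft/AllRight edge flags in the real enum.
enum DisableMerge : std::uint8_t { None = 0, Left = 1 << 0, Right = 1 << 1 };

struct Block {
    static constexpr std::uint64_t PageSize = 0x1000;

    std::uint64_t address;
    std::uint64_t num_pages;
    std::uint8_t disable_merge;

    // Split keeps left-edge flags on the new left half and right-edge
    // flags on the remaining right half, mirroring Split() above.
    Block SplitAt(std::uint64_t addr) {
        Block left{address, (addr - address) / PageSize,
                   static_cast<std::uint8_t>(disable_merge & Left)};
        num_pages -= left.num_pages;
        address = addr;
        disable_merge = static_cast<std::uint8_t>(disable_merge & Right);
        return left;
    }

    // Mergeable only if our right edge and the neighbor's left edge are
    // unpinned, mirroring CanMergeWith() above (property check elided).
    bool CanMergeWith(const Block& rhs) const {
        return (disable_merge & Right) == 0 && (rhs.disable_merge & Left) == 0;
    }
};

int main() {
    Block b{0x10000, 8, Left | Right};
    Block left = b.SplitAt(0x12000);
    // The pinned edges land on the matching halves...
    assert(left.disable_merge == Left && b.disable_merge == Right);
    // ...so the two halves can still coalesce back into the original block.
    assert(left.CanMergeWith(b));
}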
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984f..cf4c1e371 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/memory_types.h"
namespace Kernel {
-KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
- : start_addr{start_addr_}, end_addr{end_addr_} {
- const u64 num_pages{(end_addr - start_addr) / PageSize};
- memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None);
-}
+KMemoryBlockManager::KMemoryBlockManager() = default;
-KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
- auto node{memory_block_tree.begin()};
- while (node != end()) {
- const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
- if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
- return node;
- }
- node = std::next(node);
- }
- return end();
+Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+ // Allocate a block to encapsulate the address space, insert it into the tree.
+ KMemoryBlock* start_block = slab_manager->Allocate();
+ R_UNLESS(start_block != nullptr, ResultOutOfResource);
+
+ // Set our start and end.
+ m_start_address = st;
+ m_end_address = nd;
+ ASSERT(Common::IsAligned(m_start_address, PageSize));
+ ASSERT(Common::IsAligned(m_end_address, PageSize));
+
+ // Initialize and insert the block.
+ start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
+ KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
+ m_memory_block_tree.insert(*start_block);
+
+ R_SUCCEED();
}
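The R_SUCCEED()/R_UNLESS()/R_TRY() macros used above, and throughout the rest of this diff in place of explicit `return ResultSuccess`, follow the kernel's result-macro idiom. A minimal self-contained sketch of that idiom; these are not the real definitions, which live in the kernel's result headers and carry more machinery:

#include <cstdint>

struct Result {
    std::uint32_t raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultOutOfResource{0xDEAD}; // illustrative code

#define R_SUCCEED() return ResultSuccess
#define R_UNLESS(cond, res) \
    do { \
        if (!(cond)) { \
            return (res); \
        } \
    } while (0)
#define R_TRY(expr) \
    do { \
        if (const Result r_ = (expr); r_.IsError()) { \
            return r_; \
        } \
    } while (0)

Result Allocate(int** out) {
    static int storage;
    R_UNLESS(out != nullptr, ResultOutOfResource);
    *out = &storage;
    R_SUCCEED();
}

Result AllocatePair(int** a, int** b) {
    R_TRY(Allocate(a)); // early-returns the failing Result
    R_TRY(Allocate(b));
    R_SUCCEED();
}

int main() {
    int* p = nullptr;
    return AllocatePair(&p, nullptr).IsError() ? 0 : 1;
}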
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
- std::size_t num_pages, std::size_t align,
- std::size_t offset, std::size_t guard_pages) {
- if (num_pages == 0) {
- return {};
+void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
+ HostUnmapCallback&& host_unmap_callback) {
+ // Erase every block until we have none left.
+ auto it = m_memory_block_tree.begin();
+ while (it != m_memory_block_tree.end()) {
+ KMemoryBlock* block = std::addressof(*it);
+ it = m_memory_block_tree.erase(it);
+ host_unmap_callback(block->GetAddress(), block->GetSize());
+ slab_manager->Free(block);
}
- const VAddr region_end{region_start + region_num_pages * PageSize};
- const VAddr region_last{region_end - 1};
- for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
- const auto info{it->GetMemoryInfo()};
- if (region_last < info.GetAddress()) {
- break;
- }
+ ASSERT(m_memory_block_tree.empty());
+}
- if (info.state != KMemoryState::Free) {
- continue;
- }
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const {
+ if (num_pages > 0) {
+ const VAddr region_end = region_start + region_num_pages * PageSize;
+ const VAddr region_last = region_end - 1;
+ for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
+ it++) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+ if (region_last < info.GetAddress()) {
+ break;
+ }
+ if (info.m_state != KMemoryState::Free) {
+ continue;
+ }
- VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
- area += guard_pages * PageSize;
+ VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+ area += guard_pages * PageSize;
- const VAddr offset_area{Common::AlignDown(area, align) + offset};
- area = (area <= offset_area) ? offset_area : offset_area + align;
+ const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+ area = (area <= offset_area) ? offset_area : offset_area + alignment;
- const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
- const VAddr area_last{area_end - 1};
+ const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
+ const VAddr area_last = area_end - 1;
- if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
- area_last <= info.GetLastAddress()) {
- return area;
+ if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+ area_last <= info.GetLastAddress()) {
+ return area;
+ }
}
}
return {};
}
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
- KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
- KMemoryState state, KMemoryPermission perm,
- KMemoryAttribute attribute) {
- const VAddr update_end_addr{addr + num_pages * PageSize};
- iterator node{memory_block_tree.begin()};
+void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
+ VAddr address, size_t num_pages) {
+ // Find the iterator now that we've updated.
+ iterator it = this->FindIterator(address);
+ if (address != m_start_address) {
+ it--;
+ }
- prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
+ // Coalesce blocks that we can.
+ while (true) {
+ iterator prev = it++;
+ if (it == m_memory_block_tree.end()) {
+ break;
+ }
- while (node != memory_block_tree.end()) {
- KMemoryBlock* block{&(*node)};
- iterator next_node{std::next(node)};
- const VAddr cur_addr{block->GetAddress()};
- const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+ if (prev->CanMergeWith(*it)) {
+ KMemoryBlock* block = std::addressof(*it);
+ m_memory_block_tree.erase(it);
+ prev->Add(*block);
+ allocator->Free(block);
+ it = prev;
+ }
- if (addr < cur_end_addr && cur_addr < update_end_addr) {
- if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
- node = next_node;
- continue;
- }
+ if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+ break;
+ }
+ }
+}
- iterator new_node{node};
- if (addr > cur_addr) {
- memory_block_tree.insert(node, block->Split(addr));
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ size_t num_pages, KMemoryState state, KMemoryPermission perm,
+ KMemoryAttribute attr,
+ KMemoryBlockDisableMergeAttribute set_disable_attr,
+ KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+ // Ensure for auditing that we never end up with an invalid tree.
+ KScopedMemoryBlockManagerAuditor auditor(this);
+ ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+ KMemoryAttribute::None);
+
+ VAddr cur_address = address;
+ size_t remaining_pages = num_pages;
+ iterator it = this->FindIterator(address);
+
+ while (remaining_pages > 0) {
+ const size_t remaining_size = remaining_pages * PageSize;
+ KMemoryInfo cur_info = it->GetMemoryInfo();
+ if (it->HasProperties(state, perm, attr)) {
+ // If we already have the right properties, just advance.
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+ remaining_pages = 0;
+ cur_address += remaining_size;
+ } else {
+ remaining_pages =
+ (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+ cur_address = cur_info.GetEndAddress();
}
+ } else {
+ // If we need to, create a new block before and insert it.
+ if (cur_info.GetAddress() != cur_address) {
+ KMemoryBlock* new_block = allocator->Allocate();
+
+ it->Split(new_block, cur_address);
+ it = m_memory_block_tree.insert(*new_block);
+ it++;
- if (update_end_addr < cur_end_addr) {
- new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+ cur_info = it->GetMemoryInfo();
+ cur_address = cur_info.GetAddress();
}
- new_node->Update(state, perm, attribute);
+ // If we need to, create a new block after and insert it.
+ if (cur_info.GetSize() > remaining_size) {
+ KMemoryBlock* new_block = allocator->Allocate();
- MergeAdjacent(new_node, next_node);
- }
+ it->Split(new_block, cur_address + remaining_size);
+ it = m_memory_block_tree.insert(*new_block);
- if (cur_end_addr - 1 >= update_end_addr - 1) {
- break;
- }
+ cur_info = it->GetMemoryInfo();
+ }
- node = next_node;
+ // Update block state.
+ it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+ static_cast<u8>(clear_disable_attr));
+ cur_address += cur_info.GetSize();
+ remaining_pages -= cur_info.GetNumPages();
+ }
+ it++;
}
+
+ this->CoalesceForUpdate(allocator, address, num_pages);
}
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
- KMemoryPermission perm, KMemoryAttribute attribute) {
- const VAddr update_end_addr{addr + num_pages * PageSize};
- iterator node{memory_block_tree.begin()};
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+ VAddr address, size_t num_pages, KMemoryState test_state,
+ KMemoryPermission test_perm, KMemoryAttribute test_attr,
+ KMemoryState state, KMemoryPermission perm,
+ KMemoryAttribute attr) {
+ // Ensure for auditing that we never end up with an invalid tree.
+ KScopedMemoryBlockManagerAuditor auditor(this);
+ ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+ KMemoryAttribute::None);
+
+ VAddr cur_address = address;
+ size_t remaining_pages = num_pages;
+ iterator it = this->FindIterator(address);
+
+ while (remaining_pages > 0) {
+ const size_t remaining_size = remaining_pages * PageSize;
+ KMemoryInfo cur_info = it->GetMemoryInfo();
+ if (it->HasProperties(test_state, test_perm, test_attr) &&
+ !it->HasProperties(state, perm, attr)) {
+ // If we need to, create a new block before and insert it.
+ if (cur_info.GetAddress() != cur_address) {
+ KMemoryBlock* new_block = allocator->Allocate();
+
+ it->Split(new_block, cur_address);
+ it = m_memory_block_tree.insert(*new_block);
+ it++;
+
+ cur_info = it->GetMemoryInfo();
+ cur_address = cur_info.GetAddress();
+ }
- while (node != memory_block_tree.end()) {
- KMemoryBlock* block{&(*node)};
- iterator next_node{std::next(node)};
- const VAddr cur_addr{block->GetAddress()};
- const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+ // If we need to, create a new block after and insert it.
+ if (cur_info.GetSize() > remaining_size) {
+ KMemoryBlock* new_block = allocator->Allocate();
- if (addr < cur_end_addr && cur_addr < update_end_addr) {
- iterator new_node{node};
+ it->Split(new_block, cur_address + remaining_size);
+ it = m_memory_block_tree.insert(*new_block);
- if (addr > cur_addr) {
- memory_block_tree.insert(node, block->Split(addr));
+ cur_info = it->GetMemoryInfo();
}
- if (update_end_addr < cur_end_addr) {
- new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+ // Update block state.
+ it->Update(state, perm, attr, false, 0, 0);
+ cur_address += cur_info.GetSize();
+ remaining_pages -= cur_info.GetNumPages();
+ } else {
+ // If we already have the right properties, just advance.
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+ remaining_pages = 0;
+ cur_address += remaining_size;
+ } else {
+ remaining_pages =
+ (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+ cur_address = cur_info.GetEndAddress();
}
-
- new_node->Update(state, perm, attribute);
-
- MergeAdjacent(new_node, next_node);
- }
-
- if (cur_end_addr - 1 >= update_end_addr - 1) {
- break;
}
-
- node = next_node;
+ it++;
}
+
+ this->CoalesceForUpdate(allocator, address, num_pages);
}
-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ size_t num_pages, MemoryBlockLockFunction lock_func,
KMemoryPermission perm) {
- const VAddr update_end_addr{addr + num_pages * PageSize};
- iterator node{memory_block_tree.begin()};
+ // Ensure for auditing that we never end up with an invalid tree.
+ KScopedMemoryBlockManagerAuditor auditor(this);
+ ASSERT(Common::IsAligned(address, PageSize));
- while (node != memory_block_tree.end()) {
- KMemoryBlock* block{&(*node)};
- iterator next_node{std::next(node)};
- const VAddr cur_addr{block->GetAddress()};
- const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+ VAddr cur_address = address;
+ size_t remaining_pages = num_pages;
+ iterator it = this->FindIterator(address);
- if (addr < cur_end_addr && cur_addr < update_end_addr) {
- iterator new_node{node};
+ const VAddr end_address = address + (num_pages * PageSize);
- if (addr > cur_addr) {
- memory_block_tree.insert(node, block->Split(addr));
- }
+ while (remaining_pages > 0) {
+ const size_t remaining_size = remaining_pages * PageSize;
+ KMemoryInfo cur_info = it->GetMemoryInfo();
- if (update_end_addr < cur_end_addr) {
- new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
- }
+ // If we need to, create a new block before and insert it.
+ if (cur_info.m_address != cur_address) {
+ KMemoryBlock* new_block = allocator->Allocate();
- lock_func(new_node, perm);
+ it->Split(new_block, cur_address);
+ it = m_memory_block_tree.insert(*new_block);
+ it++;
- MergeAdjacent(new_node, next_node);
+ cur_info = it->GetMemoryInfo();
+ cur_address = cur_info.GetAddress();
}
- if (cur_end_addr - 1 >= update_end_addr - 1) {
- break;
+ if (cur_info.GetSize() > remaining_size) {
+ // If we need to, create a new block after and insert it.
+ KMemoryBlock* new_block = allocator->Allocate();
+
+ it->Split(new_block, cur_address + remaining_size);
+ it = m_memory_block_tree.insert(*new_block);
+
+ cur_info = it->GetMemoryInfo();
}
- node = next_node;
+ // Call the locked update function.
+ (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+ cur_info.GetEndAddress() == end_address);
+ cur_address += cur_info.GetSize();
+ remaining_pages -= cur_info.GetNumPages();
+ it++;
}
-}
-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
- const_iterator it{FindIterator(start)};
- KMemoryInfo info{};
- do {
- info = it->GetMemoryInfo();
- func(info);
- it = std::next(it);
- } while (info.addr + info.size - 1 < end - 1 && it != cend());
+ this->CoalesceForUpdate(allocator, address, num_pages);
}
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
- KMemoryBlock* block{&(*it)};
-
- auto EraseIt = [&](const iterator it_to_erase) {
- if (next_it == it_to_erase) {
- next_it = std::next(next_it);
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+ // Loop over every block, ensuring that we are sorted and coalesced.
+ auto it = m_memory_block_tree.cbegin();
+ auto prev = it++;
+ while (it != m_memory_block_tree.cend()) {
+ const KMemoryInfo prev_info = prev->GetMemoryInfo();
+ const KMemoryInfo cur_info = it->GetMemoryInfo();
+
+ // Sequential blocks which can be merged should be merged.
+ if (prev->CanMergeWith(*it)) {
+ return false;
}
- memory_block_tree.erase(it_to_erase);
- };
- if (it != memory_block_tree.begin()) {
- KMemoryBlock* prev{&(*std::prev(it))};
-
- if (block->HasSameProperties(*prev)) {
- const iterator prev_it{std::prev(it)};
+ // Sequential blocks should be sequential.
+ if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+ return false;
+ }
- prev->Add(block->GetNumPages());
- EraseIt(it);
+ // If the block is ipc locked, it must have a count.
+ if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+ cur_info.m_ipc_lock_count == 0) {
+ return false;
+ }
- it = prev_it;
- block = prev;
+ // If the block is device shared, it must have a count.
+ if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+ cur_info.m_device_use_count == 0) {
+ return false;
}
+
+ // Advance the iterator.
+ prev = it++;
}
- if (it != cend()) {
- const KMemoryBlock* const next{&(*std::next(it))};
+ // The loop above may not check the last block, so check it here.
+ if (prev != m_memory_block_tree.cend()) {
+ const KMemoryInfo prev_info = prev->GetMemoryInfo();
+ // If the block is ipc locked, it must have a count.
+ if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+ prev_info.m_ipc_lock_count == 0) {
+ return false;
+ }
- if (block->HasSameProperties(*next)) {
- block->Add(next->GetNumPages());
- EraseIt(std::next(it));
+ // If the block is device shared, it must have a count.
+ if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+ prev_info.m_device_use_count == 0) {
+ return false;
}
}
+
+ return true;
}
} // namespace Kernel
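For reference, a rough standalone sketch of the Update() algorithm above: walk the blocks covering the range, split before and after where needed, apply the new state, then coalesce. A std::map keyed by base address stands in for the intrusive red-black tree, block properties are reduced to an int, and the allocator is elided (names below are illustrative):

#include <cassert>
#include <cstddef>
#include <map>

using Addr = unsigned long long;
constexpr Addr PageSize = 0x1000;

struct Block {
    std::size_t num_pages;
    int state;
};
using Tree = std::map<Addr, Block>; // base address -> block

void Update(Tree& tree, Addr address, std::size_t num_pages, int state) {
    Addr cur = address;
    std::size_t remaining = num_pages;
    // Assumes the tree covers the whole address space, as the kernel's does.
    auto it = std::prev(tree.upper_bound(cur)); // block containing cur
    while (remaining > 0) {
        const Block blk = it->second;
        // Split before: keep [base, cur) as-is, continue from cur.
        if (it->first != cur) {
            const std::size_t left = (cur - it->first) / PageSize;
            it->second.num_pages = left;
            it = tree.emplace(cur, Block{blk.num_pages - left, blk.state}).first;
        }
        // Split after: keep the tail past the range as-is.
        if (it->second.num_pages > remaining) {
            tree.emplace(cur + remaining * PageSize,
                         Block{it->second.num_pages - remaining, blk.state});
            it->second.num_pages = remaining;
        }
        // Update this block and advance.
        it->second.state = state;
        cur += it->second.num_pages * PageSize;
        remaining -= it->second.num_pages;
        ++it;
    }
    // Coalesce: merge adjacent blocks with identical properties.
    for (auto prev = tree.begin(), next = std::next(prev); next != tree.end();) {
        if (prev->second.state == next->second.state) {
            prev->second.num_pages += next->second.num_pages;
            next = tree.erase(next);
        } else {
            prev = next++;
        }
    }
}

int main() {
    Tree tree{{0x0, {16, /*Free*/ 0}}};    // one 16-page free block
    Update(tree, 0x4000, 4, /*Normal*/ 1); // carve 4 pages out of the middle
    assert(tree.size() == 3);              // free | normal | free
    Update(tree, 0x4000, 4, /*Free*/ 0);   // revert them
    assert(tree.size() == 1);              // coalesced back into one block
}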
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b89..9b5873883 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
#pragma once
#include <functional>
-#include <list>
+#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"
namespace Kernel {
+class KMemoryBlockManagerUpdateAllocator {
+public:
+ static constexpr size_t MaxBlocks = 2;
+
+private:
+ KMemoryBlock* m_blocks[MaxBlocks];
+ size_t m_index;
+ KMemoryBlockSlabManager* m_slab_manager;
+
+private:
+ Result Initialize(size_t num_blocks) {
+ // Check num blocks.
+ ASSERT(num_blocks <= MaxBlocks);
+
+ // Set index.
+ m_index = MaxBlocks - num_blocks;
+
+ // Allocate the blocks.
+ for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+ m_blocks[m_index + i] = m_slab_manager->Allocate();
+ R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+ }
+
+ R_SUCCEED();
+ }
+
+public:
+ KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+ size_t num_blocks = MaxBlocks)
+ : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+ *out_result = this->Initialize(num_blocks);
+ }
+
+ ~KMemoryBlockManagerUpdateAllocator() {
+ for (const auto& block : m_blocks) {
+ if (block != nullptr) {
+ m_slab_manager->Free(block);
+ }
+ }
+ }
+
+ KMemoryBlock* Allocate() {
+ ASSERT(m_index < MaxBlocks);
+ ASSERT(m_blocks[m_index] != nullptr);
+ KMemoryBlock* block = nullptr;
+ std::swap(block, m_blocks[m_index++]);
+ return block;
+ }
+
+ void Free(KMemoryBlock* block) {
+ ASSERT(m_index <= MaxBlocks);
+ ASSERT(block != nullptr);
+ if (m_index == 0) {
+ m_slab_manager->Free(block);
+ } else {
+ m_blocks[--m_index] = block;
+ }
+ }
+};
+
class KMemoryBlockManager final {
public:
- using MemoryBlockTree = std::list<KMemoryBlock>;
+ using MemoryBlockTree =
+ Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+ using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+ bool right);
using iterator = MemoryBlockTree::iterator;
using const_iterator = MemoryBlockTree::const_iterator;
public:
- KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+ KMemoryBlockManager();
+
+ using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+ Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+ void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
iterator end() {
- return memory_block_tree.end();
+ return m_memory_block_tree.end();
}
const_iterator end() const {
- return memory_block_tree.end();
+ return m_memory_block_tree.end();
}
const_iterator cend() const {
- return memory_block_tree.cend();
+ return m_memory_block_tree.cend();
}
- iterator FindIterator(VAddr addr);
+ VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+ size_t alignment, size_t offset, size_t guard_pages) const;
- VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
- std::size_t align, std::size_t offset, std::size_t guard_pages);
+ void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+ KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+ KMemoryBlockDisableMergeAttribute set_disable_attr,
+ KMemoryBlockDisableMergeAttribute clear_disable_attr);
+ void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+ MemoryBlockLockFunction lock_func, KMemoryPermission perm);
- void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
- KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
- KMemoryPermission perm, KMemoryAttribute attribute);
+ void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+ KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+ KMemoryAttribute attr);
- void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
- KMemoryPermission perm = KMemoryPermission::None,
- KMemoryAttribute attribute = KMemoryAttribute::None);
-
- using LockFunc = std::function<void(iterator, KMemoryPermission)>;
- void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
- KMemoryPermission perm);
+ iterator FindIterator(VAddr address) const {
+ return m_memory_block_tree.find(KMemoryBlock(
+ address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+ }
- using IterateFunc = std::function<void(const KMemoryInfo&)>;
- void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+ const KMemoryBlock* FindBlock(VAddr address) const {
+ if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+ return std::addressof(*it);
+ }
- KMemoryBlock& FindBlock(VAddr addr) {
- return *FindIterator(addr);
+ return nullptr;
}
+ // Debug.
+ bool CheckState() const;
+
private:
- void MergeAdjacent(iterator it, iterator& next_it);
+ void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ size_t num_pages);
- [[maybe_unused]] const VAddr start_addr;
- [[maybe_unused]] const VAddr end_addr;
+ MemoryBlockTree m_memory_block_tree;
+ VAddr m_start_address{};
+ VAddr m_end_address{};
+};
- MemoryBlockTree memory_block_tree;
+class KScopedMemoryBlockManagerAuditor {
+public:
+ explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+ ASSERT(m_manager->CheckState());
+ }
+ explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+ : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+ ~KScopedMemoryBlockManagerAuditor() {
+ ASSERT(m_manager->CheckState());
+ }
+
+private:
+ KMemoryBlockManager* m_manager;
};
} // namespace Kernel
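A standalone sketch of the preallocation pattern KMemoryBlockManagerUpdateAllocator implements: at most two fresh blocks (one split before the range, one after, hence MaxBlocks == 2) are reserved before the tree is touched, so the update itself cannot fail part-way through. Types here are simplified; the real allocator draws from the slab manager and reports a Result:

#include <array>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>

struct Block { /* contents elided */ };

class UpdateAllocator {
public:
    static constexpr std::size_t MaxBlocks = 2; // one split before, one after

    // Reserve everything up front; this is the only step that may fail.
    bool Initialize() {
        for (auto& b : m_blocks) {
            b.reset(new (std::nothrow) Block);
            if (b == nullptr) {
                return false;
            }
        }
        return true;
    }

    // Infallible once Initialize() has succeeded.
    Block* Allocate() {
        assert(m_index < MaxBlocks);
        return m_blocks[m_index++].release();
    }

private:
    std::array<std::unique_ptr<Block>, MaxBlocks> m_blocks{};
    std::size_t m_index = 0;
};

int main() {
    UpdateAllocator allocator;
    assert(allocator.Initialize());                      // can fail here, before any tree edit
    std::unique_ptr<Block> before{allocator.Allocate()}; // never fails mid-update
    std::unique_ptr<Block> after{allocator.Allocate()};
}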
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 5b0a9963a..646711505 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
// Set all the allocated memory.
for (const auto& block : out->Nodes()) {
- std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+ std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
block.GetSize());
}
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 1a0bf4439..0c16dded4 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
ASSERT(Common::IsAligned(phys_addr, PageSize));
- return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+ return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}
} // namespace Kernel
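The GetPointer<T>() calls in the two hunks above replace call-site reinterpret_casts with a typed accessor on DeviceMemory. A minimal sketch of that shape, using an illustrative backing store rather than the real DeviceMemory layout:

#include <cstdint>

class DeviceMemory {
public:
    template <typename T = void>
    T* GetPointer(std::uint64_t phys_addr) {
        // One reinterpret_cast, centralized here instead of at every call site.
        return reinterpret_cast<T*>(m_backing + (phys_addr - m_base));
    }

private:
    alignas(alignof(std::uint64_t)) std::uint8_t m_buffer[0x1000]{};
    std::uint8_t* m_backing = m_buffer; // illustrative backing store
    std::uint64_t m_base = 0x80000000;  // illustrative DRAM base
};

int main() {
    DeviceMemory mem;
    *mem.GetPointer<std::uint32_t>(0x80000010) = 0xCAFE;
    return *mem.GetPointer<std::uint32_t>(0x80000010) == 0xCAFE ? 0 : 1;
}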
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index 7e50dc1d1..aef06e213 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -13,6 +13,7 @@ namespace Kernel {
class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
+ explicit KPageBuffer(KernelCore&) {}
KPageBuffer() = default;
static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index d975de844..307e491cb 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {
using namespace Common::Literals;
-constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
switch (as_type) {
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
} // namespace
KPageTable::KPageTable(Core::System& system_)
- : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+ : m_general_lock{system_.Kernel()},
+ m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
KPageTable::~KPageTable() = default;
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, std::size_t code_size,
+ VAddr code_addr, size_t code_size,
+ KMemoryBlockSlabManager* mem_block_slab_manager,
KMemoryManager::Pool pool) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
+ return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
};
const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
+ return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
};
// Set our width and heap/alias sizes
- address_space_width = GetAddressSpaceWidthFromType(as_type);
+ m_address_space_width = GetAddressSpaceWidthFromType(as_type);
const VAddr start = 0;
- const VAddr end{1ULL << address_space_width};
- std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
- std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
+ const VAddr end{1ULL << m_address_space_width};
+ size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
+ size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
ASSERT(code_addr < code_addr + code_size);
ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
}
// Set code regions and determine remaining
- constexpr std::size_t RegionAlignment{2_MiB};
+ constexpr size_t RegionAlignment{2_MiB};
VAddr process_code_start{};
VAddr process_code_end{};
- std::size_t stack_region_size{};
- std::size_t kernel_map_region_size{};
+ size_t stack_region_size{};
+ size_t kernel_map_region_size{};
- if (address_space_width == 39) {
+ if (m_address_space_width == 39) {
alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
- code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
- alias_code_region_start = code_region_start;
- alias_code_region_end = code_region_end;
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = m_code_region_end;
process_code_start = Common::AlignDown(code_addr, RegionAlignment);
process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
} else {
stack_region_size = 0;
kernel_map_region_size = 0;
- code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
- code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- stack_region_start = code_region_start;
- alias_code_region_start = code_region_start;
- alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
- GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
- stack_region_end = code_region_end;
- kernel_map_region_start = code_region_start;
- kernel_map_region_end = code_region_end;
- process_code_start = code_region_start;
- process_code_end = code_region_end;
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_stack_region_start = m_code_region_start;
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+ GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+ m_stack_region_end = m_code_region_end;
+ m_kernel_map_region_start = m_code_region_start;
+ m_kernel_map_region_end = m_code_region_end;
+ process_code_start = m_code_region_start;
+ process_code_end = m_code_region_end;
}
// Set other basic fields
- is_aslr_enabled = enable_aslr;
- address_space_start = start;
- address_space_end = end;
- is_kernel = false;
+ m_enable_aslr = enable_aslr;
+ m_enable_device_address_space_merge = false;
+ m_address_space_start = start;
+ m_address_space_end = end;
+ m_is_kernel = false;
+ m_memory_block_slab_manager = mem_block_slab_manager;
// Determine the region where we can place the still-undetermined regions
VAddr alloc_start{};
- std::size_t alloc_size{};
- if ((process_code_start - code_region_start) >= (end - process_code_end)) {
- alloc_start = code_region_start;
- alloc_size = process_code_start - code_region_start;
+ size_t alloc_size{};
+ if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
+ alloc_start = m_code_region_start;
+ alloc_size = process_code_start - m_code_region_start;
} else {
alloc_start = process_code_end;
alloc_size = end - process_code_end;
}
- const std::size_t needed_size{
- (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
- if (alloc_size < needed_size) {
- ASSERT(false);
- return ResultOutOfMemory;
- }
+ const size_t needed_size =
+ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+ R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
- const std::size_t remaining_size{alloc_size - needed_size};
+ const size_t remaining_size{alloc_size - needed_size};
// Determine random placements for each region
- std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+ size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
if (enable_aslr) {
alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
RegionAlignment;
@@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
}
// Setup heap and alias regions
- alias_region_start = alloc_start + alias_rnd;
- alias_region_end = alias_region_start + alias_region_size;
- heap_region_start = alloc_start + heap_rnd;
- heap_region_end = heap_region_start + heap_region_size;
+ m_alias_region_start = alloc_start + alias_rnd;
+ m_alias_region_end = m_alias_region_start + alias_region_size;
+ m_heap_region_start = alloc_start + heap_rnd;
+ m_heap_region_end = m_heap_region_start + heap_region_size;
if (alias_rnd <= heap_rnd) {
- heap_region_start += alias_region_size;
- heap_region_end += alias_region_size;
+ m_heap_region_start += alias_region_size;
+ m_heap_region_end += alias_region_size;
} else {
- alias_region_start += heap_region_size;
- alias_region_end += heap_region_size;
+ m_alias_region_start += heap_region_size;
+ m_alias_region_end += heap_region_size;
}
// Setup stack region
if (stack_region_size) {
- stack_region_start = alloc_start + stack_rnd;
- stack_region_end = stack_region_start + stack_region_size;
+ m_stack_region_start = alloc_start + stack_rnd;
+ m_stack_region_end = m_stack_region_start + stack_region_size;
if (alias_rnd < stack_rnd) {
- stack_region_start += alias_region_size;
- stack_region_end += alias_region_size;
+ m_stack_region_start += alias_region_size;
+ m_stack_region_end += alias_region_size;
} else {
- alias_region_start += stack_region_size;
- alias_region_end += stack_region_size;
+ m_alias_region_start += stack_region_size;
+ m_alias_region_end += stack_region_size;
}
if (heap_rnd < stack_rnd) {
- stack_region_start += heap_region_size;
- stack_region_end += heap_region_size;
+ m_stack_region_start += heap_region_size;
+ m_stack_region_end += heap_region_size;
} else {
- heap_region_start += stack_region_size;
- heap_region_end += stack_region_size;
+ m_heap_region_start += stack_region_size;
+ m_heap_region_end += stack_region_size;
}
}
// Setup kernel map region
if (kernel_map_region_size) {
- kernel_map_region_start = alloc_start + kmap_rnd;
- kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+ m_kernel_map_region_start = alloc_start + kmap_rnd;
+ m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
if (alias_rnd < kmap_rnd) {
- kernel_map_region_start += alias_region_size;
- kernel_map_region_end += alias_region_size;
+ m_kernel_map_region_start += alias_region_size;
+ m_kernel_map_region_end += alias_region_size;
} else {
- alias_region_start += kernel_map_region_size;
- alias_region_end += kernel_map_region_size;
+ m_alias_region_start += kernel_map_region_size;
+ m_alias_region_end += kernel_map_region_size;
}
if (heap_rnd < kmap_rnd) {
- kernel_map_region_start += heap_region_size;
- kernel_map_region_end += heap_region_size;
+ m_kernel_map_region_start += heap_region_size;
+ m_kernel_map_region_end += heap_region_size;
} else {
- heap_region_start += kernel_map_region_size;
- heap_region_end += kernel_map_region_size;
+ m_heap_region_start += kernel_map_region_size;
+ m_heap_region_end += kernel_map_region_size;
}
if (stack_region_size) {
if (stack_rnd < kmap_rnd) {
- kernel_map_region_start += stack_region_size;
- kernel_map_region_end += stack_region_size;
+ m_kernel_map_region_start += stack_region_size;
+ m_kernel_map_region_end += stack_region_size;
} else {
- stack_region_start += kernel_map_region_size;
- stack_region_end += kernel_map_region_size;
+ m_stack_region_start += kernel_map_region_size;
+ m_stack_region_end += kernel_map_region_size;
}
}
}
// Set heap members
- current_heap_end = heap_region_start;
- max_heap_size = 0;
- max_physical_memory_size = 0;
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_max_physical_memory_size = 0;
// Ensure that our regions are inside our address space
auto IsInAddressSpace = [&](VAddr addr) {
- return address_space_start <= addr && addr <= address_space_end;
+ return m_address_space_start <= addr && addr <= m_address_space_end;
};
- ASSERT(IsInAddressSpace(alias_region_start));
- ASSERT(IsInAddressSpace(alias_region_end));
- ASSERT(IsInAddressSpace(heap_region_start));
- ASSERT(IsInAddressSpace(heap_region_end));
- ASSERT(IsInAddressSpace(stack_region_start));
- ASSERT(IsInAddressSpace(stack_region_end));
- ASSERT(IsInAddressSpace(kernel_map_region_start));
- ASSERT(IsInAddressSpace(kernel_map_region_end));
+ ASSERT(IsInAddressSpace(m_alias_region_start));
+ ASSERT(IsInAddressSpace(m_alias_region_end));
+ ASSERT(IsInAddressSpace(m_heap_region_start));
+ ASSERT(IsInAddressSpace(m_heap_region_end));
+ ASSERT(IsInAddressSpace(m_stack_region_start));
+ ASSERT(IsInAddressSpace(m_stack_region_end));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_end));
// Ensure that we selected regions that don't overlap
- const VAddr alias_start{alias_region_start};
- const VAddr alias_last{alias_region_end - 1};
- const VAddr heap_start{heap_region_start};
- const VAddr heap_last{heap_region_end - 1};
- const VAddr stack_start{stack_region_start};
- const VAddr stack_last{stack_region_end - 1};
- const VAddr kmap_start{kernel_map_region_start};
- const VAddr kmap_last{kernel_map_region_end - 1};
+ const VAddr alias_start{m_alias_region_start};
+ const VAddr alias_last{m_alias_region_end - 1};
+ const VAddr heap_start{m_heap_region_start};
+ const VAddr heap_last{m_heap_region_end - 1};
+ const VAddr stack_start{m_stack_region_start};
+ const VAddr stack_last{m_stack_region_end - 1};
+ const VAddr kmap_start{m_kernel_map_region_start};
+ const VAddr kmap_last{m_kernel_map_region_end - 1};
ASSERT(alias_last < heap_start || heap_last < alias_start);
ASSERT(alias_last < stack_start || stack_last < alias_start);
ASSERT(alias_last < kmap_start || kmap_last < alias_start);
ASSERT(heap_last < stack_start || stack_last < heap_start);
ASSERT(heap_last < kmap_start || kmap_last < heap_start);
- current_heap_end = heap_region_start;
- max_heap_size = 0;
- mapped_physical_memory_size = 0;
- memory_pool = pool;
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_memory_pool = pool;
+
+ m_page_table_impl = std::make_unique<Common::PageTable>();
+ m_page_table_impl->Resize(m_address_space_width, PageBits);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
- page_table_impl.Resize(address_space_width, PageBits);
+void KPageTable::Finalize() {
+ // Finalize memory blocks.
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
+ m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
+ });
- return InitializeMemoryLayout(start, end);
+ // Close the backing page table, as the destructor is not called for guest objects.
+ m_page_table_impl.reset();
}
-Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
@@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat
R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify that the destination memory is unmapped.
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+
+ // Allocate and open.
KPageGroup pg;
- R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
&pg, num_pages,
- KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
+ KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
- block_manager->Update(addr, num_pages, state, perm);
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify that the source memory is normal heap.
KMemoryState src_state{};
KMemoryPermission src_perm{};
- std::size_t num_src_allocator_blocks{};
+ size_t num_src_allocator_blocks{};
R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
src_address, size, KMemoryState::All, KMemoryState::Normal,
KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None));
// Verify that the destination memory is unmapped.
- std::size_t num_dst_allocator_blocks{};
+ size_t num_dst_allocator_blocks{};
R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
KMemoryState::Free, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryAttribute::None));
+ // Create an update allocator for the source.
+ Result src_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
// Map the code memory.
{
// Determine the number of pages being operated on.
- const std::size_t num_pages = size / PageSize;
+ const size_t num_pages = size / PageSize;
// Create page groups for the memory being mapped.
KPageGroup pg;
@@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size
unprot_guard.Cancel();
// Apply the memory block updates.
- block_manager->Update(src_address, num_pages, src_state, new_perm,
- KMemoryAttribute::Locked);
- block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
- KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify that the source memory is locked normal heap.
- std::size_t num_src_allocator_blocks{};
+ size_t num_src_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked));
// Verify that the destination memory is aliasable code.
- std::size_t num_dst_allocator_blocks{};
+ size_t num_dst_allocator_blocks{};
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
@@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
// Determine whether any pages being unmapped are code.
bool any_code_pages = false;
{
- KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
while (true) {
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
} else {
- system.InvalidateCpuInstructionCaches();
+ m_system.InvalidateCpuInstructionCaches();
}
}
});
@@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
// Unmap.
{
// Determine the number of pages being operated on.
- const std::size_t num_pages = size / PageSize;
+ const size_t num_pages = size / PageSize;
+
+ // Create an update allocator for the source.
+ Result src_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
// Unmap the aliased copy of the pages.
R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
@@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si
OperationType::ChangePermissions));
// Apply the memory block updates.
- block_manager->Update(dst_address, num_pages, KMemoryState::None);
- block_manager->Update(src_address, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
// Note that we reprotected pages.
reprotected_pages = true;
}
- return ResultSuccess;
+ R_SUCCEED();
}
-VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
- std::size_t num_pages, std::size_t alignment, std::size_t offset,
- std::size_t guard_pages) {
+VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+ size_t alignment, size_t offset, size_t guard_pages) {
VAddr address = 0;
if (num_pages <= region_num_pages) {
if (this->IsAslrEnabled()) {
- // Try to directly find a free area up to 8 times.
- for (std::size_t i = 0; i < 8; i++) {
- const std::size_t random_offset =
- KSystemControl::GenerateRandomRange(
- 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
- alignment;
- const VAddr candidate =
- Common::AlignDown((region_start + random_offset), alignment) + offset;
-
- KMemoryInfo info = this->QueryInfoImpl(candidate);
-
- if (info.state != KMemoryState::Free) {
- continue;
- }
- if (region_start > candidate) {
- continue;
- }
- if (info.GetAddress() + guard_pages * PageSize > candidate) {
- continue;
- }
-
- const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
- if (candidate_end > info.GetLastAddress()) {
- continue;
- }
- if (candidate_end > region_start + region_num_pages * PageSize - 1) {
- continue;
- }
-
- address = candidate;
- break;
- }
- // Fall back to finding the first free area with a random offset.
- if (address == 0) {
- // NOTE: Nintendo does not account for guard pages here.
- // This may theoretically cause an offset to be chosen that cannot be mapped. We
- // will account for guard pages.
- const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
- 0, region_num_pages - num_pages - guard_pages);
- address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
- region_num_pages - offset_pages, num_pages,
- alignment, offset, guard_pages);
- }
+ UNIMPLEMENTED();
}
-
// Find the first free area.
if (address == 0) {
- address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
- alignment, offset, guard_pages);
+ address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+ alignment, offset, guard_pages);
}
}
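
For reference, the deleted ASLR path above chose each candidate by scaling a random slot to the requested alignment and then bounds-checking it against the region. A deterministic sketch of that arithmetic, with the RNG call replaced by a fixed slot and illustrative helper names:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    using VAddr = std::uint64_t;
    constexpr std::uint64_t PageSize = 0x1000;

    constexpr VAddr AlignDown(VAddr value, std::uint64_t align) {
        return value - (value % align);
    }

    int main() {
        const VAddr region_start = 0x10000;
        const std::size_t region_num_pages = 64, num_pages = 4, guard_pages = 1;
        const std::uint64_t alignment = PageSize, offset = 0;

        // Stand-in for KSystemControl::GenerateRandomRange(0, max_slots).
        const std::uint64_t random_slot = 7;
        const std::uint64_t random_offset = random_slot * alignment;

        const VAddr candidate = AlignDown(region_start + random_offset, alignment) + offset;
        const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
        const VAddr region_last = region_start + region_num_pages * PageSize - 1;

        const bool fits = region_start <= candidate && candidate_end <= region_last;
        std::cout << std::hex << "candidate=0x" << candidate << " fits=" << fits << '\n';
    }
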
@@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+ R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+ ResultInvalidCurrentMemory);
// Prepare tracking variables.
PAddr cur_addr = next_entry.phys_addr;
@@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
size_t tot_size = cur_size;
// Iterate, adding to group as we go.
- const auto& memory_layout = system.Kernel().MemoryLayout();
+ const auto& memory_layout = m_system.Kernel().MemoryLayout();
while (tot_size < size) {
- R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+ R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
ResultInvalidCurrentMemory);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
@@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
R_TRY(pg.AddBlock(cur_addr, cur_pages));
- return ResultSuccess;
+ R_SUCCEED();
}
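
MakePageGroup's traversal loop amounts to run-length coalescing: walk the physical address of each page and extend the current block while pages remain physically contiguous. A self-contained sketch of that merge step, using simplified containers rather than the kernel's KPageGroup:

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    using PAddr = std::uint64_t;
    constexpr std::uint64_t PageSize = 0x1000;

    // Merge physically contiguous pages into (base, page_count) blocks,
    // mirroring the AddBlock calls in the traversal loop above.
    std::vector<std::pair<PAddr, std::uint64_t>> Coalesce(const std::vector<PAddr>& pages) {
        std::vector<std::pair<PAddr, std::uint64_t>> group;
        for (const PAddr phys : pages) {
            if (!group.empty() && group.back().first + group.back().second * PageSize == phys) {
                ++group.back().second; // page extends the current run
            } else {
                group.emplace_back(phys, 1); // cf. pg.AddBlock(cur_addr, cur_pages)
            }
        }
        return group;
    }

    int main() {
        const std::vector<PAddr> pages{0x8000, 0x9000, 0xA000, 0x20000};
        for (const auto& [base, count] : Coalesce(pages)) {
            std::cout << std::hex << "0x" << base << " x" << std::dec << count << '\n';
        }
    }
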
bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
const size_t size = num_pages * PageSize;
const auto& pg = pg_ll.Nodes();
- const auto& memory_layout = system.Kernel().MemoryLayout();
+ const auto& memory_layout = m_system.Kernel().MemoryLayout();
// Empty groups are necessarily invalid.
if (pg.empty()) {
@@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+ if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
return false;
}
@@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
// Iterate, comparing expected to actual.
while (tot_size < size) {
- if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+ if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
return false;
}
@@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr) {
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
- const std::size_t num_pages{size / PageSize};
+ const size_t num_pages{size / PageSize};
// Check that the memory is mapped in the destination process.
size_t num_allocator_blocks;
@@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
// Apply the memory block update.
- block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
- system.InvalidateCpuInstructionCaches();
+ m_system.InvalidateCpuInstructionCaches();
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
// Define iteration variables.
VAddr cur_address;
- std::size_t mapped_size;
+ size_t mapped_size;
// The entire mapping process can be retried.
while (true) {
// Check if the memory is already mapped.
{
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Iterate over the memory.
cur_address = address;
mapped_size = 0;
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
{
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(),
+ m_system.Kernel().CurrentProcess()->GetResourceLimit(),
LimitableResource::PhysicalMemory, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the new memory.
KPageGroup pg;
- R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&pg, (size - mapped_size) / PageSize,
- KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+ KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
// Map the memory.
{
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
size_t num_allocator_blocks = 0;
@@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
size_t checked_mapped_size = 0;
cur_address = address;
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
}
}
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto unmap_guard = detail::ScopeExit([&] {
@@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Iterate, unmapping the pages.
cur_address = address;
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
memory_reservation.Commit();
// Increase our tracked mapped size.
- mapped_physical_memory_size += (size - mapped_size);
+ m_mapped_physical_memory_size += (size - mapped_size);
// Update the relevant memory blocks.
- block_manager->Update(address, size / PageSize, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryState::Normal, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::None);
+ m_memory_block_manager.UpdateIfMatch(
+ std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
// Cancel our guard.
unmap_guard.Cancel();
- return ResultSuccess;
+ R_SUCCEED();
}
}
}
}
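
The repeated FindIterator/GetMemoryInfo walks in MapPhysicalMemory all follow one shape: iterate the sorted block list across [address, last_address] and accumulate the overlap of every already-mapped block with the requested range. A standalone sketch of that measurement pass, with simplified block records and illustrative values:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    using VAddr = std::uint64_t;

    struct Block {
        VAddr address;
        std::uint64_t size;
        bool free;
        VAddr Last() const { return address + size - 1; }
    };

    int main() {
        // Sorted, non-overlapping blocks (a stand-in for the block manager).
        const std::vector<Block> blocks{
            {0x0000, 0x4000, true}, {0x4000, 0x2000, false}, {0x6000, 0x8000, true}};

        const VAddr address = 0x2000, last_address = 0x7FFF;
        std::uint64_t mapped_size = 0;
        for (const Block& b : blocks) {
            if (b.Last() < address || last_address < b.address) {
                continue; // no overlap with the requested range
            }
            if (!b.free) {
                mapped_size += std::min(b.Last(), last_address) -
                               std::max(b.address, address) + 1;
            }
        }
        std::cout << std::hex << "mapped=0x" << mapped_size << '\n'; // 0x2000
    }
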
-Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
// Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
// Define iteration variables.
VAddr cur_address = 0;
- std::size_t mapped_size = 0;
- std::size_t num_allocator_blocks = 0;
+ size_t mapped_size = 0;
+ size_t num_allocator_blocks = 0;
// Check if the memory is mapped.
{
@@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
cur_address = address;
mapped_size = 0;
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
}
ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
auto remap_guard = detail::ScopeExit([&] {
@@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
cur_address = address;
// Iterate over the memory we unmapped.
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
auto pg_it = pg.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
@@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
});
// Iterate over the memory, unmapping as we go.
- auto it = block_manager->FindIterator(cur_address);
+ auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
// Check that the iterator is valid.
- ASSERT(it != block_manager->end());
+ ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
@@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
}
// Release the memory resource.
- mapped_physical_memory_size -= mapped_size;
- auto process{system.Kernel().CurrentProcess()};
+ m_mapped_physical_memory_size -= mapped_size;
+ auto process{m_system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
// Update memory blocks.
- block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
// TODO(bunnei): This is a workaround until the next set of changes, where we add reference
// counting for mapped pages. Until then, we must manually close the reference to the page
// group.
- system.Kernel().MemoryManager().Close(pg);
+ m_system.Kernel().MemoryManager().Close(pg);
// We succeeded.
remap_guard.Cancel();
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryState src_state{};
- CASCADE_CODE(CheckMemoryState(
- &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
- KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the source address's state.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
- if (IsRegionMapped(dst_addr, size)) {
- return ResultInvalidCurrentMemory;
- }
+ // Validate the destination address's state.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ // Create an update allocator for the source.
+ Result src_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the memory.
KPageGroup page_linked_list;
- const std::size_t num_pages{size / PageSize};
-
- AddRegionToPages(src_addr, num_pages, page_linked_list);
+ const size_t num_pages{size / PageSize};
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ AddRegionToPages(src_address, num_pages, page_linked_list);
{
+ // Reprotect the source as kernel-read/not mapped; the guard below undoes this on failure.
auto block_guard = detail::ScopeExit([&] {
- Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
+ Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
OperationType::ChangePermissions);
});
-
- CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
- OperationType::ChangePermissions));
- CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
+ R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
+ R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
block_guard.Cancel();
}
- block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
- KMemoryAttribute::Locked);
- block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
- KMemoryPermission::UserReadWrite);
-
- return ResultSuccess;
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+ new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::Stack, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
}
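
block_guard above (like unmap_guard and remap_guard earlier in this file) relies on one rollback idiom: arm a ScopeExit before the fallible steps, and Cancel() it only once everything has succeeded, so every early R_TRY exit restores the previous state. A minimal sketch of the idiom, simplified from yuzu's detail::ScopeExit:

    #include <iostream>
    #include <utility>

    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F f) : m_f(std::move(f)) {}
        ~ScopeExit() {
            if (m_active) {
                m_f(); // rollback runs on every path that didn't Cancel()
            }
        }
        void Cancel() { m_active = false; }

    private:
        F m_f;
        bool m_active = true;
    };

    bool MapSomething(bool fail_midway) {
        // Pretend we reprotected the source; arm the rollback first.
        ScopeExit guard([] { std::cout << "rollback: restore permissions\n"; });
        if (fail_midway) {
            return false; // guard fires here, undoing the partial work
        }
        guard.Cancel(); // success: keep the new state
        return true;
    }

    int main() {
        MapSomething(true);
        MapSomething(false);
    }
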
-Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
+Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the source address's state.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+ src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+ KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+ // Validate the destination address's state.
+ KMemoryPermission dst_perm;
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+ dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
- KMemoryState src_state{};
- CASCADE_CODE(CheckMemoryState(
- &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
- KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
- KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+ // Create an update allocator for the source.
+ Result src_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
- KMemoryPermission dst_perm{};
- CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size,
- KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+ // Create an update allocator for the destination.
+ Result dst_allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
KPageGroup src_pages;
KPageGroup dst_pages;
- const std::size_t num_pages{size / PageSize};
+ const size_t num_pages{size / PageSize};
- AddRegionToPages(src_addr, num_pages, src_pages);
- AddRegionToPages(dst_addr, num_pages, dst_pages);
+ AddRegionToPages(src_address, num_pages, src_pages);
+ AddRegionToPages(dst_address, num_pages, dst_pages);
- if (!dst_pages.IsEqual(src_pages)) {
- return ResultInvalidMemoryRegion;
- }
+ R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
{
- auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
+ auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
- CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
- CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
+ R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+ R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+ OperationType::ChangePermissions));
block_guard.Cancel();
}
- block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite);
- block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-
- return ResultSuccess;
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
}
Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
if (const auto result{
Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
result.IsError()) {
- const std::size_t num_pages{(addr - cur_addr) / PageSize};
+ // Unmap only the pages that were successfully mapped before the failure.
+ const size_t num_pages{(cur_addr - addr) / PageSize};
ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
.IsSuccess());
- return result;
+ R_RETURN(result);
}
cur_addr += node.GetNumPages() * PageSize;
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm) {
// Check that the map is in range.
- const std::size_t num_pages{page_linked_list.GetNumPages()};
- const std::size_t size{num_pages * PageSize};
+ const size_t num_pages{page_linked_list.GetNumPages()};
+ const size_t size{num_pages * PageSize};
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check the memory state.
R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
// Map the pages.
R_TRY(MapPages(address, page_linked_list, perm));
// Update the blocks.
- block_manager->Update(address, num_pages, state, perm);
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, bool is_pa_valid, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+ bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
// Ensure this is a valid map request.
@@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Find a random address to map at.
VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
@@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
KMemoryAttribute::None, KMemoryAttribute::None)
.IsSuccess());
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
// Perform mapping operation.
if (is_pa_valid) {
R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
@@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t
}
// Update the blocks.
- block_manager->Update(addr, num_pages, state, perm);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
// We successfully mapped the pages.
*out_addr = addr;
- return ResultSuccess;
+ R_SUCCEED();
}
Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
OperationType::Unmap)};
result.IsError()) {
- return result;
+ R_RETURN(result);
}
cur_addr += node.GetNumPages() * PageSize;
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
// Check that the unmap is in range.
- const std::size_t num_pages{page_linked_list.GetNumPages()};
- const std::size_t size{num_pages * PageSize};
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+ const size_t num_pages{page_linked_list.GetNumPages()};
+ const size_t size{num_pages * PageSize};
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check the memory state.
- R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Perform the unmap.
- R_TRY(UnmapPages(addr, page_linked_list));
+ R_TRY(UnmapPages(address, page_linked_list));
// Update the blocks.
- block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
- const std::size_t size = num_pages * PageSize;
+ const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check the memory state.
- std::size_t num_allocator_blocks{};
+ size_t num_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Perform the unmap.
R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
// Update the blocks.
- block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
- return ResultSuccess;
+ R_SUCCEED();
}
Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check if state allows us to create the group.
R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
// Create a new page group for the region.
R_TRY(this->MakePageGroup(*out, address, num_pages));
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify we can change the memory permission.
KMemoryState old_state;
@@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
// Succeed if there's nothing to do.
R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Perform mapping operation.
const auto operation =
was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
R_TRY(Operate(addr, num_pages, new_perm, operation));
// Update the blocks.
- block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
// Ensure cache coherency, if we're setting pages as executable.
if (is_x) {
- system.InvalidateCpuInstructionCacheRange(addr, size);
+ m_system.InvalidateCpuInstructionCacheRange(addr, size);
}
- return ResultSuccess;
+ R_SUCCEED();
}
KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
- return block_manager->FindBlock(addr).GetMemoryInfo();
+ return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
}
KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
if (!Contains(addr, 1)) {
- return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible,
- KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
+ return {
+ .m_address = m_address_space_end,
+ .m_size = 0 - m_address_space_end,
+ .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+ .m_device_disable_merge_left_count = 0,
+ .m_device_disable_merge_right_count = 0,
+ .m_ipc_lock_count = 0,
+ .m_device_use_count = 0,
+ .m_ipc_disable_merge_count = 0,
+ .m_permission = KMemoryPermission::None,
+ .m_attribute = KMemoryAttribute::None,
+ .m_original_permission = KMemoryPermission::None,
+ .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+ };
}
return QueryInfoImpl(addr);
}
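
The synthetic block QueryInfo returns for out-of-range addresses relies on unsigned wraparound: `0 - m_address_space_end`, evaluated in 64 bits, is exactly the number of bytes from the end of the address space up to 2^64. A two-line demonstration:

    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint64_t address_space_end = 0x0000'0080'0000'0000ULL;
        const std::uint64_t size = 0 - address_space_end; // wraps modulo 2^64
        std::cout << std::hex << "size=0x" << size << '\n';
        // Last byte covered: address_space_end + size - 1 == 0xFFFF'FFFF'FFFF'FFFF.
    }
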
-Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
- KScopedLightLock lk(general_lock);
-
- KMemoryState state{};
- KMemoryAttribute attribute{};
-
- R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Mask, KMemoryAttribute::None,
- KMemoryAttribute::IpcAndDeviceMapped));
-
- block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
-
- return ResultSuccess;
-}
-
-Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryState state{};
-
- R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
- block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
- return ResultSuccess;
-}
-
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify we can change the memory permission.
KMemoryState old_state;
KMemoryPermission old_perm;
- R_TRY(this->CheckMemoryState(
- std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size,
- KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
// Determine new perm.
const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
R_SUCCEED_IF(old_perm == new_perm);
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Perform mapping operation.
R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
// Update the blocks.
- block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
- return ResultSuccess;
+ R_SUCCEED();
}
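
R_UNLESS and R_SUCCEED_IF, used throughout this diff, are early-return macros over Result. A simplified sketch of their behavior; the real macros live in yuzu's result helpers and also support R_TRY chaining:

    #include <cstddef>
    #include <iostream>

    enum class Result { Success, InvalidSize };

    #define R_UNLESS(expr, res)                                                    \
        do {                                                                       \
            if (!(expr)) {                                                         \
                return (res);                                                      \
            }                                                                      \
        } while (0)

    #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), Result::Success)

    Result SetPermission(unsigned old_perm, unsigned new_perm, std::size_t size) {
        R_UNLESS(size > 0, Result::InvalidSize); // validation failure propagates
        R_SUCCEED_IF(old_perm == new_perm);      // nothing to do: succeed early
        // ... perform the actual permission change ...
        return Result::Success;
    }

    int main() {
        std::cout << (SetPermission(3, 3, 0x1000) == Result::Success) << '\n';
    }
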
-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Verify we can change the memory attribute.
KMemoryState old_state;
@@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Determine the new attribute.
const KMemoryAttribute new_attr =
static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
@@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
// Update the blocks.
- block_manager->Update(addr, num_pages, old_state, old_perm, new_attr);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Only process page tables are allowed to set heap size.
ASSERT(!this->IsKernel());
- max_heap_size = size;
+ m_max_heap_size = size;
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
// Lock the physical memory mutex.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
// Try to perform a reduction in heap, instead of an extension.
VAddr cur_address{};
- std::size_t allocation_size{};
+ size_t allocation_size{};
{
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Validate that setting heap size is possible at all.
- R_UNLESS(!is_kernel, ResultOutOfMemory);
- R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+ R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+ R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
ResultOutOfMemory);
- R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+ R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
if (size < GetHeapSize()) {
// The size being requested is less than the current size, so we need to free the end of
// the heap.
// Validate memory state.
- std::size_t num_allocator_blocks;
+ size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
- heap_region_start + size, GetHeapSize() - size,
+ m_heap_region_start + size, GetHeapSize() - size,
KMemoryState::All, KMemoryState::Normal,
KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Unmap the end of the heap.
const auto num_pages = (GetHeapSize() - size) / PageSize;
- R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+ R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
OperationType::Unmap));
// Release the memory from the resource limit.
- system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+ m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
LimitableResource::PhysicalMemory, num_pages * PageSize);
// Apply the memory block update.
- block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None);
+ m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+ num_pages, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None);
// Update the current heap end.
- current_heap_end = heap_region_start + size;
+ m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = heap_region_start;
- return ResultSuccess;
+ *out = m_heap_region_start;
+ R_SUCCEED();
} else if (size == GetHeapSize()) {
// The size requested is exactly the current size.
- *out = heap_region_start;
- return ResultSuccess;
+ *out = m_heap_region_start;
+ R_SUCCEED();
} else {
// We have to allocate memory. Determine how much to allocate and where while the table
// is locked.
- cur_address = current_heap_end;
+ cur_address = m_current_heap_end;
allocation_size = size - GetHeapSize();
}
}
// Reserve memory for the heap extension.
KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
allocation_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
KPageGroup pg;
- R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
&pg, allocation_size / PageSize,
- KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+ KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
// Clear all the newly allocated pages.
for (const auto& it : pg.Nodes()) {
- std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+ std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
it.GetSize());
}
// Map the pages.
{
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Ensure that the heap hasn't changed since we began executing.
- ASSERT(cur_address == current_heap_end);
+ ASSERT(cur_address == m_current_heap_end);
// Check the memory state.
- std::size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
allocation_size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(
+ std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Map the pages.
const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
+ R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
// Clear all the newly allocated pages.
- for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+ for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+ std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
PageSize);
}
@@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
memory_reservation.Commit();
// Apply the memory block update.
- block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
// Update the current heap end.
- current_heap_end = heap_region_start + size;
+ m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = heap_region_start;
- return ResultSuccess;
+ *out = m_heap_region_start;
+ R_SUCCEED();
}
}
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
+ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
bool is_map_only, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state,
+ size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr) {
- KScopedLightLock lk(general_lock);
-
- if (!CanContain(region_start, region_num_pages * PageSize, state)) {
- return ResultInvalidCurrentMemory;
- }
-
- if (region_num_pages <= needed_num_pages) {
- return ResultOutOfMemory;
- }
+ KScopedLightLock lk(m_general_lock);
+ R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
const VAddr addr{
AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
- if (!addr) {
- return ResultOutOfMemory;
- }
+ R_UNLESS(addr, ResultOutOfMemory);
+
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageGroup page_group;
- R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&page_group, needed_num_pages,
- KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+ KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
- block_manager->Update(addr, needed_num_pages, state, perm);
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
return addr;
}
-Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission perm{};
- if (const Result result{CheckMemoryState(
- nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
- KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
- KMemoryAttribute::DeviceSharedAndUncached)};
- result.IsError()) {
- return result;
- }
+Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+ bool is_aligned) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->ShareToDevice(permission);
- },
- perm);
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
- return ResultSuccess;
+ // Check the memory state.
+ const auto test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
+ test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+ KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+ R_SUCCEED();
}
-Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission perm{};
- if (const Result result{CheckMemoryState(
- nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
- KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
- KMemoryAttribute::DeviceSharedAndUncached)};
- result.IsError()) {
- return result;
- }
+Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->UnshareToDevice(permission);
- },
- perm);
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
- return ResultSuccess;
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
}
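
UpdateLock selects its per-block action as a pointer to member function, so a single update loop serves ShareToDevice, UnshareToDevice, and the disable-merge variants above. A small sketch of that dispatch, with an illustrative Block type:

    #include <iostream>

    struct Block {
        int share_count = 0;
        void ShareToDevice() { ++share_count; }
        void UnshareToDevice() { --share_count; }
    };

    using LockFunction = void (Block::*)();

    void UpdateLock(Block& block, LockFunction func) {
        (block.*func)(); // invoke the chosen action on the block
    }

    int main() {
        Block b;
        UpdateLock(b, &Block::ShareToDevice);
        UpdateLock(b, &Block::UnshareToDevice);
        std::cout << b.share_count << '\n'; // back to 0
    }
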
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
- return this->LockMemoryAndOpen(
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
- KMemoryAttribute::Locked);
+ KMemoryAttribute::Locked));
}
-Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
- return this->UnlockMemory(
+Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
-}
-
-Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
- block_manager = std::make_unique<KMemoryBlockManager>(start, end);
-
- return ResultSuccess;
-}
-
-bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
- return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
- .IsError();
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
}
bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
- auto start_ptr = system.Memory().GetPointer(addr);
+ auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != system.Memory().GetPointer(addr + offset)) {
+ if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
return false;
}
start_ptr += PageSize;
@@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
return true;
}
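
IsRegionContiguous checks that the host pointer advances by exactly PageSize per guest page; any discontinuity in the backing memory breaks the run. A sketch with a fake translation table, where translate() stands in for DeviceMemory::GetPointer:

    #include <array>
    #include <cstdint>
    #include <iostream>

    constexpr std::uint64_t PageSize = 0x1000;

    int main() {
        // Fake guest-page -> host-offset table (page 2 breaks the run).
        const std::array<std::uint64_t, 3> host_offset{0x0, 0x1000, 0x5000};
        const auto translate = [&](std::uint64_t addr) {
            return host_offset[addr / PageSize] + (addr % PageSize);
        };

        bool contiguous = true;
        std::uint64_t expected = translate(0);
        for (std::uint64_t off = 0; off < 3 * PageSize; off += PageSize) {
            if (translate(off) != expected) {
                contiguous = false; // backing memory jumped
                break;
            }
            expected += PageSize;
        }
        std::cout << "contiguous=" << contiguous << '\n'; // 0
    }
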
-void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
- KPageGroup& page_linked_list) {
+void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
}
}
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
- u64 needed_num_pages, std::size_t align) {
- if (is_aslr_enabled) {
+VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+ size_t align) {
+ if (m_enable_aslr) {
UNIMPLEMENTED();
}
- return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
- IsKernel() ? 1 : 4);
+ return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
+ IsKernel() ? 1 : 4);
}
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
@@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
ASSERT(num_pages == page_group.GetNumPages());
for (const auto& node : page_group.Nodes()) {
- const std::size_t size{node.GetNumPages() * PageSize};
+ const size_t size{node.GetNumPages() * PageSize};
switch (operation) {
case OperationType::MapGroup:
- system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
+ m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
break;
default:
ASSERT(false);
@@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
addr += size;
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr) {
ASSERT(this->IsLockedByCurrentThread());
@@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
switch (operation) {
case OperationType::Unmap:
- system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
+ m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
case OperationType::Map: {
ASSERT(map_addr);
ASSERT(Common::IsAligned(map_addr, PageSize));
- system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
+ m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
break;
}
case OperationType::ChangePermissions:
@@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
default:
ASSERT(false);
}
- return ResultSuccess;
+ R_SUCCEED();
}
VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
- return address_space_start;
+ return m_address_space_start;
case KMemoryState::Normal:
- return heap_region_start;
+ return m_heap_region_start;
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
- return alias_region_start;
+ return m_alias_region_start;
case KMemoryState::Stack:
- return stack_region_start;
+ return m_stack_region_start;
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
- return kernel_map_region_start;
+ return m_kernel_map_region_start;
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
@@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
- return alias_code_region_start;
+ return m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
- return code_region_start;
+ return m_code_region_start;
default:
UNREACHABLE();
}
}
-std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
- return address_space_end - address_space_start;
+ return m_address_space_end - m_address_space_start;
case KMemoryState::Normal:
- return heap_region_end - heap_region_start;
+ return m_heap_region_end - m_heap_region_start;
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
- return alias_region_end - alias_region_start;
+ return m_alias_region_end - m_alias_region_start;
case KMemoryState::Stack:
- return stack_region_end - stack_region_start;
+ return m_stack_region_end - m_stack_region_start;
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
- return kernel_map_region_end - kernel_map_region_start;
+ return m_kernel_map_region_end - m_kernel_map_region_start;
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
@@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
- return alias_code_region_end - alias_code_region_start;
+ return m_alias_code_region_end - m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
- return code_region_end - code_region_start;
+ return m_code_region_end - m_code_region_start;
default:
UNREACHABLE();
}
}
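GetRegionAddress and GetRegionSize are deliberately kept in lockstep so that callers such as CanContain (below) can describe a region as a (start, size) pair. The bounds check CanContain then applies computes with the last byte rather than the one-past-the-end address, which avoids wrap-around at the top of the address space; restated on its own:

#include <cstdint>

using u64 = std::uint64_t;

// Sketch of the region bounds check: comparing against (end - 1) keeps the
// arithmetic valid even when the region touches the top of the address space.
bool FitsInRegion(u64 region_start, u64 region_size, u64 addr, u64 size) {
    const u64 end = addr + size;
    const u64 last = end - 1;
    return region_start <= addr && addr < end && last <= region_start + region_size - 1;
}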
-bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
const VAddr end = addr + size;
const VAddr last = end - 1;
@@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
const bool is_in_region =
region_start <= addr && addr < end && last <= region_start + region_size - 1;
- const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr ||
- heap_region_start == heap_region_end);
- const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr ||
- alias_region_start == alias_region_end);
+ const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+ m_heap_region_start == m_heap_region_end);
+ const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+ m_alias_region_start == m_alias_region_end);
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
// Validate the states match expectation.
- R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
- R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
- return ResultSuccess;
+ R_SUCCEED();
}
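The three R_UNLESS checks above are one mask/value comparison per field: a block passes only when the bits selected by the mask equal the expected value exactly. The idiom generalizes to any flag enum (sketch; MatchesMasked is illustrative, not project code):

#include <type_traits>

// (field & mask) == expected: an all-bits mask demands an exact match, while
// a zero mask accepts any value for that field.
template <typename Enum>
bool MatchesMasked(Enum field, Enum mask, Enum expected) {
    using U = std::underlying_type_t<Enum>;
    return (static_cast<U>(field) & static_cast<U>(mask)) == static_cast<U>(expected);
}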
-Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
+Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
const VAddr last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr);
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
@@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
// Advance our iterator.
it++;
- ASSERT(it != block_manager->cend());
+ ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}
@@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
- return ResultSuccess;
+ R_SUCCEED();
}
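out_blocks_needed exists because a later update may have to split blocks: each edge of the queried range that falls strictly inside an existing block costs one extra node. The accounting, sketched independently with illustrative parameters:

#include <cstddef>
#include <cstdint>

using u64 = std::uint64_t;

// One extra block per edge of [addr, addr + size) whose page-aligned address
// does not already coincide with the start of its containing block.
std::size_t EdgeBlocksNeeded(u64 addr, u64 size, u64 first_block_addr,
                             u64 last_block_addr, u64 page_size) {
    const u64 last = addr + size - 1;
    const std::size_t for_start =
        ((addr / page_size) * page_size != first_block_addr) ? 1 : 0;
    const std::size_t for_end =
        ((last / page_size) * page_size != last_block_addr) ? 1 : 0;
    return for_start + for_end;
}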
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
@@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
// Get information about the first block.
const VAddr last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr);
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
@@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
(Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
// Validate all blocks in the range have correct state.
- const KMemoryState first_state = info.state;
- const KMemoryPermission first_perm = info.perm;
- const KMemoryAttribute first_attr = info.attribute;
+ const KMemoryState first_state = info.m_state;
+ const KMemoryPermission first_perm = info.m_permission;
+ const KMemoryAttribute first_attr = info.m_attribute;
while (true) {
// Validate the current block.
- R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory);
- R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr),
+ R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+ R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
ResultInvalidCurrentMemory);
// Validate against the provided masks.
@@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
// Advance our iterator.
it++;
- ASSERT(it != block_manager->cend());
+ ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}
@@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check that the output page group is empty, if it exists.
if (out_pg) {
@@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
}
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Decide on new perm and attr.
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
@@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
}
// Apply the memory block updates.
- block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
- return ResultSuccess;
+ R_SUCCEED();
}
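The update allocator added above is the heart of this refactor: every node the block-map update could need is reserved from the slab before any state is mutated, so the Update call itself cannot fail halfway through. The shape of that reserve-then-commit pattern, sketched with placeholder types (Allocator, apply, and the constructor signature are assumptions modeled on the call site above):

#include <cstddef>
#include <memory>

// Reserve first, mutate second; the R_* macros are those sketched earlier.
template <typename Allocator, typename SlabManager, typename ApplyFn>
Result UpdateWithReservedBlocks(SlabManager* slab, std::size_t num_blocks,
                                ApplyFn&& apply) {
    Result rc = ResultSuccess;
    Allocator allocator(std::addressof(rc), slab, num_blocks);
    R_TRY(rc);        // fail early if the slab could not satisfy the reservation
    apply(allocator); // infallible: consumes only preallocated nodes
    R_SUCCEED();
}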
Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
@@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
// Lock the table.
- KScopedLightLock lk(general_lock);
+ KScopedLightLock lk(m_general_lock);
// Check the state.
KMemoryState old_state{};
@@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+ // Create an update allocator.
+ Result allocator_result{ResultSuccess};
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
// Update permission, if we need to.
if (new_perm != old_perm) {
R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
}
// Apply the memory block updates.
- block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Locked);
- return ResultSuccess;
+ R_SUCCEED();
}
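UnlockMemory mirrors LockMemoryAndOpen exactly: the lock attribute ORed in on lock is masked out on unlock, and the Locked disable-merge flag set on lock is cleared so the manager can re-coalesce the split blocks. The attribute half of that symmetry, as two pure helpers (sketch; the project operates on KMemoryAttribute directly):

#include <cstdint>

enum class Attr : std::uint32_t { None = 0, Locked = 1u << 0 };

Attr WithLock(Attr attr, Attr lock) {
    return static_cast<Attr>(static_cast<std::uint32_t>(attr) |
                             static_cast<std::uint32_t>(lock));
}

Attr WithoutLock(Attr attr, Attr lock) {
    return static_cast<Attr>(static_cast<std::uint32_t>(attr) &
                             ~static_cast<std::uint32_t>(lock));
}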
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 25774f232..c6aeacd96 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -9,8 +9,10 @@
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -34,58 +36,66 @@ public:
~KPageTable();
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
- Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+ VAddr code_addr, size_t code_size,
+ KMemoryBlockSlabManager* mem_block_slab_manager,
+ KMemoryManager::Pool pool);
+
+ void Finalize();
+
+ Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
KMemoryPermission perm);
- Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
- Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
+ Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr);
- Result MapPhysicalMemory(VAddr addr, std::size_t size);
- Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
- Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result MapPhysicalMemory(VAddr addr, size_t size);
+ Result UnmapPhysicalMemory(VAddr addr, size_t size);
+ Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
+ Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
KMemoryState state, KMemoryPermission perm) {
- return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
- state, perm);
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
}
Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
- Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
- Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
+ Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
+ Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
- Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
- Result ResetTransferMemory(VAddr addr, std::size_t size);
- Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
- Result SetMaxHeapSize(std::size_t size);
- Result SetHeapSize(VAddr* out, std::size_t size);
- ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
- bool is_map_only, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm, PAddr map_addr = 0);
- Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
- Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
- Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
- Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+ Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+ Result SetMaxHeapSize(size_t size);
+ Result SetHeapSize(VAddr* out, size_t size);
+ ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
+ VAddr region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm,
+ PAddr map_addr = 0);
+
+ Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+ bool is_aligned);
+ Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+
+ Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+
+ Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
+ Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
- return page_table_impl;
+ return *m_page_table_impl;
}
const Common::PageTable& PageTableImpl() const {
- return page_table_impl;
+ return *m_page_table_impl;
}
- bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
+ bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
private:
enum class OperationType : u32 {
@@ -96,67 +106,65 @@ private:
ChangePermissionsAndRefresh,
};
- static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
- KMemoryAttribute::IpcLocked |
- KMemoryAttribute::DeviceShared;
+ static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
- Result InitializeMemoryLayout(VAddr start, VAddr end);
Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
- bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+ Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+ bool is_pa_valid, VAddr region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm);
Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
- bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
+ void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
- VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
- std::size_t align);
- Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+ size_t align);
+ Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation);
- Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr = 0);
+ Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
+ PAddr map_addr = 0);
VAddr GetRegionAddress(KMemoryState state) const;
- std::size_t GetRegionSize(KMemoryState state) const;
+ size_t GetRegionSize(KMemoryState state) const;
- VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
- std::size_t alignment, std::size_t offset, std::size_t guard_pages);
+ VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+ size_t alignment, size_t offset, size_t guard_pages);
- Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+ Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
- return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
- perm, attr_mask, attr);
+ R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+ perm, attr_mask, attr));
}
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
- state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
+ R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+ state_mask, state, perm_mask, perm, attr_mask, attr,
+ ignore_attr));
}
- Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
- attr_mask, attr, ignore_attr);
+ R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+ attr_mask, attr, ignore_attr));
}
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -174,13 +182,13 @@ private:
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
- return general_lock.IsLockedByCurrentThread();
+ return m_general_lock.IsLockedByCurrentThread();
}
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
- return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+ return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -191,95 +199,93 @@ private:
return *out != 0;
}
- mutable KLightLock general_lock;
- mutable KLightLock map_physical_memory_lock;
-
- std::unique_ptr<KMemoryBlockManager> block_manager;
+ mutable KLightLock m_general_lock;
+ mutable KLightLock m_map_physical_memory_lock;
public:
constexpr VAddr GetAddressSpaceStart() const {
- return address_space_start;
+ return m_address_space_start;
}
constexpr VAddr GetAddressSpaceEnd() const {
- return address_space_end;
+ return m_address_space_end;
}
- constexpr std::size_t GetAddressSpaceSize() const {
- return address_space_end - address_space_start;
+ constexpr size_t GetAddressSpaceSize() const {
+ return m_address_space_end - m_address_space_start;
}
constexpr VAddr GetHeapRegionStart() const {
- return heap_region_start;
+ return m_heap_region_start;
}
constexpr VAddr GetHeapRegionEnd() const {
- return heap_region_end;
+ return m_heap_region_end;
}
- constexpr std::size_t GetHeapRegionSize() const {
- return heap_region_end - heap_region_start;
+ constexpr size_t GetHeapRegionSize() const {
+ return m_heap_region_end - m_heap_region_start;
}
constexpr VAddr GetAliasRegionStart() const {
- return alias_region_start;
+ return m_alias_region_start;
}
constexpr VAddr GetAliasRegionEnd() const {
- return alias_region_end;
+ return m_alias_region_end;
}
- constexpr std::size_t GetAliasRegionSize() const {
- return alias_region_end - alias_region_start;
+ constexpr size_t GetAliasRegionSize() const {
+ return m_alias_region_end - m_alias_region_start;
}
constexpr VAddr GetStackRegionStart() const {
- return stack_region_start;
+ return m_stack_region_start;
}
constexpr VAddr GetStackRegionEnd() const {
- return stack_region_end;
+ return m_stack_region_end;
}
- constexpr std::size_t GetStackRegionSize() const {
- return stack_region_end - stack_region_start;
+ constexpr size_t GetStackRegionSize() const {
+ return m_stack_region_end - m_stack_region_start;
}
constexpr VAddr GetKernelMapRegionStart() const {
- return kernel_map_region_start;
+ return m_kernel_map_region_start;
}
constexpr VAddr GetKernelMapRegionEnd() const {
- return kernel_map_region_end;
+ return m_kernel_map_region_end;
}
constexpr VAddr GetCodeRegionStart() const {
- return code_region_start;
+ return m_code_region_start;
}
constexpr VAddr GetCodeRegionEnd() const {
- return code_region_end;
+ return m_code_region_end;
}
constexpr VAddr GetAliasCodeRegionStart() const {
- return alias_code_region_start;
+ return m_alias_code_region_start;
}
constexpr VAddr GetAliasCodeRegionSize() const {
- return alias_code_region_end - alias_code_region_start;
+ return m_alias_code_region_end - m_alias_code_region_start;
}
- std::size_t GetNormalMemorySize() {
- KScopedLightLock lk(general_lock);
- return GetHeapSize() + mapped_physical_memory_size;
+ size_t GetNormalMemorySize() {
+ KScopedLightLock lk(m_general_lock);
+ return GetHeapSize() + m_mapped_physical_memory_size;
}
- constexpr std::size_t GetAddressSpaceWidth() const {
- return address_space_width;
+ constexpr size_t GetAddressSpaceWidth() const {
+ return m_address_space_width;
}
- constexpr std::size_t GetHeapSize() const {
- return current_heap_end - heap_region_start;
+ constexpr size_t GetHeapSize() const {
+ return m_current_heap_end - m_heap_region_start;
}
- constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
- return address_space_start <= address && address + size - 1 <= address_space_end - 1;
+ constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+ return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
}
- constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
- return alias_region_start > address || address + size - 1 > alias_region_end - 1;
+ constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+ return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
}
- constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
- return stack_region_start > address || address + size - 1 > stack_region_end - 1;
+ constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+ return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
}
- constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
+ constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
- constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
- return address + size > heap_region_start && heap_region_end > address;
+ constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+ return address + size > m_heap_region_start && m_heap_region_end > address;
}
- constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
- return address + size > alias_region_start && alias_region_end > address;
+ constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+ return address + size > m_alias_region_start && m_alias_region_end > address;
}
- constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
+ constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
@@ -291,73 +297,78 @@ public:
}
return {};
}
- constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
+ constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
- constexpr std::size_t GetNumGuardPages() const {
+ constexpr size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
PAddr GetPhysicalAddr(VAddr addr) const {
- const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
+ const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
}
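GetPhysicalAddr is a single table read plus an add because backing_addr stores, per virtual page, the precomputed difference between physical and virtual base. Restated standalone:

#include <cstdint>
#include <vector>

using u64 = std::uint64_t;

// backing[vaddr >> page_bits] holds (physical_base - virtual_base) for that
// page, so adding the full virtual address yields the physical address.
u64 Translate(const std::vector<u64>& backing, u64 vaddr, unsigned page_bits) {
    return backing[vaddr >> page_bits] + vaddr;
}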
constexpr bool Contains(VAddr addr) const {
- return address_space_start <= addr && addr <= address_space_end - 1;
+ return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
- constexpr bool Contains(VAddr addr, std::size_t size) const {
- return address_space_start <= addr && addr < addr + size &&
- addr + size - 1 <= address_space_end - 1;
+ constexpr bool Contains(VAddr addr, size_t size) const {
+ return m_address_space_start <= addr && addr < addr + size &&
+ addr + size - 1 <= m_address_space_end - 1;
}
private:
constexpr bool IsKernel() const {
- return is_kernel;
+ return m_is_kernel;
}
constexpr bool IsAslrEnabled() const {
- return is_aslr_enabled;
+ return m_enable_aslr;
}
- constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
- return (address_space_start <= addr) &&
- (num_pages <= (address_space_end - address_space_start) / PageSize) &&
- (addr + num_pages * PageSize - 1 <= address_space_end - 1);
+ constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+ return (m_address_space_start <= addr) &&
+ (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+ (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
}
private:
- VAddr address_space_start{};
- VAddr address_space_end{};
- VAddr heap_region_start{};
- VAddr heap_region_end{};
- VAddr current_heap_end{};
- VAddr alias_region_start{};
- VAddr alias_region_end{};
- VAddr stack_region_start{};
- VAddr stack_region_end{};
- VAddr kernel_map_region_start{};
- VAddr kernel_map_region_end{};
- VAddr code_region_start{};
- VAddr code_region_end{};
- VAddr alias_code_region_start{};
- VAddr alias_code_region_end{};
-
- std::size_t mapped_physical_memory_size{};
- std::size_t max_heap_size{};
- std::size_t max_physical_memory_size{};
- std::size_t address_space_width{};
-
- bool is_kernel{};
- bool is_aslr_enabled{};
-
- u32 heap_fill_value{};
- const KMemoryRegion* cached_physical_heap_region{};
-
- KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
- KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
-
- Common::PageTable page_table_impl;
-
- Core::System& system;
+ VAddr m_address_space_start{};
+ VAddr m_address_space_end{};
+ VAddr m_heap_region_start{};
+ VAddr m_heap_region_end{};
+ VAddr m_current_heap_end{};
+ VAddr m_alias_region_start{};
+ VAddr m_alias_region_end{};
+ VAddr m_stack_region_start{};
+ VAddr m_stack_region_end{};
+ VAddr m_kernel_map_region_start{};
+ VAddr m_kernel_map_region_end{};
+ VAddr m_code_region_start{};
+ VAddr m_code_region_end{};
+ VAddr m_alias_code_region_start{};
+ VAddr m_alias_code_region_end{};
+
+ size_t m_mapped_physical_memory_size{};
+ size_t m_max_heap_size{};
+ size_t m_max_physical_memory_size{};
+ size_t m_address_space_width{};
+
+ KMemoryBlockManager m_memory_block_manager;
+
+ bool m_is_kernel{};
+ bool m_enable_aslr{};
+ bool m_enable_device_address_space_merge{};
+
+ KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+
+ u32 m_heap_fill_value{};
+ const KMemoryRegion* m_cached_physical_heap_region{};
+
+ KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
+ KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
+
+ std::unique_ptr<Common::PageTable> m_page_table_impl;
+
+ Core::System& m_system;
};
} // namespace Kernel
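Beyond the mechanical m_ prefixes, the header changes ownership: Common::PageTable moves behind a std::unique_ptr, which is what allows the new Finalize() to drop the backing tables deterministically rather than at destruction. The ownership shape in miniature (illustrative types):

#include <memory>

struct PageTableImpl {
    // backing arrays would live here
};

class OwnsImpl {
public:
    void Initialize() {
        m_impl = std::make_unique<PageTableImpl>();
    }
    void Finalize() {
        m_impl.reset(); // release the tables now, not at destruction time
    }

private:
    std::unique_ptr<PageTableImpl> m_impl;
};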
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d3e99665f..8c3495e5a 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
process->name = std::move(process_name);
process->resource_limit = res_limit;
- process->status = ProcessStatus::Created;
+ process->system_resource_address = 0;
+ process->state = State::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
process->exception_thread = nullptr;
process->is_suspended = false;
process->schedule_count = 0;
+ process->is_handle_table_initialized = false;
// Open a reference to the resource limit.
process->resource_limit->Open();
- return ResultSuccess;
+ R_SUCCEED();
}
void KProcess::DoWorkerTaskImpl() {
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
}
}
-u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailable() {
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
- page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
+ page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
return memory_usage_capacity;
}
-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}
-u64 KProcess::GetTotalPhysicalMemoryUsed() const {
- return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
+u64 KProcess::GetTotalPhysicalMemoryUsed() {
+ return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
GetSystemResourceSize();
}
-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
+u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
shmem->Open();
shemen_info->Open();
- return ResultSuccess;
+ R_SUCCEED();
}
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
KScopedSchedulerLock sl{kernel};
// Validate that we're in a state that we can reset.
- R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+ R_UNLESS(state != State::Terminated, ResultInvalidState);
R_UNLESS(is_signaled, ResultInvalidState);
// Clear signaled.
is_signaled = false;
- return ResultSuccess;
+ R_SUCCEED();
}
Result KProcess::SetActivity(ProcessActivity activity) {
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
KScopedSchedulerLock sl{kernel};
// Validate our state.
- R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
- R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+ R_UNLESS(state != State::Terminating, ResultInvalidState);
+ R_UNLESS(state != State::Terminated, ResultInvalidState);
// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
- if (is_suspended) {
- return ResultInvalidState;
- }
+ R_UNLESS(!is_suspended, ResultInvalidState);
// Suspend all threads.
for (auto* thread : GetThreadList()) {
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
ASSERT(activity == ProcessActivity::Runnable);
// Verify that we're suspended.
- if (!is_suspended) {
- return ResultInvalidState;
- }
+ R_UNLESS(is_suspended, ResultInvalidState);
// Resume all threads.
for (auto* thread : GetThreadList()) {
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
SetSuspended(false);
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;
+ // We currently do not support process-specific system resources
+ UNIMPLEMENTED_IF(system_resource_size != 0);
+
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
code_size + system_resource_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
code_size + system_resource_size);
- return ResultLimitReached;
+ R_RETURN(ResultLimitReached);
}
// Initialize process address space
- if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
- 0x8000000, code_size,
- KMemoryManager::Pool::Application)};
+ if (const Result result{page_table.InitializeForProcess(
+ metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
+ &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
result.IsError()) {
- return result;
+ R_RETURN(result);
}
// Map process code region
- if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
- return result;
+ R_RETURN(result);
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
if (const Result result{
- capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
+ capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
result.IsError()) {
- return result;
+ R_RETURN(result);
}
// Set memory usage capacity
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is36Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
- memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
+ memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
break;
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
- page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
+ memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
+ page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
break;
default:
@@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
// Create TLS region
- R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
memory_reservation.Commit();
- return handle_table.Initialize(capabilities.GetHandleTableSize());
+ R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
}
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
- ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
+ ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
- ChangeStatus(ProcessStatus::Running);
+ ChangeState(State::Running);
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
}
void KProcess::PrepareForTermination() {
- ChangeStatus(ProcessStatus::Exiting);
+ ChangeState(State::Terminating);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
for (auto* thread : in_thread_list) {
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
- this->DeleteThreadLocalRegion(tls_region_address);
- tls_region_address = 0;
+ this->DeleteThreadLocalRegion(plr_address);
+ plr_address = 0;
if (resource_limit) {
resource_limit->Release(LimitableResource::PhysicalMemory,
main_thread_stack_size + image_size);
}
- ChangeStatus(ProcessStatus::Exited);
+ ChangeState(State::Terminated);
}
void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
}
// Finalize the page table.
- page_table.reset();
+ page_table.Finalize();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
}
*out = tlr;
- return ResultSuccess;
+ R_SUCCEED();
}
}
@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
// We succeeded!
tlp_guard.Cancel();
*out = tlr;
- return ResultSuccess;
+ R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage::Free(kernel, page_to_free);
}
- return ResultSuccess;
+ R_SUCCEED();
}
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
- page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+ page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
};
kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
- kernel_.System())},
+ : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;
-void KProcess::ChangeStatus(ProcessStatus new_status) {
- if (status == new_status) {
+void KProcess::ChangeState(State new_state) {
+ if (state == new_state) {
return;
}
- status = new_status;
+ state = new_state;
is_signaled = true;
NotifyAvailable();
}
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
- const VAddr start{page_table->GetStackRegionStart()};
- const std::size_t size{page_table->GetStackRegionEnd() - start};
+ const VAddr start{page_table.GetStackRegionStart()};
+ const std::size_t size{page_table.GetStackRegionEnd() - start};
CASCADE_RESULT(main_thread_stack_top,
- page_table->AllocateAndMapMemory(
+ page_table.AllocateAndMapMemory(
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
main_thread_stack_top += main_thread_stack_size;
- return ResultSuccess;
+ R_SUCCEED();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index d56d73bab..2e0cc3d0b 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,6 +13,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
+#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
namespace Kernel {
class KernelCore;
-class KPageTable;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
BASE = 3,
};
-/**
- * Indicates the status of a Process instance.
- *
- * @note These match the values as used by kernel,
- * so new entries should only be added if RE
- * shows that a new value has been introduced.
- */
-enum class ProcessStatus {
- Created,
- CreatedWithDebuggerAttached,
- Running,
- WaitingForDebuggerToAttach,
- DebuggerAttached,
- Exiting,
- Exited,
- DebugBreak,
-};
-
enum class ProcessActivity : u32 {
Runnable,
Paused,
@@ -89,6 +71,17 @@ public:
explicit KProcess(KernelCore& kernel_);
~KProcess() override;
+ enum class State {
+ Created = static_cast<u32>(Svc::ProcessState::Created),
+ CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
+ Running = static_cast<u32>(Svc::ProcessState::Running),
+ Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
+ RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
+ Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
+ Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
+ DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
+ };
+
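Each enumerator is pinned to its Svc::ProcessState counterpart, so a state value can cross the SVC boundary with a plain cast; a hypothetical helper (not part of the patch) would be:

// Values coincide by construction, so the conversion is lossless.
Svc::ProcessState ToSvcState(KProcess::State state) {
    return static_cast<Svc::ProcessState>(state);
}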
enum : u64 {
/// Lowest allowed process ID for a kernel initial process.
InitialKIPIDMin = 1,
@@ -114,12 +107,12 @@ public:
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
- return *page_table;
+ return page_table;
}
/// Gets a const reference to the process' page table.
const KPageTable& PageTable() const {
- return *page_table;
+ return page_table;
}
/// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
}
Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
- return condition_var.Wait(address, cv_key, tag, ns);
+ R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
- return address_arbiter.SignalToAddress(address, signal_type, value, count);
+ R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
- return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+ R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
- /// Gets the address to the process' dedicated TLS region.
- VAddr GetTLSRegionAddress() const {
- return tls_region_address;
+ VAddr GetProcessLocalRegionAddress() const {
+ return plr_address;
}
/// Gets the current status of the process
- ProcessStatus GetStatus() const {
- return status;
+ State GetState() const {
+ return state;
}
/// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
}
/// Retrieves the total physical memory available to this process in bytes.
- u64 GetTotalPhysicalMemoryAvailable() const;
+ u64 GetTotalPhysicalMemoryAvailable();
/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
+ u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
/// Retrieves the total physical memory used by this process in bytes.
- u64 GetTotalPhysicalMemoryUsed() const;
+ u64 GetTotalPhysicalMemoryUsed();
/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
+ u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
/// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() {
@@ -415,19 +407,24 @@ private:
pinned_threads[core_id] = nullptr;
}
- /// Changes the process status. If the status is different
- /// from the current process status, then this will trigger
- /// a process signal.
- void ChangeStatus(ProcessStatus new_status);
+ void FinalizeHandleTable() {
+ // Finalize the table.
+ handle_table.Finalize();
+
+ // Note that the table is finalized.
+ is_handle_table_initialized = false;
+ }
+
+ void ChangeState(State new_state);
/// Allocates the main thread stack for the process, given the stack size in bytes.
Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
- std::unique_ptr<KPageTable> page_table;
+ KPageTable page_table;
/// Current status of the process
- ProcessStatus status{};
+ State state{};
/// The ID of this process
u64 process_id = 0;
@@ -443,6 +440,8 @@ private:
/// Resource limit descriptor for this process
KResourceLimit* resource_limit{};
+ VAddr system_resource_address{};
+
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
@@ -469,7 +468,7 @@ private:
KConditionVariable condition_var;
/// Address indicating the location of the process' dedicated TLS region.
- VAddr tls_region_address = 0;
+ VAddr plr_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
/// Schedule count of this process
s64 schedule_count{};
+ size_t memory_release_hint{};
+
bool is_signaled{};
bool is_suspended{};
+ bool is_immortal{};
+ bool is_handle_table_initialized{};
bool is_initialized{};
std::atomic<u16> num_running_threads{};
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index 94c5464fe..5c942d47c 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -15,31 +15,44 @@ KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{ker
KReadableEvent::~KReadableEvent() = default;
+void KReadableEvent::Initialize(KEvent* parent) {
+ m_is_signaled = false;
+ m_parent = parent;
+
+ if (m_parent != nullptr) {
+ m_parent->Open();
+ }
+}
+
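Initialize now opens a reference on the parent KEvent and Destroy (below) closes it, so the parent cannot die while its readable half is alive. The intrusive open/close protocol in isolation (simplified sketch):

#include <atomic>

class RefCounted {
public:
    virtual ~RefCounted() = default;

    void Open() {
        m_refs.fetch_add(1, std::memory_order_relaxed);
    }
    void Close() {
        if (m_refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            delete this; // last reference dropped
        }
    }

private:
    std::atomic<int> m_refs{1}; // the creator holds the initial reference
};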
bool KReadableEvent::IsSignaled() const {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
- return is_signaled;
+ return m_is_signaled;
}
void KReadableEvent::Destroy() {
- if (parent) {
- parent->Close();
+ if (m_parent) {
+ {
+ KScopedSchedulerLock sl{kernel};
+ m_parent->OnReadableEventDestroyed();
+ }
+ m_parent->Close();
}
}
Result KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};
- if (!is_signaled) {
- is_signaled = true;
- NotifyAvailable();
+ if (!m_is_signaled) {
+ m_is_signaled = true;
+ this->NotifyAvailable();
}
return ResultSuccess;
}
Result KReadableEvent::Clear() {
- Reset();
+ this->Reset();
return ResultSuccess;
}
@@ -47,11 +60,11 @@ Result KReadableEvent::Clear() {
Result KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};
- if (!is_signaled) {
+ if (!m_is_signaled) {
return ResultInvalidState;
}
- is_signaled = false;
+ m_is_signaled = false;
return ResultSuccess;
}
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 18dcad289..743f96bf5 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -20,26 +20,23 @@ public:
explicit KReadableEvent(KernelCore& kernel_);
~KReadableEvent() override;
- void Initialize(KEvent* parent_event_, std::string&& name_) {
- is_signaled = false;
- parent = parent_event_;
- name = std::move(name_);
- }
+ void Initialize(KEvent* parent);
KEvent* GetParent() const {
- return parent;
+ return m_parent;
}
+ Result Signal();
+ Result Clear();
+
bool IsSignaled() const override;
void Destroy() override;
- Result Signal();
- Result Clear();
Result Reset();
private:
- bool is_signaled{};
- KEvent* parent{};
+ bool m_is_signaled{};
+ KEvent* m_parent{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 802c646a6..faf03fcc8 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -7,6 +7,8 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
+#include "common/scope_exit.h"
+#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
@@ -18,13 +20,16 @@
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"
namespace Kernel {
-KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
+
+KServerSession::KServerSession(KernelCore& kernel_)
+ : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
KServerSession::~KServerSession() = default;
@@ -33,17 +38,14 @@ void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
// Set member variables.
parent = parent_session_;
name = std::move(name_);
-
- if (manager_) {
- manager = manager_;
- } else {
- manager = std::make_shared<SessionRequestManager>(kernel);
- }
+ manager = manager_;
}
void KServerSession::Destroy() {
parent->OnServerClosed();
+ this->CleanupRequests();
+
parent->Close();
// Release host emulation members.
@@ -54,13 +56,13 @@ void KServerSession::Destroy() {
}
void KServerSession::OnClientClosed() {
- if (manager->HasSessionHandler()) {
+ if (manager && manager->HasSessionHandler()) {
manager->SessionHandler().ClientDisconnected(this);
}
}
bool KServerSession::IsSignaled() const {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
// If the client is closed, we're always signaled.
if (parent->IsClientClosed()) {
@@ -68,114 +70,281 @@ bool KServerSession::IsSignaled() const {
}
// Otherwise, we're signaled if we have a request and aren't handling one.
- return false;
+ return !m_request_list.empty() && m_current_request == nullptr;
}
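The predicate is now meaningful for non-HLE sessions: a closed client always signals so the server can tear the session down; otherwise the session signals exactly when a request is queued and none is being served. As a standalone function:

#include <cstddef>

bool IsServerSessionSignaled(bool client_closed, std::size_t queued_requests,
                             bool request_in_flight) {
    if (client_closed) {
        return true; // wake the server unconditionally for cleanup
    }
    return queued_requests > 0 && !request_in_flight;
}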
-void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) {
- manager->AppendDomainHandler(std::move(handler));
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+ u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
+ auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
+
+ context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
+
+ return manager->QueueSyncRequest(parent, std::move(context));
}
-std::size_t KServerSession::NumDomainRequestHandlers() const {
- return manager->DomainHandlerCount();
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+ Result result = manager->CompleteSyncRequest(this, context);
+
+ // The calling thread is waiting for this request to complete, so wake it up.
+ context.GetThread().EndWait(result);
+
+ return result;
}
-Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
- if (!context.HasDomainMessageHeader()) {
- return ResultSuccess;
- }
+Result KServerSession::OnRequest(KSessionRequest* request) {
+ // Create the wait queue.
+ ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
- // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
- context.SetSessionRequestManager(manager);
-
- // If there is a DomainMessageHeader, then this is CommandType "Request"
- const auto& domain_message_header = context.GetDomainMessageHeader();
- const u32 object_id{domain_message_header.object_id};
- switch (domain_message_header.command) {
- case IPC::DomainMessageHeader::CommandType::SendMessage:
- if (object_id > manager->DomainHandlerCount()) {
- LOG_CRITICAL(IPC,
- "object_id {} is too big! This probably means a recent service call "
- "to {} needed to return a new interface!",
- object_id, name);
- ASSERT(false);
- return ResultSuccess; // Ignore error if asserts are off
- }
- if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
- return strong_ptr->HandleSyncRequest(*this, context);
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Ensure that we can handle new requests.
+ R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed);
+
+ // Check that we're not terminating.
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+
+ if (manager) {
+ // HLE request.
+ auto& memory{kernel.System().Memory()};
+ this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory);
} else {
- ASSERT(false);
- return ResultSuccess;
- }
+ // Non-HLE request.
- case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
- LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
+ // Get whether we're empty.
+ const bool was_empty = m_request_list.empty();
- manager->CloseDomainHandler(object_id - 1);
+ // Add the request to the list.
+ request->Open();
+ m_request_list.push_back(*request);
- IPC::ResponseBuilder rb{context, 2};
- rb.Push(ResultSuccess);
- return ResultSuccess;
- }
+ // If we were empty, signal.
+ if (was_empty) {
+ this->NotifyAvailable();
+ }
+ }
+
+ // If we have a request event, this is asynchronous, and we don't need to wait.
+ R_SUCCEED_IF(request->GetEvent() != nullptr);
+
+ // This is a synchronous request, so we should wait for our request to complete.
+ GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+ GetCurrentThread(kernel).BeginWait(&wait_queue);
}
- LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
- ASSERT(false);
- return ResultSuccess;
+ return GetCurrentThread(kernel).GetWaitResult();
}
-Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
- u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
- auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
+Result KServerSession::SendReply() {
+ // Lock the session.
+ KScopedLightLock lk{m_lock};
- context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
+ // Get the request.
+ KSessionRequest* request;
+ {
+ KScopedSchedulerLock sl{kernel};
- // Ensure we have a session request handler
- if (manager->HasSessionRequestHandler(*context)) {
- if (auto strong_ptr = manager->GetServiceThread().lock()) {
- strong_ptr->QueueSyncRequest(*parent, std::move(context));
- } else {
- ASSERT_MSG(false, "strong_ptr is nullptr!");
+ // Get the current request.
+ request = m_current_request;
+ R_UNLESS(request != nullptr, ResultInvalidState);
+
+ // Clear the current request, since we're processing it.
+ m_current_request = nullptr;
+ if (!m_request_list.empty()) {
+ this->NotifyAvailable();
}
- } else {
- ASSERT_MSG(false, "handler is invalid!");
}
- return ResultSuccess;
-}
+ // Close reference to the request once we're done processing it.
+ SCOPE_EXIT({ request->Close(); });
-Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
- Result result = ResultSuccess;
+ // Extract relevant information from the request.
+ const uintptr_t client_message = request->GetAddress();
+ const size_t client_buffer_size = request->GetSize();
+ KThread* client_thread = request->GetThread();
+ KEvent* event = request->GetEvent();
- // If the session has been converted to a domain, handle the domain request
- if (manager->HasSessionRequestHandler(context)) {
- if (IsDomain() && context.HasDomainMessageHeader()) {
- result = HandleDomainSyncRequest(context);
- // If there is no domain header, the regular session handler is used
- } else if (manager->HasSessionHandler()) {
- // If this ServerSession has an associated HLE handler, forward the request to it.
- result = manager->SessionHandler().HandleSyncRequest(*this, context);
- }
+ // Check whether we're closed.
+ const bool closed = (client_thread == nullptr || parent->IsClientClosed());
+
+ Result result = ResultSuccess;
+ if (!closed) {
+ // If we're not closed, send the reply.
+ Core::Memory::Memory& memory{kernel.System().Memory()};
+ KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
+
+ auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ auto* dst_msg_buffer = memory.GetPointer(client_message);
+ std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
} else {
- ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
- IPC::ResponseBuilder rb(context, 2);
- rb.Push(ResultSuccess);
+ result = ResultSessionClosed;
}
- if (convert_to_domain) {
- ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
- manager->ConvertToDomain();
- convert_to_domain = false;
+ // Select a result for the client.
+ Result client_result = result;
+ if (closed && R_SUCCEEDED(result)) {
+ result = ResultSessionClosed;
+ client_result = ResultSessionClosed;
+ } else {
+ result = ResultSuccess;
}
- // The calling thread is waiting for this request to complete, so wake it up.
- context.GetThread().EndWait(result);
+ // If there's a client thread, update it.
+ if (client_thread != nullptr) {
+ if (event != nullptr) {
+ // // Get the client process/page table.
+ // KProcess *client_process = client_thread->GetOwnerProcess();
+ // KPageTable *client_page_table = &client_process->PageTable();
+
+ // // If we need to, reply with an async error.
+ // if (R_FAILED(client_result)) {
+ // ReplyAsyncError(client_process, client_message, client_buffer_size,
+ // client_result);
+ // }
+
+ // // Unlock the client buffer.
+ // // NOTE: Nintendo does not check the result of this.
+ // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
+
+ // Signal the event.
+ event->Signal();
+ } else {
+ // End the client thread's wait.
+ KScopedSchedulerLock sl{kernel};
+
+ if (!client_thread->IsTerminationRequested()) {
+ client_thread->EndWait(client_result);
+ }
+ }
+ }
return result;
}
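The result selection in SendReply above is subtle: the server-visible and client-visible results intentionally diverge once the client has gone away. A minimal standalone model of that branch, with an illustrative Rc enum standing in for Result (nothing here is a yuzu type):

#include <cassert>
#include <utility>

enum class Rc { Success, SessionClosed };

// Mirrors the selection in KServerSession::SendReply. `copy_result` is the
// value of `result` entering the block: Success if the reply was copied,
// SessionClosed if the client was already gone. Returns {server, client}.
std::pair<Rc, Rc> SelectResults(bool closed, Rc copy_result) {
    Rc server = copy_result;
    Rc client = copy_result;
    if (closed && server == Rc::Success) {
        // Only reachable when `result` can succeed despite a closed client,
        // e.g. once buffer cleanup feeds into `result` as in the real kernel.
        server = Rc::SessionClosed;
        client = Rc::SessionClosed;
    } else {
        server = Rc::Success;
    }
    return {server, client};
}

int main() {
    // Normal path: both sides observe success.
    assert(SelectResults(false, Rc::Success).second == Rc::Success);
    // Closed client: the client observes SessionClosed, yet the server's
    // SendReply call itself reports success.
    assert(SelectResults(true, Rc::SessionClosed).second == Rc::SessionClosed);
    assert(SelectResults(true, Rc::SessionClosed).first == Rc::Success);
}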
-Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
- return QueueSyncRequest(thread, memory);
+Result KServerSession::ReceiveRequest() {
+ // Lock the session.
+ KScopedLightLock lk{m_lock};
+
+ // Get the request and client thread.
+ KSessionRequest* request;
+ KThread* client_thread;
+
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Ensure that we can service the request.
+ R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed);
+
+ // Ensure we aren't already servicing a request.
+ R_UNLESS(m_current_request == nullptr, ResultNotFound);
+
+ // Ensure we have a request to service.
+ R_UNLESS(!m_request_list.empty(), ResultNotFound);
+
+ // Pop the first request from the list.
+ request = &m_request_list.front();
+ m_request_list.pop_front();
+
+ // Get the thread for the request.
+ client_thread = request->GetThread();
+ R_UNLESS(client_thread != nullptr, ResultSessionClosed);
+
+ // Open the client thread.
+ client_thread->Open();
+ }
+
+ SCOPE_EXIT({ client_thread->Close(); });
+
+ // Set the request as our current.
+ m_current_request = request;
+
+ // Get the client address.
+ uintptr_t client_message = request->GetAddress();
+ size_t client_buffer_size = request->GetSize();
+ // bool recv_list_broken = false;
+
+ // Receive the message.
+ Core::Memory::Memory& memory{kernel.System().Memory()};
+ KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
+
+ auto* src_msg_buffer = memory.GetPointer(client_message);
+ auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
+
+ // We succeeded.
+ return ResultSuccess;
+}
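Together, OnRequest, ReceiveRequest, and SendReply form a small state machine over m_request_list and m_current_request: the session is signaled only while work is queued and nothing is being serviced, ReceiveRequest reports ResultNotFound once a request is in flight, and SendReply reopens the pipeline. A toy standalone model, with ints standing in for KSessionRequest and every name illustrative:

#include <cassert>
#include <deque>
#include <optional>

struct Session {
    std::deque<int> request_list;
    std::optional<int> current_request;

    bool IsSignaled() const {
        return !request_list.empty() && !current_request.has_value();
    }
    bool ReceiveRequest() {
        if (current_request || request_list.empty()) {
            return false; // ResultNotFound in the real code
        }
        current_request = request_list.front();
        request_list.pop_front();
        return true;
    }
    bool SendReply() {
        if (!current_request) {
            return false; // ResultInvalidState
        }
        current_request.reset(); // would NotifyAvailable() if more are queued
        return true;
    }
};

int main() {
    Session s;
    s.request_list.push_back(1);
    s.request_list.push_back(2);
    assert(s.IsSignaled());
    assert(s.ReceiveRequest()); // now servicing request 1
    assert(!s.IsSignaled());    // not signaled while a request is in flight
    assert(!s.ReceiveRequest());
    assert(s.SendReply());
    assert(s.IsSignaled());     // request 2 is still pending
}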
+
+void KServerSession::CleanupRequests() {
+ KScopedLightLock lk(m_lock);
+
+ // Clean up any pending requests.
+ while (true) {
+ // Get the next request.
+ KSessionRequest* request = nullptr;
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ if (m_current_request) {
+ // Choose the current request if we have one.
+ request = m_current_request;
+ m_current_request = nullptr;
+ } else if (!m_request_list.empty()) {
+ // Pop the request from the front of the list.
+ request = &m_request_list.front();
+ m_request_list.pop_front();
+ }
+ }
+
+ // If there's no request, we're done.
+ if (request == nullptr) {
+ break;
+ }
+
+ // Close a reference to the request once it's cleaned up.
+ SCOPE_EXIT({ request->Close(); });
+
+ // Extract relevant information from the request.
+ // const uintptr_t client_message = request->GetAddress();
+ // const size_t client_buffer_size = request->GetSize();
+ KThread* client_thread = request->GetThread();
+ KEvent* event = request->GetEvent();
+
+ // KProcess *server_process = request->GetServerProcess();
+ // KProcess *client_process = (client_thread != nullptr) ?
+ // client_thread->GetOwnerProcess() : nullptr;
+ // KProcessPageTable *client_page_table = (client_process != nullptr) ?
+ // &client_process->GetPageTable() : nullptr;
+
+ // Cleanup the mappings.
+ // Result result = CleanupMap(request, server_process, client_page_table);
+
+ // If there's a client thread, update it.
+ if (client_thread != nullptr) {
+ if (event != nullptr) {
+ // // We need to reply async.
+ // ReplyAsyncError(client_process, client_message, client_buffer_size,
+ // (R_SUCCEEDED(result) ? ResultSessionClosed : result));
+
+ // // Unlock the client buffer.
+                // // NOTE: Nintendo does not check the result of this.
+ // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
+
+ // Signal the event.
+ event->Signal();
+ } else {
+ // End the client thread's wait.
+ KScopedSchedulerLock sl{kernel};
+
+ if (!client_thread->IsTerminationRequested()) {
+ client_thread->EndWait(ResultSessionClosed);
+ }
+ }
+ }
+ }
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 6d0821945..32135473b 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -3,6 +3,7 @@
#pragma once
+#include <list>
#include <memory>
#include <string>
#include <utility>
@@ -10,6 +11,8 @@
#include <boost/intrusive/list.hpp>
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
@@ -55,64 +58,29 @@ public:
}
bool IsSignaled() const override;
-
void OnClientClosed();
- void ClientConnected(SessionRequestHandlerPtr handler) {
- manager->SetSessionHandler(std::move(handler));
- }
-
- void ClientDisconnected() {
- manager = nullptr;
- }
-
- /**
- * Handle a sync request from the emulated application.
- *
- * @param thread Thread that initiated the request.
- * @param memory Memory context to handle the sync request under.
- * @param core_timing Core timing context to schedule the request event under.
- *
- * @returns Result from the operation.
- */
- Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
-
- /// Adds a new domain request handler to the collection of request handlers within
- /// this ServerSession instance.
- void AppendDomainHandler(SessionRequestHandlerPtr handler);
-
- /// Retrieves the total number of domain request handlers that have been
- /// appended to this ServerSession instance.
- std::size_t NumDomainRequestHandlers() const;
-
- /// Returns true if the session has been converted to a domain, otherwise False
- bool IsDomain() const {
- return manager->IsDomain();
- }
-
- /// Converts the session to a domain at the end of the current command
- void ConvertToDomain() {
- convert_to_domain = true;
- }
-
/// Gets the session request manager, which forwards requests to the underlying service
std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
return manager;
}
+ /// TODO: flesh these out to match the real kernel
+ Result OnRequest(KSessionRequest* request);
+ Result SendReply();
+ Result ReceiveRequest();
+
private:
+ /// Frees up waiting client sessions when this server session is about to die
+ void CleanupRequests();
+
/// Queues a sync request from the emulated application.
Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
Result CompleteSyncRequest(HLERequestContext& context);
- /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
- /// object handle.
- Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
-
- /// This session's HLE request handlers
+ /// This session's HLE request handlers; if nullptr, this is not an HLE server
std::shared_ptr<SessionRequestManager> manager;
/// When set to True, converts the session to a domain at the end of the command
@@ -120,6 +88,12 @@ private:
/// KSession that owns this KServerSession
KSession* parent{};
+
+ /// List of threads which are pending a reply.
+ boost::intrusive::list<KSessionRequest> m_request_list;
+ KSessionRequest* m_current_request;
+
+ KLightLock m_lock;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
new file mode 100644
index 000000000..520da6aa7
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_page_buffer.h"
+#include "core/hle/kernel/k_session_request.h"
+
+namespace Kernel {
+
+Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
+ KMemoryState state, size_t index) {
+ // At most 15 buffers of each type (4-bit descriptor counts).
+ ASSERT(index < ((1ul << 4) - 1) * 3);
+
+ // Get the mapping.
+ Mapping* mapping;
+ if (index < NumStaticMappings) {
+ mapping = &m_static_mappings[index];
+ } else {
+ // Allocate a page for the extra mappings.
+ if (m_mappings == nullptr) {
+ KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
+ R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);
+
+ m_mappings = reinterpret_cast<Mapping*>(page_buffer);
+ }
+
+ mapping = &m_mappings[index - NumStaticMappings];
+ }
+
+ // Set the mapping.
+ mapping->Set(client, server, size, state);
+
+ return ResultSuccess;
+}
+
+Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
+ KMemoryState state) {
+ ASSERT(m_num_recv == 0);
+ ASSERT(m_num_exch == 0);
+ return this->PushMap(client, server, size, state, m_num_send++);
+}
+
+Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
+ KMemoryState state) {
+ ASSERT(m_num_exch == 0);
+ return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
+}
+
+Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
+ KMemoryState state) {
+ return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
+}
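PushSend, PushReceive, and PushExchange share one flat index space: send mappings occupy indices [0, num_send), receive mappings follow, then exchange, which is why the asserts above forbid pushing a send after a receive or a receive after an exchange. The first NumStaticMappings entries live in the inline array; the remainder spill into the allocated page. A small standalone demo of the index arithmetic (illustrative names, not yuzu types):

#include <cassert>
#include <cstddef>

constexpr std::size_t NumStaticMappings = 8;

struct Counts {
    std::size_t send, recv, exch;
};

// Mirrors GetReceiveMapping: receive mapping i lives at flat index send + i.
std::size_t ReceiveIndex(const Counts& c, std::size_t i) {
    assert(i < c.recv);
    return c.send + i;
}

bool IsStatic(std::size_t flat_index) {
    return flat_index < NumStaticMappings;
}

int main() {
    const Counts c{6, 4, 0};
    // The first receive mapping (flat index 6) fits in the inline array.
    assert(ReceiveIndex(c, 0) == 6 && IsStatic(ReceiveIndex(c, 0)));
    // The third receive mapping (flat index 8) spills into the overflow page.
    assert(!IsStatic(ReceiveIndex(c, 2)));
}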
+
+void KSessionRequest::SessionMappings::Finalize() {
+ if (m_mappings) {
+ KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
+ m_mappings = nullptr;
+ }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
new file mode 100644
index 000000000..e5558bc2c
--- /dev/null
+++ b/src/core/hle/kernel/k_session_request.h
@@ -0,0 +1,306 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+class KSessionRequest final : public KSlabAllocated<KSessionRequest>,
+ public KAutoObject,
+ public boost::intrusive::list_base_hook<> {
+ KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
+
+public:
+ class SessionMappings {
+ private:
+ static constexpr size_t NumStaticMappings = 8;
+
+ class Mapping {
+ public:
+ constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
+ m_client_address = c;
+ m_server_address = s;
+ m_size = sz;
+ m_state = st;
+ }
+
+ constexpr VAddr GetClientAddress() const {
+ return m_client_address;
+ }
+ constexpr VAddr GetServerAddress() const {
+ return m_server_address;
+ }
+ constexpr size_t GetSize() const {
+ return m_size;
+ }
+ constexpr KMemoryState GetMemoryState() const {
+ return m_state;
+ }
+
+ private:
+ VAddr m_client_address;
+ VAddr m_server_address;
+ size_t m_size;
+ KMemoryState m_state;
+ };
+
+ public:
+ explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}
+
+ void Initialize() {}
+ void Finalize();
+
+ size_t GetSendCount() const {
+ return m_num_send;
+ }
+ size_t GetReceiveCount() const {
+ return m_num_recv;
+ }
+ size_t GetExchangeCount() const {
+ return m_num_exch;
+ }
+
+ Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
+ Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
+ Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
+
+ VAddr GetSendClientAddress(size_t i) const {
+ return GetSendMapping(i).GetClientAddress();
+ }
+ VAddr GetSendServerAddress(size_t i) const {
+ return GetSendMapping(i).GetServerAddress();
+ }
+ size_t GetSendSize(size_t i) const {
+ return GetSendMapping(i).GetSize();
+ }
+ KMemoryState GetSendMemoryState(size_t i) const {
+ return GetSendMapping(i).GetMemoryState();
+ }
+
+ VAddr GetReceiveClientAddress(size_t i) const {
+ return GetReceiveMapping(i).GetClientAddress();
+ }
+ VAddr GetReceiveServerAddress(size_t i) const {
+ return GetReceiveMapping(i).GetServerAddress();
+ }
+ size_t GetReceiveSize(size_t i) const {
+ return GetReceiveMapping(i).GetSize();
+ }
+ KMemoryState GetReceiveMemoryState(size_t i) const {
+ return GetReceiveMapping(i).GetMemoryState();
+ }
+
+ VAddr GetExchangeClientAddress(size_t i) const {
+ return GetExchangeMapping(i).GetClientAddress();
+ }
+ VAddr GetExchangeServerAddress(size_t i) const {
+ return GetExchangeMapping(i).GetServerAddress();
+ }
+ size_t GetExchangeSize(size_t i) const {
+ return GetExchangeMapping(i).GetSize();
+ }
+ KMemoryState GetExchangeMemoryState(size_t i) const {
+ return GetExchangeMapping(i).GetMemoryState();
+ }
+
+ private:
+ Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
+
+ const Mapping& GetSendMapping(size_t i) const {
+ ASSERT(i < m_num_send);
+
+ const size_t index = i;
+ if (index < NumStaticMappings) {
+ return m_static_mappings[index];
+ } else {
+ return m_mappings[index - NumStaticMappings];
+ }
+ }
+
+ const Mapping& GetReceiveMapping(size_t i) const {
+ ASSERT(i < m_num_recv);
+
+ const size_t index = m_num_send + i;
+ if (index < NumStaticMappings) {
+ return m_static_mappings[index];
+ } else {
+ return m_mappings[index - NumStaticMappings];
+ }
+ }
+
+ const Mapping& GetExchangeMapping(size_t i) const {
+ ASSERT(i < m_num_exch);
+
+ const size_t index = m_num_send + m_num_recv + i;
+ if (index < NumStaticMappings) {
+ return m_static_mappings[index];
+ } else {
+ return m_mappings[index - NumStaticMappings];
+ }
+ }
+
+ private:
+ KernelCore& kernel;
+ std::array<Mapping, NumStaticMappings> m_static_mappings;
+ Mapping* m_mappings{};
+ u8 m_num_send{};
+ u8 m_num_recv{};
+ u8 m_num_exch{};
+ };
+
+public:
+ explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
+
+ static KSessionRequest* Create(KernelCore& kernel) {
+ KSessionRequest* req = KSessionRequest::Allocate(kernel);
+ if (req != nullptr) [[likely]] {
+ KAutoObject::Create(req);
+ }
+ return req;
+ }
+
+ void Destroy() override {
+ this->Finalize();
+ KSessionRequest::Free(kernel, this);
+ }
+
+ void Initialize(KEvent* event, uintptr_t address, size_t size) {
+ m_mappings.Initialize();
+
+ m_thread = GetCurrentThreadPointer(kernel);
+ m_event = event;
+ m_address = address;
+ m_size = size;
+
+ m_thread->Open();
+ if (m_event != nullptr) {
+ m_event->Open();
+ }
+ }
+
+    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+
+ KThread* GetThread() const {
+ return m_thread;
+ }
+ KEvent* GetEvent() const {
+ return m_event;
+ }
+ uintptr_t GetAddress() const {
+ return m_address;
+ }
+ size_t GetSize() const {
+ return m_size;
+ }
+ KProcess* GetServerProcess() const {
+ return m_server;
+ }
+
+ void SetServerProcess(KProcess* process) {
+ m_server = process;
+ m_server->Open();
+ }
+
+ void ClearThread() {
+ m_thread = nullptr;
+ }
+ void ClearEvent() {
+ m_event = nullptr;
+ }
+
+ size_t GetSendCount() const {
+ return m_mappings.GetSendCount();
+ }
+ size_t GetReceiveCount() const {
+ return m_mappings.GetReceiveCount();
+ }
+ size_t GetExchangeCount() const {
+ return m_mappings.GetExchangeCount();
+ }
+
+ Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ return m_mappings.PushSend(client, server, size, state);
+ }
+
+ Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ return m_mappings.PushReceive(client, server, size, state);
+ }
+
+ Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ return m_mappings.PushExchange(client, server, size, state);
+ }
+
+ VAddr GetSendClientAddress(size_t i) const {
+ return m_mappings.GetSendClientAddress(i);
+ }
+ VAddr GetSendServerAddress(size_t i) const {
+ return m_mappings.GetSendServerAddress(i);
+ }
+ size_t GetSendSize(size_t i) const {
+ return m_mappings.GetSendSize(i);
+ }
+ KMemoryState GetSendMemoryState(size_t i) const {
+ return m_mappings.GetSendMemoryState(i);
+ }
+
+ VAddr GetReceiveClientAddress(size_t i) const {
+ return m_mappings.GetReceiveClientAddress(i);
+ }
+ VAddr GetReceiveServerAddress(size_t i) const {
+ return m_mappings.GetReceiveServerAddress(i);
+ }
+ size_t GetReceiveSize(size_t i) const {
+ return m_mappings.GetReceiveSize(i);
+ }
+ KMemoryState GetReceiveMemoryState(size_t i) const {
+ return m_mappings.GetReceiveMemoryState(i);
+ }
+
+ VAddr GetExchangeClientAddress(size_t i) const {
+ return m_mappings.GetExchangeClientAddress(i);
+ }
+ VAddr GetExchangeServerAddress(size_t i) const {
+ return m_mappings.GetExchangeServerAddress(i);
+ }
+ size_t GetExchangeSize(size_t i) const {
+ return m_mappings.GetExchangeSize(i);
+ }
+ KMemoryState GetExchangeMemoryState(size_t i) const {
+ return m_mappings.GetExchangeMemoryState(i);
+ }
+
+private:
+ // NOTE: This is public and virtual in Nintendo's kernel.
+ void Finalize() override {
+ m_mappings.Finalize();
+
+ if (m_thread) {
+ m_thread->Close();
+ }
+ if (m_event) {
+ m_event->Close();
+ }
+ if (m_server) {
+ m_server->Close();
+ }
+ }
+
+private:
+ SessionMappings m_mappings;
+ KThread* m_thread{};
+ KProcess* m_server{};
+ KEvent* m_event{};
+ uintptr_t m_address{};
+ size_t m_size{};
+};
+
+} // namespace Kernel
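The header's lifecycle contract in one place: Initialize opens references on the captured thread and event, and Finalize (run from Destroy) closes them, so both objects outlive the request even if the client dies first. A toy standalone model of that open/close pairing, with Refs standing in for KAutoObject reference counting:

#include <cassert>

struct Refs {
    int count = 1;
    void Open() { ++count; }
    void Close() { --count; }
};

struct Request {
    Refs* thread = nullptr;
    Refs* event = nullptr;

    void Initialize(Refs* t, Refs* e) {
        thread = t;
        thread->Open();
        event = e;
        if (event != nullptr) {
            event->Open(); // async requests also pin their completion event
        }
    }
    void Finalize() {
        if (thread) thread->Close();
        if (event) event->Close();
    }
};

int main() {
    Refs thread, event;
    Request req;
    req.Initialize(&thread, &event);
    assert(thread.count == 2 && event.count == 2); // pinned by the request
    req.Finalize();
    assert(thread.count == 1 && event.count == 1); // references released
}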
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 8ff1545b6..a039cc591 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
is_initialized = true;
// Clear all pages in the memory.
- std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
+ std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
return ResultSuccess;
}
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 34cb98456..5620c3660 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
u8* GetPointer(std::size_t offset = 0) {
- return device_memory->GetPointer(physical_address + offset);
+ return device_memory->GetPointer<u8>(physical_address + offset);
}
/**
@@ -63,7 +63,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
const u8* GetPointer(std::size_t offset = 0) const {
- return device_memory->GetPointer(physical_address + offset);
+ return device_memory->GetPointer<u8>(physical_address + offset);
}
void Finalize() override;
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index e43db8515..2bb6b6d08 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>,
public boost::intrusive::list_base_hook<> {
public:
- explicit KSharedMemoryInfo() = default;
+ explicit KSharedMemoryInfo(KernelCore&) {}
+ KSharedMemoryInfo() = default;
constexpr void Initialize(KSharedMemory* shmem) {
shared_memory = shmem;
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 2b303537e..a8c77a7d4 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -8,6 +8,7 @@
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "common/spin_lock.h"
namespace Kernel {
@@ -36,28 +37,34 @@ public:
}
void* Allocate() {
- Node* ret = m_head.load();
+ // KScopedInterruptDisable di;
- do {
- if (ret == nullptr) {
- break;
- }
- } while (!m_head.compare_exchange_weak(ret, ret->next));
+ m_lock.lock();
+
+ Node* ret = m_head;
+ if (ret != nullptr) [[likely]] {
+ m_head = ret->next;
+ }
+ m_lock.unlock();
return ret;
}
void Free(void* obj) {
+ // KScopedInterruptDisable di;
+
+ m_lock.lock();
+
Node* node = static_cast<Node*>(obj);
+ node->next = m_head;
+ m_head = node;
- Node* cur_head = m_head.load();
- do {
- node->next = cur_head;
- } while (!m_head.compare_exchange_weak(cur_head, node));
+ m_lock.unlock();
}
private:
std::atomic<Node*> m_head{};
+ Common::SpinLock m_lock;
};
} // namespace impl
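This hunk trades the lock-free Treiber-stack pop for a spin lock. The old compare_exchange loop read ret->next from a node that another thread could already have popped and handed out — the classic ABA/use-after-free hazard of an untagged lock-free free list — and serializing with a lock sidesteps that. A standalone sketch of the locked variant, with std::mutex standing in for Common::SpinLock:

#include <mutex>

struct Node {
    Node* next = nullptr;
};

class FreeList {
public:
    // Pop the head under the lock; ret->next is only read while no other
    // thread can concurrently free or reuse the node.
    Node* Allocate() {
        std::scoped_lock lk{m_lock};
        Node* ret = m_head;
        if (ret != nullptr) {
            m_head = ret->next;
        }
        return ret;
    }
    // Push the node back as the new head.
    void Free(Node* node) {
        std::scoped_lock lk{m_lock};
        node->next = m_head;
        m_head = node;
    }

private:
    Node* m_head = nullptr;
    std::mutex m_lock;
};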
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 174afc80d..b7bfcdce3 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,6 +30,7 @@
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -38,6 +39,9 @@
#endif
namespace {
+
+constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
+
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
u32 entry_point, u32 arg) {
context = {};
@@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::InitializeDummyThread(KThread* thread) {
@@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
// Initialize emulation parameters.
thread->stack_parameters.disable_count = 0;
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
- return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
- system.GetCpuManager().GetGuestActivateFunc());
+ R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+ ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
}
Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
- return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
- system.GetCpuManager().GetIdleThreadStartFunc());
+ R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+ ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
}
Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg, s32 virt_core) {
- return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
- system.GetCpuManager().GetShutdownThreadStartFunc());
+ R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
+ ThreadType::HighPriority,
+ system.GetCpuManager().GetShutdownThreadStartFunc()));
}
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
- return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
- ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
+ R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+ ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
*out_ideal_core = virtual_ideal_core_id;
*out_affinity_mask = virtual_affinity_mask;
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
*out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
} while (retry_update);
}
- return ResultSuccess;
+ R_SUCCEED();
}
void KThread::SetBasePriority(s32 value) {
@@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
} while (thread_is_current);
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
void KThread::AddWaiterImpl(KThread* thread) {
@@ -1038,7 +1043,7 @@ Result KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
- return ResultSuccess;
+ R_SUCCEED();
}
}
@@ -1073,6 +1078,78 @@ void KThread::Exit() {
UNREACHABLE_MSG("KThread::Exit() would return");
}
+Result KThread::Terminate() {
+ ASSERT(this != GetCurrentThreadPointer(kernel));
+
+ // Request the thread terminate if it hasn't already.
+ if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
+ // If the thread isn't terminated, wait for it to terminate.
+ s32 index;
+ KSynchronizationObject* objects[] = {this};
+ R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
+ Svc::WaitInfinite));
+ }
+
+ R_SUCCEED();
+}
+
+ThreadState KThread::RequestTerminate() {
+ ASSERT(this != GetCurrentThreadPointer(kernel));
+
+ KScopedSchedulerLock sl{kernel};
+
+ // Determine if this is the first termination request.
+ const bool first_request = [&]() -> bool {
+ // Perform an atomic compare-and-swap from false to true.
+ bool expected = false;
+ return termination_requested.compare_exchange_strong(expected, true);
+ }();
+
+ // If this is the first request, start termination procedure.
+ if (first_request) {
+ // If the thread is in initialized state, just change state to terminated.
+ if (this->GetState() == ThreadState::Initialized) {
+ thread_state = ThreadState::Terminated;
+ return ThreadState::Terminated;
+ }
+
+ // Register the terminating dpc.
+ this->RegisterDpc(DpcFlag::Terminating);
+
+ // If the thread is pinned, unpin it.
+ if (this->GetStackParameters().is_pinned) {
+ this->GetOwnerProcess()->UnpinThread(this);
+ }
+
+ // If the thread is suspended, continue it.
+ if (this->IsSuspended()) {
+ suspend_allowed_flags = 0;
+ this->UpdateState();
+ }
+
+ // Change the thread's priority to be higher than any system thread's.
+ if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
+ this->SetBasePriority(TerminatingThreadPriority);
+ }
+
+ // If the thread is runnable, send a termination interrupt to other cores.
+ if (this->GetState() == ThreadState::Runnable) {
+ if (const u64 core_mask =
+ physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
+ core_mask != 0) {
+ Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
+ }
+ }
+
+ // Wake up the thread.
+ if (this->GetState() == ThreadState::Waiting) {
+ wait_queue->CancelWait(this, ResultTerminationRequested, true);
+ }
+ }
+
+ return this->GetState();
+}
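The compare-and-swap above guarantees that exactly one caller observes the false-to-true transition and runs the teardown sequence; later callers fall through and merely report the thread's current state. A minimal standalone sketch of that idiom:

#include <atomic>
#include <cassert>

std::atomic<bool> termination_requested{false};

// Returns true only for the caller whose CAS flips the flag.
bool IsFirstTerminationRequest() {
    bool expected = false;
    return termination_requested.compare_exchange_strong(expected, true);
}

int main() {
    assert(IsFirstTerminationRequest());  // first caller performs teardown
    assert(!IsFirstTerminationRequest()); // later callers just observe state
}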
+
Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
// Check if the thread should terminate.
if (this->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Wait for the sleep to end.
@@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
}
- return ResultSuccess;
+ R_SUCCEED();
}
void KThread::IfDummyThreadTryWait() {
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 9ee20208e..e2a27d603 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -180,6 +180,10 @@ public:
void Exit();
+ Result Terminate();
+
+ ThreadState RequestTerminate();
+
[[nodiscard]] u32 GetSuspendFlags() const {
return suspend_allowed_flags & suspend_request_flags;
}
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index 0a7f22680..5d466ace7 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -26,7 +26,7 @@ public:
static_assert(RegionsPerPage > 0);
public:
- constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
+ constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
m_is_region_free.fill(true);
}
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
deleted file mode 100644
index ff88c5acd..000000000
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_writable_event.h"
-
-namespace Kernel {
-
-KWritableEvent::KWritableEvent(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KWritableEvent::~KWritableEvent() = default;
-
-void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
- parent = parent_event_;
- name = std::move(name_);
- parent->GetReadableEvent().Open();
-}
-
-Result KWritableEvent::Signal() {
- return parent->GetReadableEvent().Signal();
-}
-
-Result KWritableEvent::Clear() {
- return parent->GetReadableEvent().Clear();
-}
-
-void KWritableEvent::Destroy() {
- // Close our references.
- parent->GetReadableEvent().Close();
- parent->Close();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
deleted file mode 100644
index 3fd0c7d0a..000000000
--- a/src/core/hle/kernel/k_writable_event.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include "core/hle/kernel/k_auto_object.h"
-#include "core/hle/kernel/slab_helpers.h"
-#include "core/hle/result.h"
-
-namespace Kernel {
-
-class KernelCore;
-class KEvent;
-
-class KWritableEvent final
- : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
- KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
-
-public:
- explicit KWritableEvent(KernelCore& kernel_);
- ~KWritableEvent() override;
-
- void Destroy() override;
-
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
-
- void Initialize(KEvent* parent_, std::string&& name_);
- Result Signal();
- Result Clear();
-
- KEvent* GetParent() const {
- return parent;
- }
-
-private:
- KEvent* parent{};
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 9251f29ad..eed2dc9f3 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,6 +24,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
@@ -73,8 +74,16 @@ struct KernelCore::Impl {
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializeShutdownThreads();
- InitializePreemption(kernel);
InitializePhysicalCores();
+ InitializePreemption(kernel);
+
+ // Initialize the Dynamic Slab Heaps.
+ {
+ const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
+ ASSERT(pt_heap_region.GetEndAddress() != 0);
+
+ InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+ }
RegisterHostThread();
}
@@ -86,6 +95,15 @@ struct KernelCore::Impl {
}
}
+ void CloseCurrentProcess() {
+ (*current_process).Finalize();
+ // current_process->Close();
+ // TODO: The current process should be destroyed based on accurate ref counting after
+ // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+ (*current_process).Destroy();
+ current_process = nullptr;
+ }
+
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -99,10 +117,6 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
- for (auto& core : cores) {
- core = nullptr;
- }
-
global_handle_table->Finalize();
global_handle_table.reset();
@@ -152,15 +166,7 @@ struct KernelCore::Impl {
}
}
- // Shutdown all processes.
- if (current_process) {
- (*current_process).Finalize();
- // current_process->Close();
- // TODO: The current process should be destroyed based on accurate ref counting after
- // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- (*current_process).Destroy();
- current_process = nullptr;
- }
+ CloseCurrentProcess();
// Track kernel objects that were not freed on shutdown
{
@@ -257,6 +263,18 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
+ void InitializeResourceManagers(VAddr address, size_t size) {
+ dynamic_page_manager = std::make_unique<KDynamicPageManager>();
+ memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+ app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
+
+ dynamic_page_manager->Initialize(address, size);
+ static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
+ memory_block_heap->Initialize(dynamic_page_manager.get(),
+ ApplicationMemoryBlockSlabHeapSize);
+ app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
+ }
+
void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -344,11 +362,6 @@ struct KernelCore::Impl {
static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (IsShuttingDown()) {
- return {};
- }
-
const auto thread_id = GetCurrentHostThreadID();
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
@@ -770,6 +783,11 @@ struct KernelCore::Impl {
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
+ // Dynamic slab managers
+ std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
+ std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
+ std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
+
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
Kernel::KSharedMemory* font_shared_mem{};
@@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const {
return impl->current_process;
}
+void KernelCore::CloseCurrentProcess() {
+ impl->CloseCurrentProcess();
+}
+
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
+KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
+ return *impl->app_memory_block_manager;
+}
+
+const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
+ return *impl->app_memory_block_manager;
+}
+
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
return *impl->hid_shared_mem;
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index bcf016a97..266be2bc4 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,6 +37,7 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
+class KMemoryBlockSlabManager;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
@@ -46,13 +47,13 @@ class KResourceLimit;
class KScheduler;
class KServerSession;
class KSession;
+class KSessionRequest;
class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
class KThreadLocalPage;
class KTransferMemory;
class KWorkerTaskManager;
-class KWritableEvent;
class KCodeMemory;
class PhysicalCore;
class ServiceThread;
@@ -131,6 +132,9 @@ public:
/// Retrieves a const pointer to the current process.
const KProcess* CurrentProcess() const;
+ /// Closes the current process.
+ void CloseCurrentProcess();
+
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;
@@ -239,6 +243,12 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
+ /// Gets the application memory block manager for the kernel.
+ KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
+
+ /// Gets the application memory block manager for the kernel.
+ const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
+
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();
@@ -345,14 +355,14 @@ public:
return slab_heap_container->thread;
} else if constexpr (std::is_same_v<T, KTransferMemory>) {
return slab_heap_container->transfer_memory;
- } else if constexpr (std::is_same_v<T, KWritableEvent>) {
- return slab_heap_container->writeable_event;
} else if constexpr (std::is_same_v<T, KCodeMemory>) {
return slab_heap_container->code_memory;
} else if constexpr (std::is_same_v<T, KPageBuffer>) {
return slab_heap_container->page_buffer;
} else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
return slab_heap_container->thread_local_page;
+ } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+ return slab_heap_container->session_request;
}
}
@@ -412,10 +422,10 @@ private:
KSlabHeap<KSharedMemoryInfo> shared_memory_info;
KSlabHeap<KThread> thread;
KSlabHeap<KTransferMemory> transfer_memory;
- KSlabHeap<KWritableEvent> writeable_event;
KSlabHeap<KCodeMemory> code_memory;
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
+ KSlabHeap<KSessionRequest> session_request;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 299a981a8..06b51e919 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -24,7 +24,7 @@ public:
}
static Derived* Allocate(KernelCore& kernel) {
- return kernel.SlabHeap<Derived>().Allocate();
+ return kernel.SlabHeap<Derived>().Allocate(kernel);
}
static void Free(KernelCore& kernel, Derived* obj) {
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 27e5a805d..b07ae3f02 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -29,12 +29,12 @@
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/svc.h"
@@ -256,6 +256,93 @@ static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u3
return UnmapMemory(system, dst_addr, src_addr, size);
}
+template <typename T>
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) {
+ auto& process = *system.CurrentProcess();
+ auto& handle_table = process.GetHandleTable();
+
+ // Declare the session we're going to allocate.
+ T* session;
+
+ // Reserve a new session from the process resource limit.
+ // FIXME: LimitableResource_SessionCountMax
+ KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions);
+ if (session_reservation.Succeeded()) {
+ session = T::Create(system.Kernel());
+ } else {
+ return ResultLimitReached;
+
+ // // We couldn't reserve a session. Check that we support dynamically expanding the
+ // // resource limit.
+ // R_UNLESS(process.GetResourceLimit() ==
+ // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached);
+ // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
+
+ // // Try to allocate a session from unused slab memory.
+ // session = T::CreateFromUnusedSlabMemory();
+ // R_UNLESS(session != nullptr, ResultLimitReached);
+ // ON_RESULT_FAILURE { session->Close(); };
+
+ // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
+ // // prevent request exhaustion.
+ // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
+ // // no reason to not do this statically.
+ // if constexpr (std::same_as<T, KSession>) {
+ // for (size_t i = 0; i < 2; i++) {
+ // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
+ // R_UNLESS(request != nullptr, ResultLimitReached);
+ // request->Close();
+ // }
+ // }
+
+        // // We successfully allocated a session, so add the object we allocated to the
+        // // resource limit.
+ // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1);
+ }
+
+ // Check that we successfully created a session.
+ R_UNLESS(session != nullptr, ResultOutOfResource);
+
+ // Initialize the session.
+ session->Initialize(nullptr, fmt::format("{}", name));
+
+ // Commit the session reservation.
+ session_reservation.Commit();
+
+    // Ensure that we clean up the session on function end, so that its only remaining
+    // references are those in the handle table.
+ SCOPE_EXIT({
+ session->GetClientSession().Close();
+ session->GetServerSession().Close();
+ });
+
+ // Register the session.
+ T::Register(system.Kernel(), session);
+
+ // Add the server session to the handle table.
+ R_TRY(handle_table.Add(out_server, &session->GetServerSession()));
+
+ // Add the client session to the handle table.
+ const auto result = handle_table.Add(out_client, &session->GetClientSession());
+
+ if (!R_SUCCEEDED(result)) {
+        // Ensure that we maintain a clean handle state on exit.
+ handle_table.Remove(*out_server);
+ }
+
+ return result;
+}
+
+static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client,
+ u32 is_light, u64 name) {
+ if (is_light) {
+ // return CreateSession<KLightSession>(system, out_server, out_client, name);
+ return ResultUnknown;
+ } else {
+ return CreateSession<KSession>(system, out_server, out_client, name);
+ }
+}
+
/// Connect to an OS service given the port name, returns the handle to the port to out
static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
auto& memory = system.Memory();
@@ -295,7 +382,8 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n
// Create a session.
KClientSession* session{};
- R_TRY(port->CreateSession(std::addressof(session)));
+ R_TRY(port->CreateSession(std::addressof(session),
+ std::make_shared<SessionRequestManager>(kernel)));
port->Close();
// Register the session in the table, close the extra reference.
@@ -313,7 +401,7 @@ static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
return ConnectToNamedPort(system, out_handle, port_name_address);
}
-/// Makes a blocking IPC call to an OS service.
+/// Makes a blocking IPC call to a service.
static Result SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel();
@@ -327,22 +415,75 @@ static Result SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- {
- KScopedSchedulerLock lock(kernel);
-
- // This is a synchronous request, so we should wait for our request to complete.
- GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
- GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
- session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
- }
-
- return GetCurrentThread(kernel).GetWaitResult();
+ return session->SendSyncRequest();
}
static Result SendSyncRequest32(Core::System& system, Handle handle) {
return SendSyncRequest(system, handle);
}
+static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles,
+ s32 num_handles, Handle reply_target, s64 timeout_ns) {
+ auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
+
+ // Convert handle list to object table.
+ std::vector<KSynchronizationObject*> objs(num_handles);
+ R_UNLESS(
+ handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
+ ResultInvalidHandle);
+
+ // Ensure handles are closed when we're done.
+ SCOPE_EXIT({
+ for (auto i = 0; i < num_handles; ++i) {
+ objs[i]->Close();
+ }
+ });
+
+ // Reply to the target, if one is specified.
+ if (reply_target != InvalidHandle) {
+ KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
+ R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+ // If we fail to reply, we want to set the output index to -1.
+ // ON_RESULT_FAILURE { *out_index = -1; };
+
+ // Send the reply.
+ // R_TRY(session->SendReply());
+
+ Result rc = session->SendReply();
+ if (!R_SUCCEEDED(rc)) {
+ *out_index = -1;
+ return rc;
+ }
+ }
+
+ // Wait for a message.
+ while (true) {
+ // Wait for an object.
+ s32 index;
+ Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
+ static_cast<s32>(objs.size()), timeout_ns);
+ if (result == ResultTimedOut) {
+ return result;
+ }
+
+ // Receive the request.
+ if (R_SUCCEEDED(result)) {
+ KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
+ if (session != nullptr) {
+ result = session->ReceiveRequest();
+ if (result == ResultNotFound) {
+ continue;
+ }
+ }
+ }
+
+ *out_index = index;
+ return result;
+ }
+}
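Note the retry in the loop above: a server session can be signaled yet yield ResultNotFound from ReceiveRequest when another thread wins the race for the queued request, in which case the SVC simply waits again instead of surfacing an error. A toy standalone model of that control flow (names illustrative, not yuzu APIs):

#include <cassert>
#include <functional>

enum class Rc { Success, NotFound };

struct Waitable {
    std::function<int()> wait;      // blocks and returns the signaled index
    std::function<Rc(int)> receive; // ReceiveRequest on that index
};

int WaitForRequest(const Waitable& w, int* out_index) {
    while (true) {
        const int index = w.wait();
        if (w.receive(index) == Rc::NotFound) {
            continue; // another thread popped the request; wait again
        }
        *out_index = index;
        return 0;
    }
}

int main() {
    int calls = 0;
    const Waitable w{[] { return 0; },
                     [&](int) { return calls++ == 0 ? Rc::NotFound : Rc::Success; }};
    int index = -1;
    WaitForRequest(w, &index);
    assert(index == 0 && calls == 2); // first receive raced, second succeeded
}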
+
/// Get the ID for the specified thread.
static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
// Get the thread from its handle.
@@ -792,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
return ResultSuccess;
case GetInfoType::UserExceptionContextAddr:
- *result = process->GetTLSRegionAddress();
+ *result = process->GetProcessLocalRegionAddress();
return ResultSuccess;
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1747,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
- ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
+ ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
"Process has already exited");
system.Exit();
@@ -2303,11 +2444,11 @@ static Result SignalEvent(Core::System& system, Handle event_handle) {
// Get the current handle table.
const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- // Get the writable event.
- KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle);
- R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle);
+ // Get the event.
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
- return writable_event->Signal();
+ return event->Signal();
}
static Result SignalEvent32(Core::System& system, Handle event_handle) {
@@ -2322,9 +2463,9 @@ static Result ClearEvent(Core::System& system, Handle event_handle) {
// Try to clear the writable event.
{
- KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle);
- if (writable_event.IsNotNull()) {
- return writable_event->Clear();
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ if (event.IsNotNull()) {
+ return event->Clear();
}
}
@@ -2362,24 +2503,24 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r
R_UNLESS(event != nullptr, ResultOutOfResource);
// Initialize the event.
- event->Initialize("CreateEvent", kernel.CurrentProcess());
+ event->Initialize(kernel.CurrentProcess());
// Commit the thread reservation.
event_reservation.Commit();
// Ensure that we clean up the event (and its only references are handle table) on function end.
SCOPE_EXIT({
- event->GetWritableEvent().Close();
event->GetReadableEvent().Close();
+ event->Close();
});
// Register the event.
KEvent::Register(kernel, event);
- // Add the writable event to the handle table.
- R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent())));
+ // Add the event to the handle table.
+ R_TRY(handle_table.Add(out_write, event));
- // Add the writable event to the handle table.
+    // Ensure that we maintain a clean handle state on exit.
auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
// Add the readable event to the handle table.
@@ -2416,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
return ResultInvalidEnumValue;
}
- *out = static_cast<u64>(process->GetStatus());
+ *out = static_cast<u64>(process->GetState());
return ResultSuccess;
}
@@ -2860,10 +3001,10 @@ static const FunctionDef SVC_Table_64[] = {
{0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
{0x3E, nullptr, "Unknown3e"},
{0x3F, nullptr, "Unknown3f"},
- {0x40, nullptr, "CreateSession"},
+ {0x40, SvcWrap64<CreateSession>, "CreateSession"},
{0x41, nullptr, "AcceptSession"},
{0x42, nullptr, "ReplyAndReceiveLight"},
- {0x43, nullptr, "ReplyAndReceive"},
+ {0x43, SvcWrap64<ReplyAndReceive>, "ReplyAndReceive"},
{0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
{0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
{0x46, nullptr, "MapIoRegion"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 95750c3eb..85506710e 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -14,8 +14,11 @@ namespace Kernel::Svc {
using namespace Common::Literals;
-constexpr s32 ArgumentHandleCountMax = 0x40;
-constexpr u32 HandleWaitMask{1u << 30};
+constexpr inline s32 ArgumentHandleCountMax = 0x40;
+
+constexpr inline u32 HandleWaitMask = 1u << 30;
+
+constexpr inline s64 WaitInfinite = -1;
constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 79e15183a..abb9847fe 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;
+constexpr inline s32 SystemThreadPriorityHighest = 16;
+
+enum class ProcessState : u32 {
+ Created = 0,
+ CreatedAttached = 1,
+ Running = 2,
+ Crashed = 3,
+ RunningAttached = 4,
+ Terminating = 5,
+ Terminated = 6,
+ DebugBreak = 7,
+};
+
constexpr inline size_t ThreadLocalRegionSize = 0x200;
} // namespace Kernel::Svc
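
GetProcessInfo above now reports one of these states. A hypothetical caller decoding the raw value, assuming info type 0 selects the process state:

    u64 raw{};
    if (R_SUCCEEDED(GetProcessInfo(system, &raw, process_handle, 0))) {
        const auto state = static_cast<Svc::ProcessState>(raw);
        // e.g. poll until the target has fully shut down.
        const bool terminated = state == Svc::ProcessState::Terminated;
    }
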
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 4bc49087e..272c54cf7 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -8,6 +8,7 @@
#include "core/core.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
+#include "core/memory.h"
namespace Kernel {
@@ -346,6 +347,37 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by CreateSession
+template <Result func(Core::System&, Handle*, Handle*, u32, u64)>
+void SvcWrap64(Core::System& system) {
+ Handle param_1 = 0;
+ Handle param_2 = 0;
+ const u32 retval = func(system, &param_1, &param_2, static_cast<u32>(Param(system, 2)),
+ static_cast<u64>(Param(system, 3)))
+ .raw;
+
+ system.CurrentArmInterface().SetReg(1, param_1);
+ system.CurrentArmInterface().SetReg(2, param_2);
+ FuncReturn(system, retval);
+}
+
+// Used by ReplyAndReceive
+template <Result func(Core::System&, s32*, Handle*, s32, Handle, s64)>
+void SvcWrap64(Core::System& system) {
+ s32 param_1 = 0;
+ s32 num_handles = static_cast<s32>(Param(system, 2));
+
+ std::vector<Handle> handles(num_handles);
+ system.Memory().ReadBlock(Param(system, 1), handles.data(), num_handles * sizeof(Handle));
+
+ const u32 retval = func(system, &param_1, handles.data(), num_handles,
+ static_cast<s32>(Param(system, 3)), static_cast<s64>(Param(system, 4)))
+ .raw;
+
+ system.CurrentArmInterface().SetReg(1, param_1);
+ FuncReturn(system, retval);
+}
+
// Used by WaitForAddress
template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
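
Both new wrappers follow the file's existing ABI convention, summarized here as a comment sketch; ReplyAndReceive additionally has to copy the guest-side handle array out of emulated memory before the call, hence the ReadBlock:

    // Shared SvcWrap64 marshalling pattern (names from this file):
    //   inputs : Param(system, n)               <- guest register Xn
    //   outputs: SetReg(1, ...), SetReg(2, ...) -> guest X1/X2
    //   result : FuncReturn(system, retval)     -> guest W0
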
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index 47a1b829b..ef4b2d417 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/bit_field.h"
+#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/expected.h"
@@ -130,6 +131,18 @@ union Result {
[[nodiscard]] constexpr bool IsError() const {
return !IsSuccess();
}
+
+ [[nodiscard]] constexpr bool IsFailure() const {
+ return !IsSuccess();
+ }
+
+ [[nodiscard]] constexpr u32 GetInnerValue() const {
+ return static_cast<u32>(module.Value()) | (description << module.bits);
+ }
+
+ [[nodiscard]] constexpr bool Includes(Result result) const {
+ return GetInnerValue() == result.GetInnerValue();
+ }
};
static_assert(std::is_trivial_v<Result>);
@@ -349,19 +362,115 @@ private:
} \
} while (false)
-#define R_SUCCEEDED(res) (res.IsSuccess())
+#define R_SUCCEEDED(res) (static_cast<Result>(res).IsSuccess())
+#define R_FAILED(res) (static_cast<Result>(res).IsFailure())
-/// Evaluates a boolean expression, and succeeds if that expression is true.
-#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
+namespace ResultImpl {
+template <auto EvaluateResult, class F>
+class ScopedResultGuard {
+ YUZU_NON_COPYABLE(ScopedResultGuard);
+ YUZU_NON_MOVEABLE(ScopedResultGuard);
+
+private:
+ Result& m_ref;
+ F m_f;
+
+public:
+ constexpr ScopedResultGuard(Result& ref, F f) : m_ref(ref), m_f(std::move(f)) {}
+ constexpr ~ScopedResultGuard() {
+ if (EvaluateResult(m_ref)) {
+ m_f();
+ }
+ }
+};
+
+template <auto EvaluateResult>
+class ResultReferenceForScopedResultGuard {
+private:
+ Result& m_ref;
+
+public:
+ constexpr ResultReferenceForScopedResultGuard(Result& r) : m_ref(r) {}
+ constexpr operator Result&() const {
+ return m_ref;
+ }
+};
+
+template <auto EvaluateResult, typename F>
+constexpr ScopedResultGuard<EvaluateResult, F> operator+(
+ ResultReferenceForScopedResultGuard<EvaluateResult> ref, F&& f) {
+ return ScopedResultGuard<EvaluateResult, F>(static_cast<Result&>(ref), std::forward<F>(f));
+}
+
+constexpr bool EvaluateResultSuccess(const Result& r) {
+ return R_SUCCEEDED(r);
+}
+constexpr bool EvaluateResultFailure(const Result& r) {
+ return R_FAILED(r);
+}
+
+template <typename T>
+constexpr void UpdateCurrentResultReference(T result_reference, Result result) = delete;
+// Intentionally not defined
+
+template <>
+constexpr void UpdateCurrentResultReference<Result&>(Result& result_reference, Result result) {
+ result_reference = result;
+}
+
+template <>
+constexpr void UpdateCurrentResultReference<const Result>(Result result_reference, Result result) {}
+} // namespace ResultImpl
+
+#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \
+ [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \
+ std::same_as<decltype(__TmpCurrentResultReference), Result&>; \
+ [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \
+ [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \
+ Result& __TmpCurrentResultReference = \
+ HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE
+
+#define ON_RESULT_RETURN_IMPL(...) \
+ static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \
+ auto RESULT_GUARD_STATE_##__COUNTER__ = \
+ ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \
+ __TmpCurrentResultReference) + \
+ [&]()
+
+#define ON_RESULT_FAILURE_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultFailure)
+
+#define ON_RESULT_FAILURE \
+ DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
+ ON_RESULT_FAILURE_2
+
+#define ON_RESULT_SUCCESS_2 ON_RESULT_RETURN_IMPL(ResultImpl::EvaluateResultSuccess)
+
+#define ON_RESULT_SUCCESS \
+ DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
+ ON_RESULT_SUCCESS_2
+
+constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
+
+/// Returns a result.
+#define R_RETURN(res_expr) \
+ { \
+ const Result _tmp_r_throw_rc = (res_expr); \
+ ResultImpl::UpdateCurrentResultReference<decltype(__TmpCurrentResultReference)>( \
+ __TmpCurrentResultReference, _tmp_r_throw_rc); \
+ return _tmp_r_throw_rc; \
+ }
+
+/// Returns ResultSuccess.
+#define R_SUCCEED() R_RETURN(ResultSuccess)
+
+/// Throws a result.
+#define R_THROW(res_expr) R_RETURN(res_expr)
/// Evaluates a boolean expression, and returns a result unless that expression is true.
#define R_UNLESS(expr, res) \
{ \
if (!(expr)) { \
- if (res.IsError()) { \
- LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
- } \
- return res; \
+ R_THROW(res); \
} \
}
@@ -369,7 +478,10 @@ private:
#define R_TRY(res_expr) \
{ \
const auto _tmp_r_try_rc = (res_expr); \
- if (_tmp_r_try_rc.IsError()) { \
- return _tmp_r_try_rc; \
+ if (R_FAILED(_tmp_r_try_rc)) { \
+ R_THROW(_tmp_r_try_rc); \
} \
}
+
+/// Evaluates a boolean expression, and succeeds if that expression is true.
+#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
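
Taken together, the guard machinery lets a function register undo actions that run only when it ultimately returns a failing Result. A minimal sketch; the page-table methods and ResultInvalidSize are illustrative, not from this change:

    Result MapAndPin(KPageTable& pt, VAddr addr, size_t size) {
        R_UNLESS(size > 0, ResultInvalidSize);
        R_TRY(pt.Map(addr, size));
        // Runs only if a later R_TRY/R_THROW fails before R_SUCCEED().
        ON_RESULT_FAILURE {
            pt.Unmap(addr, size);
        };
        R_TRY(pt.Pin(addr, size));
        R_SUCCEED();
    }
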
diff --git a/src/core/hle/service/acc/async_context.cpp b/src/core/hle/service/acc/async_context.cpp
index c85b2e43a..713689d8f 100644
--- a/src/core/hle/service/acc/async_context.cpp
+++ b/src/core/hle/service/acc/async_context.cpp
@@ -64,7 +64,7 @@ void IAsyncContext::GetResult(Kernel::HLERequestContext& ctx) {
void IAsyncContext::MarkComplete() {
is_complete.store(true);
- completion_event->GetWritableEvent().Signal();
+ completion_event->Signal();
}
} // namespace Service::Account
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 6fb7e198e..e55233054 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -316,7 +316,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv
accumulated_suspended_tick_changed_event =
service_context.CreateEvent("ISelfController:AccumulatedSuspendedTickChangedEvent");
- accumulated_suspended_tick_changed_event->GetWritableEvent().Signal();
+ accumulated_suspended_tick_changed_event->Signal();
}
ISelfController::~ISelfController() {
@@ -378,7 +378,7 @@ void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
- launchable_event->GetWritableEvent().Signal();
+ launchable_event->Signal();
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(ResultSuccess);
@@ -618,18 +618,18 @@ Kernel::KReadableEvent& AppletMessageQueue::GetOperationModeChangedEvent() {
void AppletMessageQueue::PushMessage(AppletMessage msg) {
messages.push(msg);
- on_new_message->GetWritableEvent().Signal();
+ on_new_message->Signal();
}
AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
if (messages.empty()) {
- on_new_message->GetWritableEvent().Clear();
+ on_new_message->Clear();
return AppletMessage::None;
}
auto msg = messages.front();
messages.pop();
if (messages.empty()) {
- on_new_message->GetWritableEvent().Clear();
+ on_new_message->Clear();
}
return msg;
}
@@ -653,7 +653,7 @@ void AppletMessageQueue::FocusStateChanged() {
void AppletMessageQueue::OperationModeChanged() {
PushMessage(AppletMessage::OperationModeChanged);
PushMessage(AppletMessage::PerformanceModeChanged);
- on_operation_mode_changed->GetWritableEvent().Signal();
+ on_operation_mode_changed->Signal();
}
ICommonStateGetter::ICommonStateGetter(Core::System& system_,
diff --git a/src/core/hle/service/am/applets/applets.cpp b/src/core/hle/service/am/applets/applets.cpp
index b5b8e4cad..7062df21c 100644
--- a/src/core/hle/service/am/applets/applets.cpp
+++ b/src/core/hle/service/am/applets/applets.cpp
@@ -65,7 +65,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopNormalDataToGame() {
auto out = std::move(out_channel.front());
out_channel.pop_front();
- pop_out_data_event->GetWritableEvent().Clear();
+ pop_out_data_event->Clear();
return out;
}
@@ -84,7 +84,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopInteractiveDataToGame() {
auto out = std::move(out_interactive_channel.front());
out_interactive_channel.pop_front();
- pop_interactive_out_data_event->GetWritableEvent().Clear();
+ pop_interactive_out_data_event->Clear();
return out;
}
@@ -103,7 +103,7 @@ void AppletDataBroker::PushNormalDataFromGame(std::shared_ptr<IStorage>&& storag
void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) {
out_channel.emplace_back(std::move(storage));
- pop_out_data_event->GetWritableEvent().Signal();
+ pop_out_data_event->Signal();
}
void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) {
@@ -112,11 +112,11 @@ void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& s
void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) {
out_interactive_channel.emplace_back(std::move(storage));
- pop_interactive_out_data_event->GetWritableEvent().Signal();
+ pop_interactive_out_data_event->Signal();
}
void AppletDataBroker::SignalStateChanged() {
- state_changed_event->GetWritableEvent().Signal();
+ state_changed_event->Signal();
switch (applet_mode) {
case LibraryAppletMode::AllForeground:
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp
index 4a2ae5f88..5abf22ba4 100644
--- a/src/core/hle/service/audio/audctl.cpp
+++ b/src/core/hle/service/audio/audctl.cpp
@@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} {
{32, nullptr, "GetActiveOutputTarget"},
{33, nullptr, "GetTargetDeviceInfo"},
{34, nullptr, "AcquireTargetNotification"},
+ {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
+ {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
+ {37, nullptr, "SetHearingProtectionSafeguardEnabled"},
+ {38, nullptr, "IsHearingProtectionSafeguardEnabled"},
+ {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"},
+ {40, nullptr, "GetSystemInformationForDebug"},
+ {41, nullptr, "SetVolumeButtonLongPressTime"},
+ {42, nullptr, "SetNativeVolumeForDebug"},
{10000, nullptr, "NotifyAudioOutputTargetForPlayReport"},
{10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"},
{10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"},
+ {10100, nullptr, "GetAudioVolumeDataForPlayReport"},
+ {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"},
+ {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"},
+ {10103, nullptr, "GetAudioOutputTargetForPlayReport"},
+ {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"},
+ {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
+ {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"},
+ {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"},
};
// clang-format on
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 6fb07c37d..13423dca6 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -52,6 +52,8 @@ public:
{9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"},
{10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"},
{11, nullptr, "ExecuteAudioRendererRendering"},
+ {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"},
+ {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"},
};
// clang-format on
RegisterHandlers(functions);
@@ -205,6 +207,30 @@ private:
LOG_DEBUG(Service_Audio, "called");
}
+ void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Audio, "called");
+
+ IPC::RequestParser rp{ctx};
+ auto voice_drop_param{rp.Pop<f32>()};
+
+ auto& system_ = impl->GetSystem();
+ system_.SetVoiceDropParameter(voice_drop_param);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
+
+ void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Audio, "called");
+
+ auto& system_ = impl->GetSystem();
+ auto voice_drop_param{system_.GetVoiceDropParameter()};
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(voice_drop_param);
+ }
+
KernelHelpers::ServiceContext service_context;
Kernel::KEvent* rendered_event;
Manager& manager;
@@ -239,7 +265,7 @@ public:
};
RegisterHandlers(functions);
- event->GetWritableEvent().Signal();
+ event->Signal();
}
~IAudioDevice() override {
@@ -325,7 +351,7 @@ private:
void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Audio, "(STUBBED) called");
- event->GetWritableEvent().Signal();
+ event->Signal();
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp
index cd0b405ff..847f76987 100644
--- a/src/core/hle/service/bcat/backend/backend.cpp
+++ b/src/core/hle/service/bcat/backend/backend.cpp
@@ -82,7 +82,7 @@ void ProgressServiceBackend::FinishDownload(Result result) {
}
void ProgressServiceBackend::SignalUpdate() {
- update_event->GetWritableEvent().Signal();
+ update_event->Signal();
}
Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {}
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index e0db787fc..fad532115 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -26,7 +26,7 @@ public:
{10101, &IFriendService::GetFriendList, "GetFriendList"},
{10102, nullptr, "UpdateFriendInfo"},
{10110, nullptr, "GetFriendProfileImage"},
- {10120, nullptr, "IsFriendListCacheAvailable"},
+ {10120, &IFriendService::CheckFriendListAvailability, "CheckFriendListAvailability"},
{10121, nullptr, "EnsureFriendListAvailable"},
{10200, nullptr, "SendFriendRequestForApplication"},
{10211, nullptr, "AddFacedFriendRequestForApplication"},
@@ -194,6 +194,17 @@ private:
// TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId"
}
+ void CheckFriendListAvailability(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto uuid{rp.PopRaw<Common::UUID>()};
+
+ LOG_WARNING(Service_Friend, "(STUBBED) called, uuid=0x{}", uuid.RawString());
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(true);
+ }
+
KernelHelpers::ServiceContext service_context;
Kernel::KEvent* completion_event;
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index f8972ec7a..ba8a1f786 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -16,7 +16,6 @@
#include "core/hid/hid_core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/hid/controllers/npad.h"
#include "core/hle/service/hid/errors.h"
#include "core/hle/service/kernel_helpers.h"
@@ -167,7 +166,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
const auto& battery_level = controller.device->GetBattery();
auto* shared_memory = controller.shared_memory;
if (controller_type == Core::HID::NpadStyleIndex::None) {
- controller.styleset_changed_event->GetWritableEvent().Signal();
+ controller.styleset_changed_event->Signal();
return;
}
@@ -746,8 +745,9 @@ void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) {
}
void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
- ASSERT(max_length < supported_npad_id_types.size());
- std::memcpy(data, supported_npad_id_types.data(), supported_npad_id_types.size());
+ const auto copy_amount = supported_npad_id_types.size() * sizeof(u32);
+ ASSERT(max_length <= copy_amount);
+ std::memcpy(data, supported_npad_id_types.data(), copy_amount);
}
std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const {
@@ -1033,7 +1033,7 @@ Kernel::KReadableEvent& Controller_NPad::GetStyleSetChangedEvent(Core::HID::Npad
void Controller_NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const {
const auto& controller = GetControllerFromNpadIdType(npad_id);
- controller.styleset_changed_event->GetWritableEvent().Signal();
+ controller.styleset_changed_event->Signal();
}
void Controller_NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller,
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
index 575d4e626..4564ea1e2 100644
--- a/src/core/hle/service/hid/controllers/palma.cpp
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -73,7 +73,7 @@ Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle,
operation.operation = PalmaOperationType::PlayActivity;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -93,7 +93,7 @@ Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
operation.operation = PalmaOperationType::ReadStep;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -122,7 +122,7 @@ Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle
operation.operation = PalmaOperationType::ReadUniqueCode;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -133,7 +133,7 @@ Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle&
operation.operation = PalmaOperationType::SetUniqueCodeInvalid;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -147,7 +147,7 @@ Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandl
operation.operation = PalmaOperationType::WriteRgbLedPatternEntry;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -159,7 +159,7 @@ Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle
operation.operation = PalmaOperationType::WriteWaveEntry;
operation.result = PalmaResultSuccess;
operation.data = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -172,7 +172,7 @@ Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnec
operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion;
operation.result = PalmaResultSuccess;
operation.data[0] = {};
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
@@ -185,7 +185,7 @@ Result Controller_Palma::GetPalmaDataBaseIdentificationVersion(
operation.result = PalmaResultSuccess;
operation.data = {};
operation.data[0] = static_cast<u8>(database_id_version);
- operation_complete_event->GetWritableEvent().Signal();
+ operation_complete_event->Signal();
return ResultSuccess;
}
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 46bad7871..79375bd2f 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -2118,7 +2118,7 @@ void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) {
ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");
LOG_WARNING(Service_HID,
- "(STUBBED) called, connection_handle={}, wave_set={}, unkown={}, "
+ "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, "
"t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);
diff --git a/src/core/hle/service/hid/hidbus/ringcon.cpp b/src/core/hle/service/hid/hidbus/ringcon.cpp
index ad223d649..57f1a2a26 100644
--- a/src/core/hle/service/hid/hidbus/ringcon.cpp
+++ b/src/core/hle/service/hid/hidbus/ringcon.cpp
@@ -131,12 +131,12 @@ bool RingController::SetCommand(const std::vector<u8>& data) {
case RingConCommands::ReadRepCount:
case RingConCommands::ReadTotalPushCount:
ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes");
- send_command_async_event->GetWritableEvent().Signal();
+ send_command_async_event->Signal();
return true;
case RingConCommands::ResetRepCount:
ASSERT_MSG(data.size() == 0x4, "data.size is not 0x4 bytes");
total_rep_count = 0;
- send_command_async_event->GetWritableEvent().Signal();
+ send_command_async_event->Signal();
return true;
case RingConCommands::SaveCalData: {
ASSERT_MSG(data.size() == 0x14, "data.size is not 0x14 bytes");
@@ -144,14 +144,14 @@ bool RingController::SetCommand(const std::vector<u8>& data) {
SaveCalData save_info{};
std::memcpy(&save_info, data.data(), sizeof(SaveCalData));
user_calibration = save_info.calibration;
- send_command_async_event->GetWritableEvent().Signal();
+ send_command_async_event->Signal();
return true;
}
default:
LOG_ERROR(Service_HID, "Command not implemented {}", command);
command = RingConCommands::Error;
// Signal a reply to avoid softlocking the game
- send_command_async_event->GetWritableEvent().Signal();
+ send_command_async_event->Signal();
return false;
}
}
diff --git a/src/core/hle/service/hid/irsensor/pointing_processor.h b/src/core/hle/service/hid/irsensor/pointing_processor.h
index cf4930794..d63423aff 100644
--- a/src/core/hle/service/hid/irsensor/pointing_processor.h
+++ b/src/core/hle/service/hid/irsensor/pointing_processor.h
@@ -37,10 +37,10 @@ private:
u8 pointing_status;
INSERT_PADDING_BYTES(3);
u32 unknown;
- float unkown_float1;
+ float unknown_float1;
float position_x;
float position_y;
- float unkown_float2;
+ float unknown_float2;
Core::IrSensor::IrsRect window_of_interest;
};
static_assert(sizeof(PointingProcessorMarkerData) == 0x20,
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp
index 3e317367b..af133af93 100644
--- a/src/core/hle/service/kernel_helpers.cpp
+++ b/src/core/hle/service/kernel_helpers.cpp
@@ -9,7 +9,6 @@
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/kernel_helpers.h"
namespace Service::KernelHelpers {
@@ -46,7 +45,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
}
// Initialize the event.
- event->Initialize(std::move(name), process);
+ event->Initialize(process);
// Commit the event reservation.
event_reservation.Commit();
@@ -59,7 +58,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
void ServiceContext::CloseEvent(Kernel::KEvent* event) {
event->GetReadableEvent().Close();
- event->GetWritableEvent().Close();
+ event->Close();
}
} // namespace Service::KernelHelpers
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index ea3e7e55a..6df563136 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -165,7 +165,7 @@ public:
}
void OnEventFired() {
- state_change_event->GetWritableEvent().Signal();
+ state_change_event->Signal();
}
void GetState(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index becd6d1b9..652441bc2 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
const auto start_info{page_table.QueryInfo(start - 1)};
- if (start_info.state != Kernel::KMemoryState::Free) {
+ if (start_info.GetState() != Kernel::KMemoryState::Free) {
return {};
}
@@ -300,7 +300,7 @@ public:
const auto end_info{page_table.QueryInfo(start + size)};
- if (end_info.state != Kernel::KMemoryState::Free) {
+ if (end_info.GetState() != Kernel::KMemoryState::Free) {
return {};
}
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
index ec895ac01..76f8a267a 100644
--- a/src/core/hle/service/nfp/nfp_device.cpp
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -58,7 +58,7 @@ NfpDevice::~NfpDevice() {
void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) {
if (type == Core::HID::ControllerTriggerType::Connected ||
type == Core::HID::ControllerTriggerType::Disconnected) {
- availability_change_event->GetWritableEvent().Signal();
+ availability_change_event->Signal();
return;
}
@@ -100,7 +100,7 @@ bool NfpDevice::LoadAmiibo(std::span<const u8> data) {
device_state = DeviceState::TagFound;
deactivate_event->GetReadableEvent().Clear();
- activate_event->GetWritableEvent().Signal();
+ activate_event->Signal();
return true;
}
@@ -115,7 +115,7 @@ void NfpDevice::CloseAmiibo() {
encrypted_tag_data = {};
tag_data = {};
activate_event->GetReadableEvent().Clear();
- deactivate_event->GetWritableEvent().Signal();
+ deactivate_event->Signal();
}
Kernel::KReadableEvent& NfpDevice::GetActivateEvent() const {
diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp
index b2bb7426d..5a8a91e0b 100644
--- a/src/core/hle/service/nim/nim.cpp
+++ b/src/core/hle/service/nim/nim.cpp
@@ -328,7 +328,7 @@ private:
void StartTask(Kernel::HLERequestContext& ctx) {
// No need to connect to the internet, just finish the task straight away.
LOG_DEBUG(Service_NIM, "called");
- finished_event->GetWritableEvent().Signal();
+ finished_event->Signal();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
@@ -350,7 +350,7 @@ private:
void Cancel(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_NIM, "called");
- finished_event->GetWritableEvent().Clear();
+ finished_event->Clear();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index f7318c3cb..f59a1a63d 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -8,6 +8,7 @@
#include "core/file_sys/patch_manager.h"
#include "core/file_sys/vfs.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/service/glue/glue_manager.h"
#include "core/hle/service/ns/errors.h"
#include "core/hle/service/ns/iplatform_service_manager.h"
#include "core/hle/service/ns/language.h"
@@ -581,7 +582,7 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa
: ServiceFramework{system_, "IReadOnlyApplicationControlDataInterface"} {
// clang-format off
static const FunctionInfo functions[] = {
- {0, nullptr, "GetApplicationControlData"},
+ {0, &IReadOnlyApplicationControlDataInterface::GetApplicationControlData, "GetApplicationControlData"},
{1, nullptr, "GetApplicationDesiredLanguage"},
{2, nullptr, "ConvertApplicationLanguageToLanguageCode"},
{3, nullptr, "ConvertLanguageCodeToApplicationLanguage"},
@@ -594,6 +595,33 @@ IReadOnlyApplicationControlDataInterface::IReadOnlyApplicationControlDataInterfa
IReadOnlyApplicationControlDataInterface::~IReadOnlyApplicationControlDataInterface() = default;
+void IReadOnlyApplicationControlDataInterface::GetApplicationControlData(
+ Kernel::HLERequestContext& ctx) {
+ enum class ApplicationControlSource : u8 {
+ CacheOnly,
+ Storage,
+ StorageOnly,
+ };
+
+ struct RequestParameters {
+ ApplicationControlSource source;
+ u64 application_id;
+ };
+ static_assert(sizeof(RequestParameters) == 0x10, "RequestParameters has incorrect size.");
+
+ IPC::RequestParser rp{ctx};
+ const auto parameters{rp.PopRaw<RequestParameters>()};
+ const auto nacp_data{system.GetARPManager().GetControlProperty(parameters.application_id)};
+ const auto result = nacp_data ? ResultSuccess : ResultUnknown;
+
+ if (nacp_data) {
+ ctx.WriteBuffer(nacp_data->data(), nacp_data->size());
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
NS::NS(const char* name, Core::System& system_) : ServiceFramework{system_, name} {
// clang-format off
static const FunctionInfo functions[] = {
diff --git a/src/core/hle/service/ns/ns.h b/src/core/hle/service/ns/ns.h
index 4dc191518..9c18e935c 100644
--- a/src/core/hle/service/ns/ns.h
+++ b/src/core/hle/service/ns/ns.h
@@ -78,6 +78,9 @@ class IReadOnlyApplicationControlDataInterface final
public:
explicit IReadOnlyApplicationControlDataInterface(Core::System& system_);
~IReadOnlyApplicationControlDataInterface() override;
+
+private:
+ void GetApplicationControlData(Kernel::HLERequestContext& ctx);
};
class NS final : public ServiceFramework<NS> {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 6411dbf43..b635e6ed1 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -311,7 +311,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
handle->address +
(static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
- gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
+ gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
+ use_big_pages);
}
}
@@ -350,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
VAddr cpu_address{mapping->ptr + params.buffer_offset};
- gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
+ gmmu->Map(gpu_address, cpu_address, params.mapping_size,
+ static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
return NvResult::Success;
} catch (const std::out_of_range&) {
@@ -389,7 +391,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
}
const bool use_big_pages = alloc->second.big_pages && big_page;
- gmmu->Map(params.offset, cpu_address, size, use_big_pages);
+ gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+ use_big_pages);
auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
use_big_pages, alloc->second.sparse)};
@@ -409,7 +412,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
return NvResult::InsufficientMemory;
}
- gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
+ gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+ static_cast<Tegra::PTEKind>(params.kind), big_page);
auto mapping{
std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
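
Threading a Tegra::PTEKind through every Map call is what lets later consumers recover the layout of an arbitrary GPU address instead of trusting launch flags alone. A two-line sketch of the consumer side, mirroring the maxwell_dma hunk further down:

    const auto kind = static_cast<Tegra::PTEKind>(memory_manager.GetPageKind(gpu_addr));
    const bool is_pitch = IsPitchKind(kind); // pitch-linear vs. block-linear swizzled
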
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 5bee4a3d3..eee11fab8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -12,7 +12,6 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
@@ -206,7 +205,7 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
auto& event_ = events[slot];
if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
EventState::Waiting) {
- event_.kevent->GetWritableEvent().Signal();
+ event_.kevent->Signal();
}
event_.status.store(EventState::Signalled, std::memory_order_release);
});
@@ -306,7 +305,7 @@ NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::v
}
event.fails++;
event.status.store(EventState::Cancelled, std::memory_order_release);
- event.kevent->GetWritableEvent().Clear();
+ event.kevent->Clear();
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5e..b60679021 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
}
ASSERT(system.CurrentProcess()
->PageTable()
- .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+ .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
+ Kernel::KMemoryPermission::None, true)
.IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
return result;
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 5e7b7468f..9d9924395 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -8,7 +8,6 @@
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index cd58a4f35..5ac06ee30 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -7,10 +7,6 @@
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/service.h"
-namespace Kernel {
-class KWritableEvent;
-}
-
namespace Service::Nvidia {
class NVDRV final : public ServiceFramework<NVDRV> {
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index d4ab23a10..77ddbb6ef 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -11,7 +11,6 @@
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
@@ -110,7 +109,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
core->override_max_buffer_count = buffer_count;
core->SignalDequeueCondition();
- buffer_wait_event->GetWritableEvent().Signal();
+ buffer_wait_event->Signal();
listener = core->consumer_listener;
}
@@ -623,7 +622,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
slots[slot].fence = fence;
core->SignalDequeueCondition();
- buffer_wait_event->GetWritableEvent().Signal();
+ buffer_wait_event->Signal();
}
Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
@@ -753,7 +752,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
core->connected_producer_listener = nullptr;
core->connected_api = NativeWindowApi::NoConnectedApi;
core->SignalDequeueCondition();
- buffer_wait_event->GetWritableEvent().Signal();
+ buffer_wait_event->Signal();
listener = core->consumer_listener;
} else {
LOG_ERROR(Service_NVFlinger, "still connected to another api (cur = {} req = {})",
@@ -802,7 +801,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
}
core->SignalDequeueCondition();
- buffer_wait_event->GetWritableEvent().Signal();
+ buffer_wait_event->Signal();
return Status::NoError;
}
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 0ba03a568..7526bf8ec 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
@@ -24,7 +24,6 @@ namespace Kernel {
class KernelCore;
class KEvent;
class KReadableEvent;
-class KWritableEvent;
} // namespace Kernel
namespace Service::KernelHelpers {
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index b62615de2..99509bc5b 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -25,7 +25,6 @@ struct EventType;
namespace Kernel {
class KReadableEvent;
-class KWritableEvent;
} // namespace Kernel
namespace Service::Nvidia {
diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp
index 2c31e9485..1ac97fe31 100644
--- a/src/core/hle/service/ptm/psm.cpp
+++ b/src/core/hle/service/ptm/psm.cpp
@@ -37,19 +37,19 @@ public:
void SignalChargerTypeChanged() {
if (should_signal && should_signal_charger_type) {
- state_change_event->GetWritableEvent().Signal();
+ state_change_event->Signal();
}
}
void SignalPowerSupplyChanged() {
if (should_signal && should_signal_power_supply) {
- state_change_event->GetWritableEvent().Signal();
+ state_change_event->Signal();
}
}
void SignalBatteryVoltageStateChanged() {
if (should_signal && should_signal_battery_voltage) {
- state_change_event->GetWritableEvent().Signal();
+ state_change_event->Signal();
}
}
diff --git a/src/core/hle/service/ptm/ts.cpp b/src/core/hle/service/ptm/ts.cpp
index 65c3f135f..b1a0a5544 100644
--- a/src/core/hle/service/ptm/ts.cpp
+++ b/src/core/hle/service/ptm/ts.cpp
@@ -15,7 +15,7 @@ TS::TS(Core::System& system_) : ServiceFramework{system_, "ts"} {
{0, nullptr, "GetTemperatureRange"},
{1, &TS::GetTemperature, "GetTemperature"},
{2, nullptr, "SetMeasurementMode"},
- {3, nullptr, "GetTemperatureMilliC"},
+ {3, &TS::GetTemperatureMilliC, "GetTemperatureMilliC"},
{4, nullptr, "OpenSession"},
};
// clang-format on
@@ -29,8 +29,6 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto location{rp.PopEnum<Location>()};
- LOG_WARNING(Service_HID, "(STUBBED) called. location={}", location);
-
const s32 temperature = location == Location::Internal ? 35 : 20;
IPC::ResponseBuilder rb{ctx, 3};
@@ -38,4 +36,15 @@ void TS::GetTemperature(Kernel::HLERequestContext& ctx) {
rb.Push(temperature);
}
+void TS::GetTemperatureMilliC(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto location{rp.PopEnum<Location>()};
+
+ const s32 temperature = location == Location::Internal ? 35000 : 20000;
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(temperature);
+}
+
} // namespace Service::PTM
diff --git a/src/core/hle/service/ptm/ts.h b/src/core/hle/service/ptm/ts.h
index 39a734ef7..39d51847e 100644
--- a/src/core/hle/service/ptm/ts.h
+++ b/src/core/hle/service/ptm/ts.h
@@ -20,6 +20,7 @@ private:
};
void GetTemperature(Kernel::HLERequestContext& ctx);
+ void GetTemperatureMilliC(Kernel::HLERequestContext& ctx);
};
} // namespace Service::PTM
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index 2a0b812c1..d7cea6aac 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -101,6 +101,81 @@ void SET_SYS::SetColorSetId(Kernel::HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
+// FIXME: implement support for the real system_settings.ini
+
+template <typename T>
+static std::vector<u8> ToBytes(const T& value) {
+ static_assert(std::is_trivially_copyable_v<T>);
+
+ const auto* begin = reinterpret_cast<const u8*>(&value);
+ const auto* end = begin + sizeof(T);
+
+ return std::vector<u8>(begin, end);
+}
+
+using Settings =
+ std::map<std::string, std::map<std::string, std::vector<u8>, std::less<>>, std::less<>>;
+
+static Settings GetSettings() {
+ Settings ret;
+
+ ret["hbloader"]["applet_heap_size"] = ToBytes(u64{0x0});
+ ret["hbloader"]["applet_heap_reservation_size"] = ToBytes(u64{0x8600000});
+
+ return ret;
+}
+
+void SET_SYS::GetSettingsItemValueSize(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "called");
+
+ // The category of the setting. This corresponds to the top-level keys of
+ // system_settings.ini.
+ const auto setting_category_buf{ctx.ReadBuffer(0)};
+ const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()};
+
+ // The name of the setting. This corresponds to the second-level keys of
+ // system_settings.ini.
+ const auto setting_name_buf{ctx.ReadBuffer(1)};
+ const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()};
+
+ auto settings{GetSettings()};
+ u64 response_size{0};
+
+ if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) {
+ response_size = settings[setting_category][setting_name].size();
+ }
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(response_size == 0 ? ResultUnknown : ResultSuccess);
+ rb.Push(response_size);
+}
+
+void SET_SYS::GetSettingsItemValue(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "called");
+
+ // The category of the setting. This corresponds to the top-level keys of
+ // system_settings.ini.
+ const auto setting_category_buf{ctx.ReadBuffer(0)};
+ const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()};
+
+ // The name of the setting. This corresponds to the second-level keys of
+ // system_settings.ini.
+ const auto setting_name_buf{ctx.ReadBuffer(1)};
+ const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()};
+
+ auto settings{GetSettings()};
+ Result response{ResultUnknown};
+
+ if (settings.contains(setting_category) && settings[setting_category].contains(setting_name)) {
+ auto setting_value = settings[setting_category][setting_name];
+ ctx.WriteBuffer(setting_value.data(), setting_value.size());
+ response = ResultSuccess;
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(response);
+}
+
SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
// clang-format off
static const FunctionInfo functions[] = {
@@ -138,8 +213,8 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
{32, nullptr, "SetAccountNotificationSettings"},
{35, nullptr, "GetVibrationMasterVolume"},
{36, nullptr, "SetVibrationMasterVolume"},
- {37, nullptr, "GetSettingsItemValueSize"},
- {38, nullptr, "GetSettingsItemValue"},
+ {37, &SET_SYS::GetSettingsItemValueSize, "GetSettingsItemValueSize"},
+ {38, &SET_SYS::GetSettingsItemValue, "GetSettingsItemValue"},
{39, nullptr, "GetTvSettings"},
{40, nullptr, "SetTvSettings"},
{41, nullptr, "GetEdid"},
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index ac97772b7..258ef8c57 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -23,6 +23,8 @@ private:
BasicBlack = 1,
};
+ void GetSettingsItemValueSize(Kernel::HLERequestContext& ctx);
+ void GetSettingsItemValue(Kernel::HLERequestContext& ctx);
void GetFirmwareVersion(Kernel::HLERequestContext& ctx);
void GetFirmwareVersion2(Kernel::HLERequestContext& ctx);
void GetColorSetId(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 246c94623..48e70f93c 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -156,7 +156,8 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext&
// Create a new session.
Kernel::KClientSession* session{};
- if (const auto result = port->GetClientPort().CreateSession(std::addressof(session));
+ if (const auto result = port->GetClientPort().CreateSession(
+ std::addressof(session), std::make_shared<Kernel::SessionRequestManager>(kernel));
result.IsError()) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw);
return result;
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp
index 2a4bd64ab..273f79568 100644
--- a/src/core/hle/service/sm/sm_controller.cpp
+++ b/src/core/hle/service/sm/sm_controller.cpp
@@ -15,9 +15,10 @@
namespace Service::SM {
void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
- ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain");
+ ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(),
+ "Session is already a domain");
LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId());
- ctx.Session()->ConvertToDomain();
+ ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/time/system_clock_context_update_callback.cpp b/src/core/hle/service/time/system_clock_context_update_callback.cpp
index a649bed3a..cafc04ee7 100644
--- a/src/core/hle/service/time/system_clock_context_update_callback.cpp
+++ b/src/core/hle/service/time/system_clock_context_update_callback.cpp
@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/kernel/k_event.h"
#include "core/hle/service/time/errors.h"
#include "core/hle/service/time/system_clock_context_update_callback.h"
@@ -20,13 +20,13 @@ bool SystemClockContextUpdateCallback::NeedUpdate(const SystemClockContext& valu
}
void SystemClockContextUpdateCallback::RegisterOperationEvent(
- std::shared_ptr<Kernel::KWritableEvent>&& writable_event) {
- operation_event_list.emplace_back(std::move(writable_event));
+ std::shared_ptr<Kernel::KEvent>&& event) {
+ operation_event_list.emplace_back(std::move(event));
}
void SystemClockContextUpdateCallback::BroadcastOperationEvent() {
- for (const auto& writable_event : operation_event_list) {
- writable_event->Signal();
+ for (const auto& event : operation_event_list) {
+ event->Signal();
}
}
diff --git a/src/core/hle/service/time/system_clock_context_update_callback.h b/src/core/hle/service/time/system_clock_context_update_callback.h
index 9c6caf196..bf657acd9 100644
--- a/src/core/hle/service/time/system_clock_context_update_callback.h
+++ b/src/core/hle/service/time/system_clock_context_update_callback.h
@@ -9,7 +9,7 @@
#include "core/hle/service/time/clock_types.h"
namespace Kernel {
-class KWritableEvent;
+class KEvent;
}
namespace Service::Time::Clock {
@@ -24,7 +24,7 @@ public:
bool NeedUpdate(const SystemClockContext& value) const;
- void RegisterOperationEvent(std::shared_ptr<Kernel::KWritableEvent>&& writable_event);
+ void RegisterOperationEvent(std::shared_ptr<Kernel::KEvent>&& event);
void BroadcastOperationEvent();
@@ -37,7 +37,7 @@ protected:
private:
bool has_context{};
- std::vector<std::shared_ptr<Kernel::KWritableEvent>> operation_event_list;
+ std::vector<std::shared_ptr<Kernel::KEvent>> operation_event_list;
};
} // namespace Service::Time::Clock
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index 288aafaaf..8ef74f1f0 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -10,7 +10,6 @@
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvflinger/buffer_item_consumer.h"
@@ -74,7 +73,7 @@ Kernel::KReadableEvent* Display::GetVSyncEventUnchecked() {
}
void Display::SignalVSyncEvent() {
- vsync_event->GetWritableEvent().Signal();
+ vsync_event->Signal();
}
void Display::CreateLayer(u64 layer_id, u32 binder_id,
diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h
index a46c247d2..22bac799f 100644
--- a/src/core/hle/service/vi/vi_results.h
+++ b/src/core/hle/service/vi/vi_results.h
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#pragma once
+
#include "core/hle/result.h"
namespace Service::VI {
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2ac792566..9637cb5b1 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
return {};
}
- return system.DeviceMemory().GetPointer(paddr) + vaddr;
+ return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
[[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
return {};
}
- return system.DeviceMemory().GetPointer(paddr) + vaddr;
+ return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
} else {
while (base != end) {
page_table.pointers[base].Store(
- system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+ system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
ASSERT_MSG(page_table.pointers[base].Pointer(),
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 7c432a63c..284b2ae66 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -40,9 +40,6 @@ struct ScopeInit final {
core_timing.SetMulticore(true);
core_timing.Initialize([]() {});
}
- ~ScopeInit() {
- core_timing.Shutdown();
- }
Core::Timing::CoreTiming core_timing;
};
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 40e6d1ec4..cb8b46edf 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -82,6 +82,7 @@ add_library(video_core STATIC
gpu_thread.h
memory_manager.cpp
memory_manager.h
+ pte_kind.h
query_cache.h
rasterizer_accelerated.cpp
rasterizer_accelerated.h
diff --git a/src/video_core/dirty_flags.cpp b/src/video_core/dirty_flags.cpp
index 1039e036f..c2ecc12f5 100644
--- a/src/video_core/dirty_flags.cpp
+++ b/src/video_core/dirty_flags.cpp
@@ -61,7 +61,7 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
}
void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) {
- FillBlock(tables[0], OFF(pipelines), NUM(pipelines) * Maxwell3D::Regs::MaxShaderProgram,
+ FillBlock(tables[0], OFF(pipelines), NUM(pipelines[0]) * Maxwell3D::Regs::MaxShaderProgram,
Shaders);
}
} // Anonymous namespace
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index fdf470913..b1a22b76c 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -74,15 +74,15 @@ void Maxwell3D::InitializeRegisterDefaults() {
regs.stencil_front_op.zfail = Regs::StencilOp::Op::Keep_D3D;
regs.stencil_front_op.zpass = Regs::StencilOp::Op::Keep_D3D;
regs.stencil_front_op.func = Regs::ComparisonOp::Always_GL;
- regs.stencil_front_func.func_mask = 0xFFFFFFFF;
- regs.stencil_front_func.mask = 0xFFFFFFFF;
+ regs.stencil_front_func_mask = 0xFFFFFFFF;
+ regs.stencil_front_mask = 0xFFFFFFFF;
regs.stencil_two_side_enable = 1;
regs.stencil_back_op.fail = Regs::StencilOp::Op::Keep_D3D;
regs.stencil_back_op.zfail = Regs::StencilOp::Op::Keep_D3D;
regs.stencil_back_op.zpass = Regs::StencilOp::Op::Keep_D3D;
regs.stencil_back_op.func = Regs::ComparisonOp::Always_GL;
- regs.stencil_back_func.func_mask = 0xFFFFFFFF;
- regs.stencil_back_func.mask = 0xFFFFFFFF;
+ regs.stencil_back_func_mask = 0xFFFFFFFF;
+ regs.stencil_back_mask = 0xFFFFFFFF;
regs.depth_test_func = Regs::ComparisonOp::Always_GL;
regs.gl_front_face = Regs::FrontFace::CounterClockWise;
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index efe1073b0..75e3b868d 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -390,7 +390,7 @@ public:
FractionalEven = 2,
};
- enum class OutputPrimitves : u32 {
+ enum class OutputPrimitives : u32 {
Points = 0,
Lines = 1,
Triangles_CW = 2,
@@ -401,7 +401,7 @@ public:
union {
BitField<0, 2, DomainType> domain_type;
BitField<4, 2, Spacing> spacing;
- BitField<8, 2, OutputPrimitves> output_primitives;
+ BitField<8, 2, OutputPrimitives> output_primitives;
};
} params;
@@ -1795,12 +1795,6 @@ public:
ComparisonOp func;
};
- struct StencilFunc {
- s32 ref;
- u32 func_mask;
- u32 mask;
- };
-
struct PsSaturate {
// Opposite of DepthMode
enum class Depth : u32 {
@@ -2737,7 +2731,9 @@ public:
u32 post_z_pixel_imask; ///< 0x0F1C
INSERT_PADDING_BYTES_NOINIT(0x20);
ConstantColorRendering const_color_rendering; ///< 0x0F40
- StencilFunc stencil_back_func; ///< 0x0F54
+ s32 stencil_back_ref; ///< 0x0F54
+ u32 stencil_back_mask; ///< 0x0F58
+ u32 stencil_back_func_mask; ///< 0x0F5C
INSERT_PADDING_BYTES_NOINIT(0x24);
VertexStreamSubstitute vertex_stream_substitute; ///< 0x0F84
u32 line_mode_clip_generated_edge_do_not_draw; ///< 0x0F8C
@@ -2855,7 +2851,9 @@ public:
Blend blend; ///< 0x133C
u32 stencil_enable; ///< 0x1380
StencilOp stencil_front_op; ///< 0x1384
- StencilFunc stencil_front_func; ///< 0x1394
+ s32 stencil_front_ref; ///< 0x1394
+ s32 stencil_front_func_mask; ///< 0x1398
+ s32 stencil_front_mask; ///< 0x139C
INSERT_PADDING_BYTES_NOINIT(0x4);
u32 draw_auto_start_byte_count; ///< 0x13A4
PsSaturate frag_color_clamp; ///< 0x13A8
@@ -3311,7 +3309,9 @@ ASSERT_REG_POSITION(vpc_perf, 0x0F14);
ASSERT_REG_POSITION(pm_local_trigger, 0x0F18);
ASSERT_REG_POSITION(post_z_pixel_imask, 0x0F1C);
ASSERT_REG_POSITION(const_color_rendering, 0x0F40);
-ASSERT_REG_POSITION(stencil_back_func, 0x0F54);
+ASSERT_REG_POSITION(stencil_back_ref, 0x0F54);
+ASSERT_REG_POSITION(stencil_back_mask, 0x0F58);
+ASSERT_REG_POSITION(stencil_back_func_mask, 0x0F5C);
ASSERT_REG_POSITION(vertex_stream_substitute, 0x0F84);
ASSERT_REG_POSITION(line_mode_clip_generated_edge_do_not_draw, 0x0F8C);
ASSERT_REG_POSITION(color_mask_common, 0x0F90);
@@ -3416,7 +3416,9 @@ ASSERT_REG_POSITION(invalidate_texture_data_cache_lines, 0x1338);
ASSERT_REG_POSITION(blend, 0x133C);
ASSERT_REG_POSITION(stencil_enable, 0x1380);
ASSERT_REG_POSITION(stencil_front_op, 0x1384);
-ASSERT_REG_POSITION(stencil_front_func, 0x1394);
+ASSERT_REG_POSITION(stencil_front_ref, 0x1394);
+ASSERT_REG_POSITION(stencil_front_func_mask, 0x1398);
+ASSERT_REG_POSITION(stencil_front_mask, 0x139C);
ASSERT_REG_POSITION(draw_auto_start_byte_count, 0x13A4);
ASSERT_REG_POSITION(frag_color_clamp, 0x13A8);
ASSERT_REG_POSITION(window_origin, 0x13AC);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3909d36c1..4eb7a100d 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -56,66 +56,85 @@ void MaxwellDMA::Launch() {
ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
- const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
- const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
-
- if (!is_src_pitch && !is_dst_pitch) {
- // If both the source and the destination are in block layout, assert.
- UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
- return;
- }
+ if (launch.multi_line_enable) {
+ const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+ const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+
+ if (!is_src_pitch && !is_dst_pitch) {
+ // If both the source and the destination are in block layout, assert.
+ UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+ return;
+ }
- if (is_src_pitch && is_dst_pitch) {
- CopyPitchToPitch();
+ if (is_src_pitch && is_dst_pitch) {
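+ // Copy a (line_length_in x line_count) subrect line by line; CopyBlock flushes and invalidates the regions for us.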
+ for (u32 line = 0; line < regs.line_count; ++line) {
+ const GPUVAddr source_line =
+ regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
+ const GPUVAddr dest_line =
+ regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
+ memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
+ }
+ } else {
+ if (!is_src_pitch && is_dst_pitch) {
+ CopyBlockLinearToPitch();
+ } else {
+ CopyPitchToBlockLinear();
+ }
+ }
} else {
- ASSERT(launch.multi_line_enable == 1);
-
- if (!is_src_pitch && is_dst_pitch) {
- CopyBlockLinearToPitch();
+ // TODO: allow multisized components.
+ auto& accelerate = rasterizer->AccessAccelerateDMA();
+ const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
+ if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
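+ // Remap with a CONST_A destination acts as a 32-bit constant fill; mirror the accelerated clear into guest memory as well.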
+ ASSERT(regs.remap_const.component_size_minus_one == 3);
+ accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
+ std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
+ memory_manager.WriteBlockUnsafe(regs.offset_out,
+ reinterpret_cast<u8*>(tmp_buffer.data()),
+ regs.line_length_in * sizeof(u32));
} else {
- CopyPitchToBlockLinear();
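+ // Presumably the 16Bx2 GOB swizzle: this permutes address bits 4-8 within each 512-byte window.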
+ auto convert_linear_2_blocklinear_addr = [](u64 address) {
+ return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
+ ((address & 0x180) >> 1) | ((address & 0x20) << 3);
+ };
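+ // 1D copies carry no layout registers, so pitch vs. block-linear is inferred from each mapping's PTE kind.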
+ auto src_kind = memory_manager.GetPageKind(regs.offset_in);
+ auto dst_kind = memory_manager.GetPageKind(regs.offset_out);
+ const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind));
+ const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind));
+ if (!is_src_pitch && is_dst_pitch) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ std::vector<u8> dst_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+ dst_buffer[offset] =
+ tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) -
+ regs.offset_in];
+ }
+ memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+ } else if (is_src_pitch && !is_dst_pitch) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ std::vector<u8> dst_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+ dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) -
+ regs.offset_out] = tmp_buffer[offset];
+ }
+ memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+ } else {
+ if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(),
+ regs.line_length_in);
+ }
+ }
}
}
- ReleaseSemaphore();
-}
-void MaxwellDMA::CopyPitchToPitch() {
- // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions
- // (line_length_in, line_count).
- // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
- const bool remap_enabled = regs.launch_dma.remap_enable != 0;
- if (regs.launch_dma.multi_line_enable) {
- UNIMPLEMENTED_IF(remap_enabled);
-
- // Perform a line-by-line copy.
- // We're going to take a subrect of size (line_length_in, line_count) from the source
- // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
- // does that for us.
- for (u32 line = 0; line < regs.line_count; ++line) {
- const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
- const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
- memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
- }
- return;
- }
- // TODO: allow multisized components.
- auto& accelerate = rasterizer->AccessAccelerateDMA();
- const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
- const bool is_buffer_clear = remap_enabled && is_const_a_dst;
- if (is_buffer_clear) {
- ASSERT(regs.remap_const.component_size_minus_one == 3);
- accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
- std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
- memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
- regs.line_length_in * sizeof(u32));
- return;
- }
- UNIMPLEMENTED_IF(remap_enabled);
- if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
- std::vector<u8> tmp_buffer(regs.line_length_in);
- memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
- memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
- }
+ ReleaseSemaphore();
}
void MaxwellDMA::CopyBlockLinearToPitch() {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index bc48320ce..953e34adc 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -219,8 +219,6 @@ private:
/// registers.
void Launch();
- void CopyPitchToPitch();
-
void CopyBlockLinearToPitch();
void CopyPitchToBlockLinear();
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
index 326e8355a..a44fc83d3 100644
--- a/src/video_core/host1x/syncpoint_manager.cpp
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -36,7 +36,17 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
ActionHandle& handle) {
std::unique_lock lk(guard);
- action_storage.erase(handle);
+
+ // We want to ensure the iterator still exists prior to erasing it.
+ // Otherwise, if an invalid iterator was passed in, it could lead to UB.
+ // It is important to avoid UB in that case, since the deregister isn't called from a
+ // locked context.
+ for (auto it = action_storage.begin(); it != action_storage.end(); it++) {
+ if (it == handle) {
+ action_storage.erase(it);
+ return;
+ }
+ }
}
void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index cca401c74..d07b21bd6 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -41,7 +41,11 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
big_entries.resize(big_page_table_size / 32, 0);
big_page_table_cpu.resize(big_page_table_size);
big_page_continous.resize(big_page_table_size / continous_bits, 0);
+ std::array<PTEKind, 32> kind_values;
+ kind_values.fill(PTEKind::INVALID);
+ big_kinds.resize(big_page_table_size / 32, kind_values);
entries.resize(page_table_size / 32, 0);
+ kinds.resize(page_table_size / 32, kind_values);
}
MemoryManager::~MemoryManager() = default;
@@ -78,6 +82,42 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
}
}
+PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
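+ // A mapped or reserved big-page entry takes precedence; otherwise fall back to the small-page kind.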
+ auto entry = GetEntry<true>(gpu_addr);
+ if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] {
+ return GetKind<true>(gpu_addr);
+ } else {
+ return GetKind<false>(gpu_addr);
+ }
+}
+
+template <bool is_big_page>
+PTEKind MemoryManager::GetKind(size_t position) const {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const size_t sub_index = position % 32;
+ return big_kinds[position / 32][sub_index];
+ } else {
+ position = position >> page_bits;
+ const size_t sub_index = position % 32;
+ return kinds[position / 32][sub_index];
+ }
+}
+
+template <bool is_big_page>
+void MemoryManager::SetKind(size_t position, PTEKind kind) {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const size_t sub_index = position % 32;
+ big_kinds[position / 32][sub_index] = kind;
+ } else {
+ position = position >> page_bits;
+ const size_t sub_index = position % 32;
+ kinds[position / 32][sub_index] = kind;
+ }
+}
+
inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
const size_t sub_index = big_page_index % continous_bits;
@@ -92,8 +131,8 @@ inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value
}
template <MemoryManager::EntryType entry_type>
-GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
- size_t size) {
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind) {
u64 remaining_size{size};
if constexpr (entry_type == EntryType::Mapped) {
page_table.ReserveRange(gpu_addr, size);
@@ -102,6 +141,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
const GPUVAddr current_gpu_addr = gpu_addr + offset;
[[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
SetEntry<false>(current_gpu_addr, entry_type);
+ SetKind<false>(current_gpu_addr, kind);
if (current_entry_type != entry_type) {
rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
}
@@ -118,12 +158,13 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
template <MemoryManager::EntryType entry_type>
GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
- size_t size) {
+ size_t size, PTEKind kind) {
u64 remaining_size{size};
for (u64 offset{}; offset < size; offset += big_page_size) {
const GPUVAddr current_gpu_addr = gpu_addr + offset;
[[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
SetEntry<true>(current_gpu_addr, entry_type);
+ SetKind<true>(current_gpu_addr, kind);
if (current_entry_type != entry_type) {
rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
}
@@ -159,19 +200,19 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_)
rasterizer = rasterizer_;
}
-GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
bool is_big_pages) {
if (is_big_pages) [[likely]] {
- return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
+ return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
}
- return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
+ return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
}
GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
if (is_big_pages) [[likely]] {
- return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
+ return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
}
- return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
+ return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
}
void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
@@ -188,8 +229,8 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
rasterizer->UnmapMemory(*cpu_addr, map_size);
}
- BigPageTableOp<EntryType::Free>(gpu_addr, 0, size);
- PageTableOp<EntryType::Free>(gpu_addr, 0, size);
+ BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
+ PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
}
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index f992e29f3..ab4bc9ec6 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -11,6 +11,7 @@
#include "common/common_types.h"
#include "common/multi_level_page_table.h"
#include "common/virtual_buffer.h"
+#include "video_core/pte_kind.h"
namespace VideoCore {
class RasterizerInterface;
@@ -98,7 +99,8 @@ public:
std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
std::size_t size) const;
- GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true);
+ GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+ PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
void Unmap(GPUVAddr gpu_addr, std::size_t size);
@@ -114,6 +116,8 @@ public:
return gpu_addr < address_space_size;
}
+ PTEKind GetPageKind(GPUVAddr gpu_addr) const;
+
private:
template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
@@ -166,10 +170,12 @@ private:
std::vector<u64> big_entries;
template <EntryType entry_type>
- GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+ GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind);
template <EntryType entry_type>
- GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+ GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind);
template <bool is_big_page>
inline EntryType GetEntry(size_t position) const;
@@ -177,6 +183,16 @@
template <bool is_big_page>
inline void SetEntry(size_t position, EntryType entry);
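+ // Per-page PTE kinds, packed in 32-entry chunks to mirror the packed entry tables.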
+ std::vector<std::array<PTEKind, 32>> kinds;
+ std::vector<std::array<PTEKind, 32>> big_kinds;
+
+ template <bool is_big_page>
+ inline PTEKind GetKind(size_t position) const;
+
+ template <bool is_big_page>
+ inline void SetKind(size_t position, PTEKind kind);
+
Common::MultiLevelPageTable<u32> page_table;
Common::VirtualBuffer<u32> big_page_table_cpu;
diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h
new file mode 100644
index 000000000..591d7214b
--- /dev/null
+++ b/src/video_core/pte_kind.h
@@ -0,0 +1,264 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+// https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
+enum class PTEKind : u8 {
+ INVALID = 0xff,
+ PITCH = 0x00,
+ Z16 = 0x01,
+ Z16_2C = 0x02,
+ Z16_MS2_2C = 0x03,
+ Z16_MS4_2C = 0x04,
+ Z16_MS8_2C = 0x05,
+ Z16_MS16_2C = 0x06,
+ Z16_2Z = 0x07,
+ Z16_MS2_2Z = 0x08,
+ Z16_MS4_2Z = 0x09,
+ Z16_MS8_2Z = 0x0a,
+ Z16_MS16_2Z = 0x0b,
+ Z16_2CZ = 0x36,
+ Z16_MS2_2CZ = 0x37,
+ Z16_MS4_2CZ = 0x38,
+ Z16_MS8_2CZ = 0x39,
+ Z16_MS16_2CZ = 0x5f,
+ Z16_4CZ = 0x0c,
+ Z16_MS2_4CZ = 0x0d,
+ Z16_MS4_4CZ = 0x0e,
+ Z16_MS8_4CZ = 0x0f,
+ Z16_MS16_4CZ = 0x10,
+ S8Z24 = 0x11,
+ S8Z24_1Z = 0x12,
+ S8Z24_MS2_1Z = 0x13,
+ S8Z24_MS4_1Z = 0x14,
+ S8Z24_MS8_1Z = 0x15,
+ S8Z24_MS16_1Z = 0x16,
+ S8Z24_2CZ = 0x17,
+ S8Z24_MS2_2CZ = 0x18,
+ S8Z24_MS4_2CZ = 0x19,
+ S8Z24_MS8_2CZ = 0x1a,
+ S8Z24_MS16_2CZ = 0x1b,
+ S8Z24_2CS = 0x1c,
+ S8Z24_MS2_2CS = 0x1d,
+ S8Z24_MS4_2CS = 0x1e,
+ S8Z24_MS8_2CS = 0x1f,
+ S8Z24_MS16_2CS = 0x20,
+ S8Z24_4CSZV = 0x21,
+ S8Z24_MS2_4CSZV = 0x22,
+ S8Z24_MS4_4CSZV = 0x23,
+ S8Z24_MS8_4CSZV = 0x24,
+ S8Z24_MS16_4CSZV = 0x25,
+ V8Z24_MS4_VC12 = 0x26,
+ V8Z24_MS4_VC4 = 0x27,
+ V8Z24_MS8_VC8 = 0x28,
+ V8Z24_MS8_VC24 = 0x29,
+ V8Z24_MS4_VC12_1ZV = 0x2e,
+ V8Z24_MS4_VC4_1ZV = 0x2f,
+ V8Z24_MS8_VC8_1ZV = 0x30,
+ V8Z24_MS8_VC24_1ZV = 0x31,
+ V8Z24_MS4_VC12_2CS = 0x32,
+ V8Z24_MS4_VC4_2CS = 0x33,
+ V8Z24_MS8_VC8_2CS = 0x34,
+ V8Z24_MS8_VC24_2CS = 0x35,
+ V8Z24_MS4_VC12_2CZV = 0x3a,
+ V8Z24_MS4_VC4_2CZV = 0x3b,
+ V8Z24_MS8_VC8_2CZV = 0x3c,
+ V8Z24_MS8_VC24_2CZV = 0x3d,
+ V8Z24_MS4_VC12_2ZV = 0x3e,
+ V8Z24_MS4_VC4_2ZV = 0x3f,
+ V8Z24_MS8_VC8_2ZV = 0x40,
+ V8Z24_MS8_VC24_2ZV = 0x41,
+ V8Z24_MS4_VC12_4CSZV = 0x42,
+ V8Z24_MS4_VC4_4CSZV = 0x43,
+ V8Z24_MS8_VC8_4CSZV = 0x44,
+ V8Z24_MS8_VC24_4CSZV = 0x45,
+ Z24S8 = 0x46,
+ Z24S8_1Z = 0x47,
+ Z24S8_MS2_1Z = 0x48,
+ Z24S8_MS4_1Z = 0x49,
+ Z24S8_MS8_1Z = 0x4a,
+ Z24S8_MS16_1Z = 0x4b,
+ Z24S8_2CS = 0x4c,
+ Z24S8_MS2_2CS = 0x4d,
+ Z24S8_MS4_2CS = 0x4e,
+ Z24S8_MS8_2CS = 0x4f,
+ Z24S8_MS16_2CS = 0x50,
+ Z24S8_2CZ = 0x51,
+ Z24S8_MS2_2CZ = 0x52,
+ Z24S8_MS4_2CZ = 0x53,
+ Z24S8_MS8_2CZ = 0x54,
+ Z24S8_MS16_2CZ = 0x55,
+ Z24S8_4CSZV = 0x56,
+ Z24S8_MS2_4CSZV = 0x57,
+ Z24S8_MS4_4CSZV = 0x58,
+ Z24S8_MS8_4CSZV = 0x59,
+ Z24S8_MS16_4CSZV = 0x5a,
+ Z24V8_MS4_VC12 = 0x5b,
+ Z24V8_MS4_VC4 = 0x5c,
+ Z24V8_MS8_VC8 = 0x5d,
+ Z24V8_MS8_VC24 = 0x5e,
+ YUV_B8C1_2Y = 0x60,
+ YUV_B8C2_2Y = 0x61,
+ YUV_B10C1_2Y = 0x62,
+ YUV_B10C2_2Y = 0x6b,
+ YUV_B12C1_2Y = 0x6c,
+ YUV_B12C2_2Y = 0x6d,
+ Z24V8_MS4_VC12_1ZV = 0x63,
+ Z24V8_MS4_VC4_1ZV = 0x64,
+ Z24V8_MS8_VC8_1ZV = 0x65,
+ Z24V8_MS8_VC24_1ZV = 0x66,
+ Z24V8_MS4_VC12_2CS = 0x67,
+ Z24V8_MS4_VC4_2CS = 0x68,
+ Z24V8_MS8_VC8_2CS = 0x69,
+ Z24V8_MS8_VC24_2CS = 0x6a,
+ Z24V8_MS4_VC12_2CZV = 0x6f,
+ Z24V8_MS4_VC4_2CZV = 0x70,
+ Z24V8_MS8_VC8_2CZV = 0x71,
+ Z24V8_MS8_VC24_2CZV = 0x72,
+ Z24V8_MS4_VC12_2ZV = 0x73,
+ Z24V8_MS4_VC4_2ZV = 0x74,
+ Z24V8_MS8_VC8_2ZV = 0x75,
+ Z24V8_MS8_VC24_2ZV = 0x76,
+ Z24V8_MS4_VC12_4CSZV = 0x77,
+ Z24V8_MS4_VC4_4CSZV = 0x78,
+ Z24V8_MS8_VC8_4CSZV = 0x79,
+ Z24V8_MS8_VC24_4CSZV = 0x7a,
+ ZF32 = 0x7b,
+ ZF32_1Z = 0x7c,
+ ZF32_MS2_1Z = 0x7d,
+ ZF32_MS4_1Z = 0x7e,
+ ZF32_MS8_1Z = 0x7f,
+ ZF32_MS16_1Z = 0x80,
+ ZF32_2CS = 0x81,
+ ZF32_MS2_2CS = 0x82,
+ ZF32_MS4_2CS = 0x83,
+ ZF32_MS8_2CS = 0x84,
+ ZF32_MS16_2CS = 0x85,
+ ZF32_2CZ = 0x86,
+ ZF32_MS2_2CZ = 0x87,
+ ZF32_MS4_2CZ = 0x88,
+ ZF32_MS8_2CZ = 0x89,
+ ZF32_MS16_2CZ = 0x8a,
+ X8Z24_X16V8S8_MS4_VC12 = 0x8b,
+ X8Z24_X16V8S8_MS4_VC4 = 0x8c,
+ X8Z24_X16V8S8_MS8_VC8 = 0x8d,
+ X8Z24_X16V8S8_MS8_VC24 = 0x8e,
+ X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f,
+ X8Z24_X16V8S8_MS4_VC4_1CS = 0x90,
+ X8Z24_X16V8S8_MS8_VC8_1CS = 0x91,
+ X8Z24_X16V8S8_MS8_VC24_1CS = 0x92,
+ X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97,
+ X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98,
+ X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99,
+ X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a,
+ X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b,
+ X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c,
+ X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d,
+ X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e,
+ X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f,
+ X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0,
+ X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1,
+ X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2,
+ X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3,
+ X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4,
+ X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5,
+ X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6,
+ ZF32_X16V8S8_MS4_VC12 = 0xa7,
+ ZF32_X16V8S8_MS4_VC4 = 0xa8,
+ ZF32_X16V8S8_MS8_VC8 = 0xa9,
+ ZF32_X16V8S8_MS8_VC24 = 0xaa,
+ ZF32_X16V8S8_MS4_VC12_1CS = 0xab,
+ ZF32_X16V8S8_MS4_VC4_1CS = 0xac,
+ ZF32_X16V8S8_MS8_VC8_1CS = 0xad,
+ ZF32_X16V8S8_MS8_VC24_1CS = 0xae,
+ ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3,
+ ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4,
+ ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5,
+ ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6,
+ ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7,
+ ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8,
+ ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9,
+ ZF32_X16V8S8_MS8_VC24_1CZV = 0xba,
+ ZF32_X16V8S8_MS4_VC12_2CS = 0xbb,
+ ZF32_X16V8S8_MS4_VC4_2CS = 0xbc,
+ ZF32_X16V8S8_MS8_VC8_2CS = 0xbd,
+ ZF32_X16V8S8_MS8_VC24_2CS = 0xbe,
+ ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf,
+ ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0,
+ ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1,
+ ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2,
+ ZF32_X24S8 = 0xc3,
+ ZF32_X24S8_1CS = 0xc4,
+ ZF32_X24S8_MS2_1CS = 0xc5,
+ ZF32_X24S8_MS4_1CS = 0xc6,
+ ZF32_X24S8_MS8_1CS = 0xc7,
+ ZF32_X24S8_MS16_1CS = 0xc8,
+ ZF32_X24S8_2CSZV = 0xce,
+ ZF32_X24S8_MS2_2CSZV = 0xcf,
+ ZF32_X24S8_MS4_2CSZV = 0xd0,
+ ZF32_X24S8_MS8_2CSZV = 0xd1,
+ ZF32_X24S8_MS16_2CSZV = 0xd2,
+ ZF32_X24S8_2CS = 0xd3,
+ ZF32_X24S8_MS2_2CS = 0xd4,
+ ZF32_X24S8_MS4_2CS = 0xd5,
+ ZF32_X24S8_MS8_2CS = 0xd6,
+ ZF32_X24S8_MS16_2CS = 0xd7,
+ S8 = 0x2a,
+ S8_2S = 0x2b,
+ GENERIC_16BX2 = 0xfe,
+ C32_2C = 0xd8,
+ C32_2CBR = 0xd9,
+ C32_2CBA = 0xda,
+ C32_2CRA = 0xdb,
+ C32_2BRA = 0xdc,
+ C32_MS2_2C = 0xdd,
+ C32_MS2_2CBR = 0xde,
+ C32_MS2_4CBRA = 0xcc,
+ C32_MS4_2C = 0xdf,
+ C32_MS4_2CBR = 0xe0,
+ C32_MS4_2CBA = 0xe1,
+ C32_MS4_2CRA = 0xe2,
+ C32_MS4_2BRA = 0xe3,
+ C32_MS4_4CBRA = 0x2c,
+ C32_MS8_MS16_2C = 0xe4,
+ C32_MS8_MS16_2CRA = 0xe5,
+ C64_2C = 0xe6,
+ C64_2CBR = 0xe7,
+ C64_2CBA = 0xe8,
+ C64_2CRA = 0xe9,
+ C64_2BRA = 0xea,
+ C64_MS2_2C = 0xeb,
+ C64_MS2_2CBR = 0xec,
+ C64_MS2_4CBRA = 0xcd,
+ C64_MS4_2C = 0xed,
+ C64_MS4_2CBR = 0xee,
+ C64_MS4_2CBA = 0xef,
+ C64_MS4_2CRA = 0xf0,
+ C64_MS4_2BRA = 0xf1,
+ C64_MS4_4CBRA = 0x2d,
+ C64_MS8_MS16_2C = 0xf2,
+ C64_MS8_MS16_2CRA = 0xf3,
+ C128_2C = 0xf4,
+ C128_2CR = 0xf5,
+ C128_MS2_2C = 0xf6,
+ C128_MS2_2CR = 0xf7,
+ C128_MS4_2C = 0xf8,
+ C128_MS4_2CR = 0xf9,
+ C128_MS8_MS16_2C = 0xfa,
+ C128_MS8_MS16_2CR = 0xfb,
+ X8C24 = 0xfc,
+ PITCH_NO_SWIZZLE = 0xfd,
+ SMSKED_MESSAGE = 0xca,
+ SMHOST_MESSAGE = 0xcb,
+};
+
+constexpr bool IsPitchKind(PTEKind kind) {
+ return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
+}
+
+} // namespace Tegra
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index 45791aa75..e8761a747 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <thread>
+
#include "common/logging/log.h"
#include "core/frontend/emu_window.h"
#include "video_core/renderer_base.h"
@@ -35,8 +37,13 @@ void RendererBase::RequestScreenshot(void* data, std::function<void(bool)> callb
LOG_ERROR(Render, "A screenshot is already requested or in progress, ignoring the request");
return;
}
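+ // Run the completion callback on a detached thread so a slow consumer does not stall rendering.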
+ auto async_callback{[callback = std::move(callback)](bool invert_y) {
+ std::thread t{callback, invert_y};
+ t.detach();
+ }};
renderer_settings.screenshot_bits = data;
- renderer_settings.screenshot_complete_callback = std::move(callback);
+ renderer_settings.screenshot_complete_callback = async_callback;
renderer_settings.screenshot_framebuffer_layout = layout;
renderer_settings.screenshot_requested = true;
}
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index cce00cea8..e5c09a969 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -658,8 +658,13 @@ void RasterizerOpenGL::SyncDepthClamp() {
}
flags[Dirty::DepthClampEnabled] = false;
- oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.viewport_clip_control.geometry_clip !=
- Maxwell::ViewportClipControl::GeometryClip::Passthrough);
+ bool depth_clamp_disabled{maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
+ maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
+ maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumZ};
+ oglEnable(GL_DEPTH_CLAMP, !depth_clamp_disabled);
}
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
@@ -746,19 +751,19 @@ void RasterizerOpenGL::SyncStencilTestState() {
oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_op.func),
- regs.stencil_front_func.ref, regs.stencil_front_func.func_mask);
+ regs.stencil_front_ref, regs.stencil_front_func_mask);
glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op.fail),
MaxwellToGL::StencilOp(regs.stencil_front_op.zfail),
MaxwellToGL::StencilOp(regs.stencil_front_op.zpass));
- glStencilMaskSeparate(GL_FRONT, regs.stencil_front_func.mask);
+ glStencilMaskSeparate(GL_FRONT, regs.stencil_front_mask);
if (regs.stencil_two_side_enable) {
glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_op.func),
- regs.stencil_back_func.ref, regs.stencil_back_func.mask);
+ regs.stencil_back_ref, regs.stencil_back_mask);
glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op.fail),
MaxwellToGL::StencilOp(regs.stencil_back_op.zfail),
MaxwellToGL::StencilOp(regs.stencil_back_op.zpass));
- glStencilMaskSeparate(GL_BACK, regs.stencil_back_func.mask);
+ glStencilMaskSeparate(GL_BACK, regs.stencil_back_mask);
} else {
glStencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFF);
glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 6bdb0b645..609f0a772 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -317,8 +317,8 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
graphics_key.tessellation_primitive.Assign(regs.tessellation.params.domain_type.Value());
graphics_key.tessellation_spacing.Assign(regs.tessellation.params.spacing.Value());
graphics_key.tessellation_clockwise.Assign(
- regs.tessellation.params.output_primitives.Value() !=
- Maxwell::Tessellation::OutputPrimitves::Triangles_CCW);
+ regs.tessellation.params.output_primitives.Value() ==
+ Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
graphics_key.xfb_enabled.Assign(regs.transform_feedback_enabled != 0 ? 1 : 0);
if (graphics_key.xfb_enabled) {
SetXfbState(graphics_key.xfb_state, regs);
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index e2c709aac..a359f96f1 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -100,14 +100,12 @@ void SetupDirtyDepthTest(Tables& tables) {
void SetupDirtyStencilTest(Tables& tables) {
static constexpr std::array offsets = {
- OFF(stencil_enable), OFF(stencil_front_op.func),
- OFF(stencil_front_func.ref), OFF(stencil_front_func.func_mask),
- OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail),
- OFF(stencil_front_op.zpass), OFF(stencil_front_func.mask),
- OFF(stencil_two_side_enable), OFF(stencil_back_op.func),
- OFF(stencil_back_func.ref), OFF(stencil_back_func.func_mask),
- OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail),
- OFF(stencil_back_op.zpass), OFF(stencil_back_func.mask)};
+ OFF(stencil_enable), OFF(stencil_front_op.func), OFF(stencil_front_ref),
+ OFF(stencil_front_func_mask), OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail),
+ OFF(stencil_front_op.zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable),
+ OFF(stencil_back_op.func), OFF(stencil_back_ref), OFF(stencil_back_func_mask),
+ OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail), OFF(stencil_back_op.zpass),
+ OFF(stencil_back_mask)};
for (const auto offset : offsets) {
tables[0][offset] = StencilTest;
}
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index eb7c22fd5..f85ed8e5b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -63,14 +63,18 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
depth_clamp_disabled.Assign(regs.viewport_clip_control.geometry_clip ==
- Maxwell::ViewportClipControl::GeometryClip::Passthrough);
+ Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
+ regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
+ regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumZ);
ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
tessellation_primitive.Assign(static_cast<u32>(regs.tessellation.params.domain_type.Value()));
tessellation_spacing.Assign(static_cast<u32>(regs.tessellation.params.spacing.Value()));
- tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() !=
- Maxwell::Tessellation::OutputPrimitves::Triangles_CCW);
+ tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() ==
+ Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
logic_op.Assign(PackLogicOp(regs.logic_op.op));
topology.Assign(regs.draw.topology);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 7cb02631c..4b15c0f85 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
return query_pool == *pool;
});
- ASSERT(it != std::end(pools));
- const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
- usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+ if (it != std::end(pools)) {
+ const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+ usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+ }
}
QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 892cd94a3..47dfb45a1 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -772,11 +772,10 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
if (regs.stencil_two_side_enable) {
// Separate values per face
scheduler.Record(
- [front_ref = regs.stencil_front_func.ref,
- front_write_mask = regs.stencil_front_func.mask,
- front_test_mask = regs.stencil_front_func.func_mask,
- back_ref = regs.stencil_back_func.ref, back_write_mask = regs.stencil_back_func.mask,
- back_test_mask = regs.stencil_back_func.func_mask](vk::CommandBuffer cmdbuf) {
+ [front_ref = regs.stencil_front_ref, front_write_mask = regs.stencil_front_mask,
+ front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_ref,
+ back_write_mask = regs.stencil_back_mask,
+ back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
// Front face
cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_BIT, front_ref);
cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_BIT, front_write_mask);
@@ -789,9 +788,8 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
});
} else {
// Front face defines both faces
- scheduler.Record([ref = regs.stencil_front_func.ref,
- write_mask = regs.stencil_front_func.mask,
- test_mask = regs.stencil_front_func.func_mask](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([ref = regs.stencil_front_ref, write_mask = regs.stencil_front_mask,
+ test_mask = regs.stencil_front_func_mask](vk::CommandBuffer cmdbuf) {
cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_AND_BACK, test_mask);
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 7fb256953..06f68d09a 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -26,39 +26,22 @@ using namespace Common::Literals;
constexpr VkDeviceSize MAX_ALIGNMENT = 256;
// Maximum size to put elements in the stream buffer
constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
+// Stream buffer size in bytes
+constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
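+// Each of the NUM_SYNCS regions of the stream buffer is fenced by a scheduler tick and recycled once it completes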
+constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;
constexpr VkMemoryPropertyFlags HOST_FLAGS =
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS;
-static bool IsStreamHeap(VkMemoryHeap heap, size_t staging_buffer_size) noexcept {
- return staging_buffer_size < (heap.size * 2) / 3;
-}
-
-static bool HasLargeDeviceLocalHostVisibleMemory(const VkPhysicalDeviceMemoryProperties& props) {
- const auto flags{VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT};
-
- for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
- const auto& memory_type{props.memoryTypes[type_index]};
-
- if ((memory_type.propertyFlags & flags) != flags) {
- // Memory must be device local and host visible
- continue;
- }
-
- const auto& heap{props.memoryHeaps[memory_type.heapIndex]};
- if (heap.size >= 7168_MiB) {
- // This is the right type of memory
- return true;
- }
- }
-
- return false;
+bool IsStreamHeap(VkMemoryHeap heap) noexcept {
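+ // Only stream from heaps where the fixed 128 MiB buffer stays below two thirds of the heap size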
+ return STREAM_BUFFER_SIZE < (heap.size * 2) / 3;
}
std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
- VkMemoryPropertyFlags flags,
- size_t staging_buffer_size) noexcept {
+ VkMemoryPropertyFlags flags) noexcept {
for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
if (((type_mask >> type_index) & 1) == 0) {
// Memory type is incompatible
@@ -69,7 +50,7 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
// Memory type doesn't have the flags we want
continue;
}
- if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex], staging_buffer_size)) {
+ if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex])) {
// Memory heap is not suitable for streaming
continue;
}
@@ -80,17 +61,17 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
}
u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
- bool try_device_local, size_t staging_buffer_size) {
+ bool try_device_local) {
std::optional<u32> type;
if (try_device_local) {
// Try to find a DEVICE_LOCAL_BIT type, Nvidia and AMD have a dedicated heap for this
- type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS, staging_buffer_size);
+ type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS);
if (type) {
return *type;
}
}
// Otherwise try without the DEVICE_LOCAL_BIT
- type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS, staging_buffer_size);
+ type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS);
if (type) {
return *type;
}
@@ -98,32 +79,20 @@ u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_
throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
-size_t Region(size_t iterator, size_t region_size) noexcept {
- return iterator / region_size;
+size_t Region(size_t iterator) noexcept {
+ return iterator / REGION_SIZE;
}
} // Anonymous namespace
StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
Scheduler& scheduler_)
: device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {
-
- const auto memory_properties{device.GetPhysical().GetMemoryProperties().memoryProperties};
- if (HasLargeDeviceLocalHostVisibleMemory(memory_properties)) {
- // Possible on many integrated and newer discrete cards
- staging_buffer_size = 1_GiB;
- } else {
- // Well-supported default size used by most Vulkan PC games
- staging_buffer_size = 256_MiB;
- }
-
- region_size = staging_buffer_size / StagingBufferPool::NUM_SYNCS;
-
const vk::Device& dev = device.GetLogical();
stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .size = staging_buffer_size,
+ .size = STREAM_BUFFER_SIZE,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
@@ -148,18 +117,19 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
.image = nullptr,
.buffer = *stream_buffer,
};
+ const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
VkMemoryAllocateInfo stream_memory_info{
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = make_dedicated ? &dedicated_info : nullptr,
.allocationSize = requirements.size,
- .memoryTypeIndex = FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true,
- staging_buffer_size),
+ .memoryTypeIndex =
+ FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true),
};
stream_memory = dev.TryAllocateMemory(stream_memory_info);
if (!stream_memory) {
LOG_INFO(Render_Vulkan, "Dynamic memory allocation failed, trying with system memory");
- stream_memory_info.memoryTypeIndex = FindMemoryTypeIndex(
- memory_properties, requirements.memoryTypeBits, false, staging_buffer_size);
+ stream_memory_info.memoryTypeIndex =
+ FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, false);
stream_memory = dev.AllocateMemory(stream_memory_info);
}
@@ -167,7 +137,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
stream_memory.SetObjectNameEXT("Stream Buffer Memory");
}
stream_buffer.BindMemory(*stream_memory, 0);
- stream_pointer = stream_memory.Map(0, staging_buffer_size);
+ stream_pointer = stream_memory.Map(0, STREAM_BUFFER_SIZE);
}
StagingBufferPool::~StagingBufferPool() = default;
@@ -188,25 +158,25 @@ void StagingBufferPool::TickFrame() {
}
StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
- if (AreRegionsActive(Region(free_iterator, region_size) + 1,
- std::min(Region(iterator + size, region_size) + 1, NUM_SYNCS))) {
+ if (AreRegionsActive(Region(free_iterator) + 1,
+ std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
// Avoid waiting for the previous usages to be free
return GetStagingBuffer(size, MemoryUsage::Upload);
}
const u64 current_tick = scheduler.CurrentTick();
- std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
- sync_ticks.begin() + Region(iterator, region_size), current_tick);
+ std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
+ current_tick);
used_iterator = iterator;
free_iterator = std::max(free_iterator, iterator + size);
- if (iterator + size >= staging_buffer_size) {
- std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
- sync_ticks.begin() + NUM_SYNCS, current_tick);
+ if (iterator + size >= STREAM_BUFFER_SIZE) {
+ std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
+ current_tick);
used_iterator = 0;
iterator = 0;
free_iterator = size;
- if (AreRegionsActive(0, Region(size, region_size) + 1)) {
+ if (AreRegionsActive(0, Region(size) + 1)) {
// Avoid waiting for the previous usages to be free
return GetStagingBuffer(size, MemoryUsage::Upload);
}
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 90c67177f..91dc84da8 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -93,9 +93,6 @@ private:
size_t free_iterator = 0;
std::array<u64, NUM_SYNCS> sync_ticks{};
- size_t staging_buffer_size = 0;
- size_t region_size = 0;
-
StagingBuffersCache device_local_cache;
StagingBuffersCache upload_cache;
StagingBuffersCache download_cache;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index ed98c8370..b87c3be66 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -77,12 +77,12 @@ void SetupDirtyDepthBounds(Tables& tables) {
void SetupDirtyStencilProperties(Tables& tables) {
auto& table = tables[0];
table[OFF(stencil_two_side_enable)] = StencilProperties;
- table[OFF(stencil_front_func.ref)] = StencilProperties;
- table[OFF(stencil_front_func.mask)] = StencilProperties;
- table[OFF(stencil_front_func.func_mask)] = StencilProperties;
- table[OFF(stencil_back_func.ref)] = StencilProperties;
- table[OFF(stencil_back_func.mask)] = StencilProperties;
- table[OFF(stencil_back_func.func_mask)] = StencilProperties;
+ table[OFF(stencil_front_ref)] = StencilProperties;
+ table[OFF(stencil_front_mask)] = StencilProperties;
+ table[OFF(stencil_front_func_mask)] = StencilProperties;
+ table[OFF(stencil_back_ref)] = StencilProperties;
+ table[OFF(stencil_back_mask)] = StencilProperties;
+ table[OFF(stencil_back_func_mask)] = StencilProperties;
}
void SetupDirtyLineWidth(Tables& tables) {
diff --git a/src/video_core/texture_cache/descriptor_table.h b/src/video_core/texture_cache/descriptor_table.h
index b18e3838f..ee4240288 100644
--- a/src/video_core/texture_cache/descriptor_table.h
+++ b/src/video_core/texture_cache/descriptor_table.h
@@ -18,7 +18,7 @@ class DescriptorTable {
public:
explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {}
- [[nodiscard]] bool Synchornize(GPUVAddr gpu_addr, u32 limit) {
+ [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) {
[[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) {
return false;
}
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index ad935d386..08aa8ca33 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -150,6 +150,8 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
return PixelFormat::D24_UNORM_S8_UINT;
case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
return PixelFormat::D32_FLOAT_S8_UINT;
+ case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
+ return PixelFormat::D32_FLOAT_S8_UINT;
case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR):
return PixelFormat::BC1_RGBA_UNORM;
case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB):
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 413baf730..0e0fd410f 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -193,11 +193,11 @@ void TextureCache<P>::SynchronizeGraphicsDescriptors() {
const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding;
const u32 tic_limit = maxwell3d->regs.tex_header.limit;
const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit;
- if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tex_sampler.Address(),
+ if (channel_state->graphics_sampler_table.Synchronize(maxwell3d->regs.tex_sampler.Address(),
tsc_limit)) {
channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tex_header.Address(),
+ if (channel_state->graphics_image_table.Synchronize(maxwell3d->regs.tex_header.Address(),
tic_limit)) {
channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
@@ -209,10 +209,10 @@ void TextureCache<P>::SynchronizeComputeDescriptors() {
const u32 tic_limit = kepler_compute->regs.tic.limit;
const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
- if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
+ if (channel_state->compute_sampler_table.Synchronize(tsc_gpu_addr, tsc_limit)) {
channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(),
+ if (channel_state->compute_image_table.Synchronize(kepler_compute->regs.tic.Address(),
tic_limit)) {
channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui
index c8cb6bcf3..f5eccba70 100644
--- a/src/yuzu/applets/qt_controller.ui
+++ b/src/yuzu/applets/qt_controller.ui
@@ -2300,7 +2300,7 @@
<item>
<widget class="QRadioButton" name="radioUndocked">
<property name="text">
- <string>Undocked</string>
+ <string>Handheld</string>
</property>
</widget>
</item>
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 24251247d..6acfb7b06 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -120,8 +120,8 @@ void EmuThread::run() {
}
}
- // Shutdown the core emulation
- system.Shutdown();
+ // Shutdown the main emulated process
+ system.ShutdownMainProcess();
#if MICROPROFILE_ENABLED
MicroProfileOnThreadExit();
diff --git a/src/yuzu/configuration/configure_audio.cpp b/src/yuzu/configuration/configure_audio.cpp
index 19b8b15ef..70cc6f84b 100644
--- a/src/yuzu/configuration/configure_audio.cpp
+++ b/src/yuzu/configuration/configure_audio.cpp
@@ -161,8 +161,8 @@ void ConfigureAudio::InitializeAudioSinkComboBox() {
ui->sink_combo_box->clear();
ui->sink_combo_box->addItem(QString::fromUtf8(AudioCore::Sink::auto_device_name));
- for (const char* id : AudioCore::Sink::GetSinkIDs()) {
- ui->sink_combo_box->addItem(QString::fromUtf8(id));
+ for (const auto& id : AudioCore::Sink::GetSinkIDs()) {
+ ui->sink_combo_box->addItem(QString::fromUtf8(id.data(), static_cast<s32>(id.length())));
}
}
diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui
index 1e4f74704..fdbb33372 100644
--- a/src/yuzu/configuration/configure_graphics.ui
+++ b/src/yuzu/configuration/configure_graphics.ui
@@ -301,6 +301,11 @@
</item>
<item>
<property name="text">
+ <string>Force 16:10</string>
+ </property>
+ </item>
+ <item>
+ <property name="text">
<string>Stretch to Window</string>
</property>
</item>
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index f45a25410..7b16d7f7e 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
#ifdef __linux__
SetupSigInterrupts();
#endif
+ system->Initialize();
Common::Log::Initialize();
LoadTranslation();
@@ -1895,6 +1896,8 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
case GameListOpenTarget::SaveData: {
open_target = tr("Save Data");
const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
+ auto vfs_nand_dir =
+ vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
if (has_user_save) {
// User save data
@@ -1921,15 +1924,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
ASSERT(user_id);
const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
- *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
- program_id, user_id->AsU128(), 0);
+ *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+ FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0);
path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
} else {
// Device save data
const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath(
- *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
- program_id, {}, 0);
+ *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+ FileSys::SaveDataType::SaveData, program_id, {}, 0);
path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path);
}
@@ -3280,7 +3283,7 @@ void GMainWindow::LoadAmiibo(const QString& filename) {
QMessageBox::warning(this, title, tr("The current game is not looking for amiibos"));
break;
case InputCommon::VirtualAmiibo::Info::Unknown:
- QMessageBox::warning(this, title, tr("An unkown error occured"));
+ QMessageBox::warning(this, title, tr("An unknown error occurred"));
break;
default:
break;
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 3a0f33cba..e16f79eb4 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -302,6 +302,8 @@ int main(int argc, char** argv) {
}
Core::System system{};
+ system.Initialize();
+
InputCommon::InputSubsystem input_subsystem{};
// Apply the command line arguments
@@ -392,7 +394,7 @@ int main(int argc, char** argv) {
}
system.DetachDebugger();
void(system.Pause());
- system.Shutdown();
+ system.ShutdownMainProcess();
detached_tasks.WaitForAllTasks();
return 0;