Diffstat (limited to 'src')
-rw-r--r-- | src/core/core_timing.cpp       |  15 |
-rw-r--r-- | src/core/core_timing.h         |   1 |
-rw-r--r-- | src/tests/core/core_timing.cpp | 156 |
3 files changed, 165 insertions, 7 deletions
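The headline change is how CoreTiming accounts for cycles executed inside the current slice: instead of deriving them as time_slice[current_context] - downcounts[current_context], AddTicks() now records them in an explicit accumulated_ticks counter that Advance() folds into global_timer and that GetTicks() reads mid-slice. The derived form goes wrong as soon as something rewrites the downcount mid-slice (for example a newly scheduled near event clamping it via ForceExceptionCheck()); the counter does not. Below is a minimal sketch of that failure mode, using plain local variables as stand-ins for the CoreTiming members touched by this patch, not the real class:

// Illustrative only: local variables model downcounts[current_context] and the
// new accumulated_ticks member; values are arbitrary.
#include <cassert>
#include <cstdint>

int main() {
    const std::int64_t slice = 10000; // matches MAX_SLICE_LENGTH
    std::int64_t downcount = slice;
    std::uint64_t accumulated_ticks = 0;

    // AddTicks(600): the core executed 600 cycles of its slice.
    accumulated_ticks += 600;
    downcount -= 600;

    // Something reschedules mid-slice and clamps the downcount to a near event.
    downcount = 100;

    // The old derivation now over-reports the executed cycles; the explicit counter does not.
    assert(slice - downcount == 9900); // what GetTicks() used to add
    assert(accumulated_ticks == 600);  // what it adds after this change
    return 0;
}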
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 6da2dcfb4..0ed6f9b19 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -13,6 +13,8 @@
 #include "common/thread.h"
 #include "core/core_timing_util.h"
 
+#pragma optimize("", off)
+
 namespace Core::Timing {
 
 constexpr int MAX_SLICE_LENGTH = 10000;
@@ -114,7 +116,7 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
 u64 CoreTiming::GetTicks() const {
     u64 ticks = static_cast<u64>(global_timer);
     if (!is_global_timer_sane) {
-        ticks += time_slice[current_context] - downcounts[current_context];
+        ticks += accumulated_ticks;
     }
     return ticks;
 }
@@ -124,6 +126,7 @@ u64 CoreTiming::GetIdleTicks() const {
 }
 
 void CoreTiming::AddTicks(u64 ticks) {
+    accumulated_ticks += ticks;
     downcounts[current_context] -= static_cast<s64>(ticks);
 }
 
@@ -151,7 +154,6 @@ void CoreTiming::ForceExceptionCheck(s64 cycles) {
 
     // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
     // here. Account for cycles already executed by adjusting the g.slice_length
-    slice_length -= downcounts[current_context] - static_cast<int>(cycles);
     downcounts[current_context] = static_cast<int>(cycles);
 }
 
@@ -172,8 +174,8 @@ std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
 void CoreTiming::Advance() {
     std::unique_lock<std::mutex> guard(inner_mutex);
 
-    const int cycles_executed = time_slice[current_context] - downcounts[current_context];
-    time_slice[current_context] = std::max<s64>(0, downcounts[current_context]);
+    const int cycles_executed = accumulated_ticks;
+    time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
     global_timer += cycles_executed;
 
     is_global_timer_sane = true;
@@ -198,6 +200,8 @@ void CoreTiming::Advance() {
         }
     }
 
+    accumulated_ticks = 0;
+
     downcounts[current_context] = time_slice[current_context];
 }
 
@@ -212,6 +216,9 @@ void CoreTiming::ResetRun() {
         s64 needed_ticks = std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
         downcounts[current_context] = needed_ticks;
     }
+
+    is_global_timer_sane = false;
+    accumulated_ticks = 0;
 }
 
 void CoreTiming::Idle() {
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index ec0a6d2c0..8bba45beb 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -130,6 +130,7 @@ private:
     s64 global_timer = 0;
     s64 idled_cycles = 0;
     s64 slice_length = 0;
+    u64 accumulated_ticks = 0;
     std::array<s64, num_cpu_cores> downcounts{};
     // Slice of time assigned to each core per run.
     std::array<s64, num_cpu_cores> time_slice{};
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 596a2f4aa..467eb4736 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -6,6 +6,7 @@
 
 #include <array>
 #include <bitset>
+#include <cstdlib>
 #include <string>
 #include "common/file_util.h"
 #include "core/core.h"
@@ -13,7 +14,7 @@
 
 // Numbers are chosen randomly to make sure the correct one is given.
 static constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
-static constexpr int MAX_SLICE_LENGTH = 20000; // Copied from CoreTiming internals
+static constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
 
 static std::bitset<CB_IDS.size()> callbacks_ran_flags;
 static u64 expected_callback = 0;
@@ -28,6 +29,12 @@ void CallbackTemplate(u64 userdata, s64 cycles_late) {
     REQUIRE(lateness == cycles_late);
 }
 
+static u64 callbacks_done = 0;
+
+void EmptyCallback(u64 userdata, s64 cycles_late) {
+    ++callbacks_done;
+}
+
 struct ScopeInit final {
     ScopeInit() {
         core_timing.Initialize();
@@ -39,16 +46,159 @@ struct ScopeInit final {
     Core::Timing::CoreTiming core_timing;
 };
 
-static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, int downcount,
+static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0,
                             int expected_lateness = 0, int cpu_downcount = 0) {
     callbacks_ran_flags = 0;
     expected_callback = CB_IDS[idx];
     lateness = expected_lateness;
 
     // Pretend we executed X cycles of instructions.
+    core_timing.SwitchContext(context);
     core_timing.AddTicks(core_timing.GetDowncount() - cpu_downcount);
     core_timing.Advance();
+    core_timing.SwitchContext((context + 1) % 4);
 
     REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
-    REQUIRE(downcount == core_timing.GetDowncount());
+}
+
+TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
+    ScopeInit guard;
+    auto& core_timing = guard.core_timing;
+
+    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
+    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
+    Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>);
+    Core::Timing::EventType* cb_d = core_timing.RegisterEvent("callbackD", CallbackTemplate<3>);
+    Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", CallbackTemplate<4>);
+
+    // Enter slice 0
+    core_timing.ResetRun();
+
+    // D -> B -> C -> A -> E
+    core_timing.SwitchContext(0);
+    core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
+    REQUIRE(1000 == core_timing.GetDowncount());
+    core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
+    REQUIRE(500 == core_timing.GetDowncount());
+    core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
+    REQUIRE(500 == core_timing.GetDowncount());
+    core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
+    REQUIRE(100 == core_timing.GetDowncount());
+    core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
+    REQUIRE(100 == core_timing.GetDowncount());
+
+    AdvanceAndCheck(core_timing, 3, 0);
+    AdvanceAndCheck(core_timing, 1, 1);
+    AdvanceAndCheck(core_timing, 2, 2);
+    AdvanceAndCheck(core_timing, 0, 3);
+    AdvanceAndCheck(core_timing, 4, 0);
+}
+
+TEST_CASE("CoreTiming[FairSharing]", "[core]") {
+
+    ScopeInit guard;
+    auto& core_timing = guard.core_timing;
+
+    Core::Timing::EventType* empty_callback =
+        core_timing.RegisterEvent("empty_callback", EmptyCallback);
+
+    callbacks_done = 0;
+    u64 MAX_CALLBACKS = 10;
+    for (std::size_t i = 0; i < 10; i++) {
+        core_timing.ScheduleEvent(i * 3333U, empty_callback, 0);
+    }
+
+    const s64 advances = MAX_SLICE_LENGTH / 10;
+    core_timing.ResetRun();
+    u64 current_time = core_timing.GetTicks();
+    bool keep_running{};
+    do {
+        keep_running = false;
+        for (u32 active_core = 0; active_core < 4; ++active_core) {
+            core_timing.SwitchContext(active_core);
+            if (core_timing.CurrentContextCanRun()) {
+                core_timing.AddTicks(std::min<s64>(advances, core_timing.GetDowncount()));
+                core_timing.Advance();
+            }
+            keep_running |= core_timing.CurrentContextCanRun();
+        }
+    } while (keep_running);
+    u64 current_time_2 = core_timing.GetTicks();
+
+    REQUIRE(MAX_CALLBACKS == callbacks_done);
+    REQUIRE(current_time_2 == current_time + MAX_SLICE_LENGTH * 4);
+}
+
+TEST_CASE("Core::Timing[PredictableLateness]", "[core]") {
+    ScopeInit guard;
+    auto& core_timing = guard.core_timing;
+
+    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
+    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
+
+    // Enter slice 0
+    core_timing.ResetRun();
+
+    core_timing.ScheduleEvent(100, cb_a, CB_IDS[0]);
+    core_timing.ScheduleEvent(200, cb_b, CB_IDS[1]);
+
+    AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10)
+    AdvanceAndCheck(core_timing, 1, 1, 50, -50);
+}
+
+namespace ChainSchedulingTest {
+static int reschedules = 0;
+
+static void RescheduleCallback(Core::Timing::CoreTiming& core_timing, u64 userdata,
+                               s64 cycles_late) {
+    --reschedules;
+    REQUIRE(reschedules >= 0);
+    REQUIRE(lateness == cycles_late);
+
+    if (reschedules > 0) {
+        core_timing.ScheduleEvent(1000, reinterpret_cast<Core::Timing::EventType*>(userdata),
+                                  userdata);
+    }
+}
+} // namespace ChainSchedulingTest
+
+TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
+    using namespace ChainSchedulingTest;
+
+    ScopeInit guard;
+    auto& core_timing = guard.core_timing;
+
+    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
+    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
+    Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>);
+    Core::Timing::EventType* cb_rs = core_timing.RegisterEvent(
+        "callbackReschedule", [&core_timing](u64 userdata, s64 cycles_late) {
+            RescheduleCallback(core_timing, userdata, cycles_late);
+        });
+
+    // Enter slice 0
+    core_timing.ResetRun();
+
+    core_timing.ScheduleEvent(800, cb_a, CB_IDS[0]);
+    core_timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
+    core_timing.ScheduleEvent(2200, cb_c, CB_IDS[2]);
+    core_timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs));
+    REQUIRE(800 == core_timing.GetDowncount());
+
+    reschedules = 3;
+    AdvanceAndCheck(core_timing, 0, 0); // cb_a
+    AdvanceAndCheck(core_timing, 1, 1); // cb_b, cb_rs
+    REQUIRE(2 == reschedules);
+
+    core_timing.AddTicks(core_timing.GetDowncount());
+    core_timing.Advance(); // cb_rs
+    core_timing.SwitchContext(3);
+    REQUIRE(1 == reschedules);
+    REQUIRE(200 == core_timing.GetDowncount());
+
+    AdvanceAndCheck(core_timing, 2, 3); // cb_c
+
+    core_timing.AddTicks(core_timing.GetDowncount());
+    core_timing.Advance(); // cb_rs
+    REQUIRE(0 == reschedules);
 }
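The FairSharing test encodes the contract the multicore scheduler is expected to honour: driving the four contexts round-robin until none can run should advance global time by exactly one full slice per core. Below is a self-contained sketch of that driver loop and invariant; MultiCoreTimer and its members are illustrative stand-ins for yuzu's CoreTiming API, event dispatch is omitted, and the core count of 4 mirrors what the test hard-codes.

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_CORES = 4;
constexpr std::int64_t MAX_SLICE_LENGTH = 10000;

// Toy model of the per-core bookkeeping: one downcount and one time slice per
// context, plus the accumulated_ticks counter introduced by this patch.
struct MultiCoreTimer {
    std::int64_t global_timer = 0;
    std::uint64_t accumulated_ticks = 0;
    std::size_t current = 0;
    std::array<std::int64_t, NUM_CORES> downcounts{};
    std::array<std::int64_t, NUM_CORES> time_slice{};

    void ResetRun() {
        downcounts.fill(MAX_SLICE_LENGTH);
        time_slice.fill(MAX_SLICE_LENGTH);
        current = 0;
        accumulated_ticks = 0;
    }
    void SwitchContext(std::size_t core) { current = core; }
    bool CurrentContextCanRun() const { return downcounts[current] > 0; }
    std::int64_t GetDowncount() const { return downcounts[current]; }
    std::uint64_t GetTicks() const {
        return static_cast<std::uint64_t>(global_timer) + accumulated_ticks;
    }
    void AddTicks(std::uint64_t ticks) {
        accumulated_ticks += ticks;
        downcounts[current] -= static_cast<std::int64_t>(ticks);
    }
    // End of a burst: consume the executed ticks from this context's slice.
    void Advance() {
        global_timer += static_cast<std::int64_t>(accumulated_ticks);
        time_slice[current] = std::max<std::int64_t>(
            0, time_slice[current] - static_cast<std::int64_t>(accumulated_ticks));
        accumulated_ticks = 0;
        downcounts[current] = time_slice[current];
    }
};

int main() {
    MultiCoreTimer timing;
    timing.ResetRun();
    const std::uint64_t start = timing.GetTicks();

    // Round-robin driver, mirroring the loop in the FairSharing test: each pass
    // lets every runnable context execute up to a tenth of a slice.
    bool keep_running = true;
    while (keep_running) {
        keep_running = false;
        for (std::size_t core = 0; core < NUM_CORES; ++core) {
            timing.SwitchContext(core);
            if (timing.CurrentContextCanRun()) {
                timing.AddTicks(static_cast<std::uint64_t>(
                    std::min<std::int64_t>(MAX_SLICE_LENGTH / 10, timing.GetDowncount())));
                timing.Advance();
            }
            keep_running |= timing.CurrentContextCanRun();
        }
    }

    // Every context burned exactly one full slice, so time advanced by 4 slices.
    assert(timing.GetTicks() == start + NUM_CORES * MAX_SLICE_LENGTH);
    return 0;
}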