path: root/src/audio_core
author      bunnei <bunneidev@gmail.com>    2020-09-11 16:57:27 +0200
committer   GitHub <noreply@github.com>     2020-09-11 16:57:27 +0200
commit      324029d4f9fd2381f474e608a2859360324161e5 (patch)
tree        d2dc348235f05f20686c526f7092590f596f65c2 /src/audio_core
parent      Merge pull request #4597 from Morph1984/mjolnir-p2 (diff)
parent      Preliminary effects (diff)
Diffstat (limited to 'src/audio_core')
-rw-r--r--   src/audio_core/CMakeLists.txt                 16
-rw-r--r--   src/audio_core/algorithm/interpolate.cpp      32
-rw-r--r--   src/audio_core/algorithm/interpolate.h         3
-rw-r--r--   src/audio_core/audio_renderer.cpp            543
-rw-r--r--   src/audio_core/audio_renderer.h              224
-rw-r--r--   src/audio_core/behavior_info.cpp              79
-rw-r--r--   src/audio_core/behavior_info.h                52
-rw-r--r--   src/audio_core/command_generator.cpp         976
-rw-r--r--   src/audio_core/command_generator.h           103
-rw-r--r--   src/audio_core/common.h                       65
-rw-r--r--   src/audio_core/cubeb_sink.cpp                 18
-rw-r--r--   src/audio_core/effect_context.cpp            299
-rw-r--r--   src/audio_core/effect_context.h              322
-rw-r--r--   src/audio_core/info_updater.cpp              517
-rw-r--r--   src/audio_core/info_updater.h                 58
-rw-r--r--   src/audio_core/memory_pool.cpp                62
-rw-r--r--   src/audio_core/memory_pool.h                  53
-rw-r--r--   src/audio_core/mix_context.cpp               296
-rw-r--r--   src/audio_core/mix_context.h                 114
-rw-r--r--   src/audio_core/sink_context.cpp               31
-rw-r--r--   src/audio_core/sink_context.h                 89
-rw-r--r--   src/audio_core/splitter_context.cpp          617
-rw-r--r--   src/audio_core/splitter_context.h            221
-rw-r--r--   src/audio_core/stream.cpp                      6
-rw-r--r--   src/audio_core/voice_context.cpp             526
-rw-r--r--   src/audio_core/voice_context.h               296
26 files changed, 4971 insertions, 647 deletions
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index 5ef38a337..cb00ef60e 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -12,16 +12,32 @@ add_library(audio_core STATIC
buffer.h
codec.cpp
codec.h
+ command_generator.cpp
+ command_generator.h
common.h
+ effect_context.cpp
+ effect_context.h
+ info_updater.cpp
+ info_updater.h
+ memory_pool.cpp
+ memory_pool.h
+ mix_context.cpp
+ mix_context.h
null_sink.h
sink.h
+ sink_context.cpp
+ sink_context.h
sink_details.cpp
sink_details.h
sink_stream.h
+ splitter_context.cpp
+ splitter_context.h
stream.cpp
stream.h
time_stretch.cpp
time_stretch.h
+ voice_context.cpp
+ voice_context.h
$<$<BOOL:${ENABLE_CUBEB}>:cubeb_sink.cpp cubeb_sink.h>
)
diff --git a/src/audio_core/algorithm/interpolate.cpp b/src/audio_core/algorithm/interpolate.cpp
index 49ab9d3e1..689a54508 100644
--- a/src/audio_core/algorithm/interpolate.cpp
+++ b/src/audio_core/algorithm/interpolate.cpp
@@ -197,4 +197,36 @@ std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
return output;
}
+void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count) {
+ const std::array<s16, 512>& lut = [pitch] {
+ if (pitch > 0xaaaa) {
+ return curve_lut0;
+ }
+ if (pitch <= 0x8000) {
+ return curve_lut1;
+ }
+ return curve_lut2;
+ }();
+
+ std::size_t index{};
+
+ for (std::size_t i = 0; i < sample_count; i++) {
+ const std::size_t lut_index{(static_cast<std::size_t>(fraction) >> 8) * 4};
+ const auto l0 = lut[lut_index + 0];
+ const auto l1 = lut[lut_index + 1];
+ const auto l2 = lut[lut_index + 2];
+ const auto l3 = lut[lut_index + 3];
+
+ const auto s0 = static_cast<s32>(input[index]);
+ const auto s1 = static_cast<s32>(input[index + 1]);
+ const auto s2 = static_cast<s32>(input[index + 2]);
+ const auto s3 = static_cast<s32>(input[index + 3]);
+
+ output[i] = (l0 * s0 + l1 * s1 + l2 * s2 + l3 * s3) >> 15;
+ fraction += pitch;
+ index += (fraction >> 15);
+ fraction &= 0x7fff;
+ }
+}
+
} // namespace AudioCore
diff --git a/src/audio_core/algorithm/interpolate.h b/src/audio_core/algorithm/interpolate.h
index ab1a31754..d534077af 100644
--- a/src/audio_core/algorithm/interpolate.h
+++ b/src/audio_core/algorithm/interpolate.h
@@ -38,4 +38,7 @@ inline std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16>
return Interpolate(state, std::move(input), ratio);
}
+/// Nintendo Switch's DSP resampling algorithm. Operates on a single channel.
+void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count);
+
} // namespace AudioCore
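
For reference, the Resample() helper added above steps through its source in 1.15 fixed point: pitch is the step size (0x8000 represents 1.0x), fraction carries the sub-sample phase between calls, and each output sample blends four input taps through a 512-entry curve table. A minimal call-site sketch, assuming a caller that pads the source and wants half-speed playback (the padding requirement, names, and pitch value are illustrative, not part of this patch):

    #include <vector>

    // s32 comes from common/common_types.h, as in the surrounding code.
    void ResampleBlockSketch(const std::vector<s32>& padded_source, std::vector<s32>& dest) {
        s32 fraction = 0;            // persists across blocks in real use
        const s32 pitch = 0x4000;    // half of 0x8000, so the source advances at 0.5x
        // padded_source is assumed to hold at least three trailing lookahead
        // samples, because the loop reads input[index + 3].
        AudioCore::Resample(dest.data(), padded_source.data(), pitch, fraction, dest.size());
    }
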
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp
index d64452617..56dc892b1 100644
--- a/src/audio_core/audio_renderer.cpp
+++ b/src/audio_core/audio_renderer.cpp
@@ -2,95 +2,49 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <vector>
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/codec.h"
#include "audio_core/common.h"
+#include "audio_core/info_updater.h"
+#include "audio_core/voice_context.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
+#include "core/settings.h"
namespace AudioCore {
-
-constexpr u32 STREAM_SAMPLE_RATE{48000};
-constexpr u32 STREAM_NUM_CHANNELS{2};
-using VoiceChannelHolder = std::array<VoiceResourceInformation*, 6>;
-class AudioRenderer::VoiceState {
-public:
- bool IsPlaying() const {
- return is_in_use && info.play_state == PlayState::Started;
- }
-
- const VoiceOutStatus& GetOutStatus() const {
- return out_status;
- }
-
- const VoiceInfo& GetInfo() const {
- return info;
- }
-
- VoiceInfo& GetInfo() {
- return info;
- }
-
- void SetWaveIndex(std::size_t index);
- std::vector<s16> DequeueSamples(std::size_t sample_count, Core::Memory::Memory& memory,
- const VoiceChannelHolder& voice_resources);
- void UpdateState();
- void RefreshBuffer(Core::Memory::Memory& memory, const VoiceChannelHolder& voice_resources);
-
-private:
- bool is_in_use{};
- bool is_refresh_pending{};
- std::size_t wave_index{};
- std::size_t offset{};
- Codec::ADPCMState adpcm_state{};
- InterpolationState interp_state{};
- std::vector<s16> samples;
- VoiceOutStatus out_status{};
- VoiceInfo info{};
-};
-
-class AudioRenderer::EffectState {
-public:
- const EffectOutStatus& GetOutStatus() const {
- return out_status;
- }
-
- const EffectInStatus& GetInfo() const {
- return info;
- }
-
- EffectInStatus& GetInfo() {
- return info;
- }
-
- void UpdateState(Core::Memory::Memory& memory);
-
-private:
- EffectOutStatus out_status{};
- EffectInStatus info{};
-};
-
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
- AudioRendererParameter params,
+ AudioCommon::AudioRendererParameter params,
std::shared_ptr<Kernel::WritableEvent> buffer_event,
std::size_t instance_number)
- : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
- voice_resources(params.voice_count), effects(params.effect_count), memory{memory_} {
+ : worker_params{params}, buffer_event{buffer_event},
+ memory_pool_info(params.effect_count + params.voice_count * 4),
+ voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
+ sink_context(params.sink_count), splitter_context(),
+ voices(params.voice_count), memory{memory_},
+ command_generator(worker_params, voice_context, mix_context, splitter_context, effect_context,
+ memory),
+ temp_mix_buffer(AudioCommon::TOTAL_TEMP_MIX_SIZE) {
behavior_info.SetUserRevision(params.revision);
+ splitter_context.Initialize(behavior_info, params.splitter_count,
+ params.num_splitter_send_channels);
+ mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
audio_out = std::make_unique<AudioCore::AudioOut>();
- stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
- fmt::format("AudioRenderer-Instance{}", instance_number),
- [=]() { buffer_event->Signal(); });
+ stream =
+ audio_out->OpenStream(core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
+ fmt::format("AudioRenderer-Instance{}", instance_number),
+ [=]() { buffer_event->Signal(); });
audio_out->StartStream(stream);
QueueMixedBuffer(0);
QueueMixedBuffer(1);
QueueMixedBuffer(2);
+ QueueMixedBuffer(3);
}
AudioRenderer::~AudioRenderer() = default;
@@ -111,355 +65,200 @@ Stream::State AudioRenderer::GetStreamState() const {
return stream->GetState();
}
-ResultVal<std::vector<u8>> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
- // Copy UpdateDataHeader struct
- UpdateDataHeader config{};
- std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader));
- u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4);
-
- if (!behavior_info.UpdateInput(input_params, sizeof(UpdateDataHeader))) {
- LOG_ERROR(Audio, "Failed to update behavior info input parameters");
- return Audren::ERR_INVALID_PARAMETERS;
- }
-
- // Copy MemoryPoolInfo structs
- std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count);
- std::memcpy(mem_pool_info.data(),
- input_params.data() + sizeof(UpdateDataHeader) + config.behavior_size,
- memory_pool_count * sizeof(MemoryPoolInfo));
-
- // Copy voice resources
- const std::size_t voice_resource_offset{sizeof(UpdateDataHeader) + config.behavior_size +
- config.memory_pools_size};
- std::memcpy(voice_resources.data(), input_params.data() + voice_resource_offset,
- sizeof(VoiceResourceInformation) * voice_resources.size());
-
- // Copy VoiceInfo structs
- std::size_t voice_offset{sizeof(UpdateDataHeader) + config.behavior_size +
- config.memory_pools_size + config.voice_resource_size};
- for (auto& voice : voices) {
- std::memcpy(&voice.GetInfo(), input_params.data() + voice_offset, sizeof(VoiceInfo));
- voice_offset += sizeof(VoiceInfo);
- }
-
- std::size_t effect_offset{sizeof(UpdateDataHeader) + config.behavior_size +
- config.memory_pools_size + config.voice_resource_size +
- config.voices_size};
- for (auto& effect : effects) {
- std::memcpy(&effect.GetInfo(), input_params.data() + effect_offset, sizeof(EffectInStatus));
- effect_offset += sizeof(EffectInStatus);
- }
-
- // Update memory pool state
- std::vector<MemoryPoolEntry> memory_pool(memory_pool_count);
- for (std::size_t index = 0; index < memory_pool.size(); ++index) {
- if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestAttach) {
- memory_pool[index].state = MemoryPoolStates::Attached;
- } else if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestDetach) {
- memory_pool[index].state = MemoryPoolStates::Detached;
- }
- }
-
- // Update voices
- for (auto& voice : voices) {
- voice.UpdateState();
- if (!voice.GetInfo().is_in_use) {
- continue;
- }
- if (voice.GetInfo().is_new) {
- voice.SetWaveIndex(voice.GetInfo().wave_buffer_head);
- }
- }
-
- for (auto& effect : effects) {
- effect.UpdateState(memory);
- }
-
- // Release previous buffers and queue next ones for playback
- ReleaseAndQueueBuffers();
-
- // Copy output header
- UpdateDataHeader response_data{worker_params};
- if (behavior_info.IsElapsedFrameCountSupported()) {
- response_data.render_info = sizeof(RendererInfo);
- response_data.total_size += sizeof(RendererInfo);
- }
-
- std::vector<u8> output_params(response_data.total_size);
- std::memcpy(output_params.data(), &response_data, sizeof(UpdateDataHeader));
-
- // Copy output memory pool entries
- std::memcpy(output_params.data() + sizeof(UpdateDataHeader), memory_pool.data(),
- response_data.memory_pools_size);
-
- // Copy output voice status
- std::size_t voice_out_status_offset{sizeof(UpdateDataHeader) + response_data.memory_pools_size};
- for (const auto& voice : voices) {
- std::memcpy(output_params.data() + voice_out_status_offset, &voice.GetOutStatus(),
- sizeof(VoiceOutStatus));
- voice_out_status_offset += sizeof(VoiceOutStatus);
- }
+static constexpr s16 ClampToS16(s32 value) {
+ return static_cast<s16>(std::clamp(value, -32768, 32767));
+}
- std::size_t effect_out_status_offset{
- sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
- response_data.voice_resource_size};
- for (const auto& effect : effects) {
- std::memcpy(output_params.data() + effect_out_status_offset, &effect.GetOutStatus(),
- sizeof(EffectOutStatus));
- effect_out_status_offset += sizeof(EffectOutStatus);
- }
+ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
+ std::vector<u8>& output_params) {
- // Update behavior info output
- const std::size_t behavior_out_status_offset{
- sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
- response_data.effects_size + response_data.sinks_size +
- response_data.performance_manager_size};
+ InfoUpdater info_updater{input_params, output_params, behavior_info};
- if (!behavior_info.UpdateOutput(output_params, behavior_out_status_offset)) {
- LOG_ERROR(Audio, "Failed to update behavior info output parameters");
- return Audren::ERR_INVALID_PARAMETERS;
+ if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
+ LOG_ERROR(Audio, "Failed to update behavior info input parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- if (behavior_info.IsElapsedFrameCountSupported()) {
- const std::size_t renderer_info_offset{
- sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
- response_data.effects_size + response_data.sinks_size +
- response_data.performance_manager_size + response_data.behavior_size};
- RendererInfo renderer_info{};
- renderer_info.elasped_frame_count = elapsed_frame_count;
- std::memcpy(output_params.data() + renderer_info_offset, &renderer_info,
- sizeof(RendererInfo));
+ if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
+ LOG_ERROR(Audio, "Failed to update memory pool parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- return MakeResult(output_params);
-}
-
-void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
- wave_index = index & 3;
- is_refresh_pending = true;
-}
-
-std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(
- std::size_t sample_count, Core::Memory::Memory& memory,
- const VoiceChannelHolder& voice_resources) {
- if (!IsPlaying()) {
- return {};
+ if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
+ LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- if (is_refresh_pending) {
- RefreshBuffer(memory, voice_resources);
+ if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
+ LOG_ERROR(Audio, "Failed to update voice parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- const std::size_t max_size{samples.size() - offset};
- const std::size_t dequeue_offset{offset};
- std::size_t size{sample_count * STREAM_NUM_CHANNELS};
- if (size > max_size) {
- size = max_size;
+ // TODO(ogniK): Deal with stopped audio renderer but updates still taking place
+ if (!info_updater.UpdateEffects(effect_context, true)) {
+ LOG_ERROR(Audio, "Failed to update effect parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- out_status.played_sample_count += size / STREAM_NUM_CHANNELS;
- offset += size;
-
- const auto& wave_buffer{info.wave_buffer[wave_index]};
- if (offset == samples.size()) {
- offset = 0;
-
- if (!wave_buffer.is_looping && wave_buffer.buffer_sz) {
- SetWaveIndex(wave_index + 1);
- }
-
- if (wave_buffer.buffer_sz) {
- out_status.wave_buffer_consumed++;
- }
-
- if (wave_buffer.end_of_stream || wave_buffer.buffer_sz == 0) {
- info.play_state = PlayState::Paused;
+ if (behavior_info.IsSplitterSupported()) {
+ if (!info_updater.UpdateSplitterInfo(splitter_context)) {
+ LOG_ERROR(Audio, "Failed to update splitter parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
- return {samples.begin() + dequeue_offset, samples.begin() + dequeue_offset + size};
-}
+ auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
+ splitter_context, effect_context);
-void AudioRenderer::VoiceState::UpdateState() {
- if (is_in_use && !info.is_in_use) {
- // No longer in use, reset state
- is_refresh_pending = true;
- wave_index = 0;
- offset = 0;
- out_status = {};
+ if (mix_result.IsError()) {
+ LOG_ERROR(Audio, "Failed to update mix parameters");
+ return mix_result;
}
- is_in_use = info.is_in_use;
-}
-void AudioRenderer::VoiceState::RefreshBuffer(Core::Memory::Memory& memory,
- const VoiceChannelHolder& voice_resources) {
- const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr;
- const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz;
- std::vector<s16> new_samples(wave_buffer_size / sizeof(s16));
- memory.ReadBlock(wave_buffer_address, new_samples.data(), wave_buffer_size);
-
- switch (static_cast<Codec::PcmFormat>(info.sample_format)) {
- case Codec::PcmFormat::Int16: {
- // PCM16 is played as-is
- break;
- }
- case Codec::PcmFormat::Adpcm: {
- // Decode ADPCM to PCM16
- Codec::ADPCM_Coeff coeffs;
- memory.ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff));
- new_samples = Codec::DecodeADPCM(reinterpret_cast<u8*>(new_samples.data()),
- new_samples.size() * sizeof(s16), coeffs, adpcm_state);
- break;
+ // TODO(ogniK): Sinks
+ if (!info_updater.UpdateSinks(sink_context)) {
+ LOG_ERROR(Audio, "Failed to update sink parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- default:
- UNIMPLEMENTED_MSG("Unimplemented sample_format={}", info.sample_format);
- break;
- }
-
- switch (info.channel_count) {
- case 1: {
- // 1 channel is upsampled to 2 channel
- samples.resize(new_samples.size() * 2);
- for (std::size_t index = 0; index < new_samples.size(); ++index) {
- auto sample = static_cast<float>(new_samples[index]);
- if (voice_resources[0]->in_use) {
- sample *= voice_resources[0]->mix_volumes[0];
- }
-
- samples[index * 2] = static_cast<s16>(sample * info.volume);
- samples[index * 2 + 1] = static_cast<s16>(sample * info.volume);
- }
- break;
+ // TODO(ogniK): Performance buffer
+ if (!info_updater.UpdatePerformanceBuffer()) {
+ LOG_ERROR(Audio, "Failed to update performance buffer parameters");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- case 2: {
- // 2 channel is played as is
- samples = std::move(new_samples);
- const std::size_t sample_count = (samples.size() / 2);
- for (std::size_t index = 0; index < sample_count; ++index) {
- const std::size_t index_l = index * 2;
- const std::size_t index_r = index * 2 + 1;
-
- auto sample_l = static_cast<float>(samples[index_l]);
- auto sample_r = static_cast<float>(samples[index_r]);
-
- if (voice_resources[0]->in_use) {
- sample_l *= voice_resources[0]->mix_volumes[0];
- }
-
- if (voice_resources[1]->in_use) {
- sample_r *= voice_resources[1]->mix_volumes[1];
- }
- samples[index_l] = static_cast<s16>(sample_l * info.volume);
- samples[index_r] = static_cast<s16>(sample_r * info.volume);
- }
- break;
+ if (!info_updater.UpdateErrorInfo(behavior_info)) {
+ LOG_ERROR(Audio, "Failed to update error info");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- case 6: {
- samples.resize((new_samples.size() / 6) * 2);
- const std::size_t sample_count = samples.size() / 2;
-
- for (std::size_t index = 0; index < sample_count; ++index) {
- auto FL = static_cast<float>(new_samples[index * 6]);
- auto FR = static_cast<float>(new_samples[index * 6 + 1]);
- auto FC = static_cast<float>(new_samples[index * 6 + 2]);
- auto BL = static_cast<float>(new_samples[index * 6 + 4]);
- auto BR = static_cast<float>(new_samples[index * 6 + 5]);
-
- if (voice_resources[0]->in_use) {
- FL *= voice_resources[0]->mix_volumes[0];
- }
- if (voice_resources[1]->in_use) {
- FR *= voice_resources[1]->mix_volumes[1];
- }
- if (voice_resources[2]->in_use) {
- FC *= voice_resources[2]->mix_volumes[2];
- }
- if (voice_resources[4]->in_use) {
- BL *= voice_resources[4]->mix_volumes[4];
- }
- if (voice_resources[5]->in_use) {
- BR *= voice_resources[5]->mix_volumes[5];
- }
- samples[index * 2] =
- static_cast<s16>((0.3694f * FL + 0.2612f * FC + 0.3694f * BL) * info.volume);
- samples[index * 2 + 1] =
- static_cast<s16>((0.3694f * FR + 0.2612f * FC + 0.3694f * BR) * info.volume);
+ if (behavior_info.IsElapsedFrameCountSupported()) {
+ if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
+ LOG_ERROR(Audio, "Failed to update renderer info");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- break;
- }
- default:
- UNIMPLEMENTED_MSG("Unimplemented channel_count={}", info.channel_count);
- break;
}
+ // TODO(ogniK): Statistics
- // Only interpolate when necessary, expensive.
- if (GetInfo().sample_rate != STREAM_SAMPLE_RATE) {
- samples = Interpolate(interp_state, std::move(samples), GetInfo().sample_rate,
- STREAM_SAMPLE_RATE);
+ if (!info_updater.WriteOutputHeader()) {
+ LOG_ERROR(Audio, "Failed to write output header");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
- is_refresh_pending = false;
-}
+ // TODO(ogniK): Check when all sections are implemented
-void AudioRenderer::EffectState::UpdateState(Core::Memory::Memory& memory) {
- if (info.is_new) {
- out_status.state = EffectStatus::New;
- } else {
- if (info.type == Effect::Aux) {
- ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_info) == 0,
- "Aux buffers tried to update");
- ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_info) == 0,
- "Aux buffers tried to update");
- ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_base) == 0,
- "Aux buffers tried to update");
- ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_base) == 0,
- "Aux buffers tried to update");
- }
+ if (!info_updater.CheckConsumedSize()) {
+ LOG_ERROR(Audio, "Audio buffers were not consumed!");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
-}
-static constexpr s16 ClampToS16(s32 value) {
- return static_cast<s16>(std::clamp(value, -32768, 32767));
+ ReleaseAndQueueBuffers();
+
+ return RESULT_SUCCESS;
}
void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
- constexpr std::size_t BUFFER_SIZE{512};
+ command_generator.PreCommand();
+ // Clear mix buffers before our next operation
+ command_generator.ClearMixBuffers();
+
+ // If the splitter is not in use, sort our mixes
+ if (!splitter_context.UsingSplitter()) {
+ mix_context.SortInfo();
+ }
+ // Sort our voices
+ voice_context.SortInfo();
+
+ // Handle samples
+ command_generator.GenerateVoiceCommands();
+ command_generator.GenerateSubMixCommands();
+ command_generator.GenerateFinalMixCommands();
+
+ command_generator.PostCommand();
+ // Base sample size
+ std::size_t BUFFER_SIZE{worker_params.sample_count};
+ // Samples
std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());
-
- for (auto& voice : voices) {
- if (!voice.IsPlaying()) {
- continue;
- }
- VoiceChannelHolder resources{};
- for (u32 channel = 0; channel < voice.GetInfo().channel_count; channel++) {
- const auto channel_resource_id = voice.GetInfo().voice_channel_resource_ids[channel];
- resources[channel] = &voice_resources[channel_resource_id];
+ // Make sure to clear our samples
+ std::memset(buffer.data(), 0, buffer.size() * sizeof(s16));
+
+ if (sink_context.InUse()) {
+ const auto stream_channel_count = stream->GetNumChannels();
+ const auto buffer_offsets = sink_context.OutputBuffers();
+ const auto channel_count = buffer_offsets.size();
+ const auto& final_mix = mix_context.GetFinalMixInfo();
+ const auto& in_params = final_mix.GetInParams();
+ std::vector<s32*> mix_buffers(channel_count);
+ for (std::size_t i = 0; i < channel_count; i++) {
+ mix_buffers[i] =
+ command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
}
- std::size_t offset{};
- s64 samples_remaining{BUFFER_SIZE};
- while (samples_remaining > 0) {
- const std::vector<s16> samples{
- voice.DequeueSamples(samples_remaining, memory, resources)};
-
- if (samples.empty()) {
- break;
- }
-
- samples_remaining -= samples.size() / stream->GetNumChannels();
-
- for (const auto& sample : samples) {
- const s32 buffer_sample{buffer[offset]};
- buffer[offset++] =
- ClampToS16(buffer_sample + static_cast<s32>(sample * voice.GetInfo().volume));
+ for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
+ if (channel_count == 1) {
+ const auto sample = ClampToS16(mix_buffers[0][i]);
+ buffer[i * stream_channel_count + 0] = sample;
+ if (stream_channel_count > 1) {
+ buffer[i * stream_channel_count + 1] = sample;
+ }
+ if (stream_channel_count == 6) {
+ buffer[i * stream_channel_count + 2] = sample;
+ buffer[i * stream_channel_count + 4] = sample;
+ buffer[i * stream_channel_count + 5] = sample;
+ }
+ } else if (channel_count == 2) {
+ const auto l_sample = ClampToS16(mix_buffers[0][i]);
+ const auto r_sample = ClampToS16(mix_buffers[1][i]);
+ if (stream_channel_count == 1) {
+ buffer[i * stream_channel_count + 0] = l_sample;
+ } else if (stream_channel_count == 2) {
+ buffer[i * stream_channel_count + 0] = l_sample;
+ buffer[i * stream_channel_count + 1] = r_sample;
+ } else if (stream_channel_count == 6) {
+ buffer[i * stream_channel_count + 0] = l_sample;
+ buffer[i * stream_channel_count + 1] = r_sample;
+
+ buffer[i * stream_channel_count + 2] =
+ ClampToS16((static_cast<s32>(l_sample) + static_cast<s32>(r_sample)) / 2);
+
+ buffer[i * stream_channel_count + 4] = l_sample;
+ buffer[i * stream_channel_count + 5] = r_sample;
+ }
+
+ } else if (channel_count == 6) {
+ const auto fl_sample = ClampToS16(mix_buffers[0][i]);
+ const auto fr_sample = ClampToS16(mix_buffers[1][i]);
+ const auto fc_sample = ClampToS16(mix_buffers[2][i]);
+ const auto lf_sample = ClampToS16(mix_buffers[3][i]);
+ const auto bl_sample = ClampToS16(mix_buffers[4][i]);
+ const auto br_sample = ClampToS16(mix_buffers[5][i]);
+
+ if (stream_channel_count == 1) {
+ buffer[i * stream_channel_count + 0] = fc_sample;
+ } else if (stream_channel_count == 2) {
+ buffer[i * stream_channel_count + 0] =
+ static_cast<s16>(0.3694f * static_cast<float>(fl_sample) +
+ 0.2612f * static_cast<float>(fc_sample) +
+ 0.3694f * static_cast<float>(bl_sample));
+ buffer[i * stream_channel_count + 1] =
+ static_cast<s16>(0.3694f * static_cast<float>(fr_sample) +
+ 0.2612f * static_cast<float>(fc_sample) +
+ 0.3694f * static_cast<float>(br_sample));
+ } else if (stream_channel_count == 6) {
+ buffer[i * stream_channel_count + 0] = fl_sample;
+ buffer[i * stream_channel_count + 1] = fr_sample;
+ buffer[i * stream_channel_count + 2] = fc_sample;
+ buffer[i * stream_channel_count + 3] = lf_sample;
+ buffer[i * stream_channel_count + 4] = bl_sample;
+ buffer[i * stream_channel_count + 5] = br_sample;
+ }
}
}
}
+
audio_out->QueueBuffer(stream, tag, std::move(buffer));
elapsed_frame_count++;
+ voice_context.UpdateStateByDspShared();
}
void AudioRenderer::ReleaseAndQueueBuffers() {
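
The new final-mix path in QueueMixedBuffer() above reads planar s32 mix buffers (one pointer per output channel, fetched through command_generator.GetMixBuffer) and interleaves them into the s16 stream buffer, clamping every sample. A reduced sketch of the stereo-in/stereo-out case follows; the free-standing helper and its name are assumptions, and the clamp is restated here because ClampToS16 in the patch is file-local:

    #include <algorithm>
    #include <cstddef>

    // s16/s32 come from common/common_types.h.
    static s16 ClampSampleSketch(s32 value) {
        return static_cast<s16>(std::clamp(value, -32768, 32767));
    }

    // Interleave two planar channel buffers into an L/R s16 stream.
    void InterleaveStereoSketch(const s32* left, const s32* right, s16* out, std::size_t frames) {
        for (std::size_t i = 0; i < frames; ++i) {
            out[i * 2 + 0] = ClampSampleSketch(left[i]);
            out[i * 2 + 1] = ClampSampleSketch(right[i]);
        }
    }
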
diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h
index f0b691a86..2bca795ba 100644
--- a/src/audio_core/audio_renderer.h
+++ b/src/audio_core/audio_renderer.h
@@ -9,8 +9,15 @@
#include <vector>
#include "audio_core/behavior_info.h"
+#include "audio_core/command_generator.h"
#include "audio_core/common.h"
+#include "audio_core/effect_context.h"
+#include "audio_core/memory_pool.h"
+#include "audio_core/mix_context.h"
+#include "audio_core/sink_context.h"
+#include "audio_core/splitter_context.h"
#include "audio_core/stream.h"
+#include "audio_core/voice_context.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
@@ -30,220 +37,25 @@ class Memory;
}
namespace AudioCore {
+using DSPStateHolder = std::array<VoiceState*, 6>;
class AudioOut;
-enum class PlayState : u8 {
- Started = 0,
- Stopped = 1,
- Paused = 2,
-};
-
-enum class Effect : u8 {
- None = 0,
- Aux = 2,
-};
-
-enum class EffectStatus : u8 {
- None = 0,
- New = 1,
-};
-
-struct AudioRendererParameter {
- u32_le sample_rate;
- u32_le sample_count;
- u32_le mix_buffer_count;
- u32_le submix_count;
- u32_le voice_count;
- u32_le sink_count;
- u32_le effect_count;
- u32_le performance_frame_count;
- u8 is_voice_drop_enabled;
- u8 unknown_21;
- u8 unknown_22;
- u8 execution_mode;
- u32_le splitter_count;
- u32_le num_splitter_send_channels;
- u32_le unknown_30;
- u32_le revision;
-};
-static_assert(sizeof(AudioRendererParameter) == 52, "AudioRendererParameter is an invalid size");
-
-enum class MemoryPoolStates : u32 { // Should be LE
- Invalid = 0x0,
- Unknown = 0x1,
- RequestDetach = 0x2,
- Detached = 0x3,
- RequestAttach = 0x4,
- Attached = 0x5,
- Released = 0x6,
-};
-
-struct MemoryPoolEntry {
- MemoryPoolStates state;
- u32_le unknown_4;
- u32_le unknown_8;
- u32_le unknown_c;
-};
-static_assert(sizeof(MemoryPoolEntry) == 0x10, "MemoryPoolEntry has wrong size");
-
-struct MemoryPoolInfo {
- u64_le pool_address;
- u64_le pool_size;
- MemoryPoolStates pool_state;
- INSERT_PADDING_WORDS(3); // Unknown
-};
-static_assert(sizeof(MemoryPoolInfo) == 0x20, "MemoryPoolInfo has wrong size");
-struct BiquadFilter {
- u8 enable;
- INSERT_PADDING_BYTES(1);
- std::array<s16_le, 3> numerator;
- std::array<s16_le, 2> denominator;
-};
-static_assert(sizeof(BiquadFilter) == 0xc, "BiquadFilter has wrong size");
-
-struct WaveBuffer {
- u64_le buffer_addr;
- u64_le buffer_sz;
- s32_le start_sample_offset;
- s32_le end_sample_offset;
- u8 is_looping;
- u8 end_of_stream;
- u8 sent_to_server;
- INSERT_PADDING_BYTES(5);
- u64 context_addr;
- u64 context_sz;
- INSERT_PADDING_BYTES(8);
-};
-static_assert(sizeof(WaveBuffer) == 0x38, "WaveBuffer has wrong size");
-
-struct VoiceResourceInformation {
- s32_le id{};
- std::array<float_le, MAX_MIX_BUFFERS> mix_volumes{};
- bool in_use{};
- INSERT_PADDING_BYTES(11);
-};
-static_assert(sizeof(VoiceResourceInformation) == 0x70, "VoiceResourceInformation has wrong size");
-
-struct VoiceInfo {
- u32_le id;
- u32_le node_id;
- u8 is_new;
- u8 is_in_use;
- PlayState play_state;
- u8 sample_format;
- u32_le sample_rate;
- u32_le priority;
- u32_le sorting_order;
- u32_le channel_count;
- float_le pitch;
- float_le volume;
- std::array<BiquadFilter, 2> biquad_filter;
- u32_le wave_buffer_count;
- u32_le wave_buffer_head;
- INSERT_PADDING_WORDS(1);
- u64_le additional_params_addr;
- u64_le additional_params_sz;
- u32_le mix_id;
- u32_le splitter_info_id;
- std::array<WaveBuffer, 4> wave_buffer;
- std::array<u32_le, 6> voice_channel_resource_ids;
- INSERT_PADDING_BYTES(24);
-};
-static_assert(sizeof(VoiceInfo) == 0x170, "VoiceInfo is wrong size");
-
-struct VoiceOutStatus {
- u64_le played_sample_count;
- u32_le wave_buffer_consumed;
- u32_le voice_drops_count;
-};
-static_assert(sizeof(VoiceOutStatus) == 0x10, "VoiceOutStatus has wrong size");
-
-struct AuxInfo {
- std::array<u8, 24> input_mix_buffers;
- std::array<u8, 24> output_mix_buffers;
- u32_le mix_buffer_count;
- u32_le sample_rate; // Stored in the aux buffer currently
- u32_le sample_count;
- u64_le send_buffer_info;
- u64_le send_buffer_base;
-
- u64_le return_buffer_info;
- u64_le return_buffer_base;
-};
-static_assert(sizeof(AuxInfo) == 0x60, "AuxInfo is an invalid size");
-
-struct EffectInStatus {
- Effect type;
- u8 is_new;
- u8 is_enabled;
- INSERT_PADDING_BYTES(1);
- u32_le mix_id;
- u64_le buffer_base;
- u64_le buffer_sz;
- s32_le priority;
- INSERT_PADDING_BYTES(4);
- union {
- std::array<u8, 0xa0> raw;
- AuxInfo aux_info;
- };
-};
-static_assert(sizeof(EffectInStatus) == 0xc0, "EffectInStatus is an invalid size");
-
-struct EffectOutStatus {
- EffectStatus state;
- INSERT_PADDING_BYTES(0xf);
-};
-static_assert(sizeof(EffectOutStatus) == 0x10, "EffectOutStatus is an invalid size");
-
struct RendererInfo {
u64_le elasped_frame_count{};
INSERT_PADDING_WORDS(2);
};
static_assert(sizeof(RendererInfo) == 0x10, "RendererInfo is an invalid size");
-struct UpdateDataHeader {
- UpdateDataHeader() {}
-
- explicit UpdateDataHeader(const AudioRendererParameter& config) {
- revision = Common::MakeMagic('R', 'E', 'V', '8'); // 9.2.0 Revision
- behavior_size = 0xb0;
- memory_pools_size = (config.effect_count + (config.voice_count * 4)) * 0x10;
- voices_size = config.voice_count * 0x10;
- voice_resource_size = 0x0;
- effects_size = config.effect_count * 0x10;
- mixes_size = 0x0;
- sinks_size = config.sink_count * 0x20;
- performance_manager_size = 0x10;
- render_info = 0;
- total_size = sizeof(UpdateDataHeader) + behavior_size + memory_pools_size + voices_size +
- effects_size + sinks_size + performance_manager_size;
- }
-
- u32_le revision{};
- u32_le behavior_size{};
- u32_le memory_pools_size{};
- u32_le voices_size{};
- u32_le voice_resource_size{};
- u32_le effects_size{};
- u32_le mixes_size{};
- u32_le sinks_size{};
- u32_le performance_manager_size{};
- u32_le splitter_size{};
- u32_le render_info{};
- INSERT_PADDING_WORDS(4);
- u32_le total_size{};
-};
-static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size");
-
class AudioRenderer {
public:
AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
- AudioRendererParameter params,
+ AudioCommon::AudioRendererParameter params,
std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number);
~AudioRenderer();
- ResultVal<std::vector<u8>> UpdateAudioRenderer(const std::vector<u8>& input_params);
+ ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params,
+ std::vector<u8>& output_params);
void QueueMixedBuffer(Buffer::Tag tag);
void ReleaseAndQueueBuffers();
u32 GetSampleRate() const;
@@ -252,19 +64,23 @@ public:
Stream::State GetStreamState() const;
private:
- class EffectState;
- class VoiceState;
BehaviorInfo behavior_info{};
- AudioRendererParameter worker_params;
+ AudioCommon::AudioRendererParameter worker_params;
std::shared_ptr<Kernel::WritableEvent> buffer_event;
+ std::vector<ServerMemoryPoolInfo> memory_pool_info;
+ VoiceContext voice_context;
+ EffectContext effect_context;
+ MixContext mix_context;
+ SinkContext sink_context;
+ SplitterContext splitter_context;
std::vector<VoiceState> voices;
- std::vector<VoiceResourceInformation> voice_resources;
- std::vector<EffectState> effects;
std::unique_ptr<AudioOut> audio_out;
StreamPtr stream;
Core::Memory::Memory& memory;
+ CommandGenerator command_generator;
std::size_t elapsed_frame_count{};
+ std::vector<s32> temp_mix_buffer{};
};
} // namespace AudioCore
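
UpdateAudioRenderer() now returns a plain ResultCode and writes into a caller-provided response buffer instead of returning ResultVal<std::vector<u8>>. A hedged sketch of the adjusted call site (the wrapper name, buffer ownership, and sizing are assumptions, not taken from this patch):

    #include <vector>

    ResultCode RunUpdateSketch(AudioCore::AudioRenderer& renderer,
                               const std::vector<u8>& request, std::vector<u8>& response) {
        // `response` is assumed to be pre-sized by the service from the request header.
        const ResultCode result = renderer.UpdateAudioRenderer(request, response);
        if (result.IsError()) {
            // On failure the response buffer contents are not meaningful;
            // forward `result` to the guest instead.
            return result;
        }
        return RESULT_SUCCESS;
    }
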
diff --git a/src/audio_core/behavior_info.cpp b/src/audio_core/behavior_info.cpp
index 94b7a3bf1..5d62adb0b 100644
--- a/src/audio_core/behavior_info.cpp
+++ b/src/audio_core/behavior_info.cpp
@@ -9,39 +9,11 @@
namespace AudioCore {
-BehaviorInfo::BehaviorInfo() : process_revision(CURRENT_PROCESS_REVISION) {}
+BehaviorInfo::BehaviorInfo() : process_revision(AudioCommon::CURRENT_PROCESS_REVISION) {}
BehaviorInfo::~BehaviorInfo() = default;
-bool BehaviorInfo::UpdateInput(const std::vector<u8>& buffer, std::size_t offset) {
- if (!CanConsumeBuffer(buffer.size(), offset, sizeof(InParams))) {
- LOG_ERROR(Audio, "Buffer is an invalid size!");
- return false;
- }
- InParams params{};
- std::memcpy(&params, buffer.data() + offset, sizeof(InParams));
-
- if (!IsValidRevision(params.revision)) {
- LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", params.revision);
- return false;
- }
-
- if (user_revision != params.revision) {
- LOG_ERROR(Audio,
- "User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
- user_revision, params.revision);
- return false;
- }
-
- ClearError();
- UpdateFlags(params.flags);
-
- // TODO(ogniK): Check input params size when InfoUpdater is used
-
- return true;
-}
-
bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
- if (!CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
+ if (!AudioCommon::CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
@@ -65,36 +37,69 @@ void BehaviorInfo::SetUserRevision(u32_le revision) {
user_revision = revision;
}
+u32_le BehaviorInfo::GetUserRevision() const {
+ return user_revision;
+}
+
+u32_le BehaviorInfo::GetProcessRevision() const {
+ return process_revision;
+}
+
bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
- return IsRevisionSupported(2, user_revision);
+ return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsSplitterSupported() const {
- return IsRevisionSupported(2, user_revision);
+ return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsLongSizePreDelaySupported() const {
- return IsRevisionSupported(3, user_revision);
+ return AudioCommon::IsRevisionSupported(3, user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit80PercentSupported() const {
- return IsRevisionSupported(5, user_revision);
+ return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit75PercentSupported() const {
- return IsRevisionSupported(4, user_revision);
+ return AudioCommon::IsRevisionSupported(4, user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit70PercentSupported() const {
- return IsRevisionSupported(1, user_revision);
+ return AudioCommon::IsRevisionSupported(1, user_revision);
}
bool BehaviorInfo::IsElapsedFrameCountSupported() const {
- return IsRevisionSupported(5, user_revision);
+ return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
return (flags & 1) != 0;
}
+bool BehaviorInfo::IsFlushVoiceWaveBuffersSupported() const {
+ return AudioCommon::IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsVoicePlayedSampleCountResetAtLoopPointSupported() const {
+ return AudioCommon::IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsVoicePitchAndSrcSkippedSupported() const {
+ return AudioCommon::IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsMixInParameterDirtyOnlyUpdateSupported() const {
+ return AudioCommon::IsRevisionSupported(7, user_revision);
+}
+
+bool BehaviorInfo::IsSplitterBugFixed() const {
+ return AudioCommon::IsRevisionSupported(5, user_revision);
+}
+
+void BehaviorInfo::CopyErrorInfo(BehaviorInfo::OutParams& dst) {
+ dst.error_count = static_cast<u32>(error_count);
+ std::copy(errors.begin(), errors.begin() + error_count, dst.errors.begin());
+}
+
} // namespace AudioCore
diff --git a/src/audio_core/behavior_info.h b/src/audio_core/behavior_info.h
index c5e91ab39..50948e8df 100644
--- a/src/audio_core/behavior_info.h
+++ b/src/audio_core/behavior_info.h
@@ -14,15 +14,37 @@
namespace AudioCore {
class BehaviorInfo {
public:
+ struct ErrorInfo {
+ u32_le result{};
+ INSERT_PADDING_WORDS(1);
+ u64_le result_info{};
+ };
+ static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
+
+ struct InParams {
+ u32_le revision{};
+ u32_le padding{};
+ u64_le flags{};
+ };
+ static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
+
+ struct OutParams {
+ std::array<ErrorInfo, 10> errors{};
+ u32_le error_count{};
+ INSERT_PADDING_BYTES(12);
+ };
+ static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
+
explicit BehaviorInfo();
~BehaviorInfo();
- bool UpdateInput(const std::vector<u8>& buffer, std::size_t offset);
bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset);
void ClearError();
void UpdateFlags(u64_le dest_flags);
void SetUserRevision(u32_le revision);
+ u32_le GetUserRevision() const;
+ u32_le GetProcessRevision() const;
bool IsAdpcmLoopContextBugFixed() const;
bool IsSplitterSupported() const;
@@ -32,35 +54,19 @@ public:
bool IsAudioRenererProcessingTimeLimit70PercentSupported() const;
bool IsElapsedFrameCountSupported() const;
bool IsMemoryPoolForceMappingEnabled() const;
+ bool IsFlushVoiceWaveBuffersSupported() const;
+ bool IsVoicePlayedSampleCountResetAtLoopPointSupported() const;
+ bool IsVoicePitchAndSrcSkippedSupported() const;
+ bool IsMixInParameterDirtyOnlyUpdateSupported() const;
+ bool IsSplitterBugFixed() const;
+ void CopyErrorInfo(OutParams& dst);
private:
u32_le process_revision{};
u32_le user_revision{};
u64_le flags{};
-
- struct ErrorInfo {
- u32_le result{};
- INSERT_PADDING_WORDS(1);
- u64_le result_info{};
- };
- static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
-
std::array<ErrorInfo, 10> errors{};
std::size_t error_count{};
-
- struct InParams {
- u32_le revision{};
- u32_le padding{};
- u64_le flags{};
- };
- static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
-
- struct OutParams {
- std::array<ErrorInfo, 10> errors{};
- u32_le error_count{};
- INSERT_PADDING_BYTES(12);
- };
- static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
};
} // namespace AudioCore
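
The getters added to BehaviorInfo act as revision gates: the renderer only parses or emits optional sections when the guest-supplied revision advertises them, and CopyErrorInfo() surfaces the accumulated ErrorInfo entries through the now-public OutParams block. A small sketch of how an update pass might consult them (the function name is hypothetical; the revision thresholds follow behavior_info.cpp above):

    void ConsultBehaviorInfoSketch(AudioCore::BehaviorInfo& behavior_info) {
        if (behavior_info.IsSplitterSupported()) {
            // Revision >= 2: the update buffer carries splitter sections.
        }
        if (behavior_info.IsElapsedFrameCountSupported()) {
            // Revision >= 5: RendererInfo is appended to the response.
        }
        AudioCore::BehaviorInfo::OutParams errors{};
        behavior_info.CopyErrorInfo(errors); // copies error_count entries into errors
    }
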
diff --git a/src/audio_core/command_generator.cpp b/src/audio_core/command_generator.cpp
new file mode 100644
index 000000000..84782cde6
--- /dev/null
+++ b/src/audio_core/command_generator.cpp
@@ -0,0 +1,976 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/algorithm/interpolate.h"
+#include "audio_core/command_generator.h"
+#include "audio_core/effect_context.h"
+#include "audio_core/mix_context.h"
+#include "audio_core/voice_context.h"
+#include "core/memory.h"
+
+namespace AudioCore {
+namespace {
+constexpr std::size_t MIX_BUFFER_SIZE = 0x3f00;
+constexpr std::size_t SCALED_MIX_BUFFER_SIZE = MIX_BUFFER_SIZE << 15ULL;
+
+template <std::size_t N>
+void ApplyMix(s32* output, const s32* input, s32 gain, s32 sample_count) {
+ for (std::size_t i = 0; i < static_cast<std::size_t>(sample_count); i += N) {
+ for (std::size_t j = 0; j < N; j++) {
+ output[i + j] +=
+ static_cast<s32>((static_cast<s64>(input[i + j]) * gain + 0x4000) >> 15);
+ }
+ }
+}
+
+s32 ApplyMixRamp(s32* output, const s32* input, float gain, float delta, s32 sample_count) {
+ s32 x = 0;
+ for (s32 i = 0; i < sample_count; i++) {
+ x = static_cast<s32>(static_cast<float>(input[i]) * gain);
+ output[i] += x;
+ gain += delta;
+ }
+ return x;
+}
+
+void ApplyGain(s32* output, const s32* input, s32 gain, s32 delta, s32 sample_count) {
+ for (s32 i = 0; i < sample_count; i++) {
+ output[i] = static_cast<s32>((static_cast<s64>(input[i]) * gain + 0x4000) >> 15);
+ gain += delta;
+ }
+}
+
+void ApplyGainWithoutDelta(s32* output, const s32* input, s32 gain, s32 sample_count) {
+ for (s32 i = 0; i < sample_count; i++) {
+ output[i] = static_cast<s32>((static_cast<s64>(input[i]) * gain + 0x4000) >> 15);
+ }
+}
+
+s32 ApplyMixDepop(s32* output, s32 first_sample, s32 delta, s32 sample_count) {
+ const bool positive = first_sample > 0;
+ auto final_sample = std::abs(first_sample);
+ for (s32 i = 0; i < sample_count; i++) {
+ final_sample = static_cast<s32>((static_cast<s64>(final_sample) * delta) >> 15);
+ if (positive) {
+ output[i] += final_sample;
+ } else {
+ output[i] -= final_sample;
+ }
+ }
+ if (positive) {
+ return final_sample;
+ } else {
+ return -final_sample;
+ }
+}
+
+} // namespace
+
+CommandGenerator::CommandGenerator(AudioCommon::AudioRendererParameter& worker_params,
+ VoiceContext& voice_context, MixContext& mix_context,
+ SplitterContext& splitter_context, EffectContext& effect_context,
+ Core::Memory::Memory& memory)
+ : worker_params(worker_params), voice_context(voice_context), mix_context(mix_context),
+ splitter_context(splitter_context), effect_context(effect_context), memory(memory),
+ mix_buffer((worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT) *
+ worker_params.sample_count),
+ sample_buffer(MIX_BUFFER_SIZE),
+ depop_buffer((worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT) *
+ worker_params.sample_count) {}
+CommandGenerator::~CommandGenerator() = default;
+
+void CommandGenerator::ClearMixBuffers() {
+ std::fill(mix_buffer.begin(), mix_buffer.end(), 0);
+ std::fill(sample_buffer.begin(), sample_buffer.end(), 0);
+ // std::fill(depop_buffer.begin(), depop_buffer.end(), 0);
+}
+
+void CommandGenerator::GenerateVoiceCommands() {
+ if (dumping_frame) {
+ LOG_DEBUG(Audio, "(DSP_TRACE) GenerateVoiceCommands");
+ }
+ // Grab all our voices
+ const auto voice_count = voice_context.GetVoiceCount();
+ for (std::size_t i = 0; i < voice_count; i++) {
+ auto& voice_info = voice_context.GetSortedInfo(i);
+ // Update voices and check if we should queue them
+ if (voice_info.ShouldSkip() || !voice_info.UpdateForCommandGeneration(voice_context)) {
+ continue;
+ }
+
+ // Queue our voice
+ GenerateVoiceCommand(voice_info);
+ }
+ // Update our splitters
+ splitter_context.UpdateInternalState();
+}
+
+void CommandGenerator::GenerateVoiceCommand(ServerVoiceInfo& voice_info) {
+ auto& in_params = voice_info.GetInParams();
+ const auto channel_count = in_params.channel_count;
+
+ for (s32 channel = 0; channel < channel_count; channel++) {
+ const auto resource_id = in_params.voice_channel_resource_id[channel];
+ auto& dsp_state = voice_context.GetDspSharedState(resource_id);
+ auto& channel_resource = voice_context.GetChannelResource(resource_id);
+
+ // Decode our samples for our channel
+ GenerateDataSourceCommand(voice_info, dsp_state, channel);
+
+ if (in_params.should_depop) {
+ in_params.last_volume = 0.0f;
+ } else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER ||
+ in_params.mix_id != AudioCommon::NO_MIX) {
+ // Apply a biquad filter if needed
+ GenerateBiquadFilterCommandForVoice(voice_info, dsp_state,
+ worker_params.mix_buffer_count, channel);
+ // Base voice volume ramping
+ GenerateVolumeRampCommand(in_params.last_volume, in_params.volume, channel,
+ in_params.node_id);
+ in_params.last_volume = in_params.volume;
+
+ if (in_params.mix_id != AudioCommon::NO_MIX) {
+ // If we're using a mix id
+ auto& mix_info = mix_context.GetInfo(in_params.mix_id);
+ const auto& dest_mix_params = mix_info.GetInParams();
+
+ // Voice Mixing
+ GenerateVoiceMixCommand(
+ channel_resource.GetCurrentMixVolume(), channel_resource.GetLastMixVolume(),
+ dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
+ worker_params.mix_buffer_count + channel, in_params.node_id);
+
+ // Update last mix volumes
+ channel_resource.UpdateLastMixVolumes();
+ } else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER) {
+ s32 base = channel;
+ while (auto* destination_data =
+ GetDestinationData(in_params.splitter_info_id, base)) {
+ base += channel_count;
+
+ if (!destination_data->IsConfigured()) {
+ continue;
+ }
+ if (destination_data->GetMixId() >= mix_context.GetCount()) {
+ continue;
+ }
+
+ const auto& mix_info = mix_context.GetInfo(destination_data->GetMixId());
+ const auto& dest_mix_params = mix_info.GetInParams();
+ GenerateVoiceMixCommand(
+ destination_data->CurrentMixVolumes(), destination_data->LastMixVolumes(),
+ dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
+ worker_params.mix_buffer_count + channel, in_params.node_id);
+ destination_data->MarkDirty();
+ }
+ }
+ // Update biquad filter enabled states
+ for (std::size_t i = 0; i < AudioCommon::MAX_BIQUAD_FILTERS; i++) {
+ in_params.was_biquad_filter_enabled[i] = in_params.biquad_filter[i].enabled;
+ }
+ }
+ }
+}
+
+void CommandGenerator::GenerateSubMixCommands() {
+ const auto mix_count = mix_context.GetCount();
+ for (std::size_t i = 0; i < mix_count; i++) {
+ auto& mix_info = mix_context.GetSortedInfo(i);
+ const auto& in_params = mix_info.GetInParams();
+ if (!in_params.in_use || in_params.mix_id == AudioCommon::FINAL_MIX) {
+ continue;
+ }
+ GenerateSubMixCommand(mix_info);
+ }
+}
+
+void CommandGenerator::GenerateFinalMixCommands() {
+ GenerateFinalMixCommand();
+}
+
+void CommandGenerator::PreCommand() {
+ if (!dumping_frame) {
+ return;
+ }
+ for (std::size_t i = 0; i < splitter_context.GetInfoCount(); i++) {
+ const auto& base = splitter_context.GetInfo(i);
+ std::string graph = fmt::format("b[{}]", i);
+ auto* head = base.GetHead();
+ while (head != nullptr) {
+ graph += fmt::format("->{}", head->GetMixId());
+ head = head->GetNextDestination();
+ }
+ LOG_DEBUG(Audio, "(DSP_TRACE) SplitterGraph splitter_info={}, {}", i, graph);
+ }
+}
+
+void CommandGenerator::PostCommand() {
+ if (!dumping_frame) {
+ return;
+ }
+ dumping_frame = false;
+}
+
+void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
+ s32 channel) {
+ auto& in_params = voice_info.GetInParams();
+ const auto depop = in_params.should_depop;
+
+ if (depop) {
+ if (in_params.mix_id != AudioCommon::NO_MIX) {
+ auto& mix_info = mix_context.GetInfo(in_params.mix_id);
+ const auto& mix_in = mix_info.GetInParams();
+ GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
+ } else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER) {
+ s32 index{};
+ while (const auto* destination =
+ GetDestinationData(in_params.splitter_info_id, index++)) {
+ if (!destination->IsConfigured()) {
+ continue;
+ }
+ auto& mix_info = mix_context.GetInfo(destination->GetMixId());
+ const auto& mix_in = mix_info.GetInParams();
+ GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
+ }
+ }
+ } else {
+ switch (in_params.sample_format) {
+ case SampleFormat::Pcm16:
+ DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(channel), dsp_state, channel,
+ worker_params.sample_rate, worker_params.sample_count,
+ in_params.node_id);
+ break;
+ case SampleFormat::Adpcm:
+ ASSERT(channel == 0 && in_params.channel_count == 1);
+ DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(0), dsp_state, 0,
+ worker_params.sample_rate, worker_params.sample_count,
+ in_params.node_id);
+ break;
+ default:
+ UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+ }
+ }
+}
+
+void CommandGenerator::GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info,
+ VoiceState& dsp_state,
+ s32 mix_buffer_count, s32 channel) {
+ for (std::size_t i = 0; i < AudioCommon::MAX_BIQUAD_FILTERS; i++) {
+ const auto& in_params = voice_info.GetInParams();
+ auto& biquad_filter = in_params.biquad_filter[i];
+ // Check if biquad filter is actually used
+ if (!biquad_filter.enabled) {
+ continue;
+ }
+
+        // Reinitialize our biquad filter state if the filter was not enabled previously
+ if (!in_params.was_biquad_filter_enabled[i]) {
+ dsp_state.biquad_filter_state.fill(0);
+ }
+
+ // Generate biquad filter
+ // GenerateBiquadFilterCommand(mix_buffer_count, biquad_filter,
+ // dsp_state.biquad_filter_state,
+ // mix_buffer_count + channel, mix_buffer_count +
+ // channel, worker_params.sample_count,
+ // voice_info.GetInParams().node_id);
+ }
+}
+
+void AudioCore::CommandGenerator::GenerateBiquadFilterCommand(
+ s32 mix_buffer, const BiquadFilterParameter& params, std::array<s64, 2>& state,
+ std::size_t input_offset, std::size_t output_offset, s32 sample_count, s32 node_id) {
+ if (dumping_frame) {
+ LOG_DEBUG(Audio,
+ "(DSP_TRACE) GenerateBiquadFilterCommand node_id={}, "
+ "input_mix_buffer={}, output_mix_buffer={}",
+ node_id, input_offset, output_offset);
+ }
+ const auto* input = GetMixBuffer(input_offset);
+ auto* output = GetMixBuffer(output_offset);
+
+ // Biquad filter parameters
+ const auto [n0, n1, n2] = params.numerator;
+ const auto [d0, d1] = params.denominator;
+
+ // Biquad filter states
+ auto [s0, s1] = state;
+
+ constexpr s64 int32_min = std::numeric_limits<s32>::min();
+ constexpr s64 int32_max = std::numeric_limits<s32>::max();
+
+ for (int i = 0; i < sample_count; ++i) {
+ const auto sample = static_cast<s64>(input[i]);
+ const auto f = (sample * n0 + s0 + 0x4000) >> 15;
+ const auto y = std::clamp(f, int32_min, int32_max);
+ s0 = sample * n1 + y * d0 + s1;
+ s1 = sample * n2 + y * d1;
+ output[i] = static_cast<s32>(y);
+ }
+
+ state = {s0, s1};
+}
+
+void CommandGenerator::GenerateDepopPrepareCommand(VoiceState& dsp_state,
+ std::size_t mix_buffer_count,
+ std::size_t mix_buffer_offset) {
+ for (std::size_t i = 0; i < mix_buffer_count; i++) {
+ auto& sample = dsp_state.previous_samples[i];
+ if (sample != 0) {
+ depop_buffer[mix_buffer_offset + i] += sample;
+ sample = 0;
+ }
+ }
+}
+
+void CommandGenerator::GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_count,
+ std::size_t mix_buffer_offset,
+ s32 sample_rate) {
+ const std::size_t end_offset =
+ std::min(mix_buffer_offset + mix_buffer_count, GetTotalMixBufferCount());
+ const s32 delta = sample_rate == 48000 ? 0x7B29 : 0x78CB;
+ for (std::size_t i = mix_buffer_offset; i < end_offset; i++) {
+ if (depop_buffer[i] == 0) {
+ continue;
+ }
+
+ depop_buffer[i] =
+ ApplyMixDepop(GetMixBuffer(i), depop_buffer[i], delta, worker_params.sample_count);
+ }
+}
+
+void CommandGenerator::GenerateEffectCommand(ServerMixInfo& mix_info) {
+ const std::size_t effect_count = effect_context.GetCount();
+ const auto buffer_offset = mix_info.GetInParams().buffer_offset;
+ for (std::size_t i = 0; i < effect_count; i++) {
+ const auto index = mix_info.GetEffectOrder(i);
+ if (index == AudioCommon::NO_EFFECT_ORDER) {
+ break;
+ }
+ auto* info = effect_context.GetInfo(index);
+ const auto type = info->GetType();
+
+ // TODO(ogniK): Finish remaining effects
+ switch (type) {
+ case EffectType::Aux:
+ GenerateAuxCommand(buffer_offset, info, info->IsEnabled());
+ break;
+ case EffectType::I3dl2Reverb:
+ GenerateI3dl2ReverbEffectCommand(buffer_offset, info, info->IsEnabled());
+ break;
+ case EffectType::BiquadFilter:
+ GenerateBiquadFilterEffectCommand(buffer_offset, info, info->IsEnabled());
+ break;
+ default:
+ break;
+ }
+
+ info->UpdateForCommandGeneration();
+ }
+}
+
+void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info,
+ bool enabled) {
+ if (!enabled) {
+ return;
+ }
+ const auto& params = dynamic_cast<EffectI3dl2Reverb*>(info)->GetParams();
+ const auto channel_count = params.channel_count;
+ for (s32 i = 0; i < channel_count; i++) {
+ // TODO(ogniK): Actually implement reverb
+ if (params.input[i] != params.output[i]) {
+ const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
+ auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
+ ApplyMix<1>(output, input, 32768, worker_params.sample_count);
+ }
+ }
+}
+
+void CommandGenerator::GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset, EffectBase* info,
+ bool enabled) {
+ if (!enabled) {
+ return;
+ }
+ const auto& params = dynamic_cast<EffectBiquadFilter*>(info)->GetParams();
+ const auto channel_count = params.channel_count;
+ for (s32 i = 0; i < channel_count; i++) {
+ // TODO(ogniK): Actually implement biquad filter
+ if (params.input[i] != params.output[i]) {
+ const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
+ auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
+ ApplyMix<1>(output, input, 32768, worker_params.sample_count);
+ }
+ }
+}
+
+void CommandGenerator::GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled) {
+ auto aux = dynamic_cast<EffectAuxInfo*>(info);
+ const auto& params = aux->GetParams();
+ if (aux->GetSendBuffer() != 0 && aux->GetRecvBuffer() != 0) {
+ const auto max_channels = params.count;
+ u32 offset{};
+ for (u32 channel = 0; channel < max_channels; channel++) {
+ u32 write_count = 0;
+ if (channel == (max_channels - 1)) {
+ write_count = offset + worker_params.sample_count;
+ }
+
+ const auto input_index = params.input_mix_buffers[channel] + mix_buffer_offset;
+ const auto output_index = params.output_mix_buffers[channel] + mix_buffer_offset;
+
+ if (enabled) {
+ AuxInfoDSP send_info{};
+ AuxInfoDSP recv_info{};
+ memory.ReadBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));
+ memory.ReadBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));
+
+ WriteAuxBuffer(send_info, aux->GetSendBuffer(), params.sample_count,
+ GetMixBuffer(input_index), worker_params.sample_count, offset,
+ write_count);
+ memory.WriteBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));
+
+ const auto samples_read = ReadAuxBuffer(
+ recv_info, aux->GetRecvBuffer(), params.sample_count,
+ GetMixBuffer(output_index), worker_params.sample_count, offset, write_count);
+ memory.WriteBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));
+
+ if (samples_read != worker_params.sample_count &&
+ samples_read <= params.sample_count) {
+ std::memset(GetMixBuffer(output_index), 0, params.sample_count - samples_read);
+ }
+ } else {
+ AuxInfoDSP empty{};
+ memory.WriteBlock(aux->GetSendInfo(), &empty, sizeof(AuxInfoDSP));
+ memory.WriteBlock(aux->GetRecvInfo(), &empty, sizeof(AuxInfoDSP));
+ if (output_index != input_index) {
+ std::memcpy(GetMixBuffer(output_index), GetMixBuffer(input_index),
+ worker_params.sample_count * sizeof(s32));
+ }
+ }
+
+ offset += worker_params.sample_count;
+ }
+ }
+}
+
+ServerSplitterDestinationData* CommandGenerator::GetDestinationData(s32 splitter_id, s32 index) {
+ if (splitter_id == AudioCommon::NO_SPLITTER) {
+ return nullptr;
+ }
+ return splitter_context.GetDestinationData(splitter_id, index);
+}
+
+s32 CommandGenerator::WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples,
+ const s32* data, u32 sample_count, u32 write_offset,
+ u32 write_count) {
+ if (max_samples == 0) {
+ return 0;
+ }
+ u32 offset = dsp_info.write_offset + write_offset;
+ if (send_buffer == 0 || offset > max_samples) {
+ return 0;
+ }
+
+ std::size_t data_offset{};
+ u32 remaining = sample_count;
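+ // Copy in chunks, wrapping back to the start of the ring buffer whenever max_samples
+ // is reached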
+ while (remaining > 0) {
+ // Get position in buffer
+ const auto base = send_buffer + (offset * sizeof(u32));
+ const auto samples_to_grab = std::min(max_samples - offset, remaining);
+ // Write to output
+ memory.WriteBlock(base, (data + data_offset), samples_to_grab * sizeof(u32));
+ offset = (offset + samples_to_grab) % max_samples;
+ remaining -= samples_to_grab;
+ data_offset += samples_to_grab;
+ }
+
+ if (write_count != 0) {
+ dsp_info.write_offset = (dsp_info.write_offset + write_count) % max_samples;
+ }
+ return sample_count;
+}
+
+s32 CommandGenerator::ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples,
+ s32* out_data, u32 sample_count, u32 read_offset,
+ u32 read_count) {
+ if (max_samples == 0) {
+ return 0;
+ }
+
+ u32 offset = recv_info.read_offset + read_offset;
+ if (recv_buffer == 0 || offset > max_samples) {
+ return 0;
+ }
+
+ u32 remaining = sample_count;
+ while (remaining > 0) {
+ const auto base = recv_buffer + (offset * sizeof(u32));
+ const auto samples_to_grab = std::min(max_samples - offset, remaining);
+ std::vector<s32> buffer(samples_to_grab);
+ memory.ReadBlock(base, buffer.data(), buffer.size() * sizeof(u32));
+ std::memcpy(out_data, buffer.data(), buffer.size() * sizeof(u32));
+ out_data += samples_to_grab;
+ offset = (offset + samples_to_grab) % max_samples;
+ remaining -= samples_to_grab;
+ }
+
+ if (read_count != 0) {
+ recv_info.read_offset = (recv_info.read_offset + read_count) % max_samples;
+ }
+ return sample_count;
+}
+
+void CommandGenerator::GenerateVolumeRampCommand(float last_volume, float current_volume,
+ s32 channel, s32 node_id) {
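+ // Volumes are converted to fixed point, where 32768 represents a gain of 1.0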
+ const auto last = static_cast<s32>(last_volume * 32768.0f);
+ const auto current = static_cast<s32>(current_volume * 32768.0f);
+ const auto delta = static_cast<s32>((static_cast<float>(current) - static_cast<float>(last)) /
+ static_cast<float>(worker_params.sample_count));
+
+ if (dumping_frame) {
+ LOG_DEBUG(Audio,
+ "(DSP_TRACE) GenerateVolumeRampCommand node_id={}, input={}, output={}, "
+ "last_volume={}, current_volume={}",
+ node_id, GetMixChannelBufferOffset(channel), GetMixChannelBufferOffset(channel),
+ last_volume, current_volume);
+ }
+ // Apply generic gain on samples
+ ApplyGain(GetChannelMixBuffer(channel), GetChannelMixBuffer(channel), last, delta,
+ worker_params.sample_count);
+}
+
+void CommandGenerator::GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
+ const MixVolumeBuffer& last_mix_volumes,
+ VoiceState& dsp_state, s32 mix_buffer_offset,
+ s32 mix_buffer_count, s32 voice_index, s32 node_id) {
+ // Loop all our mix buffers
+ for (s32 i = 0; i < mix_buffer_count; i++) {
+ if (last_mix_volumes[i] != 0.0f || mix_volumes[i] != 0.0f) {
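+ // Step the gain linearly from the previous mix volume to the current one over the frame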
+ const auto delta = static_cast<float>((mix_volumes[i] - last_mix_volumes[i])) /
+ static_cast<float>(worker_params.sample_count);
+
+ if (dumping_frame) {
+ LOG_DEBUG(Audio,
+ "(DSP_TRACE) GenerateVoiceMixCommand node_id={}, input={}, "
+ "output={}, last_volume={}, current_volume={}",
+ node_id, voice_index, mix_buffer_offset + i, last_mix_volumes[i],
+ mix_volumes[i]);
+ }
+
+ dsp_state.previous_samples[i] =
+ ApplyMixRamp(GetMixBuffer(mix_buffer_offset + i), GetMixBuffer(voice_index),
+ last_mix_volumes[i], delta, worker_params.sample_count);
+ } else {
+ dsp_state.previous_samples[i] = 0;
+ }
+ }
+}
+
+void CommandGenerator::GenerateSubMixCommand(ServerMixInfo& mix_info) {
+ if (dumping_frame) {
+ LOG_DEBUG(Audio, "(DSP_TRACE) GenerateSubMixCommand");
+ }
+ auto& in_params = mix_info.GetInParams();
+ GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
+ in_params.sample_rate);
+
+ GenerateEffectCommand(mix_info);
+
+ GenerateMixCommands(mix_info);
+}
+
+void CommandGenerator::GenerateMixCommands(ServerMixInfo& mix_info) {
+ if (!mix_info.HasAnyConnection()) {
+ return;
+ }
+ const auto& in_params = mix_info.GetInParams();
+ if (in_params.dest_mix_id != AudioCommon::NO_MIX) {
+ const auto& dest_mix = mix_context.GetInfo(in_params.dest_mix_id);
+ const auto& dest_in_params = dest_mix.GetInParams();
+
+ const auto buffer_count = in_params.buffer_count;
+
+ for (s32 i = 0; i < buffer_count; i++) {
+ for (s32 j = 0; j < dest_in_params.buffer_count; j++) {
+ const auto mixed_volume = in_params.volume * in_params.mix_volume[i][j];
+ if (mixed_volume != 0.0f) {
+ GenerateMixCommand(dest_in_params.buffer_offset + j,
+ in_params.buffer_offset + i, mixed_volume,
+ in_params.node_id);
+ }
+ }
+ }
+ } else if (in_params.splitter_id != AudioCommon::NO_SPLITTER) {
+ s32 base{};
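+ // Walk this splitter's destination list until no further destinations are returned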
+ while (const auto* destination_data = GetDestinationData(in_params.splitter_id, base++)) {
+ if (!destination_data->IsConfigured()) {
+ continue;
+ }
+
+ const auto& dest_mix = mix_context.GetInfo(destination_data->GetMixId());
+ const auto& dest_in_params = dest_mix.GetInParams();
+ const auto mix_index = (base - 1) % in_params.buffer_count + in_params.buffer_offset;
+ for (std::size_t i = 0; i < dest_in_params.buffer_count; i++) {
+ const auto mixed_volume = in_params.volume * destination_data->GetMixVolume(i);
+ if (mixed_volume != 0.0f) {
+ GenerateMixCommand(dest_in_params.buffer_offset + i, mix_index, mixed_volume,
+ in_params.node_id);
+ }
+ }
+ }
+ }
+}
+
+void CommandGenerator::GenerateMixCommand(std::size_t output_offset, std::size_t input_offset,
+ float volume, s32 node_id) {
+
+ if (dumping_frame) {
+ LOG_DEBUG(Audio,
+ "(DSP_TRACE) GenerateMixCommand node_id={}, input={}, output={}, volume={}",
+ node_id, input_offset, output_offset, volume);
+ }
+
+ auto* output = GetMixBuffer(output_offset);
+ const auto* input = GetMixBuffer(input_offset);
+
+ const s32 gain = static_cast<s32>(volume * 32768.0f);
+ // Mix with loop unrolling
+ if (worker_params.sample_count % 4 == 0) {
+ ApplyMix<4>(output, input, gain, worker_params.sample_count);
+ } else if (worker_params.sample_count % 2 == 0) {
+ ApplyMix<2>(output, input, gain, worker_params.sample_count);
+ } else {
+ ApplyMix<1>(output, input, gain, worker_params.sample_count);
+ }
+}
+
+void CommandGenerator::GenerateFinalMixCommand() {
+ if (dumping_frame) {
+ LOG_DEBUG(Audio, "(DSP_TRACE) GenerateFinalMixCommand");
+ }
+ auto& mix_info = mix_context.GetFinalMixInfo();
+ const auto& in_params = mix_info.GetInParams();
+
+ GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
+ in_params.sample_rate);
+
+ GenerateEffectCommand(mix_info);
+
+ for (s32 i = 0; i < in_params.buffer_count; i++) {
+ const s32 gain = static_cast<s32>(in_params.volume * 32768.0f);
+ if (dumping_frame) {
+ LOG_DEBUG(
+ Audio,
+ "(DSP_TRACE) ApplyGainWithoutDelta node_id={}, input={}, output={}, volume={}",
+ in_params.node_id, in_params.buffer_offset + i, in_params.buffer_offset + i,
+ in_params.volume);
+ }
+ ApplyGainWithoutDelta(GetMixBuffer(in_params.buffer_offset + i),
+ GetMixBuffer(in_params.buffer_offset + i), gain,
+ worker_params.sample_count);
+ }
+}
+
+s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
+ s32 sample_count, s32 channel, std::size_t mix_offset) {
+ auto& in_params = voice_info.GetInParams();
+ const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
+ if (wave_buffer.buffer_address == 0) {
+ return 0;
+ }
+ if (wave_buffer.buffer_size == 0) {
+ return 0;
+ }
+ if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
+ return 0;
+ }
+ const auto samples_remaining =
+ (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+ const auto start_offset =
+ ((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count) *
+ sizeof(s16);
+ const auto buffer_pos = wave_buffer.buffer_address + start_offset;
+ const auto samples_processed = std::min(sample_count, samples_remaining);
+
+ if (in_params.channel_count == 1) {
+ std::vector<s16> buffer(samples_processed);
+ memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));
+ for (std::size_t i = 0; i < buffer.size(); i++) {
+ sample_buffer[mix_offset + i] = buffer[i];
+ }
+ } else {
+ const auto channel_count = in_params.channel_count;
+ std::vector<s16> buffer(samples_processed * channel_count);
+ memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));
+
+ for (std::size_t i = 0; i < samples_processed; i++) {
+ sample_buffer[mix_offset + i] = buffer[i * channel_count + channel];
+ }
+ }
+
+ return samples_processed;
+}
+
+s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
+ s32 sample_count, s32 channel, std::size_t mix_offset) {
+ auto& in_params = voice_info.GetInParams();
+ const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
+ if (wave_buffer.buffer_address == 0) {
+ return 0;
+ }
+ if (wave_buffer.buffer_size == 0) {
+ return 0;
+ }
+ if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
+ return 0;
+ }
+
+ constexpr std::array<int, 16> SIGNED_NIBBLES = {
+ {0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1}};
+
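+ // Each 8-byte ADPCM frame packs 16 nibbles: a one-byte header (coefficient index and
+ // scale) followed by 14 sample nibbles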
+ constexpr std::size_t FRAME_LEN = 8;
+ constexpr std::size_t NIBBLES_PER_SAMPLE = 16;
+ constexpr std::size_t SAMPLES_PER_FRAME = 14;
+
+ auto frame_header = dsp_state.context.header;
+ s32 idx = (frame_header >> 4) & 0xf;
+ s32 scale = frame_header & 0xf;
+ s16 yn1 = dsp_state.context.yn1;
+ s16 yn2 = dsp_state.context.yn2;
+
+ Codec::ADPCM_Coeff coeffs;
+ memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
+ sizeof(Codec::ADPCM_Coeff));
+
+ s32 coef1 = coeffs[idx * 2];
+ s32 coef2 = coeffs[idx * 2 + 1];
+
+ const auto samples_remaining =
+ (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+ const auto samples_processed = std::min(sample_count, samples_remaining);
+ const auto sample_pos = wave_buffer.start_sample_offset + dsp_state.offset;
+
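+ // Convert the sample position into a nibble position; the +2 skips the frame header
+ // nibbles when resuming inside a partially decoded frame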
+ const auto samples_remaining_in_frame = sample_pos % SAMPLES_PER_FRAME;
+ auto position_in_frame = ((sample_pos / SAMPLES_PER_FRAME) * NIBBLES_PER_SAMPLE) +
+ samples_remaining_in_frame + (samples_remaining_in_frame != 0 ? 2 : 0);
+
+ const auto decode_sample = [&](const int nibble) -> s16 {
+ const int xn = nibble * (1 << scale);
+ // We first transform everything into 11 bit fixed point, perform the second order
+ // digital filter, then transform back.
+ // 0x400 == 0.5 in 11 bit fixed point.
+ // Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2]
+ int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
+ // Clamp to output range.
+ val = std::clamp<s32>(val, -32768, 32767);
+ // Advance output feedback.
+ yn2 = yn1;
+ yn1 = val;
+ return static_cast<s16>(val);
+ };
+
+ std::size_t buffer_offset{};
+ std::vector<u8> buffer(
+ std::max((samples_processed / FRAME_LEN) * SAMPLES_PER_FRAME, FRAME_LEN));
+ memory.ReadBlock(wave_buffer.buffer_address + (position_in_frame / 2), buffer.data(),
+ buffer.size());
+ std::size_t cur_mix_offset = mix_offset;
+
+ auto remaining_samples = samples_processed;
+ while (remaining_samples > 0) {
+ if (position_in_frame % NIBBLES_PER_SAMPLE == 0) {
+ // Read header
+ frame_header = buffer[buffer_offset++];
+ idx = (frame_header >> 4) & 0xf;
+ scale = frame_header & 0xf;
+ coef1 = coeffs[idx * 2];
+ coef2 = coeffs[idx * 2 + 1];
+ position_in_frame += 2;
+
+ // Decode entire frame
+ if (remaining_samples >= SAMPLES_PER_FRAME) {
+ for (std::size_t i = 0; i < SAMPLES_PER_FRAME / 2; i++) {
+ // Decode the two samples packed into this byte (high nibble first)
+ const s32 s0 = SIGNED_NIBBLES[buffer[buffer_offset] >> 4];
+ const s32 s1 = SIGNED_NIBBLES[buffer[buffer_offset++] & 0xf];
+ const s16 sample_1 = decode_sample(s0);
+ const s16 sample_2 = decode_sample(s1);
+ sample_buffer[cur_mix_offset++] = sample_1;
+ sample_buffer[cur_mix_offset++] = sample_2;
+ }
+ remaining_samples -= SAMPLES_PER_FRAME;
+ position_in_frame += SAMPLES_PER_FRAME;
+ continue;
+ }
+ }
+ // Decode mid frame
+ s32 current_nibble = buffer[buffer_offset];
+ if (position_in_frame++ & 0x1) {
+ current_nibble &= 0xf;
+ buffer_offset++;
+ } else {
+ current_nibble >>= 4;
+ }
+ const s16 sample = decode_sample(SIGNED_NIBBLES[current_nibble]);
+ sample_buffer[cur_mix_offset++] = sample;
+ remaining_samples--;
+ }
+
+ dsp_state.context.header = frame_header;
+ dsp_state.context.yn1 = yn1;
+ dsp_state.context.yn2 = yn2;
+
+ return samples_processed;
+}
+
+s32* CommandGenerator::GetMixBuffer(std::size_t index) {
+ return mix_buffer.data() + (index * worker_params.sample_count);
+}
+
+const s32* CommandGenerator::GetMixBuffer(std::size_t index) const {
+ return mix_buffer.data() + (index * worker_params.sample_count);
+}
+
+std::size_t CommandGenerator::GetMixChannelBufferOffset(s32 channel) const {
+ return worker_params.mix_buffer_count + channel;
+}
+
+std::size_t CommandGenerator::GetTotalMixBufferCount() const {
+ return worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT;
+}
+
+s32* CommandGenerator::GetChannelMixBuffer(s32 channel) {
+ return GetMixBuffer(worker_params.mix_buffer_count + channel);
+}
+
+const s32* CommandGenerator::GetChannelMixBuffer(s32 channel) const {
+ return GetMixBuffer(worker_params.mix_buffer_count + channel);
+}
+
+void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output,
+ VoiceState& dsp_state, s32 channel,
+ s32 target_sample_rate, s32 sample_count,
+ s32 node_id) {
+ auto& in_params = voice_info.GetInParams();
+ if (dumping_frame) {
+ LOG_DEBUG(Audio,
+ "(DSP_TRACE) DecodeFromWaveBuffers, node_id={}, channel={}, "
+ "format={}, sample_count={}, sample_rate={}, mix_id={}, splitter_id={}",
+ node_id, channel, in_params.sample_format, sample_count, in_params.sample_rate,
+ in_params.mix_id, in_params.splitter_info_id);
+ }
+ ASSERT_OR_EXECUTE(output != nullptr, { return; });
+
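+ // Sample step in 1.15 fixed point: the source/target sample rate ratio scaled by the
+ // voice pitch (32768 == 1.0)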
+ const auto resample_rate = static_cast<s32>(
+ static_cast<float>(in_params.sample_rate) / static_cast<float>(target_sample_rate) *
+ static_cast<float>(static_cast<s32>(in_params.pitch * 32768.0f)));
+ auto* output_base = output;
+ if ((dsp_state.fraction + sample_count * resample_rate) > (SCALED_MIX_BUFFER_SIZE - 4ULL)) {
+ return;
+ }
+
+ auto min_required_samples =
+ std::min(static_cast<s32>(SCALED_MIX_BUFFER_SIZE) - dsp_state.fraction, resample_rate);
+ if (min_required_samples >= sample_count) {
+ min_required_samples = sample_count;
+ }
+
+ std::size_t temp_mix_offset{};
+ bool is_buffer_completed{false};
+ auto samples_remaining = sample_count;
+ while (samples_remaining > 0 && !is_buffer_completed) {
+ const auto samples_to_output = std::min(samples_remaining, min_required_samples);
+ const auto samples_to_read = (samples_to_output * resample_rate + dsp_state.fraction) >> 15;
+
+ if (!in_params.behavior_flags.is_pitch_and_src_skipped) {
+ // Prepend the sample history from the previous frame so the resampler has context
+ for (std::size_t i = 0; i < AudioCommon::MAX_SAMPLE_HISTORY; i++) {
+ sample_buffer[temp_mix_offset + i] = dsp_state.sample_history[i];
+ }
+ temp_mix_offset += 4;
+ }
+
+ s32 samples_read{};
+ while (samples_read < samples_to_read) {
+ const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
+ // No more data can be read
+ if (!dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index]) {
+ is_buffer_completed = true;
+ break;
+ }
+
+ if (in_params.sample_format == SampleFormat::Adpcm && dsp_state.offset == 0 &&
+ wave_buffer.context_address != 0 && wave_buffer.context_size != 0) {
+ // TODO(ogniK): ADPCM loop context
+ }
+
+ s32 samples_decoded{0};
+ switch (in_params.sample_format) {
+ case SampleFormat::Pcm16:
+ samples_decoded = DecodePcm16(voice_info, dsp_state, samples_to_read - samples_read,
+ channel, temp_mix_offset);
+ break;
+ case SampleFormat::Adpcm:
+ samples_decoded = DecodeAdpcm(voice_info, dsp_state, samples_to_read - samples_read,
+ channel, temp_mix_offset);
+ break;
+ default:
+ UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+ }
+
+ temp_mix_offset += samples_decoded;
+ samples_read += samples_decoded;
+ dsp_state.offset += samples_decoded;
+ dsp_state.played_sample_count += samples_decoded;
+
+ if (dsp_state.offset >=
+ (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) ||
+ samples_decoded == 0) {
+ // Reset our sample offset
+ dsp_state.offset = 0;
+ if (wave_buffer.is_looping) {
+ if (samples_decoded == 0) {
+ // End of our buffer
+ is_buffer_completed = true;
+ break;
+ }
+
+ if (in_params.behavior_flags.is_played_samples_reset_at_loop_point.Value()) {
+ dsp_state.played_sample_count = 0;
+ }
+ } else {
+
+ // Update our wave buffer states
+ dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
+ dsp_state.wave_buffer_consumed++;
+ dsp_state.wave_buffer_index =
+ (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
+ if (wave_buffer.end_of_stream) {
+ dsp_state.played_sample_count = 0;
+ }
+ }
+ }
+ }
+
+ if (in_params.behavior_flags.is_pitch_and_src_skipped.Value()) {
+ // No need to resample
+ std::memcpy(output, sample_buffer.data(), samples_read * sizeof(s32));
+ } else {
+ std::fill(sample_buffer.begin() + temp_mix_offset,
+ sample_buffer.begin() + temp_mix_offset + (samples_to_read - samples_read),
+ 0);
+ AudioCore::Resample(output, sample_buffer.data(), resample_rate, dsp_state.fraction,
+ samples_to_output);
+ // Save the last decoded samples as history for the next resampling pass
+ for (std::size_t i = 0; i < AudioCommon::MAX_SAMPLE_HISTORY; i++) {
+ dsp_state.sample_history[i] = sample_buffer[samples_to_read + i];
+ }
+ }
+ output += samples_to_output;
+ samples_remaining -= samples_to_output;
+ }
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/command_generator.h b/src/audio_core/command_generator.h
new file mode 100644
index 000000000..967d24078
--- /dev/null
+++ b/src/audio_core/command_generator.h
@@ -0,0 +1,103 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include "audio_core/common.h"
+#include "audio_core/voice_context.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace AudioCore {
+class MixContext;
+class SplitterContext;
+class ServerSplitterDestinationData;
+class ServerMixInfo;
+class EffectContext;
+class EffectBase;
+struct AuxInfoDSP;
+using MixVolumeBuffer = std::array<float, AudioCommon::MAX_MIX_BUFFERS>;
+
+class CommandGenerator {
+public:
+ explicit CommandGenerator(AudioCommon::AudioRendererParameter& worker_params,
+ VoiceContext& voice_context, MixContext& mix_context,
+ SplitterContext& splitter_context, EffectContext& effect_context,
+ Core::Memory::Memory& memory);
+ ~CommandGenerator();
+
+ void ClearMixBuffers();
+ void GenerateVoiceCommands();
+ void GenerateVoiceCommand(ServerVoiceInfo& voice_info);
+ void GenerateSubMixCommands();
+ void GenerateFinalMixCommands();
+ void PreCommand();
+ void PostCommand();
+
+ s32* GetChannelMixBuffer(s32 channel);
+ const s32* GetChannelMixBuffer(s32 channel) const;
+ s32* GetMixBuffer(std::size_t index);
+ const s32* GetMixBuffer(std::size_t index) const;
+ std::size_t GetMixChannelBufferOffset(s32 channel) const;
+
+ std::size_t GetTotalMixBufferCount() const;
+
+private:
+ void GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 channel);
+ void GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
+ s32 mix_buffer_count, s32 channel);
+ void GenerateVolumeRampCommand(float last_volume, float current_volume, s32 channel,
+ s32 node_id);
+ void GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
+ const MixVolumeBuffer& last_mix_volumes, VoiceState& dsp_state,
+ s32 mix_buffer_offset, s32 mix_buffer_count, s32 voice_index,
+ s32 node_id);
+ void GenerateSubMixCommand(ServerMixInfo& mix_info);
+ void GenerateMixCommands(ServerMixInfo& mix_info);
+ void GenerateMixCommand(std::size_t output_offset, std::size_t input_offset, float volume,
+ s32 node_id);
+ void GenerateFinalMixCommand();
+ void GenerateBiquadFilterCommand(s32 mix_buffer, const BiquadFilterParameter& params,
+ std::array<s64, 2>& state, std::size_t input_offset,
+ std::size_t output_offset, s32 sample_count, s32 node_id);
+ void GenerateDepopPrepareCommand(VoiceState& dsp_state, std::size_t mix_buffer_count,
+ std::size_t mix_buffer_offset);
+ void GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_count,
+ std::size_t mix_buffer_offset, s32 sample_rate);
+ void GenerateEffectCommand(ServerMixInfo& mix_info);
+ void GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
+ void GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
+ void GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
+ ServerSplitterDestinationData* GetDestinationData(s32 splitter_id, s32 index);
+
+ s32 WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples, const s32* data,
+ u32 sample_count, u32 write_offset, u32 write_count);
+ s32 ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples, s32* out_data,
+ u32 sample_count, u32 read_offset, u32 read_count);
+
+ // DSP Code
+ s32 DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
+ s32 channel, std::size_t mix_offset);
+ s32 DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
+ s32 channel, std::size_t mix_offset);
+ void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output, VoiceState& dsp_state,
+ s32 channel, s32 target_sample_rate, s32 sample_count, s32 node_id);
+
+ AudioCommon::AudioRendererParameter& worker_params;
+ VoiceContext& voice_context;
+ MixContext& mix_context;
+ SplitterContext& splitter_context;
+ EffectContext& effect_context;
+ Core::Memory::Memory& memory;
+ std::vector<s32> mix_buffer{};
+ std::vector<s32> sample_buffer{};
+ std::vector<s32> depop_buffer{};
+ bool dumping_frame{false};
+};
+} // namespace AudioCore
diff --git a/src/audio_core/common.h b/src/audio_core/common.h
index 7bb145c53..72ebce221 100644
--- a/src/audio_core/common.h
+++ b/src/audio_core/common.h
@@ -8,13 +8,30 @@
#include "common/swap.h"
#include "core/hle/result.h"
-namespace AudioCore {
+namespace AudioCommon {
namespace Audren {
constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
-}
+constexpr ResultCode ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
+} // namespace Audren
constexpr u32_le CURRENT_PROCESS_REVISION = Common::MakeMagic('R', 'E', 'V', '8');
constexpr std::size_t MAX_MIX_BUFFERS = 24;
+constexpr std::size_t MAX_BIQUAD_FILTERS = 2;
+constexpr std::size_t MAX_CHANNEL_COUNT = 6;
+constexpr std::size_t MAX_WAVE_BUFFERS = 4;
+constexpr std::size_t MAX_SAMPLE_HISTORY = 4;
+constexpr u32 STREAM_SAMPLE_RATE = 48000;
+constexpr u32 STREAM_NUM_CHANNELS = 6;
+constexpr s32 NO_SPLITTER = -1;
+constexpr s32 NO_MIX = 0x7fffffff;
+constexpr s32 NO_FINAL_MIX = std::numeric_limits<s32>::min();
+constexpr s32 FINAL_MIX = 0;
+constexpr s32 NO_EFFECT_ORDER = -1;
+constexpr std::size_t TEMP_MIX_BASE_SIZE = 0x3f00; // TODO(ogniK): Work out this constant
+// Any size checks seem to take the sample history into account,
+// so the constant ends up being 0x3f04; the extra 4 bytes are most
+// likely the sample history
+constexpr std::size_t TOTAL_TEMP_MIX_SIZE = TEMP_MIX_BASE_SIZE + AudioCommon::MAX_SAMPLE_HISTORY;
static constexpr u32 VersionFromRevision(u32_le rev) {
// "REV7" -> 7
@@ -45,4 +62,46 @@ static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std
return true;
}
-} // namespace AudioCore
+struct UpdateDataSizes {
+ u32_le behavior{};
+ u32_le memory_pool{};
+ u32_le voice{};
+ u32_le voice_channel_resource{};
+ u32_le effect{};
+ u32_le mixer{};
+ u32_le sink{};
+ u32_le performance{};
+ u32_le splitter{};
+ u32_le render_info{};
+ INSERT_PADDING_WORDS(4);
+};
+static_assert(sizeof(UpdateDataSizes) == 0x38, "UpdateDataSizes is an invalid size");
+
+struct UpdateDataHeader {
+ u32_le revision{};
+ UpdateDataSizes size{};
+ u32_le total_size{};
+};
+static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader is an invalid size");
+
+struct AudioRendererParameter {
+ u32_le sample_rate;
+ u32_le sample_count;
+ u32_le mix_buffer_count;
+ u32_le submix_count;
+ u32_le voice_count;
+ u32_le sink_count;
+ u32_le effect_count;
+ u32_le performance_frame_count;
+ u8 is_voice_drop_enabled;
+ u8 unknown_21;
+ u8 unknown_22;
+ u8 execution_mode;
+ u32_le splitter_count;
+ u32_le num_splitter_send_channels;
+ u32_le unknown_30;
+ u32_le revision;
+};
+static_assert(sizeof(AudioRendererParameter) == 52, "AudioRendererParameter is an invalid size");
+
+} // namespace AudioCommon
diff --git a/src/audio_core/cubeb_sink.cpp b/src/audio_core/cubeb_sink.cpp
index c27df946c..83c06c0ed 100644
--- a/src/audio_core/cubeb_sink.cpp
+++ b/src/audio_core/cubeb_sink.cpp
@@ -23,14 +23,24 @@ class CubebSinkStream final : public SinkStream {
public:
CubebSinkStream(cubeb* ctx, u32 sample_rate, u32 num_channels_, cubeb_devid output_device,
const std::string& name)
- : ctx{ctx}, num_channels{std::min(num_channels_, 2u)}, time_stretch{sample_rate,
+ : ctx{ctx}, num_channels{std::min(num_channels_, 6u)}, time_stretch{sample_rate,
num_channels} {
cubeb_stream_params params{};
params.rate = sample_rate;
params.channels = num_channels;
params.format = CUBEB_SAMPLE_S16NE;
- params.layout = num_channels == 1 ? CUBEB_LAYOUT_MONO : CUBEB_LAYOUT_STEREO;
+ switch (num_channels) {
+ case 1:
+ params.layout = CUBEB_LAYOUT_MONO;
+ break;
+ case 2:
+ params.layout = CUBEB_LAYOUT_STEREO;
+ break;
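+ // Six channels map to cubeb's 3F2-LFE layout (5.1 surround)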
+ case 6:
+ params.layout = CUBEB_LAYOUT_3F2_LFE;
+ break;
+ }
u32 minimum_latency{};
if (cubeb_get_min_latency(ctx, &params, &minimum_latency) != CUBEB_OK) {
@@ -193,6 +203,7 @@ long CubebSinkStream::DataCallback(cubeb_stream* stream, void* user_data, const
const std::size_t samples_to_write = num_channels * num_frames;
std::size_t samples_written;
+ /*
if (Settings::values.enable_audio_stretching.GetValue()) {
const std::vector<s16> in{impl->queue.Pop()};
const std::size_t num_in{in.size() / num_channels};
@@ -207,7 +218,8 @@ long CubebSinkStream::DataCallback(cubeb_stream* stream, void* user_data, const
}
} else {
samples_written = impl->queue.Pop(buffer, samples_to_write);
- }
+ }*/
+ samples_written = impl->queue.Pop(buffer, samples_to_write);
if (samples_written >= num_channels) {
std::memcpy(&impl->last_frame[0], buffer + (samples_written - num_channels) * sizeof(s16),
diff --git a/src/audio_core/effect_context.cpp b/src/audio_core/effect_context.cpp
new file mode 100644
index 000000000..adfec3df5
--- /dev/null
+++ b/src/audio_core/effect_context.cpp
@@ -0,0 +1,299 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include "audio_core/effect_context.h"
+
+namespace AudioCore {
+namespace {
+bool ValidChannelCountForEffect(s32 channel_count) {
+ return channel_count == 1 || channel_count == 2 || channel_count == 4 || channel_count == 6;
+}
+} // namespace
+
+EffectContext::EffectContext(std::size_t effect_count) : effect_count(effect_count) {
+ effects.reserve(effect_count);
+ std::generate_n(std::back_inserter(effects), effect_count,
+ [] { return std::make_unique<EffectStubbed>(); });
+}
+EffectContext::~EffectContext() = default;
+
+std::size_t EffectContext::GetCount() const {
+ return effect_count;
+}
+
+EffectBase* EffectContext::GetInfo(std::size_t i) {
+ return effects.at(i).get();
+}
+
+EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
+ switch (effect) {
+ case EffectType::Invalid:
+ effects[i] = std::make_unique<EffectStubbed>();
+ break;
+ case EffectType::BufferMixer:
+ effects[i] = std::make_unique<EffectBufferMixer>();
+ break;
+ case EffectType::Aux:
+ effects[i] = std::make_unique<EffectAuxInfo>();
+ break;
+ case EffectType::Delay:
+ effects[i] = std::make_unique<EffectDelay>();
+ break;
+ case EffectType::Reverb:
+ effects[i] = std::make_unique<EffectReverb>();
+ break;
+ case EffectType::I3dl2Reverb:
+ effects[i] = std::make_unique<EffectI3dl2Reverb>();
+ break;
+ case EffectType::BiquadFilter:
+ effects[i] = std::make_unique<EffectBiquadFilter>();
+ break;
+ default:
+ UNREACHABLE_MSG("Unimplemented effect {}", effect);
+ effects[i] = std::make_unique<EffectStubbed>();
+ }
+ return GetInfo(i);
+}
+
+const EffectBase* EffectContext::GetInfo(std::size_t i) const {
+ return effects.at(i).get();
+}
+
+EffectStubbed::EffectStubbed() : EffectBase::EffectBase(EffectType::Invalid) {}
+EffectStubbed::~EffectStubbed() = default;
+
+void EffectStubbed::Update(EffectInfo::InParams& in_params) {}
+void EffectStubbed::UpdateForCommandGeneration() {}
+
+EffectBase::EffectBase(EffectType effect_type) : effect_type(effect_type) {}
+EffectBase::~EffectBase() = default;
+
+UsageState EffectBase::GetUsage() const {
+ return usage;
+}
+
+EffectType EffectBase::GetType() const {
+ return effect_type;
+}
+
+bool EffectBase::IsEnabled() const {
+ return enabled;
+}
+
+s32 EffectBase::GetMixID() const {
+ return mix_id;
+}
+
+s32 EffectBase::GetProcessingOrder() const {
+ return processing_order;
+}
+
+EffectI3dl2Reverb::EffectI3dl2Reverb() : EffectGeneric::EffectGeneric(EffectType::I3dl2Reverb) {}
+EffectI3dl2Reverb::~EffectI3dl2Reverb() = default;
+
+void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
+ auto& internal_params = GetParams();
+ const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
+ if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
+ UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
+ return;
+ }
+
+ const auto last_status = internal_params.status;
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ internal_params = *reverb_params;
+ if (!ValidChannelCountForEffect(reverb_params->channel_count)) {
+ internal_params.channel_count = internal_params.max_channels;
+ }
+ enabled = in_params.is_enabled;
+ if (last_status != ParameterStatus::Updated) {
+ internal_params.status = last_status;
+ }
+
+ if (in_params.is_new || skipped) {
+ usage = UsageState::Initialized;
+ internal_params.status = ParameterStatus::Initialized;
+ skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
+ }
+}
+
+void EffectI3dl2Reverb::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+ GetParams().status = ParameterStatus::Updated;
+}
+
+EffectBiquadFilter::EffectBiquadFilter() : EffectGeneric::EffectGeneric(EffectType::BiquadFilter) {}
+EffectBiquadFilter::~EffectBiquadFilter() = default;
+
+void EffectBiquadFilter::Update(EffectInfo::InParams& in_params) {
+ auto& internal_params = GetParams();
+ const auto* biquad_params = reinterpret_cast<BiquadFilterParams*>(in_params.raw.data());
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ internal_params = *biquad_params;
+ enabled = in_params.is_enabled;
+}
+
+void EffectBiquadFilter::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+ GetParams().status = ParameterStatus::Updated;
+}
+
+EffectAuxInfo::EffectAuxInfo() : EffectGeneric::EffectGeneric(EffectType::Aux) {}
+EffectAuxInfo::~EffectAuxInfo() = default;
+
+void EffectAuxInfo::Update(EffectInfo::InParams& in_params) {
+ const auto* aux_params = reinterpret_cast<AuxInfo*>(in_params.raw.data());
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ GetParams() = *aux_params;
+ enabled = in_params.is_enabled;
+
+ if (in_params.is_new || skipped) {
+ skipped = aux_params->send_buffer_info == 0 || aux_params->return_buffer_info == 0;
+ if (skipped) {
+ return;
+ }
+
+ // There are two AuxInfo structures of identical size: the first is managed by the CPU,
+ // the second by the DSP. All we care about is managing the DSP one.
+ send_info = aux_params->send_buffer_info + sizeof(AuxInfoDSP);
+ send_buffer = aux_params->send_buffer_info + (sizeof(AuxInfoDSP) * 2);
+
+ recv_info = aux_params->return_buffer_info + sizeof(AuxInfoDSP);
+ recv_buffer = aux_params->return_buffer_info + (sizeof(AuxInfoDSP) * 2);
+ }
+}
+
+void EffectAuxInfo::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+}
+
+const VAddr EffectAuxInfo::GetSendInfo() const {
+ return send_info;
+}
+
+const VAddr EffectAuxInfo::GetSendBuffer() const {
+ return send_buffer;
+}
+
+const VAddr EffectAuxInfo::GetRecvInfo() const {
+ return recv_info;
+}
+
+const VAddr EffectAuxInfo::GetRecvBuffer() const {
+ return recv_buffer;
+}
+
+EffectDelay::EffectDelay() : EffectGeneric::EffectGeneric(EffectType::Delay) {}
+EffectDelay::~EffectDelay() = default;
+
+void EffectDelay::Update(EffectInfo::InParams& in_params) {
+ const auto* delay_params = reinterpret_cast<DelayParams*>(in_params.raw.data());
+ auto& internal_params = GetParams();
+ if (!ValidChannelCountForEffect(delay_params->max_channels)) {
+ return;
+ }
+
+ const auto last_status = internal_params.status;
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ internal_params = *delay_params;
+ if (!ValidChannelCountForEffect(delay_params->channels)) {
+ internal_params.channels = internal_params.max_channels;
+ }
+ enabled = in_params.is_enabled;
+
+ if (last_status != ParameterStatus::Updated) {
+ internal_params.status = last_status;
+ }
+
+ if (in_params.is_new || skipped) {
+ usage = UsageState::Initialized;
+ internal_params.status = ParameterStatus::Initialized;
+ skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
+ }
+}
+
+void EffectDelay::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+ GetParams().status = ParameterStatus::Updated;
+}
+
+EffectBufferMixer::EffectBufferMixer() : EffectGeneric::EffectGeneric(EffectType::BufferMixer) {}
+EffectBufferMixer::~EffectBufferMixer() = default;
+
+void EffectBufferMixer::Update(EffectInfo::InParams& in_params) {
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ GetParams() = *reinterpret_cast<BufferMixerParams*>(in_params.raw.data());
+ enabled = in_params.is_enabled;
+}
+
+void EffectBufferMixer::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+}
+
+EffectReverb::EffectReverb() : EffectGeneric::EffectGeneric(EffectType::Reverb) {}
+EffectReverb::~EffectReverb() = default;
+
+void EffectReverb::Update(EffectInfo::InParams& in_params) {
+ const auto* reverb_params = reinterpret_cast<ReverbParams*>(in_params.raw.data());
+ auto& internal_params = GetParams();
+ if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
+ return;
+ }
+
+ const auto last_status = internal_params.status;
+ mix_id = in_params.mix_id;
+ processing_order = in_params.processing_order;
+ internal_params = *reverb_params;
+ if (!ValidChannelCountForEffect(reverb_params->channels)) {
+ internal_params.channels = internal_params.max_channels;
+ }
+ enabled = in_params.is_enabled;
+
+ if (last_status != ParameterStatus::Updated) {
+ internal_params.status = last_status;
+ }
+
+ if (in_params.is_new || skipped) {
+ usage = UsageState::Initialized;
+ internal_params.status = ParameterStatus::Initialized;
+ skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
+ }
+}
+
+void EffectReverb::UpdateForCommandGeneration() {
+ if (enabled) {
+ usage = UsageState::Running;
+ } else {
+ usage = UsageState::Stopped;
+ }
+ GetParams().status = ParameterStatus::Updated;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/effect_context.h b/src/audio_core/effect_context.h
new file mode 100644
index 000000000..2f2da72dd
--- /dev/null
+++ b/src/audio_core/effect_context.h
@@ -0,0 +1,322 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <memory>
+#include <vector>
+#include "audio_core/common.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+
+namespace AudioCore {
+enum class EffectType : u8 {
+ Invalid = 0,
+ BufferMixer = 1,
+ Aux = 2,
+ Delay = 3,
+ Reverb = 4,
+ I3dl2Reverb = 5,
+ BiquadFilter = 6,
+};
+
+enum class UsageStatus : u8 {
+ Invalid = 0,
+ New = 1,
+ Initialized = 2,
+ Used = 3,
+ Removed = 4,
+};
+
+enum class UsageState {
+ Invalid = 0,
+ Initialized = 1,
+ Running = 2,
+ Stopped = 3,
+};
+
+enum class ParameterStatus : u8 {
+ Initialized = 0,
+ Updating = 1,
+ Updated = 2,
+};
+
+struct BufferMixerParams {
+ std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input{};
+ std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output{};
+ std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> volume{};
+ s32_le count{};
+};
+static_assert(sizeof(BufferMixerParams) == 0x94, "BufferMixerParams is an invalid size");
+
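+// Ring buffer state (read/write offsets) of an aux send/return buffer; this is the copy
+// managed on the DSP side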
+struct AuxInfoDSP {
+ u32_le read_offset{};
+ u32_le write_offset{};
+ u32_le remaining{};
+ INSERT_PADDING_WORDS(13);
+};
+static_assert(sizeof(AuxInfoDSP) == 0x40, "AuxInfoDSP is an invalid size");
+
+struct AuxInfo {
+ std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input_mix_buffers{};
+ std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output_mix_buffers{};
+ u32_le count{};
+ s32_le sample_rate{};
+ s32_le sample_count{};
+ s32_le mix_buffer_count{};
+ u64_le send_buffer_info{};
+ u64_le send_buffer_base{};
+
+ u64_le return_buffer_info{};
+ u64_le return_buffer_base{};
+};
+static_assert(sizeof(AuxInfo) == 0x60, "AuxInfo is an invalid size");
+
+struct I3dl2ReverbParams {
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
+ u16_le max_channels{};
+ u16_le channel_count{};
+ INSERT_PADDING_BYTES(1);
+ u32_le sample_rate{};
+ f32 room_hf{};
+ f32 hf_reference{};
+ f32 decay_time{};
+ f32 hf_decay_ratio{};
+ f32 room{};
+ f32 reflection{};
+ f32 reverb{};
+ f32 diffusion{};
+ f32 reflection_delay{};
+ f32 reverb_delay{};
+ f32 density{};
+ f32 dry_gain{};
+ ParameterStatus status{};
+ INSERT_PADDING_BYTES(3);
+};
+static_assert(sizeof(I3dl2ReverbParams) == 0x4c, "I3dl2ReverbParams is an invalid size");
+
+struct BiquadFilterParams {
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
+ std::array<s16_le, 3> numerator;
+ std::array<s16_le, 2> denominator;
+ s8 channel_count{};
+ ParameterStatus status{};
+};
+static_assert(sizeof(BiquadFilterParams) == 0x18, "BiquadFilterParams is an invalid size");
+
+struct DelayParams {
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
+ u16_le max_channels{};
+ u16_le channels{};
+ s32_le max_delay{};
+ s32_le delay{};
+ s32_le sample_rate{};
+ s32_le gain{};
+ s32_le feedback_gain{};
+ s32_le out_gain{};
+ s32_le dry_gain{};
+ s32_le channel_spread{};
+ s32_le low_pass{};
+ ParameterStatus status{};
+ INSERT_PADDING_BYTES(3);
+};
+static_assert(sizeof(DelayParams) == 0x38, "DelayParams is an invalid size");
+
+struct ReverbParams {
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
+ std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
+ u16_le max_channels{};
+ u16_le channels{};
+ s32_le sample_rate{};
+ s32_le mode0{};
+ s32_le mode0_gain{};
+ s32_le pre_delay{};
+ s32_le mode1{};
+ s32_le mode1_gain{};
+ s32_le decay{};
+ s32_le hf_decay_ratio{};
+ s32_le coloration{};
+ s32_le reverb_gain{};
+ s32_le out_gain{};
+ s32_le dry_gain{};
+ ParameterStatus status{};
+ INSERT_PADDING_BYTES(3);
+};
+static_assert(sizeof(ReverbParams) == 0x44, "ReverbParams is an invalid size");
+
+class EffectInfo {
+public:
+ struct InParams {
+ EffectType type{};
+ u8 is_new{};
+ u8 is_enabled{};
+ INSERT_PADDING_BYTES(1);
+ s32_le mix_id{};
+ u64_le buffer_address{};
+ u64_le buffer_size{};
+ s32_le processing_order{};
+ INSERT_PADDING_BYTES(4);
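+ // Effect-type specific parameter blob; each effect's Update() reinterprets this as its
+ // own parameter struct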
+ union {
+ std::array<u8, 0xa0> raw;
+ };
+ };
+ static_assert(sizeof(EffectInfo::InParams) == 0xc0, "InParams is an invalid size");
+
+ struct OutParams {
+ UsageStatus status{};
+ INSERT_PADDING_BYTES(15);
+ };
+ static_assert(sizeof(EffectInfo::OutParams) == 0x10, "OutParams is an invalid size");
+};
+
+struct AuxAddress {
+ VAddr send_dsp_info{};
+ VAddr send_buffer_base{};
+ VAddr return_dsp_info{};
+ VAddr return_buffer_base{};
+};
+
+class EffectBase {
+public:
+ EffectBase(EffectType effect_type);
+ // Effects are owned and destroyed through EffectBase pointers, so this must be virtual
+ virtual ~EffectBase();
+
+ virtual void Update(EffectInfo::InParams& in_params) = 0;
+ virtual void UpdateForCommandGeneration() = 0;
+ UsageState GetUsage() const;
+ EffectType GetType() const;
+ bool IsEnabled() const;
+ s32 GetMixID() const;
+ s32 GetProcessingOrder() const;
+
+protected:
+ UsageState usage{UsageState::Invalid};
+ EffectType effect_type{};
+ s32 mix_id{};
+ s32 processing_order{};
+ bool enabled = false;
+};
+
+template <typename T>
+class EffectGeneric : public EffectBase {
+public:
+ EffectGeneric(EffectType effect_type) : EffectBase::EffectBase(effect_type) {}
+ ~EffectGeneric() = default;
+
+ T& GetParams() {
+ return internal_params;
+ }
+
+ const T& GetParams() const {
+ return internal_params;
+ }
+
+private:
+ T internal_params{};
+};
+
+class EffectStubbed : public EffectBase {
+public:
+ explicit EffectStubbed();
+ ~EffectStubbed();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+};
+
+class EffectI3dl2Reverb : public EffectGeneric<I3dl2ReverbParams> {
+public:
+ explicit EffectI3dl2Reverb();
+ ~EffectI3dl2Reverb();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+
+private:
+ bool skipped = false;
+};
+
+class EffectBiquadFilter : public EffectGeneric<BiquadFilterParams> {
+public:
+ explicit EffectBiquadFilter();
+ ~EffectBiquadFilter();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+};
+
+class EffectAuxInfo : public EffectGeneric<AuxInfo> {
+public:
+ explicit EffectAuxInfo();
+ ~EffectAuxInfo();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+ const VAddr GetSendInfo() const;
+ const VAddr GetSendBuffer() const;
+ const VAddr GetRecvInfo() const;
+ const VAddr GetRecvBuffer() const;
+
+private:
+ VAddr send_info{};
+ VAddr send_buffer{};
+ VAddr recv_info{};
+ VAddr recv_buffer{};
+ bool skipped = false;
+ AuxAddress addresses{};
+};
+
+class EffectDelay : public EffectGeneric<DelayParams> {
+public:
+ explicit EffectDelay();
+ ~EffectDelay();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+
+private:
+ bool skipped = false;
+};
+
+class EffectBufferMixer : public EffectGeneric<BufferMixerParams> {
+public:
+ explicit EffectBufferMixer();
+ ~EffectBufferMixer();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+};
+
+class EffectReverb : public EffectGeneric<ReverbParams> {
+public:
+ explicit EffectReverb();
+ ~EffectReverb();
+
+ void Update(EffectInfo::InParams& in_params) override;
+ void UpdateForCommandGeneration() override;
+
+private:
+ bool skipped = false;
+};
+
+class EffectContext {
+public:
+ explicit EffectContext(std::size_t effect_count);
+ ~EffectContext();
+
+ std::size_t GetCount() const;
+ EffectBase* GetInfo(std::size_t i);
+ EffectBase* RetargetEffect(std::size_t i, EffectType effect);
+ const EffectBase* GetInfo(std::size_t i) const;
+
+private:
+ std::size_t effect_count{};
+ std::vector<std::unique_ptr<EffectBase>> effects;
+};
+} // namespace AudioCore
diff --git a/src/audio_core/info_updater.cpp b/src/audio_core/info_updater.cpp
new file mode 100644
index 000000000..f53ce21a5
--- /dev/null
+++ b/src/audio_core/info_updater.cpp
@@ -0,0 +1,517 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/behavior_info.h"
+#include "audio_core/effect_context.h"
+#include "audio_core/info_updater.h"
+#include "audio_core/memory_pool.h"
+#include "audio_core/mix_context.h"
+#include "audio_core/sink_context.h"
+#include "audio_core/splitter_context.h"
+#include "audio_core/voice_context.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+InfoUpdater::InfoUpdater(const std::vector<u8>& in_params, std::vector<u8>& out_params,
+ BehaviorInfo& behavior_info)
+ : in_params(in_params), out_params(out_params), behavior_info(behavior_info) {
+ ASSERT(
+ AudioCommon::CanConsumeBuffer(in_params.size(), 0, sizeof(AudioCommon::UpdateDataHeader)));
+ std::memcpy(&input_header, in_params.data(), sizeof(AudioCommon::UpdateDataHeader));
+ output_header.total_size = sizeof(AudioCommon::UpdateDataHeader);
+}
+
+InfoUpdater::~InfoUpdater() = default;
+
+bool InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& in_behavior_info) {
+ if (input_header.size.behavior != sizeof(BehaviorInfo::InParams)) {
+ LOG_ERROR(Audio, "Behavior info is an invalid size, expecting 0x{:X} but got 0x{:X}",
+ sizeof(BehaviorInfo::InParams), input_header.size.behavior);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
+ sizeof(BehaviorInfo::InParams))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ BehaviorInfo::InParams behavior_in{};
+ std::memcpy(&behavior_in, in_params.data() + input_offset, sizeof(BehaviorInfo::InParams));
+ input_offset += sizeof(BehaviorInfo::InParams);
+
+ // Make sure it's an audio revision we can actually support
+ if (!AudioCommon::IsValidRevision(behavior_in.revision)) {
+ LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", behavior_in.revision);
+ return false;
+ }
+
+ // Make sure that our behavior info revision matches the input
+ if (in_behavior_info.GetUserRevision() != behavior_in.revision) {
+ LOG_ERROR(Audio,
+ "User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
+ in_behavior_info.GetUserRevision(), behavior_in.revision);
+ return false;
+ }
+
+ // Update behavior info flags
+ in_behavior_info.ClearError();
+ in_behavior_info.UpdateFlags(behavior_in.flags);
+
+ return true;
+}
+
+bool InfoUpdater::UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info) {
+ const auto force_mapping = behavior_info.IsMemoryPoolForceMappingEnabled();
+ const auto memory_pool_count = memory_pool_info.size();
+ const auto total_memory_pool_in = sizeof(ServerMemoryPoolInfo::InParams) * memory_pool_count;
+ const auto total_memory_pool_out = sizeof(ServerMemoryPoolInfo::OutParams) * memory_pool_count;
+
+ if (input_header.size.memory_pool != total_memory_pool_in) {
+ LOG_ERROR(Audio, "Memory pools are an invalid size, expecting 0x{:X} but got 0x{:X}",
+ total_memory_pool_in, input_header.size.memory_pool);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_memory_pool_in)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::vector<ServerMemoryPoolInfo::InParams> mempool_in(memory_pool_count);
+ std::vector<ServerMemoryPoolInfo::OutParams> mempool_out(memory_pool_count);
+
+ std::memcpy(mempool_in.data(), in_params.data() + input_offset, total_memory_pool_in);
+ input_offset += total_memory_pool_in;
+
+ // Update our memory pools
+ for (std::size_t i = 0; i < memory_pool_count; i++) {
+ if (!memory_pool_info[i].Update(mempool_in[i], mempool_out[i])) {
+ LOG_ERROR(Audio, "Failed to update memory pool {}!", i);
+ return false;
+ }
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset,
+ total_memory_pool_out)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(out_params.data() + output_offset, mempool_out.data(), total_memory_pool_out);
+ output_offset += total_memory_pool_out;
+ output_header.size.memory_pool = static_cast<u32>(total_memory_pool_out);
+ return true;
+}
+
+bool InfoUpdater::UpdateVoiceChannelResources(VoiceContext& voice_context) {
+ const auto voice_count = voice_context.GetVoiceCount();
+ const auto voice_size = voice_count * sizeof(VoiceChannelResource::InParams);
+ std::vector<VoiceChannelResource::InParams> resources_in(voice_count);
+
+ if (input_header.size.voice_channel_resource != voice_size) {
+ LOG_ERROR(Audio, "VoiceChannelResource is an invalid size, expecting 0x{:X} but got 0x{:X}",
+ voice_size, input_header.size.voice_channel_resource);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_size)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(resources_in.data(), in_params.data() + input_offset, voice_size);
+ input_offset += voice_size;
+
+ // Update our channel resources
+ for (std::size_t i = 0; i < voice_count; i++) {
+ // Grab our channel resource
+ auto& resource = voice_context.GetChannelResource(i);
+ resource.Update(resources_in[i]);
+ }
+
+ return true;
+}
+
+bool InfoUpdater::UpdateVoices(VoiceContext& voice_context,
+ std::vector<ServerMemoryPoolInfo>& memory_pool_info,
+ VAddr audio_codec_dsp_addr) {
+ const auto voice_count = voice_context.GetVoiceCount();
+ std::vector<VoiceInfo::InParams> voice_in(voice_count);
+ std::vector<VoiceInfo::OutParams> voice_out(voice_count);
+
+ const auto voice_in_size = voice_count * sizeof(VoiceInfo::InParams);
+ const auto voice_out_size = voice_count * sizeof(VoiceInfo::OutParams);
+
+ if (input_header.size.voice != voice_in_size) {
+ LOG_ERROR(Audio, "Voices are an invalid size, expecting 0x{:X} but got 0x{:X}",
+ voice_in_size, input_header.size.voice);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_in_size)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(voice_in.data(), in_params.data() + input_offset, voice_in_size);
+ input_offset += voice_in_size;
+
+ // Set all voices to not be in use
+ for (std::size_t i = 0; i < voice_count; i++) {
+ voice_context.GetInfo(i).GetInParams().in_use = false;
+ }
+
+ // Update our voices
+ for (std::size_t i = 0; i < voice_count; i++) {
+ auto& in_params = voice_in[i];
+ const auto channel_count = static_cast<std::size_t>(in_params.channel_count);
+ // Skip if it's not currently in use
+ if (!in_params.is_in_use) {
+ continue;
+ }
+ // Voice states for each channel
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> voice_states{};
+ ASSERT(in_params.id < voice_count);
+
+ // Grab our current voice info
+ auto& voice_info = voice_context.GetInfo(static_cast<std::size_t>(in_params.id));
+
+ ASSERT(channel_count <= AudioCommon::MAX_CHANNEL_COUNT);
+
+ // Get all our channel voice states
+ for (std::size_t channel = 0; channel < channel_count; channel++) {
+ voice_states[channel] =
+ &voice_context.GetState(in_params.voice_channel_resource_ids[channel]);
+ }
+
+ if (in_params.is_new) {
+ // Default our values for our voice
+ voice_info.Initialize();
+ if (channel_count == 0 || channel_count > AudioCommon::MAX_CHANNEL_COUNT) {
+ continue;
+ }
+
+ // Zero out our voice states
+ for (std::size_t channel = 0; channel < channel_count; channel++) {
+ std::memset(voice_states[channel], 0, sizeof(VoiceState));
+ }
+ }
+
+ // Update our voice
+ voice_info.UpdateParameters(in_params, behavior_info);
+ // TODO(ogniK): Handle mapping errors with behavior info based on in params response
+
+ // Update our wave buffers
+ voice_info.UpdateWaveBuffers(in_params, voice_states, behavior_info);
+ voice_info.WriteOutStatus(voice_out[i], in_params, voice_states);
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, voice_out_size)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ std::memcpy(out_params.data() + output_offset, voice_out.data(), voice_out_size);
+ output_offset += voice_out_size;
+ output_header.size.voice = static_cast<u32>(voice_out_size);
+ return true;
+}
+
+bool InfoUpdater::UpdateEffects(EffectContext& effect_context, bool is_active) {
+ const auto effect_count = effect_context.GetCount();
+ std::vector<EffectInfo::InParams> effect_in(effect_count);
+ std::vector<EffectInfo::OutParams> effect_out(effect_count);
+
+ const auto total_effect_in = effect_count * sizeof(EffectInfo::InParams);
+ const auto total_effect_out = effect_count * sizeof(EffectInfo::OutParams);
+
+ if (input_header.size.effect != total_effect_in) {
+ LOG_ERROR(Audio, "Effects are an invalid size, expecting 0x{:X} but got 0x{:X}",
+ total_effect_in, input_header.size.effect);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_effect_in)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(effect_in.data(), in_params.data() + input_offset, total_effect_in);
+ input_offset += total_effect_in;
+
+ // Update effects
+ for (std::size_t i = 0; i < effect_count; i++) {
+ auto* info = effect_context.GetInfo(i);
+ if (effect_in[i].type != info->GetType()) {
+ info = effect_context.RetargetEffect(i, effect_in[i].type);
+ }
+
+ info->Update(effect_in[i]);
+
+ if ((!is_active && info->GetUsage() != UsageState::Initialized) ||
+ info->GetUsage() == UsageState::Stopped) {
+ effect_out[i].status = UsageStatus::Removed;
+ } else {
+ effect_out[i].status = UsageStatus::Used;
+ }
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_effect_out)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(out_params.data() + output_offset, effect_out.data(), total_effect_out);
+ output_offset += total_effect_out;
+ output_header.size.effect = static_cast<u32>(total_effect_out);
+
+ return true;
+}
+
+bool InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
+ std::size_t start_offset = input_offset;
+ std::size_t bytes_read{};
+ // Update splitter context
+ if (!splitter_context.Update(in_params, input_offset, bytes_read)) {
+ LOG_ERROR(Audio, "Failed to update splitter context!");
+ return false;
+ }
+
+ const auto consumed = input_offset - start_offset;
+
+ if (input_header.size.splitter != consumed) {
+ LOG_ERROR(Audio, "Splitters is an invalid size, expecting 0x{:X} but got 0x{:X}",
+ bytes_read, input_header.size.splitter);
+ return false;
+ }
+
+ return true;
+}
+
+ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
+ SplitterContext& splitter_context,
+ EffectContext& effect_context) {
+ std::vector<MixInfo::InParams> mix_in_params;
+
+ if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
+ // If we're not dirty, get ALL mix in parameters
+ const auto context_mix_count = mix_context.GetCount();
+ const auto total_mix_in = context_mix_count * sizeof(MixInfo::InParams);
+ if (input_header.size.mixer != total_mix_in) {
+ LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
+ total_mix_in, input_header.size.mixer);
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_mix_in)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ mix_in_params.resize(context_mix_count);
+ std::memcpy(mix_in_params.data(), in_params.data() + input_offset, total_mix_in);
+
+ input_offset += total_mix_in;
+ } else {
+ // Only update the "dirty" mixes
+ MixInfo::DirtyHeader dirty_header{};
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
+ sizeof(MixInfo::DirtyHeader))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ std::memcpy(&dirty_header, in_params.data() + input_offset, sizeof(MixInfo::DirtyHeader));
+ input_offset += sizeof(MixInfo::DirtyHeader);
+
+ const auto total_mix_in =
+ dirty_header.mixer_count * sizeof(MixInfo::InParams) + sizeof(MixInfo::DirtyHeader);
+
+ if (input_header.size.mixer != total_mix_in) {
+ LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
+ total_mix_in, input_header.size.mixer);
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ if (dirty_header.mixer_count != 0) {
+ mix_in_params.resize(dirty_header.mixer_count);
+ std::memcpy(mix_in_params.data(), in_params.data() + input_offset,
+ mix_in_params.size() * sizeof(MixInfo::InParams));
+ input_offset += mix_in_params.size() * sizeof(MixInfo::InParams);
+ }
+ }
+
+ // Get our total input count
+ const auto mix_count = mix_in_params.size();
+
+ if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
+ // Only verify our buffer count if we're not dirty
+ std::size_t total_buffer_count{};
+ for (std::size_t i = 0; i < mix_count; i++) {
+ const auto& in = mix_in_params[i];
+ total_buffer_count += in.buffer_count;
+ if (in.dest_mix_id > mix_count && in.dest_mix_id != AudioCommon::NO_MIX &&
+ in.mix_id != AudioCommon::FINAL_MIX) {
+ LOG_ERROR(
+ Audio,
+ "Invalid mix destination, mix_id={:X}, dest_mix_id={:X}, mix_buffer_count={:X}",
+ in.mix_id, in.dest_mix_id, mix_buffer_count);
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+ }
+
+ if (total_buffer_count > mix_buffer_count) {
+ LOG_ERROR(Audio,
+ "Too many mix buffers used! mix_buffer_count={:X}, requesting_buffers={:X}",
+ mix_buffer_count, total_buffer_count);
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+ }
+
+ if (mix_buffer_count == 0) {
+ LOG_ERROR(Audio, "No mix buffers!");
+ return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ bool should_sort = false;
+ for (std::size_t i = 0; i < mix_count; i++) {
+ const auto& mix_in = mix_in_params[i];
+ std::size_t target_mix{};
+ if (behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
+ target_mix = mix_in.mix_id;
+ } else {
+            // Games without dirty-only update support just use i instead of the actual mix_id
+ target_mix = i;
+ }
+ auto& mix_info = mix_context.GetInfo(target_mix);
+ auto& mix_info_params = mix_info.GetInParams();
+ if (mix_info_params.in_use != mix_in.in_use) {
+ mix_info_params.in_use = mix_in.in_use;
+ mix_info.ResetEffectProcessingOrder();
+ should_sort = true;
+ }
+
+ if (mix_in.in_use) {
+ should_sort |= mix_info.Update(mix_context.GetEdgeMatrix(), mix_in, behavior_info,
+ splitter_context, effect_context);
+ }
+ }
+
+ if (should_sort && behavior_info.IsSplitterSupported()) {
+ // Sort our splitter data
+ if (!mix_context.TsortInfo(splitter_context)) {
+ return AudioCommon::Audren::ERR_SPLITTER_SORT_FAILED;
+ }
+ }
+
+    // TODO(ogniK): Sort when splitter is supported
+
+ return RESULT_SUCCESS;
+}
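+
+// Input layout sketch (illustrative, inferred from the parsing above): with dirty-only mix
+// updates supported, the mixer section of in_params is expected to be a MixInfo::DirtyHeader
+// (0x20 bytes, carrying mixer_count) followed by mixer_count MixInfo::InParams entries
+// (0x930 bytes each); otherwise it is simply one MixInfo::InParams per mix in the context,
+// in index order.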
+
+bool InfoUpdater::UpdateSinks(SinkContext& sink_context) {
+ const auto sink_count = sink_context.GetCount();
+ std::vector<SinkInfo::InParams> sink_in_params(sink_count);
+ const auto total_sink_in = sink_count * sizeof(SinkInfo::InParams);
+
+ if (input_header.size.sink != total_sink_in) {
+ LOG_ERROR(Audio, "Sinks are an invalid size, expecting 0x{:X} but got 0x{:X}",
+ total_sink_in, input_header.size.effect);
+ return false;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_sink_in)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ std::memcpy(sink_in_params.data(), in_params.data() + input_offset, total_sink_in);
+ input_offset += total_sink_in;
+
+ // TODO(ogniK): Properly update sinks
+ if (!sink_in_params.empty()) {
+ sink_context.UpdateMainSink(sink_in_params[0]);
+ }
+
+ output_header.size.sink = static_cast<u32>(0x20 * sink_count);
+ output_offset += 0x20 * sink_count;
+ return true;
+}
+
+bool InfoUpdater::UpdatePerformanceBuffer() {
+ output_header.size.performance = 0x10;
+ output_offset += 0x10;
+ return true;
+}
+
+bool InfoUpdater::UpdateErrorInfo(BehaviorInfo& in_behavior_info) {
+    const auto total_behavior_info_out = sizeof(BehaviorInfo::OutParams);
+
+    if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_behavior_info_out)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ BehaviorInfo::OutParams behavior_info_out{};
+ behavior_info.CopyErrorInfo(behavior_info_out);
+
+    std::memcpy(out_params.data() + output_offset, &behavior_info_out, total_behavior_info_out);
+    output_offset += total_behavior_info_out;
+    output_header.size.behavior = static_cast<u32>(total_behavior_info_out);
+
+ return true;
+}
+
+struct RendererInfo {
+    u64_le elapsed_frame_count{};
+ INSERT_PADDING_WORDS(2);
+};
+static_assert(sizeof(RendererInfo) == 0x10, "RendererInfo is an invalid size");
+
+bool InfoUpdater::UpdateRendererInfo(std::size_t elapsed_frame_count) {
+ const auto total_renderer_info_out = sizeof(RendererInfo);
+ if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_renderer_info_out)) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ RendererInfo out{};
+    out.elapsed_frame_count = elapsed_frame_count;
+ std::memcpy(out_params.data() + output_offset, &out, total_renderer_info_out);
+ output_offset += total_renderer_info_out;
+ output_header.size.render_info = total_renderer_info_out;
+
+ return true;
+}
+
+bool InfoUpdater::CheckConsumedSize() const {
+ if (output_offset != out_params.size()) {
+ LOG_ERROR(Audio, "Output is not consumed! Consumed {}, but requires {}. {} bytes remaining",
+ output_offset, out_params.size(), out_params.size() - output_offset);
+ return false;
+ }
+ /*if (input_offset != in_params.size()) {
+ LOG_ERROR(Audio, "Input is not consumed!");
+ return false;
+ }*/
+ return true;
+}
+
+bool InfoUpdater::WriteOutputHeader() {
+ if (!AudioCommon::CanConsumeBuffer(out_params.size(), 0,
+ sizeof(AudioCommon::UpdateDataHeader))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ output_header.revision = AudioCommon::CURRENT_PROCESS_REVISION;
+ const auto& sz = output_header.size;
+ output_header.total_size += sz.behavior + sz.memory_pool + sz.voice +
+ sz.voice_channel_resource + sz.effect + sz.mixer + sz.sink +
+ sz.performance + sz.splitter + sz.render_info;
+
+ std::memcpy(out_params.data(), &output_header, sizeof(AudioCommon::UpdateDataHeader));
+ return true;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/info_updater.h b/src/audio_core/info_updater.h
new file mode 100644
index 000000000..06f9d770f
--- /dev/null
+++ b/src/audio_core/info_updater.h
@@ -0,0 +1,58 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+#include "audio_core/common.h"
+#include "common/common_types.h"
+
+namespace AudioCore {
+
+class BehaviorInfo;
+class ServerMemoryPoolInfo;
+class VoiceContext;
+class EffectContext;
+class MixContext;
+class SinkContext;
+class SplitterContext;
+
+class InfoUpdater {
+public:
+ // TODO(ogniK): Pass process handle when we support it
+ InfoUpdater(const std::vector<u8>& in_params, std::vector<u8>& out_params,
+ BehaviorInfo& behavior_info);
+ ~InfoUpdater();
+
+ bool UpdateBehaviorInfo(BehaviorInfo& in_behavior_info);
+ bool UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info);
+ bool UpdateVoiceChannelResources(VoiceContext& voice_context);
+ bool UpdateVoices(VoiceContext& voice_context,
+ std::vector<ServerMemoryPoolInfo>& memory_pool_info,
+ VAddr audio_codec_dsp_addr);
+ bool UpdateEffects(EffectContext& effect_context, bool is_active);
+ bool UpdateSplitterInfo(SplitterContext& splitter_context);
+ ResultCode UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
+ SplitterContext& splitter_context, EffectContext& effect_context);
+ bool UpdateSinks(SinkContext& sink_context);
+ bool UpdatePerformanceBuffer();
+ bool UpdateErrorInfo(BehaviorInfo& in_behavior_info);
+ bool UpdateRendererInfo(std::size_t elapsed_frame_count);
+ bool CheckConsumedSize() const;
+
+ bool WriteOutputHeader();
+
+private:
+ const std::vector<u8>& in_params;
+ std::vector<u8>& out_params;
+ BehaviorInfo& behavior_info;
+
+ AudioCommon::UpdateDataHeader input_header{};
+ AudioCommon::UpdateDataHeader output_header{};
+
+ std::size_t input_offset{sizeof(AudioCommon::UpdateDataHeader)};
+ std::size_t output_offset{sizeof(AudioCommon::UpdateDataHeader)};
+};
+
+} // namespace AudioCore
diff --git a/src/audio_core/memory_pool.cpp b/src/audio_core/memory_pool.cpp
new file mode 100644
index 000000000..5a3453063
--- /dev/null
+++ b/src/audio_core/memory_pool.cpp
@@ -0,0 +1,62 @@
+
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/memory_pool.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+ServerMemoryPoolInfo::ServerMemoryPoolInfo() = default;
+ServerMemoryPoolInfo::~ServerMemoryPoolInfo() = default;
+bool ServerMemoryPoolInfo::Update(const ServerMemoryPoolInfo::InParams& in_params,
+ ServerMemoryPoolInfo::OutParams& out_params) {
+ // Our state does not need to be changed
+ if (in_params.state != ServerMemoryPoolInfo::State::RequestAttach &&
+ in_params.state != ServerMemoryPoolInfo::State::RequestDetach) {
+ return true;
+ }
+
+ // Address or size is null
+ if (in_params.address == 0 || in_params.size == 0) {
+ LOG_ERROR(Audio, "Memory pool address or size is zero! address={:X}, size={:X}",
+ in_params.address, in_params.size);
+ return false;
+ }
+
+ // Address or size is not aligned
+ if ((in_params.address % 0x1000) != 0 || (in_params.size % 0x1000) != 0) {
+ LOG_ERROR(Audio, "Memory pool address or size is not aligned! address={:X}, size={:X}",
+ in_params.address, in_params.size);
+ return false;
+ }
+
+ if (in_params.state == ServerMemoryPoolInfo::State::RequestAttach) {
+ cpu_address = in_params.address;
+ size = in_params.size;
+ used = true;
+ out_params.state = ServerMemoryPoolInfo::State::Attached;
+ } else {
+ // Unexpected address
+ if (cpu_address != in_params.address) {
+ LOG_ERROR(Audio, "Memory pool address differs! Expecting {:X} but address is {:X}",
+ cpu_address, in_params.address);
+ return false;
+ }
+
+ if (size != in_params.size) {
+ LOG_ERROR(Audio, "Memory pool size differs! Expecting {:X} but size is {:X}", size,
+ in_params.size);
+ return false;
+ }
+
+ cpu_address = 0;
+ size = 0;
+ used = false;
+ out_params.state = ServerMemoryPoolInfo::State::Detached;
+ }
+ return true;
+}
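+
+// Minimal usage sketch (illustrative only; the guest address and size below are made up):
+//     ServerMemoryPoolInfo pool;
+//     ServerMemoryPoolInfo::InParams in{};
+//     in.address = 0x08000000;  // hypothetical guest address, must be 0x1000-aligned
+//     in.size = 0x10000;        // hypothetical size, must be 0x1000-aligned
+//     in.state = ServerMemoryPoolInfo::State::RequestAttach;
+//     ServerMemoryPoolInfo::OutParams out{};
+//     pool.Update(in, out);     // on success, out.state == State::Attached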
+
+} // namespace AudioCore
diff --git a/src/audio_core/memory_pool.h b/src/audio_core/memory_pool.h
new file mode 100644
index 000000000..8ac503f1c
--- /dev/null
+++ b/src/audio_core/memory_pool.h
@@ -0,0 +1,53 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+
+namespace AudioCore {
+
+class ServerMemoryPoolInfo {
+public:
+ ServerMemoryPoolInfo();
+ ~ServerMemoryPoolInfo();
+
+ enum class State : u32_le {
+ Invalid = 0x0,
+ Aquired = 0x1,
+ RequestDetach = 0x2,
+ Detached = 0x3,
+ RequestAttach = 0x4,
+ Attached = 0x5,
+ Released = 0x6,
+ };
+
+ struct InParams {
+ u64_le address{};
+ u64_le size{};
+ ServerMemoryPoolInfo::State state{};
+ INSERT_PADDING_WORDS(3);
+ };
+ static_assert(sizeof(ServerMemoryPoolInfo::InParams) == 0x20, "InParams are an invalid size");
+
+ struct OutParams {
+ ServerMemoryPoolInfo::State state{};
+ INSERT_PADDING_WORDS(3);
+ };
+ static_assert(sizeof(ServerMemoryPoolInfo::OutParams) == 0x10, "OutParams are an invalid size");
+
+ bool Update(const ServerMemoryPoolInfo::InParams& in_params,
+ ServerMemoryPoolInfo::OutParams& out_params);
+
+private:
+    // There's another entry here, the DSP address; however, since we're not talking to the
+    // DSP, we can just use the address provided by the guest without needing to remap it
+ u64_le cpu_address{};
+ u64_le size{};
+ bool used{};
+};
+
+} // namespace AudioCore
diff --git a/src/audio_core/mix_context.cpp b/src/audio_core/mix_context.cpp
new file mode 100644
index 000000000..042891490
--- /dev/null
+++ b/src/audio_core/mix_context.cpp
@@ -0,0 +1,296 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/behavior_info.h"
+#include "audio_core/common.h"
+#include "audio_core/effect_context.h"
+#include "audio_core/mix_context.h"
+#include "audio_core/splitter_context.h"
+
+namespace AudioCore {
+MixContext::MixContext() = default;
+MixContext::~MixContext() = default;
+
+void MixContext::Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
+ std::size_t effect_count) {
+ info_count = mix_count;
+ infos.resize(info_count);
+ auto& final_mix = GetInfo(AudioCommon::FINAL_MIX);
+ final_mix.GetInParams().mix_id = AudioCommon::FINAL_MIX;
+ sorted_info.reserve(infos.size());
+ for (auto& info : infos) {
+ sorted_info.push_back(&info);
+ }
+
+ for (auto& info : infos) {
+ info.SetEffectCount(effect_count);
+ }
+
+ // Only initialize our edge matrix and node states if splitters are supported
+ if (behavior_info.IsSplitterSupported()) {
+ node_states.Initialize(mix_count);
+ edge_matrix.Initialize(mix_count);
+ }
+}
+
+void MixContext::UpdateDistancesFromFinalMix() {
+ // Set all distances to be invalid
+ for (std::size_t i = 0; i < info_count; i++) {
+ GetInfo(i).GetInParams().final_mix_distance = AudioCommon::NO_FINAL_MIX;
+ }
+
+ for (std::size_t i = 0; i < info_count; i++) {
+ auto& info = GetInfo(i);
+ auto& in_params = info.GetInParams();
+ // Populate our sorted info
+ sorted_info[i] = &info;
+
+ if (!in_params.in_use) {
+ continue;
+ }
+
+ auto mix_id = in_params.mix_id;
+        // Declared outside the loop below since its final value is used after the loop
+ s32 distance_to_final_mix{AudioCommon::FINAL_MIX};
+ for (; distance_to_final_mix < info_count; distance_to_final_mix++) {
+ if (mix_id == AudioCommon::FINAL_MIX) {
+ // If we're at the final mix, we're done
+ break;
+ } else if (mix_id == AudioCommon::NO_MIX) {
+ // If we have no more mix ids, we're done
+ distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
+ break;
+ } else {
+ const auto& dest_mix = GetInfo(mix_id);
+ const auto dest_mix_distance = dest_mix.GetInParams().final_mix_distance;
+
+ if (dest_mix_distance == AudioCommon::NO_FINAL_MIX) {
+ // If our current mix isn't pointing to a final mix, follow through
+ mix_id = dest_mix.GetInParams().dest_mix_id;
+ } else {
+ // Our current mix + 1 = final distance
+ distance_to_final_mix = dest_mix_distance + 1;
+ break;
+ }
+ }
+ }
+
+ // If we're out of range for our distance, mark it as no final mix
+ if (distance_to_final_mix >= info_count) {
+ distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
+ }
+
+ in_params.final_mix_distance = distance_to_final_mix;
+ }
+}
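+
+// Worked example (illustrative, assuming AudioCommon::FINAL_MIX is index 0): the final mix
+// keeps a distance of 0, a submix whose dest_mix_id is the final mix resolves to 1, a submix
+// feeding that submix resolves to 2, and a chain that ends in NO_MIX (or never reaches the
+// final mix) is marked NO_FINAL_MIX.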
+
+void MixContext::CalcMixBufferOffset() {
+ s32 offset{};
+ for (std::size_t i = 0; i < info_count; i++) {
+ auto& info = GetSortedInfo(i);
+ auto& in_params = info.GetInParams();
+ if (in_params.in_use) {
+ // Only update if in use
+ in_params.buffer_offset = offset;
+ offset += in_params.buffer_count;
+ }
+ }
+}
+
+void MixContext::SortInfo() {
+ // Get the distance to the final mix
+ UpdateDistancesFromFinalMix();
+
+ // Sort based on the distance to the final mix
+ std::sort(sorted_info.begin(), sorted_info.end(),
+ [](const ServerMixInfo* lhs, const ServerMixInfo* rhs) {
+ return lhs->GetInParams().final_mix_distance >
+ rhs->GetInParams().final_mix_distance;
+ });
+
+ // Calculate the mix buffer offset
+ CalcMixBufferOffset();
+}
+
+bool MixContext::TsortInfo(SplitterContext& splitter_context) {
+ // If we're not using mixes, just calculate the mix buffer offset
+ if (!splitter_context.UsingSplitter()) {
+ CalcMixBufferOffset();
+ return true;
+ }
+ // Sort our node states
+ if (!node_states.Tsort(edge_matrix)) {
+ return false;
+ }
+
+ // Get our sorted list
+ const auto sorted_list = node_states.GetIndexList();
+ std::size_t info_id{};
+ for (auto itr = sorted_list.rbegin(); itr != sorted_list.rend(); ++itr) {
+ // Set our sorted info
+ sorted_info[info_id++] = &GetInfo(*itr);
+ }
+
+ // Calculate the mix buffer offset
+ CalcMixBufferOffset();
+ return true;
+}
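+
+// Note: both sorting paths appear to produce the same convention: sorted_info runs from the
+// mixes farthest from the final mix down to the final mix itself, and CalcMixBufferOffset
+// then assigns buffer offsets to the in-use mixes in that processing order.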
+
+std::size_t MixContext::GetCount() const {
+ return info_count;
+}
+
+ServerMixInfo& MixContext::GetInfo(std::size_t i) {
+ ASSERT(i < info_count);
+ return infos.at(i);
+}
+
+const ServerMixInfo& MixContext::GetInfo(std::size_t i) const {
+ ASSERT(i < info_count);
+ return infos.at(i);
+}
+
+ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) {
+ ASSERT(i < info_count);
+ return *sorted_info.at(i);
+}
+
+const ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) const {
+ ASSERT(i < info_count);
+ return *sorted_info.at(i);
+}
+
+ServerMixInfo& MixContext::GetFinalMixInfo() {
+ return infos.at(AudioCommon::FINAL_MIX);
+}
+
+const ServerMixInfo& MixContext::GetFinalMixInfo() const {
+ return infos.at(AudioCommon::FINAL_MIX);
+}
+
+EdgeMatrix& MixContext::GetEdgeMatrix() {
+ return edge_matrix;
+}
+
+const EdgeMatrix& MixContext::GetEdgeMatrix() const {
+ return edge_matrix;
+}
+
+ServerMixInfo::ServerMixInfo() {
+ Cleanup();
+}
+ServerMixInfo::~ServerMixInfo() = default;
+
+const ServerMixInfo::InParams& ServerMixInfo::GetInParams() const {
+ return in_params;
+}
+
+ServerMixInfo::InParams& ServerMixInfo::GetInParams() {
+ return in_params;
+}
+
+bool ServerMixInfo::Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
+ BehaviorInfo& behavior_info, SplitterContext& splitter_context,
+ EffectContext& effect_context) {
+ in_params.volume = mix_in.volume;
+ in_params.sample_rate = mix_in.sample_rate;
+ in_params.buffer_count = mix_in.buffer_count;
+ in_params.in_use = mix_in.in_use;
+ in_params.mix_id = mix_in.mix_id;
+ in_params.node_id = mix_in.node_id;
+ for (std::size_t i = 0; i < mix_in.mix_volume.size(); i++) {
+ std::copy(mix_in.mix_volume[i].begin(), mix_in.mix_volume[i].end(),
+ in_params.mix_volume[i].begin());
+ }
+
+ bool require_sort = false;
+
+ if (behavior_info.IsSplitterSupported()) {
+ require_sort = UpdateConnection(edge_matrix, mix_in, splitter_context);
+ } else {
+ in_params.dest_mix_id = mix_in.dest_mix_id;
+ in_params.splitter_id = AudioCommon::NO_SPLITTER;
+ }
+
+ ResetEffectProcessingOrder();
+ const auto effect_count = effect_context.GetCount();
+ for (std::size_t i = 0; i < effect_count; i++) {
+ auto* effect_info = effect_context.GetInfo(i);
+ if (effect_info->GetMixID() == in_params.mix_id) {
+ effect_processing_order[effect_info->GetProcessingOrder()] = static_cast<s32>(i);
+ }
+ }
+
+ // TODO(ogniK): Update effect processing order
+ return require_sort;
+}
+
+bool ServerMixInfo::HasAnyConnection() const {
+ return in_params.splitter_id != AudioCommon::NO_SPLITTER ||
+ in_params.mix_id != AudioCommon::NO_MIX;
+}
+
+void ServerMixInfo::Cleanup() {
+ in_params.volume = 0.0f;
+ in_params.sample_rate = 0;
+ in_params.buffer_count = 0;
+ in_params.in_use = false;
+ in_params.mix_id = AudioCommon::NO_MIX;
+ in_params.node_id = 0;
+ in_params.buffer_offset = 0;
+ in_params.dest_mix_id = AudioCommon::NO_MIX;
+ in_params.splitter_id = AudioCommon::NO_SPLITTER;
+ std::memset(in_params.mix_volume.data(), 0, sizeof(float) * in_params.mix_volume.size());
+}
+
+void ServerMixInfo::SetEffectCount(std::size_t count) {
+ effect_processing_order.resize(count);
+ ResetEffectProcessingOrder();
+}
+
+void ServerMixInfo::ResetEffectProcessingOrder() {
+ for (auto& order : effect_processing_order) {
+ order = AudioCommon::NO_EFFECT_ORDER;
+ }
+}
+
+s32 ServerMixInfo::GetEffectOrder(std::size_t i) const {
+ return effect_processing_order.at(i);
+}
+
+bool ServerMixInfo::UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
+ SplitterContext& splitter_context) {
+ // Mixes are identical
+ if (in_params.dest_mix_id == mix_in.dest_mix_id &&
+ in_params.splitter_id == mix_in.splitter_id &&
+ ((in_params.splitter_id == AudioCommon::NO_SPLITTER) ||
+ !splitter_context.GetInfo(in_params.splitter_id).HasNewConnection())) {
+ return false;
+ }
+ // Remove current edges for mix id
+ edge_matrix.RemoveEdges(in_params.mix_id);
+ if (mix_in.dest_mix_id != AudioCommon::NO_MIX) {
+ // If we have a valid destination mix id, set our edge matrix
+ edge_matrix.Connect(in_params.mix_id, mix_in.dest_mix_id);
+ } else if (mix_in.splitter_id != AudioCommon::NO_SPLITTER) {
+ // Recurse our splitter linked and set our edges
+ auto& splitter_info = splitter_context.GetInfo(mix_in.splitter_id);
+ const auto length = splitter_info.GetLength();
+ for (s32 i = 0; i < length; i++) {
+ const auto* splitter_destination =
+ splitter_context.GetDestinationData(mix_in.splitter_id, i);
+ if (splitter_destination == nullptr) {
+ continue;
+ }
+ if (splitter_destination->ValidMixId()) {
+ edge_matrix.Connect(in_params.mix_id, splitter_destination->GetMixId());
+ }
+ }
+ }
+ in_params.dest_mix_id = mix_in.dest_mix_id;
+ in_params.splitter_id = mix_in.splitter_id;
+ return true;
+}
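+
+// Edge semantics (as used above): an edge runs from a mix to the mix it feeds, either its
+// direct dest_mix_id or every valid mix referenced by its splitter destinations, so the
+// resulting graph describes data flow toward the final mix and is what NodeStates::Tsort
+// later operates on.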
+
+} // namespace AudioCore
diff --git a/src/audio_core/mix_context.h b/src/audio_core/mix_context.h
new file mode 100644
index 000000000..6a588eeb4
--- /dev/null
+++ b/src/audio_core/mix_context.h
@@ -0,0 +1,114 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <vector>
+#include "audio_core/common.h"
+#include "audio_core/splitter_context.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+
+namespace AudioCore {
+class BehaviorInfo;
+class EffectContext;
+
+class MixInfo {
+public:
+ struct DirtyHeader {
+ u32_le magic{};
+ u32_le mixer_count{};
+ INSERT_PADDING_BYTES(0x18);
+ };
+ static_assert(sizeof(DirtyHeader) == 0x20, "MixInfo::DirtyHeader is an invalid size");
+
+ struct InParams {
+ float_le volume{};
+ s32_le sample_rate{};
+ s32_le buffer_count{};
+ bool in_use{};
+ INSERT_PADDING_BYTES(3);
+ s32_le mix_id{};
+ s32_le effect_count{};
+ u32_le node_id{};
+ INSERT_PADDING_WORDS(2);
+ std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
+ mix_volume{};
+ s32_le dest_mix_id{};
+ s32_le splitter_id{};
+ INSERT_PADDING_WORDS(1);
+ };
+ static_assert(sizeof(MixInfo::InParams) == 0x930, "MixInfo::InParams is an invalid size");
+};
+
+class ServerMixInfo {
+public:
+ struct InParams {
+ float volume{};
+ s32 sample_rate{};
+ s32 buffer_count{};
+ bool in_use{};
+ s32 mix_id{};
+ u32 node_id{};
+ std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
+ mix_volume{};
+ s32 dest_mix_id{};
+ s32 splitter_id{};
+ s32 buffer_offset{};
+ s32 final_mix_distance{};
+ };
+ ServerMixInfo();
+ ~ServerMixInfo();
+
+ const ServerMixInfo::InParams& GetInParams() const;
+ ServerMixInfo::InParams& GetInParams();
+
+ bool Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
+ BehaviorInfo& behavior_info, SplitterContext& splitter_context,
+ EffectContext& effect_context);
+ bool HasAnyConnection() const;
+ void Cleanup();
+ void SetEffectCount(std::size_t count);
+ void ResetEffectProcessingOrder();
+ s32 GetEffectOrder(std::size_t i) const;
+
+private:
+ std::vector<s32> effect_processing_order;
+ InParams in_params{};
+ bool UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
+ SplitterContext& splitter_context);
+};
+
+class MixContext {
+public:
+ MixContext();
+ ~MixContext();
+
+ void Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
+ std::size_t effect_count);
+ void SortInfo();
+ bool TsortInfo(SplitterContext& splitter_context);
+
+ std::size_t GetCount() const;
+ ServerMixInfo& GetInfo(std::size_t i);
+ const ServerMixInfo& GetInfo(std::size_t i) const;
+ ServerMixInfo& GetSortedInfo(std::size_t i);
+ const ServerMixInfo& GetSortedInfo(std::size_t i) const;
+ ServerMixInfo& GetFinalMixInfo();
+ const ServerMixInfo& GetFinalMixInfo() const;
+ EdgeMatrix& GetEdgeMatrix();
+ const EdgeMatrix& GetEdgeMatrix() const;
+
+private:
+ void CalcMixBufferOffset();
+ void UpdateDistancesFromFinalMix();
+
+ NodeStates node_states{};
+ EdgeMatrix edge_matrix{};
+ std::size_t info_count{};
+ std::vector<ServerMixInfo> infos{};
+ std::vector<ServerMixInfo*> sorted_info{};
+};
+} // namespace AudioCore
diff --git a/src/audio_core/sink_context.cpp b/src/audio_core/sink_context.cpp
new file mode 100644
index 000000000..0882b411a
--- /dev/null
+++ b/src/audio_core/sink_context.cpp
@@ -0,0 +1,31 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/sink_context.h"
+
+namespace AudioCore {
+SinkContext::SinkContext(std::size_t sink_count) : sink_count(sink_count) {}
+SinkContext::~SinkContext() = default;
+
+std::size_t SinkContext::GetCount() const {
+ return sink_count;
+}
+
+void SinkContext::UpdateMainSink(SinkInfo::InParams& in) {
+ in_use = in.in_use;
+ use_count = in.device.input_count;
+ std::memcpy(buffers.data(), in.device.input.data(), AudioCommon::MAX_CHANNEL_COUNT);
+}
+
+bool SinkContext::InUse() const {
+ return in_use;
+}
+
+std::vector<u8> SinkContext::OutputBuffers() const {
+ std::vector<u8> buffer_ret(use_count);
+ std::memcpy(buffer_ret.data(), buffers.data(), use_count);
+ return buffer_ret;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/sink_context.h b/src/audio_core/sink_context.h
new file mode 100644
index 000000000..d7aa72ba7
--- /dev/null
+++ b/src/audio_core/sink_context.h
@@ -0,0 +1,89 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "audio_core/common.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+
+namespace AudioCore {
+
+enum class SinkTypes : u8 {
+ Invalid = 0,
+ Device = 1,
+ Circular = 2,
+};
+
+enum class SinkSampleFormat : u32_le {
+ None = 0,
+ Pcm8 = 1,
+ Pcm16 = 2,
+ Pcm24 = 3,
+ Pcm32 = 4,
+ PcmFloat = 5,
+ Adpcm = 6,
+};
+
+class SinkInfo {
+public:
+ struct CircularBufferIn {
+ u64_le address;
+ u32_le size;
+ u32_le input_count;
+ u32_le sample_count;
+ u32_le previous_position;
+ SinkSampleFormat sample_format;
+ std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
+ bool in_use;
+ INSERT_UNION_PADDING_BYTES(5);
+ };
+    static_assert(sizeof(SinkInfo::CircularBufferIn) == 0x28,
+                  "SinkInfo::CircularBufferIn is an invalid size");
+
+ struct DeviceIn {
+ std::array<u8, 255> device_name;
+ INSERT_UNION_PADDING_BYTES(1);
+ s32_le input_count;
+ std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
+ INSERT_UNION_PADDING_BYTES(1);
+ bool down_matrix_enabled;
+ std::array<float_le, 4> down_matrix_coef;
+ };
+ static_assert(sizeof(SinkInfo::DeviceIn) == 0x11c, "SinkInfo::DeviceIn is an invalid size");
+
+ struct InParams {
+ SinkTypes type{};
+ bool in_use{};
+ INSERT_PADDING_BYTES(2);
+ u32_le node_id{};
+ INSERT_PADDING_WORDS(6);
+ union {
+ // std::array<u8, 0x120> raw{};
+ SinkInfo::DeviceIn device;
+ SinkInfo::CircularBufferIn circular_buffer;
+ };
+ };
+ static_assert(sizeof(SinkInfo::InParams) == 0x140, "SinkInfo::InParams are an invalid size!");
+};
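+
+// Size note (illustrative): InParams is 0x20 bytes of leading fields plus the union; the
+// union's largest member, DeviceIn (0x11c), rounds up to 0x120 with alignment, giving the
+// asserted total of 0x140. CircularBufferIn (0x28) shares the start of that same region,
+// which is why the commented-out raw array is 0x120 bytes.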
+
+class SinkContext {
+public:
+ explicit SinkContext(std::size_t sink_count);
+ ~SinkContext();
+
+ std::size_t GetCount() const;
+
+ void UpdateMainSink(SinkInfo::InParams& in);
+ bool InUse() const;
+ std::vector<u8> OutputBuffers() const;
+
+private:
+ bool in_use{false};
+ s32 use_count{};
+ std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> buffers{};
+ std::size_t sink_count{};
+};
+} // namespace AudioCore
diff --git a/src/audio_core/splitter_context.cpp b/src/audio_core/splitter_context.cpp
new file mode 100644
index 000000000..79bb2f516
--- /dev/null
+++ b/src/audio_core/splitter_context.cpp
@@ -0,0 +1,617 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/behavior_info.h"
+#include "audio_core/splitter_context.h"
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+ServerSplitterDestinationData::ServerSplitterDestinationData(s32 id) : id(id) {}
+ServerSplitterDestinationData::~ServerSplitterDestinationData() = default;
+
+void ServerSplitterDestinationData::Update(SplitterInfo::InDestinationParams& header) {
+ // Log error as these are not actually failure states
+ if (header.magic != SplitterMagic::DataHeader) {
+ LOG_ERROR(Audio, "Splitter destination header is invalid!");
+ return;
+ }
+
+ // Incorrect splitter id
+ if (header.splitter_id != id) {
+ LOG_ERROR(Audio, "Splitter destination ids do not match!");
+ return;
+ }
+
+ mix_id = header.mix_id;
+ // Copy our mix volumes
+ std::copy(header.mix_volumes.begin(), header.mix_volumes.end(), current_mix_volumes.begin());
+ if (!in_use && header.in_use) {
+ // Update mix volumes
+ std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
+ needs_update = false;
+ }
+ in_use = header.in_use;
+}
+
+ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() {
+ return next;
+}
+
+const ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() const {
+ return next;
+}
+
+void ServerSplitterDestinationData::SetNextDestination(ServerSplitterDestinationData* dest) {
+ next = dest;
+}
+
+bool ServerSplitterDestinationData::ValidMixId() const {
+ return GetMixId() != AudioCommon::NO_MIX;
+}
+
+s32 ServerSplitterDestinationData::GetMixId() const {
+ return mix_id;
+}
+
+bool ServerSplitterDestinationData::IsConfigured() const {
+ return in_use && ValidMixId();
+}
+
+float ServerSplitterDestinationData::GetMixVolume(std::size_t i) const {
+ ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
+ return current_mix_volumes.at(i);
+}
+
+const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
+ServerSplitterDestinationData::CurrentMixVolumes() const {
+ return current_mix_volumes;
+}
+
+const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
+ServerSplitterDestinationData::LastMixVolumes() const {
+ return last_mix_volumes;
+}
+
+void ServerSplitterDestinationData::MarkDirty() {
+ needs_update = true;
+}
+
+void ServerSplitterDestinationData::UpdateInternalState() {
+ if (in_use && needs_update) {
+ std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
+ }
+ needs_update = false;
+}
+
+ServerSplitterInfo::ServerSplitterInfo(s32 id) : id(id) {}
+ServerSplitterInfo::~ServerSplitterInfo() = default;
+
+void ServerSplitterInfo::InitializeInfos() {
+ send_length = 0;
+ head = nullptr;
+ new_connection = true;
+}
+
+void ServerSplitterInfo::ClearNewConnectionFlag() {
+ new_connection = false;
+}
+
+std::size_t ServerSplitterInfo::Update(SplitterInfo::InInfoPrams& header) {
+ if (header.send_id != id) {
+ return 0;
+ }
+
+ sample_rate = header.sample_rate;
+ new_connection = true;
+    // We need to update the size here because the splitter bug provides an incorrect size.
+    // We're supposed to also update the header here, but we just ignore it and continue
+ return (sizeof(s32_le) * (header.length - 1)) + (sizeof(s32_le) * 3);
+}
+
+ServerSplitterDestinationData* ServerSplitterInfo::GetHead() {
+ return head;
+}
+
+const ServerSplitterDestinationData* ServerSplitterInfo::GetHead() const {
+ return head;
+}
+
+ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) {
+ auto current_head = head;
+ for (std::size_t i = 0; i < depth; i++) {
+ if (current_head == nullptr) {
+ return nullptr;
+ }
+ current_head = current_head->GetNextDestination();
+ }
+ return current_head;
+}
+
+const ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) const {
+ auto current_head = head;
+ for (std::size_t i = 0; i < depth; i++) {
+ if (current_head == nullptr) {
+ return nullptr;
+ }
+ current_head = current_head->GetNextDestination();
+ }
+ return current_head;
+}
+
+bool ServerSplitterInfo::HasNewConnection() const {
+ return new_connection;
+}
+
+s32 ServerSplitterInfo::GetLength() const {
+ return send_length;
+}
+
+void ServerSplitterInfo::SetHead(ServerSplitterDestinationData* new_head) {
+ head = new_head;
+}
+
+void ServerSplitterInfo::SetHeadDepth(s32 length) {
+ send_length = length;
+}
+
+SplitterContext::SplitterContext() = default;
+SplitterContext::~SplitterContext() = default;
+
+void SplitterContext::Initialize(BehaviorInfo& behavior_info, std::size_t _info_count,
+ std::size_t _data_count) {
+ if (!behavior_info.IsSplitterSupported() || _data_count == 0 || _info_count == 0) {
+ Setup(0, 0, false);
+ return;
+ }
+ // Only initialize if we're using splitters
+ Setup(_info_count, _data_count, behavior_info.IsSplitterBugFixed());
+}
+
+bool SplitterContext::Update(const std::vector<u8>& input, std::size_t& input_offset,
+ std::size_t& bytes_read) {
+ const auto UpdateOffsets = [&](std::size_t read) {
+ input_offset += read;
+ bytes_read += read;
+ };
+
+ if (info_count == 0 || data_count == 0) {
+ bytes_read = 0;
+ return true;
+ }
+
+ if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
+ sizeof(SplitterInfo::InHeader))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ SplitterInfo::InHeader header{};
+ std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InHeader));
+ UpdateOffsets(sizeof(SplitterInfo::InHeader));
+
+ if (header.magic != SplitterMagic::SplitterHeader) {
+ LOG_ERROR(Audio, "Invalid header magic! Expecting {:X} but got {:X}",
+ SplitterMagic::SplitterHeader, header.magic);
+ return false;
+ }
+
+ // Clear all connections
+ for (auto& info : infos) {
+ info.ClearNewConnectionFlag();
+ }
+
+ UpdateInfo(input, input_offset, bytes_read, header.info_count);
+ UpdateData(input, input_offset, bytes_read, header.data_count);
+ const auto aligned_bytes_read = Common::AlignUp(bytes_read, 16);
+ input_offset += aligned_bytes_read - bytes_read;
+ bytes_read = aligned_bytes_read;
+ return true;
+}
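+
+// Splitter section layout sketch (illustrative, based on the parsing in Update, UpdateInfo,
+// UpdateData and RecomposeDestination): a SplitterInfo::InHeader (0x20 bytes), then for each
+// of header.info_count splitters a SplitterInfo::InInfoPrams followed by its destination
+// resource ids (the consumed size is adjusted for the "splitter bug" in
+// ServerSplitterInfo::Update), then header.data_count SplitterInfo::InDestinationParams
+// entries (0x70 bytes each), with the total consumed size padded up to a 16-byte boundary.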
+
+bool SplitterContext::UsingSplitter() const {
+ return info_count > 0 && data_count > 0;
+}
+
+ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) {
+ ASSERT(i < info_count);
+ return infos.at(i);
+}
+
+const ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) const {
+ ASSERT(i < info_count);
+ return infos.at(i);
+}
+
+ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) {
+ ASSERT(i < data_count);
+ return datas.at(i);
+}
+
+const ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) const {
+ ASSERT(i < data_count);
+ return datas.at(i);
+}
+
+ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
+ std::size_t data) {
+ ASSERT(info < info_count);
+ auto& cur_info = GetInfo(info);
+ return cur_info.GetData(data);
+}
+
+const ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
+ std::size_t data) const {
+ ASSERT(info < info_count);
+ auto& cur_info = GetInfo(info);
+ return cur_info.GetData(data);
+}
+
+void SplitterContext::UpdateInternalState() {
+ if (data_count == 0) {
+ return;
+ }
+
+ for (auto& data : datas) {
+ data.UpdateInternalState();
+ }
+}
+
+std::size_t SplitterContext::GetInfoCount() const {
+ return info_count;
+}
+
+std::size_t SplitterContext::GetDataCount() const {
+ return data_count;
+}
+
+void SplitterContext::Setup(std::size_t _info_count, std::size_t _data_count,
+ bool is_splitter_bug_fixed) {
+
+ info_count = _info_count;
+ data_count = _data_count;
+
+ for (std::size_t i = 0; i < info_count; i++) {
+ auto& splitter = infos.emplace_back(static_cast<s32>(i));
+ splitter.InitializeInfos();
+ }
+ for (std::size_t i = 0; i < data_count; i++) {
+ datas.emplace_back(static_cast<s32>(i));
+ }
+
+ bug_fixed = is_splitter_bug_fixed;
+}
+
+bool SplitterContext::UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
+ std::size_t& bytes_read, s32 in_splitter_count) {
+ const auto UpdateOffsets = [&](std::size_t read) {
+ input_offset += read;
+ bytes_read += read;
+ };
+
+ for (s32 i = 0; i < in_splitter_count; i++) {
+ if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
+ sizeof(SplitterInfo::InInfoPrams))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ SplitterInfo::InInfoPrams header{};
+ std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InInfoPrams));
+
+        // Logged as an error, but these don't actually cause a bailout for some reason
+ if (header.magic != SplitterMagic::InfoHeader) {
+ LOG_ERROR(Audio, "Bad splitter data header");
+ break;
+ }
+
+        if (header.send_id < 0 || header.send_id >= static_cast<s32>(info_count)) {
+ LOG_ERROR(Audio, "Bad splitter data id");
+ break;
+ }
+
+ UpdateOffsets(sizeof(SplitterInfo::InInfoPrams));
+ auto& info = GetInfo(header.send_id);
+ if (!RecomposeDestination(info, header, input, input_offset)) {
+ LOG_ERROR(Audio, "Failed to recompose destination for splitter!");
+ return false;
+ }
+ const std::size_t read = info.Update(header);
+ bytes_read += read;
+ input_offset += read;
+ }
+ return true;
+}
+
+bool SplitterContext::UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
+ std::size_t& bytes_read, s32 in_data_count) {
+ const auto UpdateOffsets = [&](std::size_t read) {
+ input_offset += read;
+ bytes_read += read;
+ };
+
+ for (s32 i = 0; i < in_data_count; i++) {
+ if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
+ sizeof(SplitterInfo::InDestinationParams))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ SplitterInfo::InDestinationParams header{};
+ std::memcpy(&header, input.data() + input_offset,
+ sizeof(SplitterInfo::InDestinationParams));
+ UpdateOffsets(sizeof(SplitterInfo::InDestinationParams));
+
+        // Logged as an error, but these don't actually cause a bailout for some reason
+ if (header.magic != SplitterMagic::DataHeader) {
+ LOG_ERROR(Audio, "Bad splitter data header");
+ break;
+ }
+
+        if (header.splitter_id < 0 || header.splitter_id >= static_cast<s32>(data_count)) {
+ LOG_ERROR(Audio, "Bad splitter data id");
+ break;
+ }
+ GetData(header.splitter_id).Update(header);
+ }
+ return true;
+}
+
+bool SplitterContext::RecomposeDestination(ServerSplitterInfo& info,
+ SplitterInfo::InInfoPrams& header,
+ const std::vector<u8>& input,
+ const std::size_t& input_offset) {
+ // Clear our current destinations
+ auto* current_head = info.GetHead();
+ while (current_head != nullptr) {
+ auto next_head = current_head->GetNextDestination();
+ current_head->SetNextDestination(nullptr);
+ current_head = next_head;
+ }
+ info.SetHead(nullptr);
+
+ s32 size = header.length;
+ // If the splitter bug is present, calculate fixed size
+ if (!bug_fixed) {
+ if (info_count > 0) {
+ const auto factor = data_count / info_count;
+ size = std::min(header.length, static_cast<s32>(factor));
+ } else {
+ size = 0;
+ }
+ }
+
+ if (size < 1) {
+ LOG_ERROR(Audio, "Invalid splitter info size! size={:X}", size);
+ return true;
+ }
+
+ auto* start_head = &GetData(header.resource_id_base);
+ current_head = start_head;
+ std::vector<s32_le> resource_ids(size - 1);
+ if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
+ resource_ids.size() * sizeof(s32_le))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ std::memcpy(resource_ids.data(), input.data() + input_offset,
+ resource_ids.size() * sizeof(s32_le));
+
+ for (auto resource_id : resource_ids) {
+ auto* head = &GetData(resource_id);
+ current_head->SetNextDestination(head);
+ current_head = head;
+ }
+
+ info.SetHead(start_head);
+ info.SetHeadDepth(size);
+
+ return true;
+}
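+
+// Illustrative example of the chain rebuilt above: a header with resource_id_base = 4,
+// length = 3 and a payload of {7, 9} links the destinations 4 -> 7 -> 9, sets destination 4
+// as the head of the splitter info and records a depth of 3.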
+
+NodeStates::NodeStates() = default;
+NodeStates::~NodeStates() = default;
+
+void NodeStates::Initialize(std::size_t node_count_) {
+ // Setup our work parameters
+ node_count = node_count_;
+ was_node_found.resize(node_count);
+ was_node_completed.resize(node_count);
+ index_list.resize(node_count);
+ index_stack.Reset(node_count * node_count);
+}
+
+bool NodeStates::Tsort(EdgeMatrix& edge_matrix) {
+ return DepthFirstSearch(edge_matrix);
+}
+
+std::size_t NodeStates::GetIndexPos() const {
+ return index_pos;
+}
+
+const std::vector<s32>& NodeStates::GetIndexList() const {
+ return index_list;
+}
+
+void NodeStates::PushTsortResult(s32 index) {
+ ASSERT(index < node_count);
+ index_list[index_pos++] = index;
+}
+
+bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
+ ResetState();
+ for (std::size_t i = 0; i < node_count; i++) {
+ const auto node_id = static_cast<s32>(i);
+
+ // If we don't have a state, send to our index stack for work
+ if (GetState(i) == NodeStates::State::NoState) {
+ index_stack.push(node_id);
+ }
+
+ // While we have work to do in our stack
+ while (index_stack.Count() > 0) {
+ // Get the current node
+ const auto current_stack_index = index_stack.top();
+ // Check if we've seen the node yet
+ const auto index_state = GetState(current_stack_index);
+ if (index_state == NodeStates::State::NoState) {
+ // Mark the node as seen
+ UpdateState(NodeStates::State::InFound, current_stack_index);
+ } else if (index_state == NodeStates::State::InFound) {
+ // We've seen this node before, mark it as completed
+ UpdateState(NodeStates::State::InCompleted, current_stack_index);
+ // Update our index list
+ PushTsortResult(current_stack_index);
+ // Pop the stack
+ index_stack.pop();
+ continue;
+ } else if (index_state == NodeStates::State::InCompleted) {
+ // If our node is already sorted, clear it
+ index_stack.pop();
+ continue;
+ }
+
+            const auto edge_node_count = edge_matrix.GetNodeCount();
+            for (s32 j = 0; j < static_cast<s32>(edge_node_count); j++) {
+ // Check if our node is connected to our edge matrix
+ if (!edge_matrix.Connected(current_stack_index, j)) {
+ continue;
+ }
+
+ // Check if our node exists
+ const auto node_state = GetState(j);
+ if (node_state == NodeStates::State::NoState) {
+ // Add more work
+ index_stack.push(j);
+ } else if (node_state == NodeStates::State::InFound) {
+ UNREACHABLE_MSG("Node start marked as found");
+ ResetState();
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
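+
+// The loop above is an iterative post-order depth-first search: a node is marked InFound on
+// first sight, its successors are pushed, and only once it reaches the top of the stack again
+// is it marked InCompleted and appended to index_list. Consuming index_list in reverse (as
+// TsortInfo does) therefore yields a topological order with source mixes ahead of the mixes
+// they feed; hitting an InFound successor indicates a cycle and aborts the sort.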
+
+void NodeStates::ResetState() {
+ // Reset to the start of our index stack
+ index_pos = 0;
+ for (std::size_t i = 0; i < node_count; i++) {
+ // Mark all nodes as not found
+ was_node_found[i] = false;
+ // Mark all nodes as uncompleted
+ was_node_completed[i] = false;
+ // Mark all indexes as invalid
+ index_list[i] = -1;
+ }
+}
+
+void NodeStates::UpdateState(NodeStates::State state, std::size_t i) {
+ switch (state) {
+ case NodeStates::State::NoState:
+ was_node_found[i] = false;
+ was_node_completed[i] = false;
+ break;
+ case NodeStates::State::InFound:
+ was_node_found[i] = true;
+ was_node_completed[i] = false;
+ break;
+ case NodeStates::State::InCompleted:
+ was_node_found[i] = false;
+ was_node_completed[i] = true;
+ break;
+ }
+}
+
+NodeStates::State NodeStates::GetState(std::size_t i) {
+ ASSERT(i < node_count);
+ if (was_node_found[i]) {
+ // If our node exists in our found list
+ return NodeStates::State::InFound;
+ } else if (was_node_completed[i]) {
+ // If node is in the completed list
+ return NodeStates::State::InCompleted;
+ } else {
+ // If in neither
+ return NodeStates::State::NoState;
+ }
+}
+
+NodeStates::Stack::Stack() = default;
+NodeStates::Stack::~Stack() = default;
+
+void NodeStates::Stack::Reset(std::size_t size) {
+ // Mark our stack as empty
+ stack.resize(size);
+ stack_size = size;
+ stack_pos = 0;
+ std::fill(stack.begin(), stack.end(), 0);
+}
+
+void NodeStates::Stack::push(s32 val) {
+ ASSERT(stack_pos < stack_size);
+ stack[stack_pos++] = val;
+}
+
+std::size_t NodeStates::Stack::Count() const {
+ return stack_pos;
+}
+
+s32 NodeStates::Stack::top() const {
+ ASSERT(stack_pos > 0);
+ return stack[stack_pos - 1];
+}
+
+s32 NodeStates::Stack::pop() {
+ ASSERT(stack_pos > 0);
+ stack_pos--;
+ return stack[stack_pos];
+}
+
+EdgeMatrix::EdgeMatrix() = default;
+EdgeMatrix::~EdgeMatrix() = default;
+
+void EdgeMatrix::Initialize(std::size_t _node_count) {
+ node_count = _node_count;
+ edge_matrix.resize(node_count * node_count);
+}
+
+bool EdgeMatrix::Connected(s32 a, s32 b) {
+ return GetState(a, b);
+}
+
+void EdgeMatrix::Connect(s32 a, s32 b) {
+ SetState(a, b, true);
+}
+
+void EdgeMatrix::Disconnect(s32 a, s32 b) {
+ SetState(a, b, false);
+}
+
+void EdgeMatrix::RemoveEdges(s32 edge) {
+ for (std::size_t i = 0; i < node_count; i++) {
+ SetState(edge, static_cast<s32>(i), false);
+ }
+}
+
+std::size_t EdgeMatrix::GetNodeCount() const {
+ return node_count;
+}
+
+void EdgeMatrix::SetState(s32 a, s32 b, bool state) {
+ ASSERT(InRange(a, b));
+ edge_matrix.at(a * node_count + b) = state;
+}
+
+bool EdgeMatrix::GetState(s32 a, s32 b) {
+ ASSERT(InRange(a, b));
+ return edge_matrix.at(a * node_count + b);
+}
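+
+// Storage note (illustrative): the adjacency matrix is a flat, row-major vector<bool>, so for
+// a node_count of 3, Connect(0, 2) sets edge_matrix[0 * 3 + 2] and RemoveEdges(1) clears
+// indices 3 through 5.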
+
+bool EdgeMatrix::InRange(s32 a, s32 b) const {
+ const std::size_t pos = a * node_count + b;
+ return pos < (node_count * node_count);
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/splitter_context.h b/src/audio_core/splitter_context.h
new file mode 100644
index 000000000..ea6239fdb
--- /dev/null
+++ b/src/audio_core/splitter_context.h
@@ -0,0 +1,221 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <stack>
+#include <vector>
+#include "audio_core/common.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+
+namespace AudioCore {
+class BehaviorInfo;
+
+class EdgeMatrix {
+public:
+ EdgeMatrix();
+ ~EdgeMatrix();
+
+ void Initialize(std::size_t _node_count);
+ bool Connected(s32 a, s32 b);
+ void Connect(s32 a, s32 b);
+ void Disconnect(s32 a, s32 b);
+ void RemoveEdges(s32 edge);
+ std::size_t GetNodeCount() const;
+
+private:
+ void SetState(s32 a, s32 b, bool state);
+ bool GetState(s32 a, s32 b);
+
+ bool InRange(s32 a, s32 b) const;
+ std::vector<bool> edge_matrix{};
+ std::size_t node_count{};
+};
+
+class NodeStates {
+public:
+ enum class State {
+ NoState = 0,
+ InFound = 1,
+ InCompleted = 2,
+ };
+
+    // Appears to be a fixed-size stack; placed within the NodeStates class based on symbols
+ class Stack {
+ public:
+ Stack();
+ ~Stack();
+
+ void Reset(std::size_t size);
+ void push(s32 val);
+ std::size_t Count() const;
+ s32 top() const;
+ s32 pop();
+
+ private:
+ std::vector<s32> stack{};
+ std::size_t stack_size{};
+ std::size_t stack_pos{};
+ };
+ NodeStates();
+ ~NodeStates();
+
+ void Initialize(std::size_t _node_count);
+ bool Tsort(EdgeMatrix& edge_matrix);
+ std::size_t GetIndexPos() const;
+ const std::vector<s32>& GetIndexList() const;
+
+private:
+ void PushTsortResult(s32 index);
+ bool DepthFirstSearch(EdgeMatrix& edge_matrix);
+ void ResetState();
+ void UpdateState(NodeStates::State state, std::size_t i);
+ NodeStates::State GetState(std::size_t i);
+
+ std::size_t node_count{};
+ std::vector<bool> was_node_found{};
+ std::vector<bool> was_node_completed{};
+ std::size_t index_pos{};
+ std::vector<s32> index_list{};
+ NodeStates::Stack index_stack{};
+};
+
+enum class SplitterMagic : u32_le {
+ SplitterHeader = Common::MakeMagic('S', 'N', 'D', 'H'),
+ DataHeader = Common::MakeMagic('S', 'N', 'D', 'D'),
+ InfoHeader = Common::MakeMagic('S', 'N', 'D', 'I'),
+};
+
+class SplitterInfo {
+public:
+ struct InHeader {
+ SplitterMagic magic{};
+ s32_le info_count{};
+ s32_le data_count{};
+ INSERT_PADDING_WORDS(5);
+ };
+ static_assert(sizeof(SplitterInfo::InHeader) == 0x20,
+ "SplitterInfo::InHeader is an invalid size");
+
+ struct InInfoPrams {
+ SplitterMagic magic{};
+ s32_le send_id{};
+ s32_le sample_rate{};
+ s32_le length{};
+ s32_le resource_id_base{};
+ };
+ static_assert(sizeof(SplitterInfo::InInfoPrams) == 0x14,
+ "SplitterInfo::InInfoPrams is an invalid size");
+
+ struct InDestinationParams {
+ SplitterMagic magic{};
+ s32_le splitter_id{};
+ std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volumes{};
+ s32_le mix_id{};
+ bool in_use{};
+ INSERT_PADDING_BYTES(3);
+ };
+ static_assert(sizeof(SplitterInfo::InDestinationParams) == 0x70,
+ "SplitterInfo::InDestinationParams is an invalid size");
+};
+
+class ServerSplitterDestinationData {
+public:
+ explicit ServerSplitterDestinationData(s32 id);
+ ~ServerSplitterDestinationData();
+
+ void Update(SplitterInfo::InDestinationParams& header);
+
+ ServerSplitterDestinationData* GetNextDestination();
+ const ServerSplitterDestinationData* GetNextDestination() const;
+ void SetNextDestination(ServerSplitterDestinationData* dest);
+ bool ValidMixId() const;
+ s32 GetMixId() const;
+ bool IsConfigured() const;
+ float GetMixVolume(std::size_t i) const;
+ const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& CurrentMixVolumes() const;
+ const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& LastMixVolumes() const;
+ void MarkDirty();
+ void UpdateInternalState();
+
+private:
+ bool needs_update{};
+ bool in_use{};
+ s32 id{};
+ s32 mix_id{};
+ std::array<float, AudioCommon::MAX_MIX_BUFFERS> current_mix_volumes{};
+ std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volumes{};
+ ServerSplitterDestinationData* next = nullptr;
+};
+
+class ServerSplitterInfo {
+public:
+ explicit ServerSplitterInfo(s32 id);
+ ~ServerSplitterInfo();
+
+ void InitializeInfos();
+ void ClearNewConnectionFlag();
+ std::size_t Update(SplitterInfo::InInfoPrams& header);
+
+ ServerSplitterDestinationData* GetHead();
+ const ServerSplitterDestinationData* GetHead() const;
+ ServerSplitterDestinationData* GetData(std::size_t depth);
+ const ServerSplitterDestinationData* GetData(std::size_t depth) const;
+
+ bool HasNewConnection() const;
+ s32 GetLength() const;
+
+ void SetHead(ServerSplitterDestinationData* new_head);
+ void SetHeadDepth(s32 length);
+
+private:
+ s32 sample_rate{};
+ s32 id{};
+ s32 send_length{};
+ ServerSplitterDestinationData* head = nullptr;
+ bool new_connection{};
+};
+
+class SplitterContext {
+public:
+ SplitterContext();
+ ~SplitterContext();
+
+ void Initialize(BehaviorInfo& behavior_info, std::size_t splitter_count,
+ std::size_t data_count);
+
+ bool Update(const std::vector<u8>& input, std::size_t& input_offset, std::size_t& bytes_read);
+ bool UsingSplitter() const;
+
+ ServerSplitterInfo& GetInfo(std::size_t i);
+ const ServerSplitterInfo& GetInfo(std::size_t i) const;
+ ServerSplitterDestinationData& GetData(std::size_t i);
+ const ServerSplitterDestinationData& GetData(std::size_t i) const;
+ ServerSplitterDestinationData* GetDestinationData(std::size_t info, std::size_t data);
+ const ServerSplitterDestinationData* GetDestinationData(std::size_t info,
+ std::size_t data) const;
+ void UpdateInternalState();
+
+ std::size_t GetInfoCount() const;
+ std::size_t GetDataCount() const;
+
+private:
+ void Setup(std::size_t info_count, std::size_t data_count, bool is_splitter_bug_fixed);
+ bool UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
+ std::size_t& bytes_read, s32 in_splitter_count);
+ bool UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
+ std::size_t& bytes_read, s32 in_data_count);
+ bool RecomposeDestination(ServerSplitterInfo& info, SplitterInfo::InInfoPrams& header,
+ const std::vector<u8>& input, const std::size_t& input_offset);
+
+ std::vector<ServerSplitterInfo> infos{};
+ std::vector<ServerSplitterDestinationData> datas{};
+
+ std::size_t info_count{};
+ std::size_t data_count{};
+ bool bug_fixed{};
+};
+} // namespace AudioCore
diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp
index 7be5d5087..cb33926bc 100644
--- a/src/audio_core/stream.cpp
+++ b/src/audio_core/stream.cpp
@@ -104,11 +104,7 @@ void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
- const auto time_stretch_delta = Settings::values.enable_audio_stretching.GetValue()
- ? std::chrono::nanoseconds::zero()
- : ns_late;
- const auto future_time = GetBufferReleaseNS(*active_buffer) - time_stretch_delta;
- core_timing.ScheduleEvent(future_time, release_event, {});
+ core_timing.ScheduleEvent(GetBufferReleaseNS(*active_buffer) - ns_late, release_event, {});
}
void Stream::ReleaseActiveBuffer(std::chrono::nanoseconds ns_late) {
diff --git a/src/audio_core/voice_context.cpp b/src/audio_core/voice_context.cpp
new file mode 100644
index 000000000..1d8f69844
--- /dev/null
+++ b/src/audio_core/voice_context.cpp
@@ -0,0 +1,526 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/behavior_info.h"
+#include "audio_core/voice_context.h"
+#include "core/memory.h"
+
+namespace AudioCore {
+
+ServerVoiceChannelResource::ServerVoiceChannelResource(s32 id) : id(id) {}
+ServerVoiceChannelResource::~ServerVoiceChannelResource() = default;
+
+bool ServerVoiceChannelResource::InUse() const {
+ return in_use;
+}
+
+float ServerVoiceChannelResource::GetCurrentMixVolumeAt(std::size_t i) const {
+ ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
+ return mix_volume.at(i);
+}
+
+float ServerVoiceChannelResource::GetLastMixVolumeAt(std::size_t i) const {
+ ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
+ return last_mix_volume.at(i);
+}
+
+void ServerVoiceChannelResource::Update(VoiceChannelResource::InParams& in_params) {
+ in_use = in_params.in_use;
+ // Update our mix volumes only if it's in use
+ if (in_params.in_use) {
+ mix_volume = in_params.mix_volume;
+ }
+}
+
+void ServerVoiceChannelResource::UpdateLastMixVolumes() {
+ last_mix_volume = mix_volume;
+}
+
+const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
+ServerVoiceChannelResource::GetCurrentMixVolume() const {
+ return mix_volume;
+}
+
+const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
+ServerVoiceChannelResource::GetLastMixVolume() const {
+ return last_mix_volume;
+}
+
+ServerVoiceInfo::ServerVoiceInfo() {
+ Initialize();
+}
+ServerVoiceInfo::~ServerVoiceInfo() = default;
+
+void ServerVoiceInfo::Initialize() {
+ in_params.in_use = false;
+ in_params.node_id = 0;
+ in_params.id = 0;
+ in_params.current_playstate = ServerPlayState::Stop;
+ in_params.priority = 255;
+ in_params.sample_rate = 0;
+ in_params.sample_format = SampleFormat::Invalid;
+ in_params.channel_count = 0;
+ in_params.pitch = 0.0f;
+ in_params.volume = 0.0f;
+ in_params.last_volume = 0.0f;
+ in_params.biquad_filter.fill({});
+ in_params.wave_buffer_count = 0;
+ in_params.wave_bufffer_head = 0;
+ in_params.mix_id = AudioCommon::NO_MIX;
+ in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
+ in_params.additional_params_address = 0;
+ in_params.additional_params_size = 0;
+ in_params.is_new = false;
+ out_params.played_sample_count = 0;
+ out_params.wave_buffer_consumed = 0;
+ in_params.voice_drop_flag = false;
+ in_params.buffer_mapped = false;
+ in_params.wave_buffer_flush_request_count = 0;
+ in_params.was_biquad_filter_enabled.fill(false);
+
+ for (auto& wave_buffer : in_params.wave_buffer) {
+ wave_buffer.start_sample_offset = 0;
+ wave_buffer.end_sample_offset = 0;
+ wave_buffer.is_looping = false;
+ wave_buffer.end_of_stream = false;
+ wave_buffer.buffer_address = 0;
+ wave_buffer.buffer_size = 0;
+ wave_buffer.context_address = 0;
+ wave_buffer.context_size = 0;
+ wave_buffer.sent_to_dsp = true;
+ }
+
+ stored_samples.clear();
+}
+
+void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
+ BehaviorInfo& behavior_info) {
+ in_params.in_use = voice_in.is_in_use;
+ in_params.id = voice_in.id;
+ in_params.node_id = voice_in.node_id;
+ in_params.last_playstate = in_params.current_playstate;
+ switch (voice_in.play_state) {
+ case PlayState::Paused:
+ in_params.current_playstate = ServerPlayState::Paused;
+ break;
+ case PlayState::Stopped:
+ if (in_params.current_playstate != ServerPlayState::Stop) {
+ in_params.current_playstate = ServerPlayState::RequestStop;
+ }
+ break;
+ case PlayState::Started:
+ in_params.current_playstate = ServerPlayState::Play;
+ break;
+ default:
+ UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
+ break;
+ }
+
+ in_params.priority = voice_in.priority;
+ in_params.sorting_order = voice_in.sorting_order;
+ in_params.sample_rate = voice_in.sample_rate;
+ in_params.sample_format = voice_in.sample_format;
+ in_params.channel_count = voice_in.channel_count;
+ in_params.pitch = voice_in.pitch;
+ in_params.volume = voice_in.volume;
+ in_params.biquad_filter = voice_in.biquad_filter;
+ in_params.wave_buffer_count = voice_in.wave_buffer_count;
+ in_params.wave_bufffer_head = voice_in.wave_buffer_head;
+ if (behavior_info.IsFlushVoiceWaveBuffersSupported()) {
+ in_params.wave_buffer_flush_request_count += voice_in.wave_buffer_flush_request_count;
+ }
+ in_params.mix_id = voice_in.mix_id;
+ if (behavior_info.IsSplitterSupported()) {
+ in_params.splitter_info_id = voice_in.splitter_info_id;
+ } else {
+ in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
+ }
+
+ std::memcpy(in_params.voice_channel_resource_id.data(),
+ voice_in.voice_channel_resource_ids.data(),
+ sizeof(s32) * in_params.voice_channel_resource_id.size());
+
+ if (behavior_info.IsVoicePlayedSampleCountResetAtLoopPointSupported()) {
+ in_params.behavior_flags.is_played_samples_reset_at_loop_point =
+ voice_in.behavior_flags.is_played_samples_reset_at_loop_point;
+ } else {
+ in_params.behavior_flags.is_played_samples_reset_at_loop_point.Assign(0);
+ }
+ if (behavior_info.IsVoicePitchAndSrcSkippedSupported()) {
+ in_params.behavior_flags.is_pitch_and_src_skipped =
+ voice_in.behavior_flags.is_pitch_and_src_skipped;
+ } else {
+ in_params.behavior_flags.is_pitch_and_src_skipped.Assign(0);
+ }
+
+ if (voice_in.is_voice_drop_flag_clear_requested) {
+ in_params.voice_drop_flag = false;
+ }
+
+ if (in_params.additional_params_address != voice_in.additional_params_address ||
+ in_params.additional_params_size != voice_in.additional_params_size) {
+ in_params.additional_params_address = voice_in.additional_params_address;
+ in_params.additional_params_size = voice_in.additional_params_size;
+ // TODO(ogniK): Reattach buffer, do we actually need to? Maybe just signal to the DSP that
+ // our context is new
+ }
+}
+
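+ // Refreshes the server-side wave buffer copies; a brand-new voice first clears its buffers
+ // and invalidates every per-channel DSP wave-buffer slot.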
+void ServerVoiceInfo::UpdateWaveBuffers(
+ const VoiceInfo::InParams& voice_in,
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
+ BehaviorInfo& behavior_info) {
+ if (voice_in.is_new) {
+ // Initialize our wave buffers
+ for (auto& wave_buffer : in_params.wave_buffer) {
+ wave_buffer.start_sample_offset = 0;
+ wave_buffer.end_sample_offset = 0;
+ wave_buffer.is_looping = false;
+ wave_buffer.end_of_stream = false;
+ wave_buffer.buffer_address = 0;
+ wave_buffer.buffer_size = 0;
+ wave_buffer.context_address = 0;
+ wave_buffer.context_size = 0;
+ wave_buffer.sent_to_dsp = true;
+ }
+
+ // Mark all our wave buffers as invalid
+ for (std::size_t channel = 0; channel < static_cast<std::size_t>(in_params.channel_count);
+ channel++) {
+ for (auto& is_valid : voice_states[channel]->is_wave_buffer_valid) {
+ is_valid = false;
+ }
+ }
+ }
+
+ // Update our wave buffers
+ for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
+ // Assume that we have at least 1 channel voice state
+ const auto have_valid_wave_buffer = voice_states[0]->is_wave_buffer_valid[i];
+
+ UpdateWaveBuffer(in_params.wave_buffer[i], voice_in.wave_buffer[i], in_params.sample_format,
+ have_valid_wave_buffer, behavior_info);
+ }
+}
+
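+ // Accepts a single wave buffer from the guest, validating PCM16 sample offsets against the
+ // buffer size before it is queued for the DSP (ADPCM size validation is still a TODO).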
+void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
+ const WaveBuffer& in_wave_buffer, SampleFormat sample_format,
+ bool is_buffer_valid, BehaviorInfo& behavior_info) {
+ if (!is_buffer_valid && out_wavebuffer.sent_to_dsp) {
+ out_wavebuffer.buffer_address = 0;
+ out_wavebuffer.buffer_size = 0;
+ }
+
+ if (!in_wave_buffer.sent_to_server || !in_params.buffer_mapped) {
+ // Validate sample offset sizes
+ if (sample_format == SampleFormat::Pcm16) {
+ const auto buffer_size = in_wave_buffer.buffer_size;
+ if (in_wave_buffer.start_sample_offset < 0 || in_wave_buffer.end_sample_offset < 0 ||
+ (buffer_size < (sizeof(s16) * in_wave_buffer.start_sample_offset)) ||
+ (buffer_size < (sizeof(s16) * in_wave_buffer.end_sample_offset))) {
+ // TODO(ogniK): Write error info
+ return;
+ }
+ }
+ // TODO(ogniK): ADPCM Size error
+
+ out_wavebuffer.sent_to_dsp = false;
+ out_wavebuffer.start_sample_offset = in_wave_buffer.start_sample_offset;
+ out_wavebuffer.end_sample_offset = in_wave_buffer.end_sample_offset;
+ out_wavebuffer.is_looping = in_wave_buffer.is_looping;
+ out_wavebuffer.end_of_stream = in_wave_buffer.end_of_stream;
+
+ out_wavebuffer.buffer_address = in_wave_buffer.buffer_address;
+ out_wavebuffer.buffer_size = in_wave_buffer.buffer_size;
+ out_wavebuffer.context_address = in_wave_buffer.context_address;
+ out_wavebuffer.context_size = in_wave_buffer.context_size;
+ in_params.buffer_mapped =
+ in_wave_buffer.buffer_address != 0 && in_wave_buffer.buffer_size != 0;
+ // TODO(ogniK): Pool mapper attachment
+ // TODO(ogniK): IsAdpcmLoopContextBugFixed
+ }
+}
+
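+ // Reports playback progress back to the guest; a voice that is new, or still flagged as new
+ // on the server, reports zeroed status until it has been processed at least once.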
+void ServerVoiceInfo::WriteOutStatus(
+ VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states) {
+ if (voice_in.is_new) {
+ in_params.is_new = true;
+ voice_out.wave_buffer_consumed = 0;
+ voice_out.played_sample_count = 0;
+ voice_out.voice_dropped = false;
+ } else if (!in_params.is_new) {
+ voice_out.wave_buffer_consumed = voice_states[0]->wave_buffer_consumed;
+ voice_out.played_sample_count = voice_states[0]->played_sample_count;
+ voice_out.voice_dropped = in_params.voice_drop_flag;
+ } else {
+ voice_out.wave_buffer_consumed = 0;
+ voice_out.played_sample_count = 0;
+ voice_out.voice_dropped = false;
+ }
+}
+
+const ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() const {
+ return in_params;
+}
+
+ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() {
+ return in_params;
+}
+
+const ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() const {
+ return out_params;
+}
+
+ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() {
+ return out_params;
+}
+
+bool ServerVoiceInfo::ShouldSkip() const {
+ // TODO(ogniK): Handle unmapped wave buffers or parameters
+ return !in_params.in_use || (in_params.wave_buffer_count == 0) || in_params.voice_drop_flag;
+}
+
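+ // Resets a freshly created voice, collects the DSP-shared state of each of its channels and
+ // advances the parameters one command-generation pass; returns true when the voice still has
+ // work to do (valid wave buffers to play or a pending depop).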
+bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> dsp_voice_states{};
+ if (in_params.is_new) {
+ ResetResources(voice_context);
+ in_params.last_volume = in_params.volume;
+ in_params.is_new = false;
+ }
+
+ const s32 channel_count = in_params.channel_count;
+ for (s32 i = 0; i < channel_count; i++) {
+ const auto channel_resource = in_params.voice_channel_resource_id[i];
+ dsp_voice_states[i] =
+ &voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
+ }
+ return UpdateParametersForCommandGeneration(dsp_voice_states);
+}
+
+void ServerVoiceInfo::ResetResources(VoiceContext& voice_context) {
+ const s32 channel_count = in_params.channel_count;
+ for (s32 i = 0; i < channel_count; i++) {
+ const auto channel_resource = in_params.voice_channel_resource_id[i];
+ auto& dsp_state =
+ voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
+ dsp_state = {};
+ voice_context.GetChannelResource(static_cast<std::size_t>(channel_resource))
+ .UpdateLastMixVolumes();
+ }
+}
+
+bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states) {
+ const s32 channel_count = in_params.channel_count;
+ if (in_params.wave_buffer_flush_request_count > 0) {
+ FlushWaveBuffers(in_params.wave_buffer_flush_request_count, dsp_voice_states,
+ channel_count);
+ in_params.wave_buffer_flush_request_count = 0;
+ }
+
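+ // Play hands any freshly queued wave buffers to the DSP; RequestStop consumes and
+ // invalidates every buffer, clears the per-channel DSP state and then settles into Stop.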
+ switch (in_params.current_playstate) {
+ case ServerPlayState::Play: {
+ for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
+ if (!in_params.wave_buffer[i].sent_to_dsp) {
+ for (s32 channel = 0; channel < channel_count; channel++) {
+ dsp_voice_states[channel]->is_wave_buffer_valid[i] = true;
+ }
+ in_params.wave_buffer[i].sent_to_dsp = true;
+ }
+ }
+ in_params.should_depop = false;
+ return HasValidWaveBuffer(dsp_voice_states[0]);
+ }
+ case ServerPlayState::Paused:
+ case ServerPlayState::Stop: {
+ in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
+ return in_params.should_depop;
+ }
+ case ServerPlayState::RequestStop: {
+ for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
+ in_params.wave_buffer[i].sent_to_dsp = true;
+ for (s32 channel = 0; channel < channel_count; channel++) {
+ auto* dsp_state = dsp_voice_states[channel];
+
+ if (dsp_state->is_wave_buffer_valid[i]) {
+ dsp_state->wave_buffer_index =
+ (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
+ dsp_state->wave_buffer_consumed++;
+ }
+
+ dsp_state->is_wave_buffer_valid[i] = false;
+ }
+ }
+
+ for (s32 channel = 0; channel < channel_count; channel++) {
+ auto* dsp_state = dsp_voice_states[channel];
+ dsp_state->offset = 0;
+ dsp_state->played_sample_count = 0;
+ dsp_state->fraction = 0;
+ dsp_state->sample_history.fill(0);
+ dsp_state->context = {};
+ }
+
+ in_params.current_playstate = ServerPlayState::Stop;
+ in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
+ return in_params.should_depop;
+ }
+ default:
+ UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
+ }
+
+ return false;
+}
+
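+ // Marks flush_count wave buffers, starting at the current head, as consumed and invalid on
+ // every channel so the guest sees them returned without being played.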
+void ServerVoiceInfo::FlushWaveBuffers(
+ u8 flush_count, std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
+ s32 channel_count) {
+ auto wave_head = in_params.wave_buffer_head;
+
+ for (u8 i = 0; i < flush_count; i++) {
+ in_params.wave_buffer[wave_head].sent_to_dsp = true;
+ for (s32 channel = 0; channel < channel_count; channel++) {
+ auto* dsp_state = dsp_voice_states[channel];
+ dsp_state->wave_buffer_consumed++;
+ dsp_state->is_wave_buffer_valid[wave_head] = false;
+ dsp_state->wave_buffer_index =
+ (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
+ }
+ wave_head = (wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS;
+ }
+}
+
+bool ServerVoiceInfo::HasValidWaveBuffer(const VoiceState* state) const {
+ const auto& valid_wb = state->is_wave_buffer_valid;
+ return std::find(valid_wb.begin(), valid_wb.end(), true) != valid_wb.end();
+}
+
+VoiceContext::VoiceContext(std::size_t voice_count) : voice_count(voice_count) {
+ for (std::size_t i = 0; i < voice_count; i++) {
+ voice_channel_resources.emplace_back(static_cast<s32>(i));
+ sorted_voice_info.push_back(&voice_info.emplace_back());
+ voice_states.emplace_back();
+ dsp_voice_states.emplace_back();
+ }
+}
+
+VoiceContext::~VoiceContext() {
+ sorted_voice_info.clear();
+}
+
+std::size_t VoiceContext::GetVoiceCount() const {
+ return voice_count;
+}
+
+ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) {
+ ASSERT(i < voice_count);
+ return voice_channel_resources.at(i);
+}
+
+const ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) const {
+ ASSERT(i < voice_count);
+ return voice_channel_resources.at(i);
+}
+
+VoiceState& VoiceContext::GetState(std::size_t i) {
+ ASSERT(i < voice_count);
+ return voice_states.at(i);
+}
+
+const VoiceState& VoiceContext::GetState(std::size_t i) const {
+ ASSERT(i < voice_count);
+ return voice_states.at(i);
+}
+
+VoiceState& VoiceContext::GetDspSharedState(std::size_t i) {
+ ASSERT(i < voice_count);
+ return dsp_voice_states.at(i);
+}
+
+const VoiceState& VoiceContext::GetDspSharedState(std::size_t i) const {
+ ASSERT(i < voice_count);
+ return dsp_voice_states.at(i);
+}
+
+ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) {
+ ASSERT(i < voice_count);
+ return voice_info.at(i);
+}
+
+const ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) const {
+ ASSERT(i < voice_count);
+ return voice_info.at(i);
+}
+
+ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) {
+ ASSERT(i < voice_count);
+ return *sorted_voice_info.at(i);
+}
+
+const ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) const {
+ ASSERT(i < voice_count);
+ return *sorted_voice_info.at(i);
+}
+
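+ // Copies up to sample_count PCM16 samples of a single channel out of guest memory into the
+ // 32-bit work buffer, returning how many samples were actually written.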
+s32 VoiceContext::DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
+ s32 channel_count, s32 buffer_offset, s32 sample_count,
+ Core::Memory::Memory& memory) {
+ if (wave_buffer->buffer_address == 0) {
+ return 0;
+ }
+ if (wave_buffer->buffer_size == 0) {
+ return 0;
+ }
+ if (wave_buffer->end_sample_offset < wave_buffer->start_sample_offset) {
+ return 0;
+ }
+
+ const auto samples_remaining =
+ (wave_buffer->end_sample_offset - wave_buffer->start_sample_offset) - buffer_offset;
+ const auto start_offset = (wave_buffer->start_sample_offset + buffer_offset) * channel_count;
+ // start_offset counts s16 samples, so convert it to a byte offset before adding
+ const auto buffer_pos = wave_buffer->buffer_address + start_offset * sizeof(s16);
+
+ s16* buffer_data = reinterpret_cast<s16*>(memory.GetPointer(buffer_pos));
+
+ const auto samples_processed = std::min(sample_count, samples_remaining);
+
+ // Fast path: mono data needs no channel deinterleaving
+ if (channel_count == 1) {
+ for (s32 i = 0; i < samples_processed; i++) {
+ output_buffer[i] = buffer_data[i];
+ }
+ } else {
+ for (s32 i = 0; i < samples_processed; i++) {
+ output_buffer[i] = buffer_data[i * channel_count + channel];
+ }
+ }
+
+ return samples_processed;
+}
+
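+ // Rebuilds the sorted voice view, ordered by priority value (largest first) and falling back
+ // to the guest-provided sorting order on ties.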
+void VoiceContext::SortInfo() {
+ for (std::size_t i = 0; i < voice_count; i++) {
+ sorted_voice_info[i] = &voice_info[i];
+ }
+
+ std::sort(sorted_voice_info.begin(), sorted_voice_info.end(),
+ [](const ServerVoiceInfo* lhs, const ServerVoiceInfo* rhs) {
+ const auto& lhs_in = lhs->GetInParams();
+ const auto& rhs_in = rhs->GetInParams();
+ // Sort by priority
+ if (lhs_in.priority != rhs_in.priority) {
+ return lhs_in.priority > rhs_in.priority;
+ } else {
+ // If the priorities match, sort by sorting order
+ return lhs_in.sorting_order > rhs_in.sorting_order;
+ }
+ });
+}
+
+void VoiceContext::UpdateStateByDspShared() {
+ voice_states = dsp_voice_states;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/voice_context.h b/src/audio_core/voice_context.h
new file mode 100644
index 000000000..59d3d7dfb
--- /dev/null
+++ b/src/audio_core/voice_context.h
@@ -0,0 +1,296 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include "audio_core/algorithm/interpolate.h"
+#include "audio_core/codec.h"
+#include "audio_core/common.h"
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace AudioCore {
+
+class BehaviorInfo;
+class VoiceContext;
+
+enum class SampleFormat : u8 {
+ Invalid = 0,
+ Pcm8 = 1,
+ Pcm16 = 2,
+ Pcm24 = 3,
+ Pcm32 = 4,
+ PcmFloat = 5,
+ Adpcm = 6,
+};
+
+enum class PlayState : u8 {
+ Started = 0,
+ Stopped = 1,
+ Paused = 2,
+};
+
+enum class ServerPlayState {
+ Play = 0,
+ Stop = 1,
+ RequestStop = 2,
+ Paused = 3,
+};
+
+struct BiquadFilterParameter {
+ bool enabled{};
+ INSERT_PADDING_BYTES(1);
+ std::array<s16, 3> numerator{};
+ std::array<s16, 2> denominator{};
+};
+static_assert(sizeof(BiquadFilterParameter) == 0xc, "BiquadFilterParameter is an invalid size");
+
+struct WaveBuffer {
+ u64_le buffer_address{};
+ u64_le buffer_size{};
+ s32_le start_sample_offset{};
+ s32_le end_sample_offset{};
+ u8 is_looping{};
+ u8 end_of_stream{};
+ u8 sent_to_server{};
+ INSERT_PADDING_BYTES(5);
+ u64 context_address{};
+ u64 context_size{};
+ INSERT_PADDING_BYTES(8);
+};
+static_assert(sizeof(WaveBuffer) == 0x38, "WaveBuffer is an invalid size");
+
+struct ServerWaveBuffer {
+ VAddr buffer_address{};
+ std::size_t buffer_size{};
+ s32 start_sample_offset{};
+ s32 end_sample_offset{};
+ bool is_looping{};
+ bool end_of_stream{};
+ VAddr context_address{};
+ std::size_t context_size{};
+ bool sent_to_dsp{true};
+};
+
+struct BehaviorFlags {
+ BitField<0, 1, u16> is_played_samples_reset_at_loop_point;
+ BitField<1, 1, u16> is_pitch_and_src_skipped;
+};
+static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
+
+struct ADPCMContext {
+ u16 header{};
+ s16 yn1{};
+ s16 yn2{};
+};
+static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
+
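+ // Per-channel playback state for a voice; the context keeps a host copy and a copy shared
+ // with the command generator (the "DSP" side).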
+struct VoiceState {
+ s64 played_sample_count{};
+ s32 offset{};
+ s32 wave_buffer_index{};
+ std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid{};
+ s32 wave_buffer_consumed{};
+ std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history{};
+ s32 fraction{};
+ VAddr context_address{};
+ Codec::ADPCM_Coeff coeff{};
+ ADPCMContext context{};
+ std::array<s64, 2> biquad_filter_state{};
+ std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples{};
+ u32 external_context_size{};
+ bool is_external_context_used{};
+ bool voice_dropped{};
+};
+
+class VoiceChannelResource {
+public:
+ struct InParams {
+ s32_le id{};
+ std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
+ bool in_use{};
+ INSERT_PADDING_BYTES(11);
+ };
+ static_assert(sizeof(VoiceChannelResource::InParams) == 0x70, "InParams is an invalid size");
+};
+
+class ServerVoiceChannelResource {
+public:
+ explicit ServerVoiceChannelResource(s32 id);
+ ~ServerVoiceChannelResource();
+
+ bool InUse() const;
+ float GetCurrentMixVolumeAt(std::size_t i) const;
+ float GetLastMixVolumeAt(std::size_t i) const;
+ void Update(VoiceChannelResource::InParams& in_params);
+ void UpdateLastMixVolumes();
+
+ const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetCurrentMixVolume() const;
+ const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetLastMixVolume() const;
+
+private:
+ s32 id{};
+ std::array<float, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
+ std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volume{};
+ bool in_use{};
+};
+
+class VoiceInfo {
+public:
+ struct InParams {
+ s32_le id{};
+ u32_le node_id{};
+ u8 is_new{};
+ u8 is_in_use{};
+ PlayState play_state{};
+ SampleFormat sample_format{};
+ s32_le sample_rate{};
+ s32_le priority{};
+ s32_le sorting_order{};
+ s32_le channel_count{};
+ float_le pitch{};
+ float_le volume{};
+ std::array<BiquadFilterParameter, 2> biquad_filter{};
+ s32_le wave_buffer_count{};
+ s16_le wave_buffer_head{};
+ INSERT_PADDING_BYTES(6);
+ u64_le additional_params_address{};
+ u64_le additional_params_size{};
+ s32_le mix_id{};
+ s32_le splitter_info_id{};
+ std::array<WaveBuffer, 4> wave_buffer{};
+ std::array<u32_le, 6> voice_channel_resource_ids{};
+ // TODO(ogniK): Remaining flags
+ u8 is_voice_drop_flag_clear_requested{};
+ u8 wave_buffer_flush_request_count{};
+ INSERT_PADDING_BYTES(2);
+ BehaviorFlags behavior_flags{};
+ INSERT_PADDING_BYTES(16);
+ };
+ static_assert(sizeof(VoiceInfo::InParams) == 0x170, "InParams is an invalid size");
+
+ struct OutParams {
+ u64_le played_sample_count{};
+ u32_le wave_buffer_consumed{};
+ u8 voice_dropped{};
+ INSERT_PADDING_BYTES(3);
+ };
+ static_assert(sizeof(VoiceInfo::OutParams) == 0x10, "OutParams is an invalid size");
+};
+
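+ // Server-side mirror of a guest voice: holds the parameters consumed during command
+ // generation plus the status written back to the guest.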
+class ServerVoiceInfo {
+public:
+ struct InParams {
+ bool in_use{};
+ bool is_new{};
+ bool should_depop{};
+ SampleFormat sample_format{};
+ s32 sample_rate{};
+ s32 channel_count{};
+ s32 id{};
+ s32 node_id{};
+ s32 mix_id{};
+ ServerPlayState current_playstate{};
+ ServerPlayState last_playstate{};
+ s32 priority{};
+ s32 sorting_order{};
+ float pitch{};
+ float volume{};
+ float last_volume{};
+ std::array<BiquadFilterParameter, AudioCommon::MAX_BIQUAD_FILTERS> biquad_filter{};
+ s32 wave_buffer_count{};
+ s16 wave_buffer_head{};
+ INSERT_PADDING_BYTES(2);
+ BehaviorFlags behavior_flags{};
+ VAddr additional_params_address{};
+ std::size_t additional_params_size{};
+ std::array<ServerWaveBuffer, AudioCommon::MAX_WAVE_BUFFERS> wave_buffer{};
+ std::array<s32, AudioCommon::MAX_CHANNEL_COUNT> voice_channel_resource_id{};
+ s32 splitter_info_id{};
+ u8 wave_buffer_flush_request_count{};
+ bool voice_drop_flag{};
+ bool buffer_mapped{};
+ std::array<bool, AudioCommon::MAX_BIQUAD_FILTERS> was_biquad_filter_enabled{};
+ };
+
+ struct OutParams {
+ s64 played_sample_count{};
+ s32 wave_buffer_consumed{};
+ };
+
+ ServerVoiceInfo();
+ ~ServerVoiceInfo();
+ void Initialize();
+ void UpdateParameters(const VoiceInfo::InParams& voice_in, BehaviorInfo& behavior_info);
+ void UpdateWaveBuffers(const VoiceInfo::InParams& voice_in,
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
+ BehaviorInfo& behavior_info);
+ void UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer, const WaveBuffer& in_wave_buffer,
+ SampleFormat sample_format, bool is_buffer_valid,
+ BehaviorInfo& behavior_info);
+ void WriteOutStatus(VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states);
+
+ const InParams& GetInParams() const;
+ InParams& GetInParams();
+
+ const OutParams& GetOutParams() const;
+ OutParams& GetOutParams();
+
+ bool ShouldSkip() const;
+ bool UpdateForCommandGeneration(VoiceContext& voice_context);
+ void ResetResources(VoiceContext& voice_context);
+ bool UpdateParametersForCommandGeneration(
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states);
+ void FlushWaveBuffers(u8 flush_count,
+ std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
+ s32 channel_count);
+
+private:
+ std::vector<s16> stored_samples;
+ InParams in_params{};
+ OutParams out_params{};
+
+ bool HasValidWaveBuffer(const VoiceState* state) const;
+};
+
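+ // Owns every voice: the server-side infos, the per-channel resources, and both the host and
+ // DSP-shared copies of the voice states.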
+class VoiceContext {
+public:
+ explicit VoiceContext(std::size_t voice_count);
+ ~VoiceContext();
+
+ std::size_t GetVoiceCount() const;
+ ServerVoiceChannelResource& GetChannelResource(std::size_t i);
+ const ServerVoiceChannelResource& GetChannelResource(std::size_t i) const;
+ VoiceState& GetState(std::size_t i);
+ const VoiceState& GetState(std::size_t i) const;
+ VoiceState& GetDspSharedState(std::size_t i);
+ const VoiceState& GetDspSharedState(std::size_t i) const;
+ ServerVoiceInfo& GetInfo(std::size_t i);
+ const ServerVoiceInfo& GetInfo(std::size_t i) const;
+ ServerVoiceInfo& GetSortedInfo(std::size_t i);
+ const ServerVoiceInfo& GetSortedInfo(std::size_t i) const;
+
+ s32 DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
+ s32 channel_count, s32 buffer_offset, s32 sample_count,
+ Core::Memory::Memory& memory);
+ void SortInfo();
+ void UpdateStateByDspShared();
+
+private:
+ std::size_t voice_count{};
+ std::vector<ServerVoiceChannelResource> voice_channel_resources{};
+ std::vector<VoiceState> voice_states{};
+ std::vector<VoiceState> dsp_voice_states{};
+ std::vector<ServerVoiceInfo> voice_info{};
+ std::vector<ServerVoiceInfo*> sorted_voice_info{};
+};
+
+} // namespace AudioCore