Diffstat
-rw-r--r--  src/audio_core/renderer/adsp/audio_renderer.cpp | 5
-rw-r--r--  src/audio_core/renderer/system_manager.cpp | 9
-rw-r--r--  src/audio_core/renderer/system_manager.h | 10
-rw-r--r--  src/audio_core/sink/sink_stream.cpp | 7
-rw-r--r--  src/core/core.cpp | 3
-rw-r--r--  src/core/file_sys/romfs.cpp | 3
-rw-r--r--  src/core/file_sys/vfs_concat.cpp | 161
-rw-r--r--  src/core/file_sys/vfs_concat.h | 28
-rw-r--r--  src/core/hid/emulated_controller.cpp | 9
-rw-r--r--  src/core/hle/service/nfc/common/amiibo_crypto.cpp | 3
-rw-r--r--  src/core/hle/service/nfc/common/device.cpp | 75
-rw-r--r--  src/core/hle/service/nfc/common/device.h | 3
-rw-r--r--  src/input_common/drivers/joycon.cpp | 8
-rw-r--r--  src/input_common/helpers/joycon_driver.cpp | 20
-rw-r--r--  src/input_common/helpers/joycon_driver.h | 1
-rw-r--r--  src/input_common/helpers/joycon_protocol/joycon_types.h | 50
-rw-r--r--  src/input_common/helpers/joycon_protocol/nfc.cpp | 332
-rw-r--r--  src/input_common/helpers/joycon_protocol/nfc.h | 27
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp | 2
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.cpp | 4
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 291
-rw-r--r--  src/video_core/buffer_cache/buffer_cache_base.h | 141
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 2
-rw-r--r--  src/video_core/texture_cache/image_base.cpp | 7
-rw-r--r--  src/video_core/texture_cache/image_base.h | 2
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 148
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h | 17
-rw-r--r--  src/video_core/texture_cache/util.cpp | 15
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp | 5
-rw-r--r--  src/yuzu/game_list.cpp | 4
-rw-r--r--  src/yuzu/game_list.h | 1
-rw-r--r--  src/yuzu/main.cpp | 20
-rw-r--r--  src/yuzu/main.h | 1
-rw-r--r--  src/yuzu_cmd/yuzu.cpp | 4
34 files changed, 958 insertions, 460 deletions
diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp
index 503f40349..1cbeed302 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.cpp
+++ b/src/audio_core/renderer/adsp/audio_renderer.cpp
@@ -154,6 +154,11 @@ void AudioRenderer::ThreadFunc() {
return;
case RenderMessage::AudioRenderer_Render: {
+ if (system.IsShuttingDown()) [[unlikely]] {
+ std::this_thread::sleep_for(std::chrono::milliseconds(5));
+ mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_RenderResponse);
+ continue;
+ }
std::array<bool, MaxRendererSessions> buffers_reset{};
std::array<u64, MaxRendererSessions> render_times_taken{};
const auto start_time{system.CoreTiming().GetClockTicks()};
diff --git a/src/audio_core/renderer/system_manager.cpp b/src/audio_core/renderer/system_manager.cpp
index 07d8ed093..300ecdbf1 100644
--- a/src/audio_core/renderer/system_manager.cpp
+++ b/src/audio_core/renderer/system_manager.cpp
@@ -27,7 +27,7 @@ bool SystemManager::InitializeUnsafe() {
if (!active) {
if (adsp.Start()) {
active = true;
- thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(); });
+ thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(stop_token); });
}
}
@@ -39,8 +39,7 @@ void SystemManager::Stop() {
return;
}
active = false;
- update.store(true);
- update.notify_all();
+ thread.request_stop();
thread.join();
adsp.Stop();
}
@@ -85,12 +84,12 @@ bool SystemManager::Remove(System& system_) {
return true;
}
-void SystemManager::ThreadFunc() {
+void SystemManager::ThreadFunc(std::stop_token stop_token) {
static constexpr char name[]{"AudioRenderSystemManager"};
MicroProfileOnThreadCreate(name);
Common::SetCurrentThreadName(name);
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
- while (active) {
+ while (active && !stop_token.stop_requested()) {
{
std::scoped_lock l{mutex1};
diff --git a/src/audio_core/renderer/system_manager.h b/src/audio_core/renderer/system_manager.h
index 1f0bbd8b4..9681fd121 100644
--- a/src/audio_core/renderer/system_manager.h
+++ b/src/audio_core/renderer/system_manager.h
@@ -66,13 +66,7 @@ private:
/**
* Main thread responsible for command generation.
*/
- void ThreadFunc();
-
- enum class StreamState {
- Filling,
- Steady,
- Draining,
- };
+ void ThreadFunc(std::stop_token stop_token);
/// Core system
Core::System& core;
@@ -90,8 +84,6 @@ private:
ADSP::ADSP& adsp;
/// AudioRenderer mailbox for communication
ADSP::AudioRenderer_Mailbox* mailbox{};
- /// Atomic for main thread to wait on
- std::atomic<bool> update{};
};
} // namespace AudioCore::AudioRenderer
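The system_manager hunks above drop the hand-written atomic wake flag in favour of std::jthread's built-in cancellation: Stop() calls request_stop() and the loop checks the stop_token handed to it. A minimal sketch of that pattern on its own, with placeholder names rather than the yuzu classes:

    #include <chrono>
    #include <stop_token>
    #include <thread>

    class Worker {
    public:
        void Start() {
            // std::jthread supplies the std::stop_token tied to this thread's
            // stop source as the lambda's first argument.
            thread = std::jthread([this](std::stop_token stop) { Run(stop); });
        }

        void Stop() {
            thread.request_stop(); // Loop exits on its next check.
            thread.join();
        }

    private:
        void Run(std::stop_token stop) {
            while (!stop.stop_requested()) {
                // ... do one unit of work, then pace the loop ...
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            }
        }

        std::jthread thread;
    };

std::jthread also requests stop and joins in its destructor, so an explicit Stop() is only needed when shutdown has to happen before the owner is destroyed.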
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp
index 13ba26e74..2331aaff9 100644
--- a/src/audio_core/sink/sink_stream.cpp
+++ b/src/audio_core/sink/sink_stream.cpp
@@ -271,8 +271,11 @@ u64 SinkStream::GetExpectedPlayedSampleCount() {
void SinkStream::WaitFreeSpace() {
std::unique_lock lk{release_mutex};
- release_cv.wait(
- lk, [this]() { return queued_buffers < max_queue_size || system.IsShuttingDown(); });
+ release_cv.wait_for(lk, std::chrono::milliseconds(5),
+ [this]() { return queued_buffers < max_queue_size; });
+ if (queued_buffers > max_queue_size + 3) {
+ release_cv.wait(lk, [this]() { return queued_buffers < max_queue_size; });
+ }
}
} // namespace AudioCore::Sink
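The WaitFreeSpace change above replaces an indefinite wait with a 5 ms bounded wait on the usual predicate, falling back to a blocking wait only once the queue has overshot its cap. A generic sketch of that bounded-wait-then-hard-backpressure shape, using placeholder types and limits rather than the audio_core ones:

    #include <chrono>
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    struct BoundedQueue {
        std::mutex mutex;
        std::condition_variable cv;     // Notified by the consumer as it drains.
        std::size_t queued = 0;
        std::size_t max_queue = 5;

        void WaitFreeSpace() {
            std::unique_lock lk{mutex};
            // Best effort: give the consumer a short window to make room.
            cv.wait_for(lk, std::chrono::milliseconds(5),
                        [this] { return queued < max_queue; });
            // Hard limit: only block indefinitely once well past the cap,
            // so a stalled consumer cannot grow the queue without bound.
            if (queued > max_queue + 3) {
                cv.wait(lk, [this] { return queued < max_queue; });
            }
        }
    };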
diff --git a/src/core/core.cpp b/src/core/core.cpp
index b5f62690e..4406ae30e 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -117,8 +117,7 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
return nullptr;
}
- return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(std::move(concat),
- dir->GetName());
+ return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(concat, dir->GetName());
}
if (Common::FS::IsDir(path)) {
diff --git a/src/core/file_sys/romfs.cpp b/src/core/file_sys/romfs.cpp
index ddcfe5980..fb5683a6b 100644
--- a/src/core/file_sys/romfs.cpp
+++ b/src/core/file_sys/romfs.cpp
@@ -140,7 +140,8 @@ VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
return nullptr;
RomFSBuildContext ctx{dir, ext};
- return ConcatenatedVfsFile::MakeConcatenatedFile(0, ctx.Build(), dir->GetName());
+ auto file_map = ctx.Build();
+ return ConcatenatedVfsFile::MakeConcatenatedFile(0, file_map, dir->GetName());
}
} // namespace FileSys
diff --git a/src/core/file_sys/vfs_concat.cpp b/src/core/file_sys/vfs_concat.cpp
index d23623aa0..853b893a1 100644
--- a/src/core/file_sys/vfs_concat.cpp
+++ b/src/core/file_sys/vfs_concat.cpp
@@ -10,84 +10,105 @@
namespace FileSys {
-static bool VerifyConcatenationMapContinuity(const std::multimap<u64, VirtualFile>& map) {
- const auto last_valid = --map.end();
- for (auto iter = map.begin(); iter != last_valid;) {
- const auto old = iter++;
- if (old->first + old->second->GetSize() != iter->first) {
+ConcatenatedVfsFile::ConcatenatedVfsFile(ConcatenationMap&& concatenation_map_, std::string&& name_)
+ : concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) {
+ DEBUG_ASSERT(this->VerifyContinuity());
+}
+
+bool ConcatenatedVfsFile::VerifyContinuity() const {
+ u64 last_offset = 0;
+ for (auto& entry : concatenation_map) {
+ if (entry.offset != last_offset) {
return false;
}
- }
-
- return map.begin()->first == 0;
-}
-ConcatenatedVfsFile::ConcatenatedVfsFile(std::vector<VirtualFile> files_, std::string name_)
- : name(std::move(name_)) {
- std::size_t next_offset = 0;
- for (const auto& file : files_) {
- files.emplace(next_offset, file);
- next_offset += file->GetSize();
+ last_offset = entry.offset + entry.file->GetSize();
}
-}
-ConcatenatedVfsFile::ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files_, std::string name_)
- : files(std::move(files_)), name(std::move(name_)) {
- ASSERT(VerifyConcatenationMapContinuity(files));
+ return true;
}
ConcatenatedVfsFile::~ConcatenatedVfsFile() = default;
-VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::vector<VirtualFile> files,
- std::string name) {
- if (files.empty())
+VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualFile>& files,
+ std::string&& name) {
+ // Fold trivial cases.
+ if (files.empty()) {
return nullptr;
- if (files.size() == 1)
- return files[0];
+ }
+ if (files.size() == 1) {
+ return files.front();
+ }
+
+ // Make the concatenation map from the input.
+ std::vector<ConcatenationEntry> concatenation_map;
+ concatenation_map.reserve(files.size());
+ u64 last_offset = 0;
+
+ for (auto& file : files) {
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = last_offset,
+ .file = file,
+ });
+
+ last_offset += file->GetSize();
+ }
- return VirtualFile(new ConcatenatedVfsFile(std::move(files), std::move(name)));
+ return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
- std::multimap<u64, VirtualFile> files,
- std::string name) {
- if (files.empty())
+ const std::multimap<u64, VirtualFile>& files,
+ std::string&& name) {
+ // Fold trivial cases.
+ if (files.empty()) {
return nullptr;
- if (files.size() == 1)
+ }
+ if (files.size() == 1) {
return files.begin()->second;
+ }
- const auto last_valid = --files.end();
- for (auto iter = files.begin(); iter != last_valid;) {
- const auto old = iter++;
- if (old->first + old->second->GetSize() != iter->first) {
- files.emplace(old->first + old->second->GetSize(),
- std::make_shared<StaticVfsFile>(filler_byte, iter->first - old->first -
- old->second->GetSize()));
+ // Make the concatenation map from the input.
+ std::vector<ConcatenationEntry> concatenation_map;
+
+ concatenation_map.reserve(files.size());
+ u64 last_offset = 0;
+
+ // Iteration of a multimap is ordered, so offset will be strictly non-decreasing.
+ for (auto& [offset, file] : files) {
+ if (offset > last_offset) {
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = last_offset,
+ .file = std::make_shared<StaticVfsFile>(filler_byte, offset - last_offset),
+ });
}
- }
- // Ensure the map starts at offset 0 (start of file), otherwise pad to fill.
- if (files.begin()->first != 0)
- files.emplace(0, std::make_shared<StaticVfsFile>(filler_byte, files.begin()->first));
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = offset,
+ .file = file,
+ });
+
+ last_offset = offset + file->GetSize();
+ }
- return VirtualFile(new ConcatenatedVfsFile(std::move(files), std::move(name)));
+ return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}
std::string ConcatenatedVfsFile::GetName() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return "";
}
if (!name.empty()) {
return name;
}
- return files.begin()->second->GetName();
+ return concatenation_map.front().file->GetName();
}
std::size_t ConcatenatedVfsFile::GetSize() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return 0;
}
- return files.rbegin()->first + files.rbegin()->second->GetSize();
+ return concatenation_map.back().offset + concatenation_map.back().file->GetSize();
}
bool ConcatenatedVfsFile::Resize(std::size_t new_size) {
@@ -95,10 +116,10 @@ bool ConcatenatedVfsFile::Resize(std::size_t new_size) {
}
VirtualDir ConcatenatedVfsFile::GetContainingDirectory() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return nullptr;
}
- return files.begin()->second->GetContainingDirectory();
+ return concatenation_map.front().file->GetContainingDirectory();
}
bool ConcatenatedVfsFile::IsWritable() const {
@@ -110,25 +131,45 @@ bool ConcatenatedVfsFile::IsReadable() const {
}
std::size_t ConcatenatedVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
- auto entry = --files.end();
- for (auto iter = files.begin(); iter != files.end(); ++iter) {
- if (iter->first > offset) {
- entry = --iter;
+ const ConcatenationEntry key{
+ .offset = offset,
+ .file = nullptr,
+ };
+
+ // Read nothing if the map is empty.
+ if (concatenation_map.empty()) {
+ return 0;
+ }
+
+ // Binary search to find the iterator to the first position we can check.
+ // It must exist, since we are not empty and are comparing unsigned integers.
+ auto it = std::prev(std::upper_bound(concatenation_map.begin(), concatenation_map.end(), key));
+ u64 cur_length = length;
+ u64 cur_offset = offset;
+
+ while (cur_length > 0 && it != concatenation_map.end()) {
+ // Check if we can read the file at this position.
+ const auto& file = it->file;
+ const u64 file_offset = it->offset;
+ const u64 file_size = file->GetSize();
+
+ if (cur_offset >= file_offset + file_size) {
+ // Entirely out of bounds read.
break;
}
- }
- if (entry->first + entry->second->GetSize() <= offset)
- return 0;
+ // Read the file at this position.
+ const u64 intended_read_size = std::min<u64>(cur_length, file_size);
+ const u64 actual_read_size =
+ file->Read(data + (cur_offset - offset), intended_read_size, cur_offset - file_offset);
- const auto read_in =
- std::min<u64>(entry->first + entry->second->GetSize() - offset, entry->second->GetSize());
- if (length > read_in) {
- return entry->second->Read(data, read_in, offset - entry->first) +
- Read(data + read_in, length - read_in, offset + read_in);
+ // Update tracking.
+ cur_offset += actual_read_size;
+ cur_length -= actual_read_size;
+ it++;
}
- return entry->second->Read(data, std::min<u64>(read_in, length), offset - entry->first);
+ return cur_offset - offset;
}
std::size_t ConcatenatedVfsFile::Write(const u8* data, std::size_t length, std::size_t offset) {
diff --git a/src/core/file_sys/vfs_concat.h b/src/core/file_sys/vfs_concat.h
index 9be0261b6..6b329d545 100644
--- a/src/core/file_sys/vfs_concat.h
+++ b/src/core/file_sys/vfs_concat.h
@@ -3,6 +3,7 @@
#pragma once
+#include <compare>
#include <map>
#include <memory>
#include "core/file_sys/vfs.h"
@@ -12,19 +13,33 @@ namespace FileSys {
// Class that wraps multiple vfs files and concatenates them, making reads seamless. Currently
// read-only.
class ConcatenatedVfsFile : public VfsFile {
- explicit ConcatenatedVfsFile(std::vector<VirtualFile> files, std::string name_);
- explicit ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files, std::string name_);
+private:
+ struct ConcatenationEntry {
+ u64 offset;
+ VirtualFile file;
+
+ auto operator<=>(const ConcatenationEntry& other) const {
+ return this->offset <=> other.offset;
+ }
+ };
+ using ConcatenationMap = std::vector<ConcatenationEntry>;
+
+ explicit ConcatenatedVfsFile(std::vector<ConcatenationEntry>&& concatenation_map,
+ std::string&& name);
+ bool VerifyContinuity() const;
public:
~ConcatenatedVfsFile() override;
/// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases.
- static VirtualFile MakeConcatenatedFile(std::vector<VirtualFile> files, std::string name);
+ static VirtualFile MakeConcatenatedFile(const std::vector<VirtualFile>& files,
+ std::string&& name);
/// Convenience function that turns a map of offsets to files into a concatenated file, filling
/// gaps with a given filler byte.
- static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::multimap<u64, VirtualFile> files,
- std::string name);
+ static VirtualFile MakeConcatenatedFile(u8 filler_byte,
+ const std::multimap<u64, VirtualFile>& files,
+ std::string&& name);
std::string GetName() const override;
std::size_t GetSize() const override;
@@ -37,8 +52,7 @@ public:
bool Rename(std::string_view new_name) override;
private:
- // Maps starting offset to file -- more efficient.
- std::multimap<u64, VirtualFile> files;
+ ConcatenationMap concatenation_map;
std::string name;
};
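Taken together, the vfs_concat changes above replace the std::multimap with a sorted vector of {offset, file} entries, and Read() now binary-searches for the entry covering the requested offset and walks forward across entry boundaries. A self-contained sketch of that lookup, assuming contiguous entries and using plain byte buffers in place of VirtualFile:

    #include <algorithm>
    #include <compare>
    #include <cstdint>
    #include <cstring>
    #include <iterator>
    #include <vector>

    struct Entry {
        std::uint64_t offset;            // Start of this piece in the virtual file.
        std::vector<std::uint8_t> data;  // Stand-in for VirtualFile.

        auto operator<=>(const Entry& other) const { return offset <=> other.offset; }
    };

    // Requires `map` to be sorted by offset and contiguous from offset 0,
    // mirroring what VerifyContinuity() checks in the hunk above.
    std::size_t Read(const std::vector<Entry>& map, std::uint8_t* out,
                     std::size_t length, std::uint64_t offset) {
        if (map.empty()) {
            return 0;
        }
        const Entry key{.offset = offset, .data = {}};
        // upper_bound yields the first entry starting past `offset`; the
        // previous entry is the one that may contain it.
        auto it = std::prev(std::upper_bound(map.begin(), map.end(), key));
        std::uint64_t cur_offset = offset;
        std::size_t remaining = length;
        while (remaining > 0 && it != map.end()) {
            const std::uint64_t end = it->offset + it->data.size();
            if (cur_offset >= end) {
                break; // Read starts past the mapped range.
            }
            const std::size_t copy = static_cast<std::size_t>(
                std::min<std::uint64_t>(remaining, end - cur_offset));
            std::memcpy(out + (cur_offset - offset),
                        it->data.data() + (cur_offset - it->offset), copy);
            cur_offset += copy;
            remaining -= copy;
            ++it;
        }
        return static_cast<std::size_t>(cur_offset - offset);
    }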
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 366880711..bbfea7117 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -1283,9 +1283,14 @@ bool EmulatedController::HasNfc() const {
}
bool EmulatedController::WriteNfc(const std::vector<u8>& data) {
- auto& nfc_output_device = output_devices[3];
+ auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
+ auto& nfc_virtual_output_device = output_devices[3];
+
+ if (nfc_output_device->SupportsNfc() != Common::Input::NfcState::NotSupported) {
+ return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
+ }
- return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
+ return nfc_virtual_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
}
void EmulatedController::SetLedPattern() {
diff --git a/src/core/hle/service/nfc/common/amiibo_crypto.cpp b/src/core/hle/service/nfc/common/amiibo_crypto.cpp
index f3901ee8d..b2bcb68c3 100644
--- a/src/core/hle/service/nfc/common/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfc/common/amiibo_crypto.cpp
@@ -52,9 +52,6 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
if (ntag_file.compability_container != 0xEEFF10F1U) {
return false;
}
- if (amiibo_data.constant_value != 0xA5) {
- return false;
- }
if (amiibo_data.model_info.tag_type != NFC::PackedTagType::Type2) {
return false;
}
diff --git a/src/core/hle/service/nfc/common/device.cpp b/src/core/hle/service/nfc/common/device.cpp
index 322bde2ed..0bd7900e1 100644
--- a/src/core/hle/service/nfc/common/device.cpp
+++ b/src/core/hle/service/nfc/common/device.cpp
@@ -119,18 +119,31 @@ bool NfcDevice::LoadNfcTag(std::span<const u8> data) {
memcpy(&tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
is_plain_amiibo = NFP::AmiiboCrypto::IsAmiiboValid(tag_data);
+ is_write_protected = false;
+ device_state = DeviceState::TagFound;
+ deactivate_event->GetReadableEvent().Clear();
+ activate_event->Signal();
+
+ // Fallback for plain amiibos
if (is_plain_amiibo) {
- encrypted_tag_data = NFP::AmiiboCrypto::EncodedDataToNfcData(tag_data);
LOG_INFO(Service_NFP, "Using plain amiibo");
- } else {
- tag_data = {};
+ encrypted_tag_data = NFP::AmiiboCrypto::EncodedDataToNfcData(tag_data);
+ return true;
+ }
+
+ // Fallback for encrypted amiibos without keys
+ if (!NFP::AmiiboCrypto::IsKeyAvailable()) {
+ LOG_INFO(Service_NFC, "Loading amiibo without keys");
memcpy(&encrypted_tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
+ BuildAmiiboWithoutKeys();
+ is_plain_amiibo = true;
+ is_write_protected = true;
+ return true;
}
- device_state = DeviceState::TagFound;
- deactivate_event->GetReadableEvent().Clear();
- activate_event->Signal();
+ tag_data = {};
+ memcpy(&encrypted_tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
return true;
}
@@ -346,23 +359,15 @@ Result NfcDevice::Mount(NFP::ModelType model_type, NFP::MountTarget mount_target
return ResultWrongDeviceState;
}
- // The loaded amiibo is not encrypted
- if (is_plain_amiibo) {
- device_state = DeviceState::TagMounted;
- mount_target = mount_target_;
- return ResultSuccess;
- }
-
if (!NFP::AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) {
LOG_ERROR(Service_NFP, "Not an amiibo");
return ResultNotAnAmiibo;
}
- // Mark amiibos as read only when keys are missing
- if (!NFP::AmiiboCrypto::IsKeyAvailable()) {
- LOG_ERROR(Service_NFP, "No keys detected");
+ // The loaded amiibo is not encrypted
+ if (is_plain_amiibo) {
device_state = DeviceState::TagMounted;
- mount_target = NFP::MountTarget::Rom;
+ mount_target = mount_target_;
return ResultSuccess;
}
@@ -421,11 +426,11 @@ Result NfcDevice::Flush() {
tag_data.write_counter++;
- FlushWithBreak(NFP::BreakType::Normal);
+ const auto result = FlushWithBreak(NFP::BreakType::Normal);
is_data_moddified = false;
- return ResultSuccess;
+ return result;
}
Result NfcDevice::FlushDebug() {
@@ -444,11 +449,11 @@ Result NfcDevice::FlushDebug() {
tag_data.write_counter++;
- FlushWithBreak(NFP::BreakType::Normal);
+ const auto result = FlushWithBreak(NFP::BreakType::Normal);
is_data_moddified = false;
- return ResultSuccess;
+ return result;
}
Result NfcDevice::FlushWithBreak(NFP::BreakType break_type) {
@@ -457,6 +462,11 @@ Result NfcDevice::FlushWithBreak(NFP::BreakType break_type) {
return ResultWrongDeviceState;
}
+ if (is_write_protected) {
+ LOG_ERROR(Service_NFP, "No keys available skipping write request");
+ return ResultSuccess;
+ }
+
std::vector<u8> data(sizeof(NFP::EncryptedNTAG215File));
if (is_plain_amiibo) {
memcpy(data.data(), &tag_data, sizeof(tag_data));
@@ -1033,7 +1043,6 @@ Result NfcDevice::GetAll(NFP::NfpData& data) const {
}
NFP::CommonInfo common_info{};
- Service::Mii::MiiManager manager;
const u64 application_id = tag_data.application_id;
GetCommonInfo(common_info);
@@ -1249,6 +1258,28 @@ void NfcDevice::UpdateRegisterInfoCrc() {
tag_data.register_info_crc = crc.checksum();
}
+void NfcDevice::BuildAmiiboWithoutKeys() {
+ Service::Mii::MiiManager manager;
+ auto& settings = tag_data.settings;
+
+ tag_data = NFP::AmiiboCrypto::NfcDataToEncodedData(encrypted_tag_data);
+
+ // Common info
+ tag_data.write_counter = 0;
+ tag_data.amiibo_version = 0;
+ settings.write_date = GetAmiiboDate(GetCurrentPosixTime());
+
+ // Register info
+ SetAmiiboName(settings, {'y', 'u', 'z', 'u', 'A', 'm', 'i', 'i', 'b', 'o'});
+ settings.settings.font_region.Assign(0);
+ settings.init_date = GetAmiiboDate(GetCurrentPosixTime());
+ tag_data.owner_mii = manager.BuildFromStoreData(manager.BuildDefault(0));
+
+ // Admin info
+ settings.settings.amiibo_initialized.Assign(1);
+ settings.settings.appdata_initialized.Assign(0);
+}
+
u64 NfcDevice::GetHandle() const {
// Generate a handle based of the npad id
return static_cast<u64>(npad_id);
diff --git a/src/core/hle/service/nfc/common/device.h b/src/core/hle/service/nfc/common/device.h
index 98e1945c1..6a37e8458 100644
--- a/src/core/hle/service/nfc/common/device.h
+++ b/src/core/hle/service/nfc/common/device.h
@@ -110,6 +110,8 @@ private:
void UpdateSettingsCrc();
void UpdateRegisterInfoCrc();
+ void BuildAmiiboWithoutKeys();
+
bool is_controller_set{};
int callback_key;
const Core::HID::NpadIdType npad_id;
@@ -128,6 +130,7 @@ private:
bool is_data_moddified{};
bool is_app_area_open{};
bool is_plain_amiibo{};
+ bool is_write_protected{};
NFP::MountTarget mount_target{NFP::MountTarget::None};
NFP::NTAG215File tag_data{};
diff --git a/src/input_common/drivers/joycon.cpp b/src/input_common/drivers/joycon.cpp
index 653862a72..b2b5677c8 100644
--- a/src/input_common/drivers/joycon.cpp
+++ b/src/input_common/drivers/joycon.cpp
@@ -291,9 +291,13 @@ Common::Input::NfcState Joycons::SupportsNfc(const PadIdentifier& identifier_) c
return Common::Input::NfcState::Success;
};
-Common::Input::NfcState Joycons::WriteNfcData(const PadIdentifier& identifier_,
+Common::Input::NfcState Joycons::WriteNfcData(const PadIdentifier& identifier,
const std::vector<u8>& data) {
- return Common::Input::NfcState::NotSupported;
+ auto handle = GetHandle(identifier);
+ if (handle->WriteNfcData(data) != Joycon::DriverResult::Success) {
+ return Common::Input::NfcState::WriteFailed;
+ }
+ return Common::Input::NfcState::Success;
};
Common::Input::DriverResult Joycons::SetPollingMode(const PadIdentifier& identifier,
diff --git a/src/input_common/helpers/joycon_driver.cpp b/src/input_common/helpers/joycon_driver.cpp
index 83429a336..95106f16d 100644
--- a/src/input_common/helpers/joycon_driver.cpp
+++ b/src/input_common/helpers/joycon_driver.cpp
@@ -492,6 +492,26 @@ DriverResult JoyconDriver::SetRingConMode() {
return result;
}
+DriverResult JoyconDriver::WriteNfcData(std::span<const u8> data) {
+ std::scoped_lock lock{mutex};
+ disable_input_thread = true;
+
+ if (!supported_features.nfc) {
+ return DriverResult::NotSupported;
+ }
+ if (!nfc_protocol->IsEnabled()) {
+ return DriverResult::Disabled;
+ }
+ if (!amiibo_detected) {
+ return DriverResult::ErrorWritingData;
+ }
+
+ const auto result = nfc_protocol->WriteAmiibo(data);
+
+ disable_input_thread = false;
+ return result;
+}
+
bool JoyconDriver::IsConnected() const {
std::scoped_lock lock{mutex};
return is_connected.load();
diff --git a/src/input_common/helpers/joycon_driver.h b/src/input_common/helpers/joycon_driver.h
index 72a9e71dc..e9b2fccbb 100644
--- a/src/input_common/helpers/joycon_driver.h
+++ b/src/input_common/helpers/joycon_driver.h
@@ -49,6 +49,7 @@ public:
DriverResult SetIrMode();
DriverResult SetNfcMode();
DriverResult SetRingConMode();
+ DriverResult WriteNfcData(std::span<const u8> data);
void SetCallbacks(const JoyconCallbacks& callbacks);
diff --git a/src/input_common/helpers/joycon_protocol/joycon_types.h b/src/input_common/helpers/joycon_protocol/joycon_types.h
index 353dc744d..5007b0e18 100644
--- a/src/input_common/helpers/joycon_protocol/joycon_types.h
+++ b/src/input_common/helpers/joycon_protocol/joycon_types.h
@@ -23,6 +23,7 @@ constexpr std::array<u8, 8> DefaultVibrationBuffer{0x0, 0x1, 0x40, 0x40, 0x0, 0x
using MacAddress = std::array<u8, 6>;
using SerialNumber = std::array<u8, 15>;
+using TagUUID = std::array<u8, 7>;
enum class ControllerType : u8 {
None = 0x00,
@@ -276,12 +277,13 @@ enum class MCUPacketFlag : u8 {
LastCommandPacket = 0x08,
};
-enum class NFCReadCommand : u8 {
+enum class NFCCommand : u8 {
CancelAll = 0x00,
StartPolling = 0x01,
StopPolling = 0x02,
StartWaitingRecieve = 0x04,
- Ntag = 0x06,
+ ReadNtag = 0x06,
+ WriteNtag = 0x08,
Mifare = 0x0F,
};
@@ -292,14 +294,19 @@ enum class NFCTagType : u8 {
enum class NFCPages {
Block0 = 0,
+ Block3 = 3,
Block45 = 45,
Block135 = 135,
Block231 = 231,
};
enum class NFCStatus : u8 {
+ Ready = 0x00,
+ Polling = 0x01,
LastPackage = 0x04,
+ WriteDone = 0x05,
TagLost = 0x07,
+ WriteReady = 0x09,
};
enum class IrsMode : u8 {
@@ -559,13 +566,32 @@ static_assert(sizeof(NFCReadBlockCommand) == 0x9, "NFCReadBlockCommand is an inv
struct NFCReadCommandData {
u8 unknown;
u8 uuid_length;
- u8 unknown_2;
- std::array<u8, 6> uid;
+ TagUUID uid;
NFCTagType tag_type;
NFCReadBlockCommand read_block;
};
static_assert(sizeof(NFCReadCommandData) == 0x13, "NFCReadCommandData is an invalid size");
+#pragma pack(push, 1)
+struct NFCWriteCommandData {
+ u8 unknown;
+ u8 uuid_length;
+ TagUUID uid;
+ NFCTagType tag_type;
+ u8 unknown2;
+ u8 unknown3;
+ u8 unknown4;
+ u8 unknown5;
+ u8 unknown6;
+ u8 unknown7;
+ u8 unknown8;
+ u8 magic;
+ u16_be write_count;
+ u8 amiibo_version;
+};
+static_assert(sizeof(NFCWriteCommandData) == 0x15, "NFCWriteCommandData is an invalid size");
+#pragma pack(pop)
+
struct NFCPollingCommandData {
u8 enable_mifare;
u8 unknown_1;
@@ -576,8 +602,8 @@ struct NFCPollingCommandData {
static_assert(sizeof(NFCPollingCommandData) == 0x05, "NFCPollingCommandData is an invalid size");
struct NFCRequestState {
- NFCReadCommand command_argument;
- INSERT_PADDING_BYTES(0x1);
+ NFCCommand command_argument;
+ u8 block_id;
u8 packet_id;
MCUPacketFlag packet_flag;
u8 data_length;
@@ -591,6 +617,18 @@ struct NFCRequestState {
};
static_assert(sizeof(NFCRequestState) == 0x26, "NFCRequestState is an invalid size");
+struct NFCDataChunk {
+ u8 nfc_page;
+ u8 data_size;
+ std::array<u8, 0xFF> data;
+};
+
+struct NFCWritePackage {
+ NFCWriteCommandData command_data;
+ u8 number_of_chunks;
+ std::array<NFCDataChunk, 4> data_chunks;
+};
+
struct IrsConfigure {
MCUCommand command;
MCUSubCommand sub_command;
diff --git a/src/input_common/helpers/joycon_protocol/nfc.cpp b/src/input_common/helpers/joycon_protocol/nfc.cpp
index 46c9e9489..3b7a628e5 100644
--- a/src/input_common/helpers/joycon_protocol/nfc.cpp
+++ b/src/input_common/helpers/joycon_protocol/nfc.cpp
@@ -34,6 +34,12 @@ DriverResult NfcProtocol::EnableNfc() {
result = ConfigureMCU(config);
}
+ if (result == DriverResult::Success) {
+ result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::NFC);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::Ready);
+ }
return result;
}
@@ -56,27 +62,20 @@ DriverResult NfcProtocol::StartNFCPollingMode() {
LOG_DEBUG(Input, "Start NFC pooling Mode");
ScopedSetBlocking sb(this);
DriverResult result{DriverResult::Success};
- TagFoundData tag_data{};
if (result == DriverResult::Success) {
- result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::NFC);
- }
- if (result == DriverResult::Success) {
- result = WaitUntilNfcIsReady();
- }
- if (result == DriverResult::Success) {
MCUCommandResponse output{};
result = SendStopPollingRequest(output);
}
if (result == DriverResult::Success) {
- result = WaitUntilNfcIsReady();
+ result = WaitUntilNfcIs(NFCStatus::Ready);
}
if (result == DriverResult::Success) {
MCUCommandResponse output{};
result = SendStartPollingRequest(output);
}
if (result == DriverResult::Success) {
- result = WaitUntilNfcIsPolling();
+ result = WaitUntilNfcIs(NFCStatus::Polling);
}
if (result == DriverResult::Success) {
is_enabled = true;
@@ -112,6 +111,49 @@ DriverResult NfcProtocol::ScanAmiibo(std::vector<u8>& data) {
return result;
}
+DriverResult NfcProtocol::WriteAmiibo(std::span<const u8> data) {
+ LOG_DEBUG(Input, "Write amiibo");
+ ScopedSetBlocking sb(this);
+ DriverResult result{DriverResult::Success};
+ TagUUID tag_uuid = GetTagUUID(data);
+ TagFoundData tag_data{};
+
+ if (result == DriverResult::Success) {
+ result = IsTagInRange(tag_data, 7);
+ }
+ if (result == DriverResult::Success) {
+ if (tag_data.uuid != tag_uuid) {
+ result = DriverResult::InvalidParameters;
+ }
+ }
+ if (result == DriverResult::Success) {
+ MCUCommandResponse output{};
+ result = SendStopPollingRequest(output);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::Ready);
+ }
+ if (result == DriverResult::Success) {
+ MCUCommandResponse output{};
+ result = SendStartPollingRequest(output, true);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::WriteReady);
+ }
+ if (result == DriverResult::Success) {
+ result = WriteAmiiboData(tag_uuid, data);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::WriteDone);
+ }
+ if (result == DriverResult::Success) {
+ MCUCommandResponse output{};
+ result = SendStopPollingRequest(output);
+ }
+
+ return result;
+}
+
bool NfcProtocol::HasAmiibo() {
if (update_counter++ < AMIIBO_UPDATE_DELAY) {
return true;
@@ -129,7 +171,7 @@ bool NfcProtocol::HasAmiibo() {
return result == DriverResult::Success;
}
-DriverResult NfcProtocol::WaitUntilNfcIsReady() {
+DriverResult NfcProtocol::WaitUntilNfcIs(NFCStatus status) {
constexpr std::size_t timeout_limit = 10;
MCUCommandResponse output{};
std::size_t tries = 0;
@@ -145,28 +187,7 @@ DriverResult NfcProtocol::WaitUntilNfcIsReady() {
}
} while (output.mcu_report != MCUReport::NFCState ||
(output.mcu_data[1] << 8) + output.mcu_data[0] != 0x0500 ||
- output.mcu_data[5] != 0x31 || output.mcu_data[6] != 0x00);
-
- return DriverResult::Success;
-}
-
-DriverResult NfcProtocol::WaitUntilNfcIsPolling() {
- constexpr std::size_t timeout_limit = 10;
- MCUCommandResponse output{};
- std::size_t tries = 0;
-
- do {
- auto result = SendNextPackageRequest(output, {});
-
- if (result != DriverResult::Success) {
- return result;
- }
- if (tries++ > timeout_limit) {
- return DriverResult::Timeout;
- }
- } while (output.mcu_report != MCUReport::NFCState ||
- (output.mcu_data[1] << 8) + output.mcu_data[0] != 0x0500 ||
- output.mcu_data[5] != 0x31 || output.mcu_data[6] != 0x01);
+ output.mcu_data[5] != 0x31 || output.mcu_data[6] != static_cast<u8>(status));
return DriverResult::Success;
}
@@ -188,7 +209,7 @@ DriverResult NfcProtocol::IsTagInRange(TagFoundData& data, std::size_t timeout_l
(output.mcu_data[6] != 0x09 && output.mcu_data[6] != 0x04));
data.type = output.mcu_data[12];
- data.uuid.resize(output.mcu_data[14]);
+ data.uuid_size = std::min(output.mcu_data[14], static_cast<u8>(sizeof(TagUUID)));
memcpy(data.uuid.data(), output.mcu_data.data() + 15, data.uuid.size());
return DriverResult::Success;
@@ -245,17 +266,94 @@ DriverResult NfcProtocol::GetAmiiboData(std::vector<u8>& ntag_data) {
return DriverResult::Timeout;
}
-DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output) {
+DriverResult NfcProtocol::WriteAmiiboData(const TagUUID& tag_uuid, std::span<const u8> data) {
+ constexpr std::size_t timeout_limit = 60;
+ const auto nfc_data = MakeAmiiboWritePackage(tag_uuid, data);
+ const std::vector<u8> nfc_buffer_data = SerializeWritePackage(nfc_data);
+ std::span<const u8> buffer(nfc_buffer_data);
+ MCUCommandResponse output{};
+ u8 block_id = 1;
+ u8 package_index = 0;
+ std::size_t tries = 0;
+ std::size_t current_position = 0;
+
+ LOG_INFO(Input, "Writing amiibo data");
+
+ auto result = SendWriteAmiiboRequest(output, tag_uuid);
+
+ if (result != DriverResult::Success) {
+ return result;
+ }
+
+ // Read Tag data but ignore the actual sent data
+ while (tries++ < timeout_limit) {
+ result = SendNextPackageRequest(output, package_index);
+ const auto nfc_status = static_cast<NFCStatus>(output.mcu_data[6]);
+
+ if (result != DriverResult::Success) {
+ return result;
+ }
+
+ if ((output.mcu_report == MCUReport::NFCReadData ||
+ output.mcu_report == MCUReport::NFCState) &&
+ nfc_status == NFCStatus::TagLost) {
+ return DriverResult::ErrorReadingData;
+ }
+
+ if (output.mcu_report == MCUReport::NFCReadData && output.mcu_data[1] == 0x07) {
+ package_index++;
+ continue;
+ }
+
+ if (output.mcu_report == MCUReport::NFCState && nfc_status == NFCStatus::LastPackage) {
+ LOG_INFO(Input, "Finished reading amiibo");
+ break;
+ }
+ }
+
+ // Send Data. Nfc buffer size is 31, Send the data in smaller packages
+ while (current_position < buffer.size() && tries++ < timeout_limit) {
+ const std::size_t next_position =
+ std::min(current_position + sizeof(NFCRequestState::raw_data), buffer.size());
+ const std::size_t block_size = next_position - current_position;
+ const bool is_last_packet = block_size < sizeof(NFCRequestState::raw_data);
+
+ SendWriteDataAmiiboRequest(output, block_id, is_last_packet,
+ buffer.subspan(current_position, block_size));
+
+ const auto nfc_status = static_cast<NFCStatus>(output.mcu_data[6]);
+
+ if ((output.mcu_report == MCUReport::NFCReadData ||
+ output.mcu_report == MCUReport::NFCState) &&
+ nfc_status == NFCStatus::TagLost) {
+ return DriverResult::ErrorReadingData;
+ }
+
+ // Increase position when data is confirmed by the joycon
+ if (output.mcu_report == MCUReport::NFCState &&
+ (output.mcu_data[1] << 8) + output.mcu_data[0] == 0x0500 &&
+ output.mcu_data[3] == block_id) {
+ block_id++;
+ current_position = next_position;
+ }
+ }
+
+ return result;
+}
+
+DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output,
+ bool is_second_attempt) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StartPolling,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::StartPolling,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
.data_length = sizeof(NFCPollingCommandData),
.nfc_polling =
{
- .enable_mifare = 0x01,
- .unknown_1 = 0x00,
- .unknown_2 = 0x00,
+ .enable_mifare = 0x00,
+ .unknown_1 = static_cast<u8>(is_second_attempt ? 0xe8 : 0x00),
+ .unknown_2 = static_cast<u8>(is_second_attempt ? 0x03 : 0x00),
.unknown_3 = 0x2c,
.unknown_4 = 0x01,
},
@@ -271,10 +369,11 @@ DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output) {
DriverResult NfcProtocol::SendStopPollingRequest(MCUCommandResponse& output) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StopPolling,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::StopPolling,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
- .data_length = 0,
+ .data_length = {},
.raw_data = {},
.crc = {},
};
@@ -288,10 +387,11 @@ DriverResult NfcProtocol::SendStopPollingRequest(MCUCommandResponse& output) {
DriverResult NfcProtocol::SendNextPackageRequest(MCUCommandResponse& output, u8 packet_id) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StartWaitingRecieve,
+ .command_argument = NFCCommand::StartWaitingRecieve,
+ .block_id = {},
.packet_id = packet_id,
.packet_flag = MCUPacketFlag::LastCommandPacket,
- .data_length = 0,
+ .data_length = {},
.raw_data = {},
.crc = {},
};
@@ -305,17 +405,17 @@ DriverResult NfcProtocol::SendNextPackageRequest(MCUCommandResponse& output, u8
DriverResult NfcProtocol::SendReadAmiiboRequest(MCUCommandResponse& output, NFCPages ntag_pages) {
NFCRequestState request{
- .command_argument = NFCReadCommand::Ntag,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::ReadNtag,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
.data_length = sizeof(NFCReadCommandData),
.nfc_read =
{
.unknown = 0xd0,
- .uuid_length = 0x07,
- .unknown_2 = 0x00,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
.uid = {},
- .tag_type = NFCTagType::AllTags,
+ .tag_type = NFCTagType::Ntag215,
.read_block = GetReadBlockCommand(ntag_pages),
},
.crc = {},
@@ -328,12 +428,135 @@ DriverResult NfcProtocol::SendReadAmiiboRequest(MCUCommandResponse& output, NFCP
output);
}
+DriverResult NfcProtocol::SendWriteAmiiboRequest(MCUCommandResponse& output,
+ const TagUUID& tag_uuid) {
+ NFCRequestState request{
+ .command_argument = NFCCommand::ReadNtag,
+ .block_id = {},
+ .packet_id = {},
+ .packet_flag = MCUPacketFlag::LastCommandPacket,
+ .data_length = sizeof(NFCReadCommandData),
+ .nfc_read =
+ {
+ .unknown = 0xd0,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
+ .uid = tag_uuid,
+ .tag_type = NFCTagType::Ntag215,
+ .read_block = GetReadBlockCommand(NFCPages::Block3),
+ },
+ .crc = {},
+ };
+
+ std::array<u8, sizeof(NFCRequestState)> request_data{};
+ memcpy(request_data.data(), &request, sizeof(NFCRequestState));
+ request_data[36] = CalculateMCU_CRC8(request_data.data(), 36);
+ return SendMCUData(ReportMode::NFC_IR_MODE_60HZ, MCUSubCommand::ReadDeviceMode, request_data,
+ output);
+}
+
+DriverResult NfcProtocol::SendWriteDataAmiiboRequest(MCUCommandResponse& output, u8 block_id,
+ bool is_last_packet,
+ std::span<const u8> data) {
+ const auto data_size = std::min(data.size(), sizeof(NFCRequestState::raw_data));
+ NFCRequestState request{
+ .command_argument = NFCCommand::WriteNtag,
+ .block_id = block_id,
+ .packet_id = {},
+ .packet_flag =
+ is_last_packet ? MCUPacketFlag::LastCommandPacket : MCUPacketFlag::MorePacketsRemaining,
+ .data_length = static_cast<u8>(data_size),
+ .raw_data = {},
+ .crc = {},
+ };
+ memcpy(request.raw_data.data(), data.data(), data_size);
+
+ std::array<u8, sizeof(NFCRequestState)> request_data{};
+ memcpy(request_data.data(), &request, sizeof(NFCRequestState));
+ request_data[36] = CalculateMCU_CRC8(request_data.data(), 36);
+ return SendMCUData(ReportMode::NFC_IR_MODE_60HZ, MCUSubCommand::ReadDeviceMode, request_data,
+ output);
+}
+
+std::vector<u8> NfcProtocol::SerializeWritePackage(const NFCWritePackage& package) const {
+ const std::size_t header_size =
+ sizeof(NFCWriteCommandData) + sizeof(NFCWritePackage::number_of_chunks);
+ std::vector<u8> serialized_data(header_size);
+ std::size_t start_index = 0;
+
+ memcpy(serialized_data.data(), &package, header_size);
+ start_index += header_size;
+
+ for (const auto& data_chunk : package.data_chunks) {
+ const std::size_t chunk_size =
+ sizeof(NFCDataChunk::nfc_page) + sizeof(NFCDataChunk::data_size) + data_chunk.data_size;
+
+ serialized_data.resize(start_index + chunk_size);
+ memcpy(serialized_data.data() + start_index, &data_chunk, chunk_size);
+ start_index += chunk_size;
+ }
+
+ return serialized_data;
+}
+
+NFCWritePackage NfcProtocol::MakeAmiiboWritePackage(const TagUUID& tag_uuid,
+ std::span<const u8> data) const {
+ return {
+ .command_data{
+ .unknown = 0xd0,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
+ .uid = tag_uuid,
+ .tag_type = NFCTagType::Ntag215,
+ .unknown2 = 0x00,
+ .unknown3 = 0x01,
+ .unknown4 = 0x04,
+ .unknown5 = 0xff,
+ .unknown6 = 0xff,
+ .unknown7 = 0xff,
+ .unknown8 = 0xff,
+ .magic = data[16],
+ .write_count = static_cast<u16>((data[17] << 8) + data[18]),
+ .amiibo_version = data[19],
+ },
+ .number_of_chunks = 3,
+ .data_chunks =
+ {
+ MakeAmiiboChunk(0x05, 0x20, data),
+ MakeAmiiboChunk(0x20, 0xf0, data),
+ MakeAmiiboChunk(0x5c, 0x98, data),
+ },
+ };
+}
+
+NFCDataChunk NfcProtocol::MakeAmiiboChunk(u8 page, u8 size, std::span<const u8> data) const {
+ constexpr u8 PAGE_SIZE = 4;
+
+ if (static_cast<std::size_t>(page * PAGE_SIZE) + size >= data.size()) {
+ return {};
+ }
+
+ NFCDataChunk chunk{
+ .nfc_page = page,
+ .data_size = size,
+ .data = {},
+ };
+ std::memcpy(chunk.data.data(), data.data() + (page * PAGE_SIZE), size);
+ return chunk;
+}
+
NFCReadBlockCommand NfcProtocol::GetReadBlockCommand(NFCPages pages) const {
switch (pages) {
case NFCPages::Block0:
return {
.block_count = 1,
};
+ case NFCPages::Block3:
+ return {
+ .block_count = 1,
+ .blocks =
+ {
+ NFCReadBlock{0x03, 0x03},
+ },
+ };
case NFCPages::Block45:
return {
.block_count = 1,
@@ -368,6 +591,17 @@ NFCReadBlockCommand NfcProtocol::GetReadBlockCommand(NFCPages pages) const {
};
}
+TagUUID NfcProtocol::GetTagUUID(std::span<const u8> data) const {
+ if (data.size() < 10) {
+ return {};
+ }
+
+ // crc byte 3 is omitted in this operation
+ return {
+ data[0], data[1], data[2], data[4], data[5], data[6], data[7],
+ };
+}
+
bool NfcProtocol::IsEnabled() const {
return is_enabled;
}
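WriteAmiiboData above has to squeeze the serialized write package through the MCU's small per-packet payload, so it sends the buffer block by block and only advances once the Joy-Con echoes the block id it just wrote. A stripped-down sketch of that acknowledge-then-advance loop; the payload size, callback, and return convention here are assumptions for illustration, not the protocol's actual values:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <span>

    // Splits `buffer` into fixed-size blocks and pushes each one through `send`,
    // advancing only when the device acknowledges the block id just sent.
    // `send` stands in for the write request; it returns the block id the
    // device echoed back, or -1 on failure.
    bool SendInBlocks(std::span<const std::uint8_t> buffer,
                      const std::function<int(std::uint8_t block_id, bool is_last,
                                              std::span<const std::uint8_t>)>& send) {
        constexpr std::size_t max_payload = 0x1F;  // Bytes per packet (assumed).
        constexpr std::size_t timeout_limit = 60;  // Retry budget, as in the hunk above.
        std::uint8_t block_id = 1;
        std::size_t pos = 0;
        std::size_t tries = 0;

        while (pos < buffer.size() && tries++ < timeout_limit) {
            const std::size_t next = std::min(pos + max_payload, buffer.size());
            const bool is_last = (next - pos) < max_payload;
            if (send(block_id, is_last, buffer.subspan(pos, next - pos)) == block_id) {
                // Confirmed: move on to the next block; otherwise retry this one.
                ++block_id;
                pos = next;
            }
        }
        return pos == buffer.size();
    }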
diff --git a/src/input_common/helpers/joycon_protocol/nfc.h b/src/input_common/helpers/joycon_protocol/nfc.h
index c9e9af03f..eb58c427d 100644
--- a/src/input_common/helpers/joycon_protocol/nfc.h
+++ b/src/input_common/helpers/joycon_protocol/nfc.h
@@ -27,6 +27,8 @@ public:
DriverResult ScanAmiibo(std::vector<u8>& data);
+ DriverResult WriteAmiibo(std::span<const u8> data);
+
bool HasAmiibo();
bool IsEnabled() const;
@@ -37,18 +39,20 @@ private:
struct TagFoundData {
u8 type;
- std::vector<u8> uuid;
+ u8 uuid_size;
+ TagUUID uuid;
};
- DriverResult WaitUntilNfcIsReady();
-
- DriverResult WaitUntilNfcIsPolling();
+ DriverResult WaitUntilNfcIs(NFCStatus status);
DriverResult IsTagInRange(TagFoundData& data, std::size_t timeout_limit = 1);
DriverResult GetAmiiboData(std::vector<u8>& data);
- DriverResult SendStartPollingRequest(MCUCommandResponse& output);
+ DriverResult WriteAmiiboData(const TagUUID& tag_uuid, std::span<const u8> data);
+
+ DriverResult SendStartPollingRequest(MCUCommandResponse& output,
+ bool is_second_attempt = false);
DriverResult SendStopPollingRequest(MCUCommandResponse& output);
@@ -56,8 +60,21 @@ private:
DriverResult SendReadAmiiboRequest(MCUCommandResponse& output, NFCPages ntag_pages);
+ DriverResult SendWriteAmiiboRequest(MCUCommandResponse& output, const TagUUID& tag_uuid);
+
+ DriverResult SendWriteDataAmiiboRequest(MCUCommandResponse& output, u8 block_id,
+ bool is_last_packet, std::span<const u8> data);
+
+ std::vector<u8> SerializeWritePackage(const NFCWritePackage& package) const;
+
+ NFCWritePackage MakeAmiiboWritePackage(const TagUUID& tag_uuid, std::span<const u8> data) const;
+
+ NFCDataChunk MakeAmiiboChunk(u8 page, u8 size, std::span<const u8> data) const;
+
NFCReadBlockCommand GetReadBlockCommand(NFCPages pages) const;
+ TagUUID GetTagUUID(std::span<const u8> data) const;
+
bool is_enabled{};
std::size_t update_counter{};
};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
index 442365a26..c2a0ee6f1 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
@@ -30,7 +30,7 @@ void SHF(TranslatorVisitor& v, u64 insn, const IR::U32& shift, const IR::U32& hi
union {
u64 insn;
BitField<0, 8, IR::Reg> dest_reg;
- BitField<0, 8, IR::Reg> lo_bits_reg;
+ BitField<8, 8, IR::Reg> lo_bits_reg;
BitField<37, 2, MaxShift> max_shift;
BitField<47, 1, u64> cc;
BitField<48, 2, u64> x_mode;
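The one-line SHF fix above matters because BitField<position, bits, type> reads `bits` bits starting at `position`; with position 0, lo_bits_reg decoded the same bits as dest_reg instead of bits 8..15 of the instruction word. A plain-integer sketch of the intended decoding (not yuzu's BitField template):

    #include <cstdint>

    // Decode the two register fields of the instruction word by hand.
    // dest_reg lives in bits [0, 8), lo_bits_reg in bits [8, 16).
    constexpr std::uint32_t DestReg(std::uint64_t insn) {
        return static_cast<std::uint32_t>(insn & 0xff);
    }
    constexpr std::uint32_t LoBitsReg(std::uint64_t insn) {
        return static_cast<std::uint32_t>((insn >> 8) & 0xff);
    }

    static_assert(DestReg(0x2A05) == 0x05);
    static_assert(LoBitsReg(0x2A05) == 0x2A);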
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index 40db243d2..4b4f7061b 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -2,6 +2,8 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "common/microprofile.h"
+#include "video_core/buffer_cache/buffer_cache_base.h"
+#include "video_core/control/channel_state_cache.inc"
namespace VideoCommon {
@@ -9,4 +11,6 @@ MICROPROFILE_DEFINE(GPU_PrepareBuffers, "GPU", "Prepare buffers", MP_RGB(224, 12
MICROPROFILE_DEFINE(GPU_BindUploadBuffers, "GPU", "Bind and upload buffers", MP_RGB(224, 128, 128));
MICROPROFILE_DEFINE(GPU_DownloadMemory, "GPU", "Download buffers", MP_RGB(224, 128, 128));
+template class VideoCommon::ChannelSetupCaches<VideoCommon::BufferCacheChannelInfo>;
+
} // namespace VideoCommon
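buffer_cache.cpp above now includes channel_state_cache.inc and explicitly instantiates ChannelSetupCaches<BufferCacheChannelInfo>, so the template's member definitions are compiled once rather than in every includer. A minimal sketch of that header/.inc/explicit-instantiation split, with illustrative names; the comments mark where each part would normally live:

    // channel_cache.h -- declaration only, kept light for the many includers.
    template <class Info>
    class ChannelCache {
    public:
        void BindChannel(int id);

    private:
        Info info{};
    };

    // channel_cache.inc -- member definitions, included only by the one .cpp
    // that instantiates the template.
    template <class Info>
    void ChannelCache<Info>::BindChannel(int id) {
        info.channel = id;
    }

    // channel_cache.cpp -- emit the single explicit instantiation, mirroring
    // `template class VideoCommon::ChannelSetupCaches<BufferCacheChannelInfo>;`.
    struct ExampleChannelInfo {
        int channel = 0;
    };
    template class ChannelCache<ExampleChannelInfo>;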
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 65494097b..c336be707 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -64,17 +64,22 @@ void BufferCache<P>::RunGarbageCollector() {
template <class P>
void BufferCache<P>::TickFrame() {
// Calculate hits and shots and move hit bits to the right
- const u32 hits = std::reduce(uniform_cache_hits.begin(), uniform_cache_hits.end());
- const u32 shots = std::reduce(uniform_cache_shots.begin(), uniform_cache_shots.end());
- std::copy_n(uniform_cache_hits.begin(), uniform_cache_hits.size() - 1,
- uniform_cache_hits.begin() + 1);
- std::copy_n(uniform_cache_shots.begin(), uniform_cache_shots.size() - 1,
- uniform_cache_shots.begin() + 1);
- uniform_cache_hits[0] = 0;
- uniform_cache_shots[0] = 0;
+
+ const u32 hits = std::reduce(channel_state->uniform_cache_hits.begin(),
+ channel_state->uniform_cache_hits.end());
+ const u32 shots = std::reduce(channel_state->uniform_cache_shots.begin(),
+ channel_state->uniform_cache_shots.end());
+ std::copy_n(channel_state->uniform_cache_hits.begin(),
+ channel_state->uniform_cache_hits.size() - 1,
+ channel_state->uniform_cache_hits.begin() + 1);
+ std::copy_n(channel_state->uniform_cache_shots.begin(),
+ channel_state->uniform_cache_shots.size() - 1,
+ channel_state->uniform_cache_shots.begin() + 1);
+ channel_state->uniform_cache_hits[0] = 0;
+ channel_state->uniform_cache_shots[0] = 0;
const bool skip_preferred = hits * 256 < shots * 251;
- uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
+ channel_state->uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
// If we can obtain the memory info, use it instead of the estimate.
if (runtime.CanReportMemoryUsage()) {
@@ -164,10 +169,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
BufferId buffer_a;
BufferId buffer_b;
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
buffer_a = FindBuffer(*cpu_src_address, static_cast<u32>(amount));
buffer_b = FindBuffer(*cpu_dest_address, static_cast<u32>(amount));
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
auto& src_buffer = slot_buffers[buffer_a];
auto& dest_buffer = slot_buffers[buffer_b];
SynchronizeBuffer(src_buffer, *cpu_src_address, static_cast<u32>(amount));
@@ -272,30 +277,30 @@ void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr
.size = size,
.buffer_id = BufferId{},
};
- uniform_buffers[stage][index] = binding;
+ channel_state->uniform_buffers[stage][index] = binding;
}
template <class P>
void BufferCache<P>::DisableGraphicsUniformBuffer(size_t stage, u32 index) {
- uniform_buffers[stage][index] = NULL_BINDING;
+ channel_state->uniform_buffers[stage][index] = NULL_BINDING;
}
template <class P>
void BufferCache<P>::UpdateGraphicsBuffers(bool is_indexed) {
MICROPROFILE_SCOPE(GPU_PrepareBuffers);
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
DoUpdateGraphicsBuffers(is_indexed);
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
void BufferCache<P>::UpdateComputeBuffers() {
MICROPROFILE_SCOPE(GPU_PrepareBuffers);
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
DoUpdateComputeBuffers();
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
@@ -338,98 +343,102 @@ template <class P>
void BufferCache<P>::SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
const UniformBufferSizes* sizes) {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- if (enabled_uniform_buffer_masks != mask) {
+ if (channel_state->enabled_uniform_buffer_masks != mask) {
if constexpr (IS_OPENGL) {
- fast_bound_uniform_buffers.fill(0);
+ channel_state->fast_bound_uniform_buffers.fill(0);
}
- dirty_uniform_buffers.fill(~u32{0});
- uniform_buffer_binding_sizes.fill({});
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->uniform_buffer_binding_sizes.fill({});
}
}
- enabled_uniform_buffer_masks = mask;
- uniform_buffer_sizes = sizes;
+ channel_state->enabled_uniform_buffer_masks = mask;
+ channel_state->uniform_buffer_sizes = sizes;
}
template <class P>
void BufferCache<P>::SetComputeUniformBufferState(u32 mask,
const ComputeUniformBufferSizes* sizes) {
- enabled_compute_uniform_buffer_mask = mask;
- compute_uniform_buffer_sizes = sizes;
+ channel_state->enabled_compute_uniform_buffer_mask = mask;
+ channel_state->compute_uniform_buffer_sizes = sizes;
}
template <class P>
void BufferCache<P>::UnbindGraphicsStorageBuffers(size_t stage) {
- enabled_storage_buffers[stage] = 0;
- written_storage_buffers[stage] = 0;
+ channel_state->enabled_storage_buffers[stage] = 0;
+ channel_state->written_storage_buffers[stage] = 0;
}
template <class P>
void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, u32 cbuf_index,
u32 cbuf_offset, bool is_written) {
- enabled_storage_buffers[stage] |= 1U << ssbo_index;
- written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
+ channel_state->enabled_storage_buffers[stage] |= 1U << ssbo_index;
+ channel_state->written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
const auto& cbufs = maxwell3d->state.shader_stages[stage];
const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
- storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
+ channel_state->storage_buffers[stage][ssbo_index] =
+ StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
}
template <class P>
void BufferCache<P>::UnbindGraphicsTextureBuffers(size_t stage) {
- enabled_texture_buffers[stage] = 0;
- written_texture_buffers[stage] = 0;
- image_texture_buffers[stage] = 0;
+ channel_state->enabled_texture_buffers[stage] = 0;
+ channel_state->written_texture_buffers[stage] = 0;
+ channel_state->image_texture_buffers[stage] = 0;
}
template <class P>
void BufferCache<P>::BindGraphicsTextureBuffer(size_t stage, size_t tbo_index, GPUVAddr gpu_addr,
u32 size, PixelFormat format, bool is_written,
bool is_image) {
- enabled_texture_buffers[stage] |= 1U << tbo_index;
- written_texture_buffers[stage] |= (is_written ? 1U : 0U) << tbo_index;
+ channel_state->enabled_texture_buffers[stage] |= 1U << tbo_index;
+ channel_state->written_texture_buffers[stage] |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- image_texture_buffers[stage] |= (is_image ? 1U : 0U) << tbo_index;
+ channel_state->image_texture_buffers[stage] |= (is_image ? 1U : 0U) << tbo_index;
}
- texture_buffers[stage][tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
+ channel_state->texture_buffers[stage][tbo_index] =
+ GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
void BufferCache<P>::UnbindComputeStorageBuffers() {
- enabled_compute_storage_buffers = 0;
- written_compute_storage_buffers = 0;
- image_compute_texture_buffers = 0;
+ channel_state->enabled_compute_storage_buffers = 0;
+ channel_state->written_compute_storage_buffers = 0;
+ channel_state->image_compute_texture_buffers = 0;
}
template <class P>
void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, u32 cbuf_offset,
bool is_written) {
- enabled_compute_storage_buffers |= 1U << ssbo_index;
- written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
+ channel_state->enabled_compute_storage_buffers |= 1U << ssbo_index;
+ channel_state->written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
const auto& launch_desc = kepler_compute->launch_description;
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
const auto& cbufs = launch_desc.const_buffer_config;
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
- compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
+ channel_state->compute_storage_buffers[ssbo_index] =
+ StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
}
template <class P>
void BufferCache<P>::UnbindComputeTextureBuffers() {
- enabled_compute_texture_buffers = 0;
- written_compute_texture_buffers = 0;
- image_compute_texture_buffers = 0;
+ channel_state->enabled_compute_texture_buffers = 0;
+ channel_state->written_compute_texture_buffers = 0;
+ channel_state->image_compute_texture_buffers = 0;
}
template <class P>
void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_addr, u32 size,
PixelFormat format, bool is_written, bool is_image) {
- enabled_compute_texture_buffers |= 1U << tbo_index;
- written_compute_texture_buffers |= (is_written ? 1U : 0U) << tbo_index;
+ channel_state->enabled_compute_texture_buffers |= 1U << tbo_index;
+ channel_state->written_compute_texture_buffers |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- image_compute_texture_buffers |= (is_image ? 1U : 0U) << tbo_index;
+ channel_state->image_compute_texture_buffers |= (is_image ? 1U : 0U) << tbo_index;
}
- compute_texture_buffers[tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
+ channel_state->compute_texture_buffers[tbo_index] =
+ GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
@@ -672,10 +681,10 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
template <class P>
void BufferCache<P>::BindHostIndexBuffer() {
- Buffer& buffer = slot_buffers[index_buffer.buffer_id];
- TouchBuffer(buffer, index_buffer.buffer_id);
- const u32 offset = buffer.Offset(index_buffer.cpu_addr);
- const u32 size = index_buffer.size;
+ Buffer& buffer = slot_buffers[channel_state->index_buffer.buffer_id];
+ TouchBuffer(buffer, channel_state->index_buffer.buffer_id);
+ const u32 offset = buffer.Offset(channel_state->index_buffer.cpu_addr);
+ const u32 size = channel_state->index_buffer.size;
const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
if (!draw_state.inline_index_draw_indexes.empty()) [[unlikely]] {
if constexpr (USE_MEMORY_MAPS) {
@@ -689,7 +698,7 @@ void BufferCache<P>::BindHostIndexBuffer() {
buffer.ImmediateUpload(0, draw_state.inline_index_draw_indexes);
}
} else {
- SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
+ SynchronizeBuffer(buffer, channel_state->index_buffer.cpu_addr, size);
}
if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
const u32 new_offset =
@@ -706,7 +715,7 @@ template <class P>
void BufferCache<P>::BindHostVertexBuffers() {
auto& flags = maxwell3d->dirty.flags;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
- const Binding& binding = vertex_buffers[index];
+ const Binding& binding = channel_state->vertex_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
@@ -729,19 +738,19 @@ void BufferCache<P>::BindHostDrawIndirectBuffers() {
SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
};
if (current_draw_indirect->include_count) {
- bind_buffer(count_buffer_binding);
+ bind_buffer(channel_state->count_buffer_binding);
}
- bind_buffer(indirect_buffer_binding);
+ bind_buffer(channel_state->indirect_buffer_binding);
}
template <class P>
void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
u32 dirty = ~0U;
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty = std::exchange(dirty_uniform_buffers[stage], 0);
+ dirty = std::exchange(channel_state->dirty_uniform_buffers[stage], 0);
}
u32 binding_index = 0;
- ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
+ ForEachEnabledBit(channel_state->enabled_uniform_buffer_masks[stage], [&](u32 index) {
const bool needs_bind = ((dirty >> index) & 1) != 0;
BindHostGraphicsUniformBuffer(stage, index, binding_index, needs_bind);
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
@@ -753,13 +762,13 @@ void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index,
bool needs_bind) {
- const Binding& binding = uniform_buffers[stage][index];
+ const Binding& binding = channel_state->uniform_buffers[stage][index];
const VAddr cpu_addr = binding.cpu_addr;
- const u32 size = std::min(binding.size, (*uniform_buffer_sizes)[stage][index]);
+ const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
- size <= uniform_buffer_skip_cache_size &&
+ size <= channel_state->uniform_buffer_skip_cache_size &&
!memory_tracker.IsRegionGpuModified(cpu_addr, size);
if (use_fast_buffer) {
if constexpr (IS_OPENGL) {
@@ -767,11 +776,11 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
// Fast path for Nvidia
const bool should_fast_bind =
!HasFastUniformBufferBound(stage, binding_index) ||
- uniform_buffer_binding_sizes[stage][binding_index] != size;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
if (should_fast_bind) {
// We only have to bind when the currently bound buffer is not the fast version
- fast_bound_uniform_buffers[stage] |= 1U << binding_index;
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
runtime.BindFastUniformBuffer(stage, binding_index, size);
}
const auto span = ImmediateBufferWithData(cpu_addr, size);
@@ -780,8 +789,8 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
}
}
if constexpr (IS_OPENGL) {
- fast_bound_uniform_buffers[stage] |= 1U << binding_index;
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
}
// Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
@@ -791,15 +800,15 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
// Classic cached path
const bool sync_cached = SynchronizeBuffer(buffer, cpu_addr, size);
if (sync_cached) {
- ++uniform_cache_hits[0];
+ ++channel_state->uniform_cache_hits[0];
}
- ++uniform_cache_shots[0];
+ ++channel_state->uniform_cache_shots[0];
// Skip binding if it's not needed and if the bound buffer is not the fast version
// This exists to avoid instances where the fast buffer is bound and a GPU write happens
needs_bind |= HasFastUniformBufferBound(stage, binding_index);
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- needs_bind |= uniform_buffer_binding_sizes[stage][binding_index] != size;
+ needs_bind |= channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
}
if (!needs_bind) {
return;
@@ -807,14 +816,14 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
const u32 offset = buffer.Offset(cpu_addr);
if constexpr (IS_OPENGL) {
// Fast buffer will be unbound
- fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
+ channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
// Mark the index as dirty if offset doesn't match
const bool is_copy_bind = offset != 0 && !runtime.SupportsNonZeroUniformOffset();
- dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
+ channel_state->dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
}
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
}
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
runtime.BindUniformBuffer(stage, binding_index, buffer, offset, size);
@@ -826,15 +835,15 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
template <class P>
void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
u32 binding_index = 0;
- ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) {
- const Binding& binding = storage_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) {
+ const Binding& binding = channel_state->storage_buffers[stage][index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
- const bool is_written = ((written_storage_buffers[stage] >> index) & 1) != 0;
+ const bool is_written = ((channel_state->written_storage_buffers[stage] >> index) & 1) != 0;
if constexpr (NEEDS_BIND_STORAGE_INDEX) {
runtime.BindStorageBuffer(stage, binding_index, buffer, offset, size, is_written);
++binding_index;
@@ -846,8 +855,8 @@ void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
- ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
- const TextureBufferBinding& binding = texture_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) {
+ const TextureBufferBinding& binding = channel_state->texture_buffers[stage][index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
@@ -855,7 +864,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- if (((image_texture_buffers[stage] >> index) & 1) != 0) {
+ if (((channel_state->image_texture_buffers[stage] >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
@@ -872,7 +881,7 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
- const Binding& binding = transform_feedback_buffers[index];
+ const Binding& binding = channel_state->transform_feedback_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
@@ -887,15 +896,16 @@ template <class P>
void BufferCache<P>::BindHostComputeUniformBuffers() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
// Mark all uniform buffers as dirty
- dirty_uniform_buffers.fill(~u32{0});
- fast_bound_uniform_buffers.fill(0);
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->fast_bound_uniform_buffers.fill(0);
}
u32 binding_index = 0;
- ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
- const Binding& binding = compute_uniform_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_uniform_buffer_mask, [&](u32 index) {
+ const Binding& binding = channel_state->compute_uniform_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
- const u32 size = std::min(binding.size, (*compute_uniform_buffer_sizes)[index]);
+ const u32 size =
+ std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
@@ -911,15 +921,16 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
template <class P>
void BufferCache<P>::BindHostComputeStorageBuffers() {
u32 binding_index = 0;
- ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
- const Binding& binding = compute_storage_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) {
+ const Binding& binding = channel_state->compute_storage_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
- const bool is_written = ((written_compute_storage_buffers >> index) & 1) != 0;
+ const bool is_written =
+ ((channel_state->written_compute_storage_buffers >> index) & 1) != 0;
if constexpr (NEEDS_BIND_STORAGE_INDEX) {
runtime.BindComputeStorageBuffer(binding_index, buffer, offset, size, is_written);
++binding_index;
@@ -931,8 +942,8 @@ void BufferCache<P>::BindHostComputeStorageBuffers() {
template <class P>
void BufferCache<P>::BindHostComputeTextureBuffers() {
- ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
- const TextureBufferBinding& binding = compute_texture_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) {
+ const TextureBufferBinding& binding = channel_state->compute_texture_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
@@ -940,7 +951,7 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- if (((image_compute_texture_buffers >> index) & 1) != 0) {
+ if (((channel_state->image_compute_texture_buffers >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
@@ -954,7 +965,7 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
template <class P>
void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
if (is_indexed) {
UpdateIndexBuffer();
}
@@ -968,7 +979,7 @@ void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
if (current_draw_indirect) {
UpdateDrawIndirect();
}
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
@@ -999,7 +1010,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
slot_buffers.erase(inline_buffer_id);
inline_buffer_id = CreateBuffer(0, buffer_size);
}
- index_buffer = Binding{
+ channel_state->index_buffer = Binding{
.cpu_addr = 0,
.size = inline_index_size,
.buffer_id = inline_buffer_id,
@@ -1015,10 +1026,10 @@ void BufferCache<P>::UpdateIndexBuffer() {
(index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes();
const u32 size = std::min(address_size, draw_size);
if (size == 0 || !cpu_addr) {
- index_buffer = NULL_BINDING;
+ channel_state->index_buffer = NULL_BINDING;
return;
}
- index_buffer = Binding{
+ channel_state->index_buffer = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = FindBuffer(*cpu_addr, size),
@@ -1051,13 +1062,13 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
u32 size = address_size; // TODO: Analyze stride and number of vertices
if (array.enable == 0 || size == 0 || !cpu_addr) {
- vertex_buffers[index] = NULL_BINDING;
+ channel_state->vertex_buffers[index] = NULL_BINDING;
return;
}
if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
size = static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, size));
}
- vertex_buffers[index] = Binding{
+ channel_state->vertex_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = FindBuffer(*cpu_addr, size),
@@ -1079,23 +1090,24 @@ void BufferCache<P>::UpdateDrawIndirect() {
};
};
if (current_draw_indirect->include_count) {
- update(current_draw_indirect->count_start_address, sizeof(u32), count_buffer_binding);
+ update(current_draw_indirect->count_start_address, sizeof(u32),
+ channel_state->count_buffer_binding);
}
update(current_draw_indirect->indirect_start_address, current_draw_indirect->buffer_size,
- indirect_buffer_binding);
+ channel_state->indirect_buffer_binding);
}
template <class P>
void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
- ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
- Binding& binding = uniform_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_uniform_buffer_masks[stage], [&](u32 index) {
+ Binding& binding = channel_state->uniform_buffers[stage][index];
if (binding.buffer_id) {
// Already updated
return;
}
// Mark as dirty
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty_uniform_buffers[stage] |= 1U << index;
+ channel_state->dirty_uniform_buffers[stage] |= 1U << index;
}
// Resolve buffer
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
@@ -1104,10 +1116,10 @@ void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
- const u32 written_mask = written_storage_buffers[stage];
- ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) {
+ const u32 written_mask = channel_state->written_storage_buffers[stage];
+ ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) {
// Resolve buffer
- Binding& binding = storage_buffers[stage][index];
+ Binding& binding = channel_state->storage_buffers[stage][index];
const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size);
binding.buffer_id = buffer_id;
// Mark buffer as written if needed
@@ -1119,11 +1131,11 @@ void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
- ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
- Binding& binding = texture_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) {
+ Binding& binding = channel_state->texture_buffers[stage][index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark buffer as written if needed
- if (((written_texture_buffers[stage] >> index) & 1) != 0) {
+ if (((channel_state->written_texture_buffers[stage] >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1146,11 +1158,11 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
const u32 size = binding.size;
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (binding.enable == 0 || size == 0 || !cpu_addr) {
- transform_feedback_buffers[index] = NULL_BINDING;
+ channel_state->transform_feedback_buffers[index] = NULL_BINDING;
return;
}
const BufferId buffer_id = FindBuffer(*cpu_addr, size);
- transform_feedback_buffers[index] = Binding{
+ channel_state->transform_feedback_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = buffer_id,
@@ -1160,8 +1172,8 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
template <class P>
void BufferCache<P>::UpdateComputeUniformBuffers() {
- ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
- Binding& binding = compute_uniform_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_uniform_buffer_mask, [&](u32 index) {
+ Binding& binding = channel_state->compute_uniform_buffers[index];
binding = NULL_BINDING;
const auto& launch_desc = kepler_compute->launch_description;
if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
@@ -1178,12 +1190,12 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
template <class P>
void BufferCache<P>::UpdateComputeStorageBuffers() {
- ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
+ ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) {
// Resolve buffer
- Binding& binding = compute_storage_buffers[index];
+ Binding& binding = channel_state->compute_storage_buffers[index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
- if (((written_compute_storage_buffers >> index) & 1) != 0) {
+ if (((channel_state->written_compute_storage_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1191,11 +1203,11 @@ void BufferCache<P>::UpdateComputeStorageBuffers() {
template <class P>
void BufferCache<P>::UpdateComputeTextureBuffers() {
- ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
- Binding& binding = compute_texture_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) {
+ Binding& binding = channel_state->compute_texture_buffers[index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
- if (((written_compute_texture_buffers >> index) & 1) != 0) {
+ if (((channel_state->written_compute_texture_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1610,13 +1622,13 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
const auto replace = [scalar_replace](std::span<Binding> bindings) {
std::ranges::for_each(bindings, scalar_replace);
};
- scalar_replace(index_buffer);
- replace(vertex_buffers);
- std::ranges::for_each(uniform_buffers, replace);
- std::ranges::for_each(storage_buffers, replace);
- replace(transform_feedback_buffers);
- replace(compute_uniform_buffers);
- replace(compute_storage_buffers);
+ scalar_replace(channel_state->index_buffer);
+ replace(channel_state->vertex_buffers);
+ std::ranges::for_each(channel_state->uniform_buffers, replace);
+ std::ranges::for_each(channel_state->storage_buffers, replace);
+ replace(channel_state->transform_feedback_buffers);
+ replace(channel_state->compute_uniform_buffers);
+ replace(channel_state->compute_storage_buffers);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
@@ -1634,8 +1646,8 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
template <class P>
void BufferCache<P>::NotifyBufferDeletion() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty_uniform_buffers.fill(~u32{0});
- uniform_buffer_binding_sizes.fill({});
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->uniform_buffer_binding_sizes.fill({});
}
auto& flags = maxwell3d->dirty.flags;
flags[Dirty::IndexBuffer] = true;
@@ -1643,13 +1655,12 @@ void BufferCache<P>::NotifyBufferDeletion() {
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
flags[Dirty::VertexBuffer0 + index] = true;
}
- has_deleted_buffers = true;
+ channel_state->has_deleted_buffers = true;
}
template <class P>
-typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
- u32 cbuf_index,
- bool is_written) const {
+Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
+ bool is_written) const {
const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
const auto size = [&]() {
const bool is_nvn_cbuf = cbuf_index == 0;
@@ -1681,8 +1692,8 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
}
template <class P>
-typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
- GPUVAddr gpu_addr, u32 size, PixelFormat format) {
+TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
+ PixelFormat format) {
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
TextureBufferBinding binding;
if (!cpu_addr || size == 0) {
@@ -1721,7 +1732,7 @@ std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
template <class P>
bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept {
if constexpr (IS_OPENGL) {
- return ((fast_bound_uniform_buffers[stage] >> binding_index) & 1) != 0;
+ return ((channel_state->fast_bound_uniform_buffers[stage] >> binding_index) & 1) != 0;
} else {
// Only OpenGL has fast uniform buffers
return false;
@@ -1730,14 +1741,14 @@ bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index)
template <class P>
std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectCount() {
- auto& buffer = slot_buffers[count_buffer_binding.buffer_id];
- return std::make_pair(&buffer, buffer.Offset(count_buffer_binding.cpu_addr));
+ auto& buffer = slot_buffers[channel_state->count_buffer_binding.buffer_id];
+ return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.cpu_addr));
}
template <class P>
std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectBuffer() {
- auto& buffer = slot_buffers[indirect_buffer_binding.buffer_id];
- return std::make_pair(&buffer, buffer.Offset(indirect_buffer_binding.cpu_addr));
+ auto& buffer = slot_buffers[channel_state->indirect_buffer_binding.buffer_id];
+ return std::make_pair(&buffer, buffer.Offset(channel_state->indirect_buffer_binding.cpu_addr));
}
} // namespace VideoCommon
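Every hunk in this file follows the same mechanical substitution: binding state that used to be a direct member of the cache is now reached through the channel_state pointer, so each GPU channel carries its own copy. As a rough illustration of that shape only (ChannelInfo, Cache, BindChannel and their fields are invented names here, not yuzu's types), a minimal sketch:

#include <array>
#include <cstdint>
#include <memory>
#include <unordered_map>

// Hypothetical per-channel state: bindings that used to live directly in the
// cache object are grouped here, one instance per GPU channel.
struct ChannelInfo {
    struct Binding {
        std::uintptr_t cpu_addr{};
        std::uint32_t size{};
    };
    Binding index_buffer{};
    std::array<Binding, 16> vertex_buffers{};
    std::uint32_t dirty_uniform_buffers{};
};

class Cache {
public:
    // Select (or lazily create) the state for a channel; every later call
    // works on whatever channel_state points to, which is exactly the prefix
    // rewrite visible in the hunks above.
    void BindChannel(int channel_id) {
        auto& slot = channels[channel_id];
        if (!slot) {
            slot = std::make_unique<ChannelInfo>();
        }
        channel_state = slot.get();
    }

    void SetIndexBuffer(std::uintptr_t cpu_addr, std::uint32_t size) {
        channel_state->index_buffer = {cpu_addr, size};
    }

private:
    std::unordered_map<int, std::unique_ptr<ChannelInfo>> channels;
    ChannelInfo* channel_state = nullptr; // state of the currently bound channel
};

Switching channels then becomes a single pointer swap, which is presumably why the Bind*/Update* routines above only gained the channel_state-> prefix and no other logic changes.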
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index ac00d4d9d..c689fe06b 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -86,8 +86,78 @@ enum class ObtainBufferOperation : u32 {
MarkQuery = 3,
};
-template <typename P>
-class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
+static constexpr BufferId NULL_BUFFER_ID{0};
+static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
+
+struct Binding {
+ VAddr cpu_addr{};
+ u32 size{};
+ BufferId buffer_id;
+};
+
+struct TextureBufferBinding : Binding {
+ PixelFormat format;
+};
+
+static constexpr Binding NULL_BINDING{
+ .cpu_addr = 0,
+ .size = 0,
+ .buffer_id = NULL_BUFFER_ID,
+};
+
+class BufferCacheChannelInfo : public ChannelInfo {
+public:
+ BufferCacheChannelInfo() = delete;
+ BufferCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept : ChannelInfo(state) {}
+ BufferCacheChannelInfo(const BufferCacheChannelInfo& state) = delete;
+ BufferCacheChannelInfo& operator=(const BufferCacheChannelInfo&) = delete;
+
+ Binding index_buffer;
+ std::array<Binding, NUM_VERTEX_BUFFERS> vertex_buffers;
+ std::array<std::array<Binding, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES> uniform_buffers;
+ std::array<std::array<Binding, NUM_STORAGE_BUFFERS>, NUM_STAGES> storage_buffers;
+ std::array<std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS>, NUM_STAGES> texture_buffers;
+ std::array<Binding, NUM_TRANSFORM_FEEDBACK_BUFFERS> transform_feedback_buffers;
+ Binding count_buffer_binding;
+ Binding indirect_buffer_binding;
+
+ std::array<Binding, NUM_COMPUTE_UNIFORM_BUFFERS> compute_uniform_buffers;
+ std::array<Binding, NUM_STORAGE_BUFFERS> compute_storage_buffers;
+ std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS> compute_texture_buffers;
+
+ std::array<u32, NUM_STAGES> enabled_uniform_buffer_masks{};
+ u32 enabled_compute_uniform_buffer_mask = 0;
+
+ const UniformBufferSizes* uniform_buffer_sizes{};
+ const ComputeUniformBufferSizes* compute_uniform_buffer_sizes{};
+
+ std::array<u32, NUM_STAGES> enabled_storage_buffers{};
+ std::array<u32, NUM_STAGES> written_storage_buffers{};
+ u32 enabled_compute_storage_buffers = 0;
+ u32 written_compute_storage_buffers = 0;
+
+ std::array<u32, NUM_STAGES> enabled_texture_buffers{};
+ std::array<u32, NUM_STAGES> written_texture_buffers{};
+ std::array<u32, NUM_STAGES> image_texture_buffers{};
+ u32 enabled_compute_texture_buffers = 0;
+ u32 written_compute_texture_buffers = 0;
+ u32 image_compute_texture_buffers = 0;
+
+ std::array<u32, 16> uniform_cache_hits{};
+ std::array<u32, 16> uniform_cache_shots{};
+
+ u32 uniform_buffer_skip_cache_size = DEFAULT_SKIP_CACHE_SIZE;
+
+ bool has_deleted_buffers = false;
+
+ std::array<u32, NUM_STAGES> dirty_uniform_buffers{};
+ std::array<u32, NUM_STAGES> fast_bound_uniform_buffers{};
+ std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>
+ uniform_buffer_binding_sizes{};
+};
+
+template <class P>
+class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInfo> {
// Page size for caching purposes.
// This is unrelated to the CPU page size and it can be changed as it seems optimal.
static constexpr u32 CACHING_PAGEBITS = 16;
@@ -104,8 +174,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
static constexpr bool SEPARATE_IMAGE_BUFFERS_BINDINGS = P::SEPARATE_IMAGE_BUFFER_BINDINGS;
static constexpr bool IMPLEMENTS_ASYNC_DOWNLOADS = P::IMPLEMENTS_ASYNC_DOWNLOADS;
- static constexpr BufferId NULL_BUFFER_ID{0};
-
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 512_MiB;
static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB;
static constexpr s64 TARGET_THRESHOLD = 4_GiB;
@@ -149,8 +217,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
using OverlapSection = boost::icl::inter_section<int>;
using OverlapCounter = boost::icl::split_interval_map<VAddr, int>;
- struct Empty {};
-
struct OverlapResult {
std::vector<BufferId> ids;
VAddr begin;
@@ -158,25 +224,7 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
bool has_stream_leap = false;
};
- struct Binding {
- VAddr cpu_addr{};
- u32 size{};
- BufferId buffer_id;
- };
-
- struct TextureBufferBinding : Binding {
- PixelFormat format;
- };
-
- static constexpr Binding NULL_BINDING{
- .cpu_addr = 0,
- .size = 0,
- .buffer_id = NULL_BUFFER_ID,
- };
-
public:
- static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
-
explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
@@ -496,51 +544,6 @@ private:
u32 last_index_count = 0;
- Binding index_buffer;
- std::array<Binding, NUM_VERTEX_BUFFERS> vertex_buffers;
- std::array<std::array<Binding, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES> uniform_buffers;
- std::array<std::array<Binding, NUM_STORAGE_BUFFERS>, NUM_STAGES> storage_buffers;
- std::array<std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS>, NUM_STAGES> texture_buffers;
- std::array<Binding, NUM_TRANSFORM_FEEDBACK_BUFFERS> transform_feedback_buffers;
- Binding count_buffer_binding;
- Binding indirect_buffer_binding;
-
- std::array<Binding, NUM_COMPUTE_UNIFORM_BUFFERS> compute_uniform_buffers;
- std::array<Binding, NUM_STORAGE_BUFFERS> compute_storage_buffers;
- std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS> compute_texture_buffers;
-
- std::array<u32, NUM_STAGES> enabled_uniform_buffer_masks{};
- u32 enabled_compute_uniform_buffer_mask = 0;
-
- const UniformBufferSizes* uniform_buffer_sizes{};
- const ComputeUniformBufferSizes* compute_uniform_buffer_sizes{};
-
- std::array<u32, NUM_STAGES> enabled_storage_buffers{};
- std::array<u32, NUM_STAGES> written_storage_buffers{};
- u32 enabled_compute_storage_buffers = 0;
- u32 written_compute_storage_buffers = 0;
-
- std::array<u32, NUM_STAGES> enabled_texture_buffers{};
- std::array<u32, NUM_STAGES> written_texture_buffers{};
- std::array<u32, NUM_STAGES> image_texture_buffers{};
- u32 enabled_compute_texture_buffers = 0;
- u32 written_compute_texture_buffers = 0;
- u32 image_compute_texture_buffers = 0;
-
- std::array<u32, 16> uniform_cache_hits{};
- std::array<u32, 16> uniform_cache_shots{};
-
- u32 uniform_buffer_skip_cache_size = DEFAULT_SKIP_CACHE_SIZE;
-
- bool has_deleted_buffers = false;
-
- std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS, std::array<u32, NUM_STAGES>, Empty>
- dirty_uniform_buffers{};
- std::conditional_t<IS_OPENGL, std::array<u32, NUM_STAGES>, Empty> fast_bound_uniform_buffers{};
- std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS,
- std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
- uniform_buffer_binding_sizes{};
-
MemoryTracker memory_tracker;
IntervalSet uncommitted_ranges;
IntervalSet common_ranges;
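The block of members deleted above also drops the std::conditional_t/Empty trick that previously stored an empty placeholder type on backends that never touch that state; in BufferCacheChannelInfo the arrays are now unconditional. For reference, a self-contained sketch of what that trick does (CacheSketch, GLLikePolicy, VKLikePolicy and HAS_PERSISTENT_BINDINGS are invented names, not yuzu's):

#include <array>
#include <cstdint>
#include <type_traits>

struct Empty {};

template <class P>
class CacheSketch {
    static constexpr bool HAS_PERSISTENT_BINDINGS = P::HAS_PERSISTENT_BINDINGS;

    // Real array when the backend needs it, an empty placeholder otherwise.
    std::conditional_t<HAS_PERSISTENT_BINDINGS, std::array<std::uint32_t, 5>, Empty>
        dirty_uniform_buffers{};

public:
    void MarkAllDirty() {
        // Accesses must stay behind if constexpr, as in the .cpp hunks above,
        // because the member is not an array on every backend.
        if constexpr (HAS_PERSISTENT_BINDINGS) {
            dirty_uniform_buffers.fill(~std::uint32_t{0});
        }
    }
};

struct GLLikePolicy { static constexpr bool HAS_PERSISTENT_BINDINGS = true; };
struct VKLikePolicy { static constexpr bool HAS_PERSISTENT_BINDINGS = false; };

static_assert(sizeof(CacheSketch<VKLikePolicy>) < sizeof(CacheSketch<GLLikePolicy>));

Keeping plain arrays in the per-channel struct trades a few bytes per channel for a simpler definition; the if constexpr guards around the accesses remain in place in the buffer_cache.cpp hunks above.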
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 6af4ae793..6d3bda192 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -117,7 +117,7 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
for (auto& stage_uniforms : fast_uniforms) {
for (OGLBuffer& buffer : stage_uniforms) {
buffer.Create();
- glNamedBufferData(buffer.handle, BufferCache::DEFAULT_SKIP_CACHE_SIZE, nullptr,
+ glNamedBufferData(buffer.handle, VideoCommon::DEFAULT_SKIP_CACHE_SIZE, nullptr,
GL_STREAM_DRAW);
}
}
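This call site is presumably why DEFAULT_SKIP_CACHE_SIZE and the Binding types moved to namespace scope in buffer_cache_base.h: a constant nested inside the class template can only be named through a concrete instantiation, while a namespace-scope constexpr is usable without knowing the template parameter at all. A small sketch of the difference (BufferCacheLike and SomePolicy are invented names):

#include <cstddef>

namespace sketch {

// Before: the constant is a member of the class template, so outside code has
// to name a concrete instantiation just to read it.
template <class P>
class BufferCacheLike {
public:
    static constexpr std::size_t DEFAULT_SKIP_CACHE_SIZE = 4096;
};

// After: a namespace-scope constant is usable without knowing P at all.
inline constexpr std::size_t DEFAULT_SKIP_CACHE_SIZE = 4096;

struct SomePolicy {};

static_assert(BufferCacheLike<SomePolicy>::DEFAULT_SKIP_CACHE_SIZE == DEFAULT_SKIP_CACHE_SIZE);

} // namespace sketch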
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index 91512022f..d79594ce5 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -155,7 +155,7 @@ void ImageBase::CheckAliasState() {
flags &= ~ImageFlagBits::Alias;
}
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
static constexpr auto OPTIONS = RelaxedOptions::Size | RelaxedOptions::Format;
ASSERT(lhs.info.type == rhs.info.type);
std::optional<SubresourceBase> base;
@@ -169,7 +169,7 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
}
if (!base) {
LOG_ERROR(HW_GPU, "Image alias should have been flipped");
- return;
+ return false;
}
const PixelFormat lhs_format = lhs.info.format;
const PixelFormat rhs_format = rhs.info.format;
@@ -248,12 +248,13 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
}
ASSERT(lhs_alias.copies.empty() == rhs_alias.copies.empty());
if (lhs_alias.copies.empty()) {
- return;
+ return false;
}
lhs.aliased_images.push_back(std::move(lhs_alias));
rhs.aliased_images.push_back(std::move(rhs_alias));
lhs.flags &= ~ImageFlagBits::IsRescalable;
rhs.flags &= ~ImageFlagBits::IsRescalable;
+ return true;
}
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 329396bb6..1b8a17ee8 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -142,6 +142,6 @@ struct ImageAllocBase {
std::vector<ImageId> images;
};
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
} // namespace VideoCommon
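AddImageAlias now reports whether an alias entry was actually appended, which is what lets the JoinImages rework below record the index of the new entry only when it exists. A generic sketch of that caller pattern (Record, AddRecord and Register are invented names, not yuzu code):

#include <cstddef>
#include <unordered_map>
#include <vector>

// A registration helper that reports whether it actually appended anything,
// so the caller can remember where the record landed.
struct Record {
    int payload;
};

bool AddRecord(std::vector<Record>& list, int payload) {
    if (payload < 0) {
        return false; // bail out without appending, mirroring the early returns above
    }
    list.push_back(Record{payload});
    return true;
}

void Register(std::unordered_map<int, std::size_t>& indices, std::vector<Record>& list, int id,
              int payload) {
    const std::size_t index = list.size(); // index the new entry will get, if any
    if (!AddRecord(list, payload)) {
        return; // nothing recorded, so nothing to index
    }
    indices.emplace(id, index); // safe: list[index] exists now
}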
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 9790949f5..2cf082c5d 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -139,7 +139,6 @@ void TextureCache<P>::TickFrame() {
TickAsyncDecode();
runtime.TickFrame();
- critical_gc = 0;
++frame_tick;
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
@@ -1312,17 +1311,18 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const size_t size_bytes = CalculateGuestSizeInBytes(new_info);
const bool broken_views = runtime.HasBrokenTextureViewFormats();
const bool native_bgr = runtime.HasNativeBgr();
- boost::container::small_vector<ImageId, 4> overlap_ids;
- std::unordered_set<ImageId> overlaps_found;
- boost::container::small_vector<ImageId, 4> left_aliased_ids;
- boost::container::small_vector<ImageId, 4> right_aliased_ids;
- std::unordered_set<ImageId> ignore_textures;
- boost::container::small_vector<ImageId, 4> bad_overlap_ids;
- boost::container::small_vector<ImageId, 4> all_siblings;
+ join_overlap_ids.clear();
+ join_overlaps_found.clear();
+ join_left_aliased_ids.clear();
+ join_right_aliased_ids.clear();
+ join_ignore_textures.clear();
+ join_bad_overlap_ids.clear();
+ join_copies_to_do.clear();
+ join_alias_indices.clear();
const bool this_is_linear = info.type == ImageType::Linear;
const auto region_check = [&](ImageId overlap_id, ImageBase& overlap) {
if (True(overlap.flags & ImageFlagBits::Remapped)) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
return;
}
const bool overlap_is_linear = overlap.info.type == ImageType::Linear;
@@ -1332,11 +1332,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
if (this_is_linear && overlap_is_linear) {
if (info.pitch == overlap.info.pitch && gpu_addr == overlap.gpu_addr) {
// Alias linear images with the same pitch
- left_aliased_ids.push_back(overlap_id);
+ join_left_aliased_ids.push_back(overlap_id);
}
return;
}
- overlaps_found.insert(overlap_id);
+ join_overlaps_found.insert(overlap_id);
static constexpr bool strict_size = true;
const std::optional<OverlapResult> solution = ResolveOverlap(
new_info, gpu_addr, cpu_addr, overlap, strict_size, broken_views, native_bgr);
@@ -1344,33 +1344,33 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
gpu_addr = solution->gpu_addr;
cpu_addr = solution->cpu_addr;
new_info.resources = solution->resources;
- overlap_ids.push_back(overlap_id);
- all_siblings.push_back(overlap_id);
+ join_overlap_ids.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{false, overlap_id});
return;
}
static constexpr auto options = RelaxedOptions::Size | RelaxedOptions::Format;
const ImageBase new_image_base(new_info, gpu_addr, cpu_addr);
if (IsSubresource(new_info, overlap, gpu_addr, options, broken_views, native_bgr)) {
- left_aliased_ids.push_back(overlap_id);
+ join_left_aliased_ids.push_back(overlap_id);
overlap.flags |= ImageFlagBits::Alias;
- all_siblings.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
} else if (IsSubresource(overlap.info, new_image_base, overlap.gpu_addr, options,
broken_views, native_bgr)) {
- right_aliased_ids.push_back(overlap_id);
+ join_right_aliased_ids.push_back(overlap_id);
overlap.flags |= ImageFlagBits::Alias;
- all_siblings.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
} else {
- bad_overlap_ids.push_back(overlap_id);
+ join_bad_overlap_ids.push_back(overlap_id);
}
};
ForEachImageInRegion(cpu_addr, size_bytes, region_check);
const auto region_check_gpu = [&](ImageId overlap_id, ImageBase& overlap) {
- if (!overlaps_found.contains(overlap_id)) {
+ if (!join_overlaps_found.contains(overlap_id)) {
if (True(overlap.flags & ImageFlagBits::Remapped)) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
}
if (overlap.gpu_addr == gpu_addr && overlap.guest_size_bytes == size_bytes) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
}
}
};
@@ -1378,11 +1378,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
bool can_rescale = info.rescaleable;
bool any_rescaled = false;
- for (const ImageId sibling_id : all_siblings) {
+ for (const auto& copy : join_copies_to_do) {
if (!can_rescale) {
break;
}
- Image& sibling = slot_images[sibling_id];
+ Image& sibling = slot_images[copy.id];
can_rescale &= ImageCanRescale(sibling);
any_rescaled |= True(sibling.flags & ImageFlagBits::Rescaled);
}
@@ -1390,13 +1390,13 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
can_rescale &= any_rescaled;
if (can_rescale) {
- for (const ImageId sibling_id : all_siblings) {
- Image& sibling = slot_images[sibling_id];
+ for (const auto& copy : join_copies_to_do) {
+ Image& sibling = slot_images[copy.id];
ScaleUp(sibling);
}
} else {
- for (const ImageId sibling_id : all_siblings) {
- Image& sibling = slot_images[sibling_id];
+ for (const auto& copy : join_copies_to_do) {
+ Image& sibling = slot_images[copy.id];
ScaleDown(sibling);
}
}
@@ -1408,7 +1408,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
new_image.flags |= ImageFlagBits::Sparse;
}
- for (const ImageId overlap_id : ignore_textures) {
+ for (const ImageId overlap_id : join_ignore_textures) {
Image& overlap = slot_images[overlap_id];
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
UNIMPLEMENTED();
@@ -1429,14 +1429,60 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
ScaleDown(new_image);
}
- std::ranges::sort(overlap_ids, [this](const ImageId lhs, const ImageId rhs) {
- const ImageBase& lhs_image = slot_images[lhs];
- const ImageBase& rhs_image = slot_images[rhs];
+ std::ranges::sort(join_copies_to_do, [this](const JoinCopy& lhs, const JoinCopy& rhs) {
+ const ImageBase& lhs_image = slot_images[lhs.id];
+ const ImageBase& rhs_image = slot_images[rhs.id];
return lhs_image.modification_tick < rhs_image.modification_tick;
});
- for (const ImageId overlap_id : overlap_ids) {
- Image& overlap = slot_images[overlap_id];
+ ImageBase& new_image_base = new_image;
+ for (const ImageId aliased_id : join_right_aliased_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ size_t alias_index = new_image_base.aliased_images.size();
+ if (!AddImageAlias(new_image_base, aliased, new_image_id, aliased_id)) {
+ continue;
+ }
+ join_alias_indices.emplace(aliased_id, alias_index);
+ new_image.flags |= ImageFlagBits::Alias;
+ }
+ for (const ImageId aliased_id : join_left_aliased_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ size_t alias_index = new_image_base.aliased_images.size();
+ if (!AddImageAlias(aliased, new_image_base, aliased_id, new_image_id)) {
+ continue;
+ }
+ join_alias_indices.emplace(aliased_id, alias_index);
+ new_image.flags |= ImageFlagBits::Alias;
+ }
+ for (const ImageId aliased_id : join_bad_overlap_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ aliased.overlapping_images.push_back(new_image_id);
+ new_image.overlapping_images.push_back(aliased_id);
+ if (aliased.info.resources.levels == 1 && aliased.info.block.depth == 0 &&
+ aliased.overlapping_images.size() > 1) {
+ aliased.flags |= ImageFlagBits::BadOverlap;
+ }
+ if (new_image.info.resources.levels == 1 && new_image.info.block.depth == 0 &&
+ new_image.overlapping_images.size() > 1) {
+ new_image.flags |= ImageFlagBits::BadOverlap;
+ }
+ }
+
+ for (const auto& copy_object : join_copies_to_do) {
+ Image& overlap = slot_images[copy_object.id];
+ if (copy_object.is_alias) {
+ if (!overlap.IsSafeDownload()) {
+ continue;
+ }
+ const auto alias_pointer = join_alias_indices.find(copy_object.id);
+ if (alias_pointer == join_alias_indices.end()) {
+ continue;
+ }
+ const AliasedImage& aliased = new_image.aliased_images[alias_pointer->second];
+ CopyImage(new_image_id, aliased.id, aliased.copies);
+ new_image.modification_tick = overlap.modification_tick;
+ continue;
+ }
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
new_image.flags |= ImageFlagBits::GpuModified;
const auto& resolution = Settings::values.resolution_info;
@@ -1449,35 +1495,15 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
} else {
runtime.CopyImage(new_image, overlap, std::move(copies));
}
+ new_image.modification_tick = overlap.modification_tick;
}
if (True(overlap.flags & ImageFlagBits::Tracked)) {
- UntrackImage(overlap, overlap_id);
- }
- UnregisterImage(overlap_id);
- DeleteImage(overlap_id);
- }
- ImageBase& new_image_base = new_image;
- for (const ImageId aliased_id : right_aliased_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- AddImageAlias(new_image_base, aliased, new_image_id, aliased_id);
- new_image.flags |= ImageFlagBits::Alias;
- }
- for (const ImageId aliased_id : left_aliased_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- AddImageAlias(aliased, new_image_base, aliased_id, new_image_id);
- new_image.flags |= ImageFlagBits::Alias;
- }
- for (const ImageId aliased_id : bad_overlap_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- aliased.overlapping_images.push_back(new_image_id);
- new_image.overlapping_images.push_back(aliased_id);
- if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
- aliased.flags |= ImageFlagBits::BadOverlap;
- }
- if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
- new_image.flags |= ImageFlagBits::BadOverlap;
+ UntrackImage(overlap, copy_object.id);
}
+ UnregisterImage(copy_object.id);
+ DeleteImage(copy_object.id);
}
+
RegisterImage(new_image_id);
return new_image_id;
}
@@ -1507,7 +1533,7 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
if (!copy.must_accelerate) {
do {
if (!src_id && !dst_id) {
- break;
+ return std::nullopt;
}
if (src_id && True(slot_images[src_id].flags & ImageFlagBits::GpuModified)) {
break;
@@ -1885,10 +1911,6 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
}
total_used_memory += Common::AlignUp(tentative_size, 1024);
- if (total_used_memory > critical_memory && critical_gc < GC_EMERGENCY_COUNTS) {
- RunGarbageCollector();
- critical_gc++;
- }
image.lru_index = lru_cache.Insert(image_id, frame_tick);
ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
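Besides the alias bookkeeping, the JoinImages rework above replaces a set of per-call local containers with long-lived members (the join_* fields added in texture_cache_base.h below) that are cleared on entry, so repeated calls reuse their allocations instead of reallocating each time. A minimal sketch of that clear-and-reuse pattern, assuming the function is only ever entered from one thread (JoinScratch and its fields are invented names):

#include <cstdint>
#include <unordered_set>
#include <vector>

class JoinScratch {
public:
    // Hot path: reset contents but keep the capacity from previous calls.
    void Join(const std::vector<std::uint32_t>& candidates) {
        overlap_ids.clear();
        seen.clear();
        for (const std::uint32_t id : candidates) {
            if (seen.insert(id).second) {
                overlap_ids.push_back(id);
            }
        }
    }

private:
    // Formerly function-local containers, now reused across calls.
    std::vector<std::uint32_t> overlap_ids;
    std::unordered_set<std::uint32_t> seen;
};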
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 1a3308e2d..3bfa92154 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -10,7 +10,9 @@
#include <span>
#include <type_traits>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
+#include <boost/container/small_vector.hpp>
#include <queue>
#include "common/common_types.h"
@@ -427,7 +429,6 @@ private:
u64 minimum_memory;
u64 expected_memory;
u64 critical_memory;
- size_t critical_gc;
struct BufferDownload {
GPUVAddr address;
@@ -477,6 +478,20 @@ private:
Common::ThreadWorker texture_decode_worker{1, "TextureDecoder"};
std::vector<std::unique_ptr<AsyncDecodeContext>> async_decodes;
+
+ // Join caching
+ boost::container::small_vector<ImageId, 4> join_overlap_ids;
+ std::unordered_set<ImageId> join_overlaps_found;
+ boost::container::small_vector<ImageId, 4> join_left_aliased_ids;
+ boost::container::small_vector<ImageId, 4> join_right_aliased_ids;
+ std::unordered_set<ImageId> join_ignore_textures;
+ boost::container::small_vector<ImageId, 4> join_bad_overlap_ids;
+ struct JoinCopy {
+ bool is_alias;
+ ImageId id;
+ };
+ boost::container::small_vector<JoinCopy, 4> join_copies_to_do;
+ std::unordered_map<ImageId, size_t> join_alias_indices;
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1463f157b..95a5b47d8 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -123,7 +123,9 @@ template <u32 GOB_EXTENT>
return {
.width = AdjustMipBlockSize<GOB_SIZE_X>(num_tiles.width, block_size.width, level),
.height = AdjustMipBlockSize<GOB_SIZE_Y>(num_tiles.height, block_size.height, level),
- .depth = AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
+ .depth = level == 0
+ ? block_size.depth
+ : AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
};
}
@@ -165,6 +167,13 @@ template <u32 GOB_EXTENT>
}
[[nodiscard]] constexpr Extent3D TileShift(const LevelInfo& info, u32 level) {
+ if (level == 0) {
+ return Extent3D{
+ .width = info.block.width,
+ .height = info.block.height,
+ .depth = info.block.depth,
+ };
+ }
const Extent3D blocks = NumLevelBlocks(info, level);
return Extent3D{
.width = AdjustTileSize(info.block.width, GOB_SIZE_X, blocks.width),
@@ -1288,7 +1297,9 @@ u32 MapSizeBytes(const ImageBase& image) {
static_assert(CalculateLevelSize(LevelInfo{{1920, 1080, 1}, {0, 2, 0}, {1, 1}, 2, 0}, 0) ==
0x7f8000);
-static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x4000);
+static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x40000);
+
+static_assert(CalculateLevelSize(LevelInfo{{128, 8, 1}, {0, 4, 0}, {1, 1}, 4, 0}, 0) == 0x40000);
static_assert(CalculateLevelOffset(PixelFormat::R8_SINT, {1920, 1080, 1}, {0, 2, 0}, 0, 7) ==
0x2afc00);
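The updated static_assert values follow from no longer shrinking the block depth at level 0. A worked check under the usual Tegra layout assumptions (a GOB is 64 bytes by 8 rows, i.e. 512 bytes, and block dimensions are log2 GOB counts); the helper below is only illustrative, not yuzu's CalculateLevelSize:

#include <cstdint>

// A GOB is 64 bytes by 8 rows = 512 bytes; block dimensions are log2 GOB counts.
constexpr std::uint64_t GOB_BYTES = 512;

constexpr std::uint64_t LevelBytes(std::uint64_t gobs_x, std::uint64_t gobs_y,
                                   std::uint64_t gobs_z) {
    return gobs_x * gobs_y * gobs_z * GOB_BYTES;
}

// 32x32 texels at 16 bytes each: 512 bytes per row = 8 GOBs wide, 32 rows = 4 GOBs high.
// Old behaviour shrank the declared 2^4 = 16-GOB block depth to fit the 1-deep level:
static_assert(LevelBytes(8, 4, 1) == 0x4000);
// New behaviour keeps the declared block depth at level 0, padding depth to 16 GOBs:
static_assert(LevelBytes(8, 4, 16) == 0x40000);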
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 3a7c2dedf..aea677cb3 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -473,11 +473,12 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
}
if (extensions.push_descriptor && is_intel_anv) {
const u32 version = (properties.properties.driverVersion << 3) >> 3;
- if (version >= VK_MAKE_API_VERSION(0, 22, 3, 0)) {
+ if (version >= VK_MAKE_API_VERSION(0, 22, 3, 0) &&
+ version < VK_MAKE_API_VERSION(0, 23, 2, 0)) {
// Disable VK_KHR_push_descriptor due to
// mesa/mesa/-/commit/ff91c5ca42bc80aa411cb3fd8f550aa6fdd16bdc
LOG_WARNING(Render_Vulkan,
- "ANV drivers 22.3.0 and later have broken VK_KHR_push_descriptor");
+ "ANV drivers 22.3.0 to 23.1.0 have broken VK_KHR_push_descriptor");
extensions.push_descriptor = false;
loaded_extensions.erase(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
}
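The new upper bound turns the blanket "disable on 22.3.0 and later" rule into a version range. A self-contained sketch of that gate with the Vulkan version packing written out (MakeApiVersion and HasBrokenPushDescriptor are local stand-ins, not the real vk macros or yuzu code):

#include <cstdint>

// Local stand-in for VK_MAKE_API_VERSION: variant/major/minor/patch packed
// into 3/7/10/12 bits.
constexpr std::uint32_t MakeApiVersion(std::uint32_t variant, std::uint32_t major,
                                       std::uint32_t minor, std::uint32_t patch) {
    return (variant << 29) | (major << 22) | (minor << 12) | patch;
}

constexpr bool HasBrokenPushDescriptor(std::uint32_t driver_version) {
    // Clear the top 3 bits (the variant field) before comparing, matching the
    // (driverVersion << 3) >> 3 expression above.
    const std::uint32_t version = (driver_version << 3) >> 3;
    return version >= MakeApiVersion(0, 22, 3, 0) && version < MakeApiVersion(0, 23, 2, 0);
}

static_assert(HasBrokenPushDescriptor(MakeApiVersion(0, 23, 0, 0)));
static_assert(!HasBrokenPushDescriptor(MakeApiVersion(0, 23, 2, 0)));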
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index c21828b1d..465084fea 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -544,6 +544,7 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
QAction* remove_update = remove_menu->addAction(tr("Remove Installed Update"));
QAction* remove_dlc = remove_menu->addAction(tr("Remove All Installed DLC"));
QAction* remove_custom_config = remove_menu->addAction(tr("Remove Custom Configuration"));
+ QAction* remove_cache_storage = remove_menu->addAction(tr("Remove Cache Storage"));
QAction* remove_gl_shader_cache = remove_menu->addAction(tr("Remove OpenGL Pipeline Cache"));
QAction* remove_vk_shader_cache = remove_menu->addAction(tr("Remove Vulkan Pipeline Cache"));
remove_menu->addSeparator();
@@ -614,6 +615,9 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
connect(remove_custom_config, &QAction::triggered, [this, program_id, path]() {
emit RemoveFileRequested(program_id, GameListRemoveTarget::CustomConfiguration, path);
});
+ connect(remove_cache_storage, &QAction::triggered, [this, program_id, path] {
+ emit RemoveFileRequested(program_id, GameListRemoveTarget::CacheStorage, path);
+ });
connect(dump_romfs, &QAction::triggered, [this, program_id, path]() {
emit DumpRomFSRequested(program_id, path, DumpRomFSTarget::Normal);
});
diff --git a/src/yuzu/game_list.h b/src/yuzu/game_list.h
index 64e5af4c1..6c2f75e53 100644
--- a/src/yuzu/game_list.h
+++ b/src/yuzu/game_list.h
@@ -45,6 +45,7 @@ enum class GameListRemoveTarget {
VkShaderCache,
AllShaderCache,
CustomConfiguration,
+ CacheStorage,
};
enum class DumpRomFSTarget {
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 4489f43af..25cfef6d5 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -2323,6 +2323,8 @@ void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget targ
return tr("Delete All Transferable Shader Caches?");
case GameListRemoveTarget::CustomConfiguration:
return tr("Remove Custom Game Configuration?");
+ case GameListRemoveTarget::CacheStorage:
+ return tr("Remove Cache Storage?");
default:
return QString{};
}
@@ -2346,6 +2348,9 @@ void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget targ
case GameListRemoveTarget::CustomConfiguration:
RemoveCustomConfiguration(program_id, game_path);
break;
+ case GameListRemoveTarget::CacheStorage:
+ RemoveCacheStorage(program_id);
+ break;
}
}
@@ -2435,6 +2440,21 @@ void GMainWindow::RemoveCustomConfiguration(u64 program_id, const std::string& g
}
}
+void GMainWindow::RemoveCacheStorage(u64 program_id) {
+ const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
+ auto vfs_nand_dir =
+ vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
+
+ const auto cache_storage_path = FileSys::SaveDataFactory::GetFullPath(
+ *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+ FileSys::SaveDataType::CacheStorage, 0 /* program_id */, {}, 0);
+
+ const auto path = Common::FS::ConcatPathSafe(nand_dir, cache_storage_path);
+
+ // Not an error if it wasn't cleared.
+ Common::FS::RemoveDirRecursively(path);
+}
+
void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_path,
DumpRomFSTarget target) {
const auto failed = [this] {
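RemoveCacheStorage resolves the cache-storage path under the NAND directory and removes it without treating a missing directory as a failure. A rough equivalent using only the standard library, ignoring yuzu's Common::FS and VFS layers (RemoveCacheStorageDir is an invented name):

#include <filesystem>
#include <system_error>

// Invented helper; the real code goes through yuzu's Common::FS wrappers.
void RemoveCacheStorageDir(const std::filesystem::path& cache_storage_path) {
    std::error_code ec;
    // remove_all removes nothing and reports no error when the path does not
    // exist, matching the "not an error if it wasn't cleared" comment above.
    std::filesystem::remove_all(cache_storage_path, ec);
    // Any failure is deliberately ignored here; a real caller might log ec.
}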
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 17631a2d9..6bb70972f 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -370,6 +370,7 @@ private:
void RemoveVulkanDriverPipelineCache(u64 program_id);
void RemoveAllTransferableShaderCaches(u64 program_id);
void RemoveCustomConfiguration(u64 program_id, const std::string& game_path);
+ void RemoveCacheStorage(u64 program_id);
std::optional<u64> SelectRomFSDumpTarget(const FileSys::ContentProvider&, u64 program_id);
InstallResult InstallNSPXCI(const QString& filename);
InstallResult InstallNCA(const QString& filename);
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 5f39ece32..7b6d49c63 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -227,7 +227,7 @@ int main(int argc, char** argv) {
};
while (optind < argc) {
- int arg = getopt_long(argc, argv, "g:fhvp::c:", long_options, &option_index);
+ int arg = getopt_long(argc, argv, "g:fhvp::c:u:", long_options, &option_index);
if (arg != -1) {
switch (static_cast<char>(arg)) {
case 'c':
@@ -283,7 +283,7 @@ int main(int argc, char** argv) {
break;
case 'u':
selected_user = atoi(optarg);
- return 0;
+ break;
case 'v':
PrintVersion();
return 0;
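The optstring fix matters because getopt_long only consumes an argument for -u when the option letter is followed by a colon, and the case must break rather than return for parsing to continue. A trimmed-down, standalone sketch of that loop (only --user/-u and -v are handled here, unlike yuzu_cmd's full option table):

#include <cstdio>
#include <cstdlib>
#include <getopt.h>

int main(int argc, char** argv) {
    static const option long_options[] = {
        {"user", required_argument, nullptr, 'u'},
        {nullptr, 0, nullptr, 0},
    };
    int selected_user = 0;
    int arg;
    // "u:" declares that -u consumes a required argument (optarg).
    while ((arg = getopt_long(argc, argv, "u:v", long_options, nullptr)) != -1) {
        switch (arg) {
        case 'u':
            selected_user = std::atoi(optarg);
            break; // keep parsing the remaining options instead of returning
        case 'v':
            std::puts("version 0.0");
            return 0;
        }
    }
    std::printf("selected user: %d\n", selected_user);
    return 0;
}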