Diffstat (limited to 'src')
-rw-r--r--  src/common/fs/path_util.cpp                                 27
-rw-r--r--  src/core/hle/service/am/applets/applet_web_browser.cpp      10
-rw-r--r--  src/video_core/command_classes/codecs/h264.cpp               7
-rw-r--r--  src/video_core/command_classes/vic.cpp                     260
-rw-r--r--  src/video_core/command_classes/vic.h                        20
-rw-r--r--  src/yuzu/configuration/configure_input_player_widget.cpp     4
-rw-r--r--  src/yuzu/debugger/profiler.cpp                              12
-rw-r--r--  src/yuzu/game_list.cpp                                       3
8 files changed, 197 insertions, 146 deletions
diff --git a/src/common/fs/path_util.cpp b/src/common/fs/path_util.cpp
index 43b79bd6d..1bcb897b5 100644
--- a/src/common/fs/path_util.cpp
+++ b/src/common/fs/path_util.cpp
@@ -82,32 +82,35 @@ public:
private:
PathManagerImpl() {
+ fs::path yuzu_path;
+ fs::path yuzu_path_cache;
+ fs::path yuzu_path_config;
+
#ifdef _WIN32
- auto yuzu_path = GetExeDirectory() / PORTABLE_DIR;
+ yuzu_path = GetExeDirectory() / PORTABLE_DIR;
if (!IsDir(yuzu_path)) {
yuzu_path = GetAppDataRoamingDirectory() / YUZU_DIR;
}
- GenerateYuzuPath(YuzuPath::YuzuDir, yuzu_path);
- GenerateYuzuPath(YuzuPath::CacheDir, yuzu_path / CACHE_DIR);
- GenerateYuzuPath(YuzuPath::ConfigDir, yuzu_path / CONFIG_DIR);
+ yuzu_path_cache = yuzu_path / CACHE_DIR;
+ yuzu_path_config = yuzu_path / CONFIG_DIR;
#else
- auto yuzu_path = GetCurrentDir() / PORTABLE_DIR;
+ yuzu_path = GetCurrentDir() / PORTABLE_DIR;
if (Exists(yuzu_path) && IsDir(yuzu_path)) {
- GenerateYuzuPath(YuzuPath::YuzuDir, yuzu_path);
- GenerateYuzuPath(YuzuPath::CacheDir, yuzu_path / CACHE_DIR);
- GenerateYuzuPath(YuzuPath::ConfigDir, yuzu_path / CONFIG_DIR);
+ yuzu_path_cache = yuzu_path / CACHE_DIR;
+ yuzu_path_config = yuzu_path / CONFIG_DIR;
} else {
yuzu_path = GetDataDirectory("XDG_DATA_HOME") / YUZU_DIR;
-
- GenerateYuzuPath(YuzuPath::YuzuDir, yuzu_path);
- GenerateYuzuPath(YuzuPath::CacheDir, GetDataDirectory("XDG_CACHE_HOME") / YUZU_DIR);
- GenerateYuzuPath(YuzuPath::ConfigDir, GetDataDirectory("XDG_CONFIG_HOME") / YUZU_DIR);
+ yuzu_path_cache = GetDataDirectory("XDG_CACHE_HOME") / YUZU_DIR;
+ yuzu_path_config = GetDataDirectory("XDG_CONFIG_HOME") / YUZU_DIR;
}
#endif
+ GenerateYuzuPath(YuzuPath::YuzuDir, yuzu_path);
+ GenerateYuzuPath(YuzuPath::CacheDir, yuzu_path_cache);
+ GenerateYuzuPath(YuzuPath::ConfigDir, yuzu_path_config);
GenerateYuzuPath(YuzuPath::DumpDir, yuzu_path / DUMP_DIR);
GenerateYuzuPath(YuzuPath::KeysDir, yuzu_path / KEYS_DIR);
GenerateYuzuPath(YuzuPath::LoadDir, yuzu_path / LOAD_DIR);
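For context on the non-Windows branch above: GetDataDirectory("XDG_DATA_HOME") and its siblings follow the XDG Base Directory convention, falling back to directories under $HOME when the variables are unset. A minimal standalone sketch of that resolution order (the xdg_dir helper is hypothetical, not yuzu's implementation, and "yuzu" stands in for YUZU_DIR):

    #include <cstdlib>
    #include <filesystem>

    namespace fs = std::filesystem;

    // Resolve an XDG base directory, falling back to the spec's default
    // (relative to $HOME) when the variable is unset or empty.
    fs::path xdg_dir(const char* env_var, const char* home_relative_default) {
        if (const char* value = std::getenv(env_var); value != nullptr && *value != '\0') {
            return fs::path{value};
        }
        const char* home = std::getenv("HOME");
        return fs::path{home != nullptr ? home : "."} / home_relative_default;
    }

    int main() {
        // Mirrors the three lookups in PathManagerImpl's non-portable branch.
        const fs::path data   = xdg_dir("XDG_DATA_HOME", ".local/share") / "yuzu";
        const fs::path cache  = xdg_dir("XDG_CACHE_HOME", ".cache") / "yuzu";
        const fs::path config = xdg_dir("XDG_CONFIG_HOME", ".config") / "yuzu";
        // These would then feed GenerateYuzuPath(...) exactly as in the diff above.
    }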
diff --git a/src/core/hle/service/am/applets/applet_web_browser.cpp b/src/core/hle/service/am/applets/applet_web_browser.cpp
index 35f194961..927eeefff 100644
--- a/src/core/hle/service/am/applets/applet_web_browser.cpp
+++ b/src/core/hle/service/am/applets/applet_web_browser.cpp
@@ -24,6 +24,7 @@
#include "core/hle/service/am/applets/applet_web_browser.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/ns/pl_u.h"
+#include "core/loader/loader.h"
namespace Service::AM::Applets {
@@ -122,6 +123,15 @@ FileSys::VirtualFile GetOfflineRomFS(Core::System& system, u64 title_id,
const auto nca = system.GetContentProvider().GetEntry(title_id, nca_type);
if (nca == nullptr) {
+ if (nca_type == FileSys::ContentRecordType::HtmlDocument) {
+ LOG_WARNING(Service_AM, "Falling back to AppLoader to get the RomFS.");
+ FileSys::VirtualFile romfs;
+ system.GetAppLoader().ReadManualRomFS(romfs);
+ if (romfs != nullptr) {
+ return romfs;
+ }
+ }
+
LOG_ERROR(Service_AM,
"NCA of type={} with title_id={:016X} is not found in the ContentProvider!",
nca_type, title_id);
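The added branch gives offline HtmlDocument content a second chance: when the NCA is missing from the ContentProvider, the manual RomFS is read through the AppLoader instead. Factored out for illustration (the helper name is hypothetical; the calls are exactly the ones visible in the diff):

    // Hypothetical helper equivalent to the fallback added above.
    FileSys::VirtualFile GetHtmlFallbackRomFS(Core::System& system) {
        LOG_WARNING(Service_AM, "Falling back to AppLoader to get the RomFS.");
        FileSys::VirtualFile romfs;
        system.GetAppLoader().ReadManualRomFS(romfs);
        return romfs;  // may still be null; the caller then logs the original lookup error
    }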
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/command_classes/codecs/h264.cpp
index 51ee14c13..5519c4705 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/command_classes/codecs/h264.cpp
@@ -20,6 +20,8 @@
#include <array>
#include <bit>
+
+#include "common/settings.h"
#include "video_core/command_classes/codecs/h264.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
@@ -96,7 +98,10 @@ const std::vector<u8>& H264::ComposeFrameHeader(const NvdecCommon::NvdecRegister
(context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
// TODO (ameerj): Where do we get this number, it seems to be particular for each stream
- writer.WriteUe(6); // Max number of reference frames
+ const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
+ const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
+ const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
+ writer.WriteUe(max_num_ref_frames);
writer.WriteBit(false);
writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
writer.WriteUe(pic_height - 1);
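WriteUe above emits an unsigned Exp-Golomb code, the variable-length integer coding used throughout H.264 sequence parameter sets, so max_num_ref_frames costs 5 bits when GPU decoding writes 6 and 9 bits when CPU decoding writes 16. A minimal sketch of ue(v) coding, emitting bits into a string instead of yuzu's H264BitWriter:

    #include <bit>
    #include <cstdint>
    #include <string>

    // Unsigned Exp-Golomb, H.264 ue(v): k leading zero bits followed by the
    // (k + 1)-bit binary representation of value + 1.
    std::string WriteUeBits(std::uint32_t value) {
        const std::uint32_t code = value + 1;
        const int width = std::bit_width(code);                      // significant bits of code
        std::string out(static_cast<std::size_t>(width - 1), '0');   // leading zeros
        for (int i = width - 1; i >= 0; --i) {
            out += ((code >> i) & 1u) != 0 ? '1' : '0';
        }
        return out;
    }

    // WriteUeBits(6)  == "00111"      (GPU-decoding value above, 5 bits)
    // WriteUeBits(16) == "000010001"  (CPU-decoding value above, 9 bits)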
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/command_classes/vic.cpp
index 0ee07f398..dc768b952 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/command_classes/vic.cpp
@@ -16,6 +16,7 @@ extern "C" {
}
#include "common/assert.h"
+#include "common/bit_field.h"
#include "common/logging/log.h"
#include "video_core/command_classes/nvdec.h"
@@ -26,6 +27,25 @@ extern "C" {
#include "video_core/textures/decoders.h"
namespace Tegra {
+namespace {
+enum class VideoPixelFormat : u64_le {
+ RGBA8 = 0x1f,
+ BGRA8 = 0x20,
+ RGBX8 = 0x23,
+ Yuv420 = 0x44,
+};
+} // Anonymous namespace
+
+union VicConfig {
+ u64_le raw{};
+ BitField<0, 7, VideoPixelFormat> pixel_format;
+ BitField<7, 2, u64_le> chroma_loc_horiz;
+ BitField<9, 2, u64_le> chroma_loc_vert;
+ BitField<11, 4, u64_le> block_linear_kind;
+ BitField<15, 4, u64_le> block_linear_height_log2;
+ BitField<32, 14, u64_le> surface_width_minus1;
+ BitField<46, 14, u64_le> surface_height_minus1;
+};
Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
: gpu(gpu_),
@@ -65,134 +85,156 @@ void Vic::Execute() {
if (!frame) {
return;
}
- const auto pixel_format = static_cast<VideoPixelFormat>(config.pixel_format.Value());
- switch (pixel_format) {
+ const u64 surface_width = config.surface_width_minus1 + 1;
+ const u64 surface_height = config.surface_height_minus1 + 1;
+ if (static_cast<u64>(frame->width) != surface_width ||
+ static_cast<u64>(frame->height) > surface_height) {
+ // TODO: Properly support multiple video streams with differing frame dimensions
+ LOG_WARNING(Debug,
+ "Frame dimensions {}x{} can't be safely decoded into surface dimensions {}x{}",
+ frame->width, frame->height, surface_width, surface_height);
+ return;
+ }
+ switch (config.pixel_format) {
+ case VideoPixelFormat::RGBA8:
case VideoPixelFormat::BGRA8:
- case VideoPixelFormat::RGBA8: {
- LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
+ case VideoPixelFormat::RGBX8:
+ WriteRGBFrame(frame, config);
+ break;
+ case VideoPixelFormat::Yuv420:
+ WriteYUVFrame(frame, config);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
+ break;
+ }
+}
- if (scaler_ctx == nullptr || frame->width != scaler_width ||
- frame->height != scaler_height) {
- const AVPixelFormat target_format =
- (pixel_format == VideoPixelFormat::RGBA8) ? AV_PIX_FMT_RGBA : AV_PIX_FMT_BGRA;
+void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
+ LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
+
+ if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
+ const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
+ switch (pixel_format) {
+ case VideoPixelFormat::RGBA8:
+ return AV_PIX_FMT_RGBA;
+ case VideoPixelFormat::BGRA8:
+ return AV_PIX_FMT_BGRA;
+ case VideoPixelFormat::RGBX8:
+ return AV_PIX_FMT_RGB0;
+ default:
+ return AV_PIX_FMT_RGBA;
+ }
+ }();
+
+ sws_freeContext(scaler_ctx);
+ // Frames are decoded into either YUV420 or NV12 formats. Convert to desired RGB format
+ scaler_ctx = sws_getContext(frame->width, frame->height,
+ static_cast<AVPixelFormat>(frame->format), frame->width,
+ frame->height, target_format, 0, nullptr, nullptr, nullptr);
+ scaler_width = frame->width;
+ scaler_height = frame->height;
+ converted_frame_buffer.reset();
+ }
+ // Get Converted frame
+ const u32 width = static_cast<u32>(frame->width);
+ const u32 height = static_cast<u32>(frame->height);
+ const std::size_t linear_size = width * height * 4;
+
+ // Only allocate frame_buffer once per stream, as the size is not expected to change
+ if (!converted_frame_buffer) {
+ converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(linear_size)), av_free};
+ }
+ const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
+ u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
+
+ sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
+ converted_stride.data());
+
+ const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
+ if (blk_kind != 0) {
+ // swizzle pitch linear to block linear
+ const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
+ const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
+ luma_buffer.resize(size);
+ Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
+ converted_frame_buffer.get(), block_height, 0, 0);
+
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+ } else {
+ // send pitch linear frame
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+ linear_size);
+ }
+}
- sws_freeContext(scaler_ctx);
- scaler_ctx = nullptr;
+void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
+ LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
- // Frames are decoded into either YUV420 or NV12 formats. Convert to desired format
- scaler_ctx = sws_getContext(frame->width, frame->height,
- static_cast<AVPixelFormat>(frame->format), frame->width,
- frame->height, target_format, 0, nullptr, nullptr, nullptr);
+ const std::size_t surface_width = config.surface_width_minus1 + 1;
+ const std::size_t surface_height = config.surface_height_minus1 + 1;
+ const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
+ const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
+ const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
- scaler_width = frame->width;
- scaler_height = frame->height;
- }
- // Get Converted frame
- const u32 width = static_cast<u32>(frame->width);
- const u32 height = static_cast<u32>(frame->height);
- const std::size_t linear_size = width * height * 4;
-
- // Only allocate frame_buffer once per stream, as the size is not expected to change
- if (!converted_frame_buffer) {
- converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(linear_size)), av_free};
+ const auto stride = static_cast<size_t>(frame->linesize[0]);
+
+ luma_buffer.resize(aligned_width * surface_height);
+ chroma_buffer.resize(aligned_width * surface_height / 2);
+
+ // Populate luma buffer
+ const u8* luma_src = frame->data[0];
+ for (std::size_t y = 0; y < frame_height; ++y) {
+ const std::size_t src = y * stride;
+ const std::size_t dst = y * aligned_width;
+ for (std::size_t x = 0; x < frame_width; ++x) {
+ luma_buffer[dst + x] = luma_src[src + x];
}
- const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
- u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
-
- sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height,
- &converted_frame_buf_addr, converted_stride.data());
-
- const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
- if (blk_kind != 0) {
- // swizzle pitch linear to block linear
- const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
- const auto size =
- Tegra::Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
- luma_buffer.resize(size);
- Tegra::Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
- converted_frame_buffer.get(), block_height, 0, 0);
-
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
- } else {
- // send pitch linear frame
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
- linear_size);
+ }
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+ luma_buffer.size());
+
+ // Chroma
+ const std::size_t half_height = frame_height / 2;
+ const auto half_stride = static_cast<size_t>(frame->linesize[1]);
+
+ switch (frame->format) {
+ case AV_PIX_FMT_YUV420P: {
+ // Frame from FFmpeg software
+ // Populate chroma buffer from both channels with interleaving.
+ const std::size_t half_width = frame_width / 2;
+ const u8* chroma_b_src = frame->data[1];
+ const u8* chroma_r_src = frame->data[2];
+ for (std::size_t y = 0; y < half_height; ++y) {
+ const std::size_t src = y * half_stride;
+ const std::size_t dst = y * aligned_width;
+
+ for (std::size_t x = 0; x < half_width; ++x) {
+ chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
+ chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
+ }
}
break;
}
- case VideoPixelFormat::Yuv420: {
- LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
-
- const std::size_t surface_width = config.surface_width_minus1 + 1;
- const std::size_t surface_height = config.surface_height_minus1 + 1;
- const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
- const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
- const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
-
- const auto stride = static_cast<size_t>(frame->linesize[0]);
-
- luma_buffer.resize(aligned_width * surface_height);
- chroma_buffer.resize(aligned_width * surface_height / 2);
-
- // Populate luma buffer
- const u8* luma_src = frame->data[0];
- for (std::size_t y = 0; y < frame_height; ++y) {
+ case AV_PIX_FMT_NV12: {
+ // Frame from VA-API hardware
+ // This is already interleaved so just copy
+ const u8* chroma_src = frame->data[1];
+ for (std::size_t y = 0; y < half_height; ++y) {
const std::size_t src = y * stride;
const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < frame_width; ++x) {
- luma_buffer[dst + x] = luma_src[src + x];
- }
- }
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
- luma_buffer.size());
-
- // Chroma
- const std::size_t half_height = frame_height / 2;
- const auto half_stride = static_cast<size_t>(frame->linesize[1]);
-
- switch (frame->format) {
- case AV_PIX_FMT_YUV420P: {
- // Frame from FFmpeg software
- // Populate chroma buffer from both channels with interleaving.
- const std::size_t half_width = frame_width / 2;
- const u8* chroma_b_src = frame->data[1];
- const u8* chroma_r_src = frame->data[2];
- for (std::size_t y = 0; y < half_height; ++y) {
- const std::size_t src = y * half_stride;
- const std::size_t dst = y * aligned_width;
-
- for (std::size_t x = 0; x < half_width; ++x) {
- chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
- chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
- }
+ chroma_buffer[dst + x] = chroma_src[src + x];
}
- break;
- }
- case AV_PIX_FMT_NV12: {
- // Frame from VA-API hardware
- // This is already interleaved so just copy
- const u8* chroma_src = frame->data[1];
- for (std::size_t y = 0; y < half_height; ++y) {
- const std::size_t src = y * stride;
- const std::size_t dst = y * aligned_width;
- for (std::size_t x = 0; x < frame_width; ++x) {
- chroma_buffer[dst + x] = chroma_src[src + x];
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
}
- gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
- chroma_buffer.size());
break;
}
default:
- UNIMPLEMENTED_MSG("Unknown video pixel format {}", config.pixel_format.Value());
+ UNREACHABLE();
break;
}
+ gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+ chroma_buffer.size());
}
} // namespace Tegra
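The rewritten YUV path normalizes both decoder outputs to the semi-planar layout the VIC consumes: planar YUV420P frames from the software decoder get their Cb and Cr planes interleaved, while NV12 frames from VA-API are copied as-is. A standalone sketch of the interleaving step, assuming tightly packed planes and even dimensions (the real WriteYUVFrame also honors FFmpeg's linesize and the 256-byte-aligned surface pitch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Interleave separate Cb and Cr planes (YUV420P) into one CbCrCbCr... plane (NV12).
    std::vector<std::uint8_t> InterleaveChroma(const std::uint8_t* cb_plane,
                                               const std::uint8_t* cr_plane,
                                               std::size_t width, std::size_t height) {
        const std::size_t half_width = width / 2;
        const std::size_t half_height = height / 2;
        std::vector<std::uint8_t> uv(half_width * half_height * 2);
        for (std::size_t y = 0; y < half_height; ++y) {
            for (std::size_t x = 0; x < half_width; ++x) {
                uv[y * width + x * 2] = cb_plane[y * half_width + x];      // Cb
                uv[y * width + x * 2 + 1] = cr_plane[y * half_width + x];  // Cr
            }
        }
        return uv;
    }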
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/command_classes/vic.h
index 74246e08c..6d4cdfd57 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/command_classes/vic.h
@@ -6,7 +6,6 @@
#include <memory>
#include <vector>
-#include "common/bit_field.h"
#include "common/common_types.h"
struct SwsContext;
@@ -14,6 +13,7 @@ struct SwsContext;
namespace Tegra {
class GPU;
class Nvdec;
+union VicConfig;
class Vic {
public:
@@ -27,6 +27,7 @@ public:
};
explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
+
~Vic();
/// Write to the device state.
@@ -35,22 +36,9 @@ public:
private:
void Execute();
- enum class VideoPixelFormat : u64_le {
- RGBA8 = 0x1f,
- BGRA8 = 0x20,
- Yuv420 = 0x44,
- };
+ void WriteRGBFrame(const AVFrame* frame, const VicConfig& config);
- union VicConfig {
- u64_le raw{};
- BitField<0, 7, u64_le> pixel_format;
- BitField<7, 2, u64_le> chroma_loc_horiz;
- BitField<9, 2, u64_le> chroma_loc_vert;
- BitField<11, 4, u64_le> block_linear_kind;
- BitField<15, 4, u64_le> block_linear_height_log2;
- BitField<32, 14, u64_le> surface_width_minus1;
- BitField<46, 14, u64_le> surface_height_minus1;
- };
+ void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
GPU& gpu;
std::shared_ptr<Tegra::Nvdec> nvdec_processor;
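With the definition moved into vic.cpp, the header only forward-declares VicConfig and no longer needs common/bit_field.h. For reference, the union packs a single 64-bit config word; an equivalent shift-and-mask decode is sketched below (field names mirror the BitField members, the struct itself is illustrative):

    #include <cstdint>

    struct DecodedVicConfig {
        std::uint64_t pixel_format;              // bits  0..6
        std::uint64_t chroma_loc_horiz;          // bits  7..8
        std::uint64_t chroma_loc_vert;           // bits  9..10
        std::uint64_t block_linear_kind;         // bits 11..14
        std::uint64_t block_linear_height_log2;  // bits 15..18
        std::uint64_t surface_width_minus1;      // bits 32..45
        std::uint64_t surface_height_minus1;     // bits 46..59
    };

    DecodedVicConfig DecodeVicConfig(std::uint64_t raw) {
        const auto field = [raw](unsigned pos, unsigned bits) {
            return (raw >> pos) & ((std::uint64_t{1} << bits) - 1);
        };
        return DecodedVicConfig{
            .pixel_format = field(0, 7),
            .chroma_loc_horiz = field(7, 2),
            .chroma_loc_vert = field(9, 2),
            .block_linear_kind = field(11, 4),
            .block_linear_height_log2 = field(15, 4),
            .surface_width_minus1 = field(32, 14),
            .surface_height_minus1 = field(46, 14),
        };
    }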
diff --git a/src/yuzu/configuration/configure_input_player_widget.cpp b/src/yuzu/configuration/configure_input_player_widget.cpp
index da328d904..f31f86339 100644
--- a/src/yuzu/configuration/configure_input_player_widget.cpp
+++ b/src/yuzu/configuration/configure_input_player_widget.cpp
@@ -1837,7 +1837,7 @@ void PlayerControlPreview::DrawLeftBody(QPainter& p, const QPointF center) {
const float led_size = 5.0f;
const QPointF led_position = sideview_center + QPointF(0, -36);
int led_count = 0;
- for (const auto color : led_color) {
+ for (const auto& color : led_color) {
p.setBrush(color);
DrawRectangle(p, led_position + QPointF(0, 12 * led_count++), led_size, led_size);
}
@@ -1933,7 +1933,7 @@ void PlayerControlPreview::DrawRightBody(QPainter& p, const QPointF center) {
const float led_size = 5.0f;
const QPointF led_position = sideview_center + QPointF(0, -36);
int led_count = 0;
- for (const auto color : led_color) {
+ for (const auto& color : led_color) {
p.setBrush(color);
DrawRectangle(p, led_position + QPointF(0, 12 * led_count++), led_size, led_size);
}
diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp
index 7a6f84d96..33110685a 100644
--- a/src/yuzu/debugger/profiler.cpp
+++ b/src/yuzu/debugger/profiler.cpp
@@ -143,24 +143,28 @@ void MicroProfileWidget::hideEvent(QHideEvent* ev) {
}
void MicroProfileWidget::mouseMoveEvent(QMouseEvent* ev) {
- MicroProfileMousePosition(ev->pos().x() / x_scale, ev->pos().y() / y_scale, 0);
+ const auto mouse_position = ev->pos();
+ MicroProfileMousePosition(mouse_position.x() / x_scale, mouse_position.y() / y_scale, 0);
ev->accept();
}
void MicroProfileWidget::mousePressEvent(QMouseEvent* ev) {
- MicroProfileMousePosition(ev->pos().x() / x_scale, ev->pos().y() / y_scale, 0);
+ const auto mouse_position = ev->pos();
+ MicroProfileMousePosition(mouse_position.x() / x_scale, mouse_position.y() / y_scale, 0);
MicroProfileMouseButton(ev->buttons() & Qt::LeftButton, ev->buttons() & Qt::RightButton);
ev->accept();
}
void MicroProfileWidget::mouseReleaseEvent(QMouseEvent* ev) {
- MicroProfileMousePosition(ev->pos().x() / x_scale, ev->pos().y() / y_scale, 0);
+ const auto mouse_position = ev->pos();
+ MicroProfileMousePosition(mouse_position.x() / x_scale, mouse_position.y() / y_scale, 0);
MicroProfileMouseButton(ev->buttons() & Qt::LeftButton, ev->buttons() & Qt::RightButton);
ev->accept();
}
void MicroProfileWidget::wheelEvent(QWheelEvent* ev) {
- MicroProfileMousePosition(ev->pos().x() / x_scale, ev->pos().y() / y_scale,
+ const auto wheel_position = ev->position().toPoint();
+ MicroProfileMousePosition(wheel_position.x() / x_scale, wheel_position.y() / y_scale,
ev->angleDelta().y() / 120);
ev->accept();
}
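In the wheel handler, QWheelEvent::pos() is replaced with position(), the non-deprecated accessor in recent Qt 5 releases; it returns floating-point widget coordinates, hence the toPoint() conversion before handing integers to MicroProfile. The shape of such a handler in isolation (ScrollArea is a stand-in class, not yuzu's MicroProfileWidget, and scale handling is omitted):

    #include <QWheelEvent>
    #include <QWidget>

    class ScrollArea : public QWidget {
    protected:
        void wheelEvent(QWheelEvent* ev) override {
            // position() gives QPointF widget coordinates; toPoint() rounds to ints.
            const QPoint wheel_position = ev->position().toPoint();
            // angleDelta().y() is in 1/8 degree; a standard notch is 15 degrees = 120 units.
            const int notches = ev->angleDelta().y() / 120;
            static_cast<void>(wheel_position);
            static_cast<void>(notches);
            ev->accept();
        }
    };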
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index f9d949e75..ba54423ff 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -159,8 +159,7 @@ GameListSearchField::GameListSearchField(GameList* parent) : QWidget{parent} {
* @return true if the haystack contains all words of userinput
*/
static bool ContainsAllWords(const QString& haystack, const QString& userinput) {
- const QStringList userinput_split =
- userinput.split(QLatin1Char{' '}, QString::SplitBehavior::SkipEmptyParts);
+ const QStringList userinput_split = userinput.split(QLatin1Char{' '}, Qt::SkipEmptyParts);
return std::all_of(userinput_split.begin(), userinput_split.end(),
[&haystack](const QString& s) { return haystack.contains(s); });
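Qt::SkipEmptyParts (available since Qt 5.14) replaces the QString::SplitBehavior enumerator, which newer Qt 5 releases deprecate; the matching behavior is unchanged. A compact usage sketch of the same logic:

    #include <algorithm>
    #include <QString>
    #include <QStringList>

    // Every whitespace-separated word of the search text must appear in the haystack.
    bool ContainsAllWords(const QString& haystack, const QString& userinput) {
        const QStringList words = userinput.split(QLatin1Char{' '}, Qt::SkipEmptyParts);
        return std::all_of(words.begin(), words.end(),
                           [&haystack](const QString& word) { return haystack.contains(word); });
    }

    // ContainsAllWords(QStringLiteral("super mario odyssey"),
    //                  QStringLiteral("mario  odyssey")) == true
    // (the double space yields an empty part, which Qt::SkipEmptyParts drops).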