Diffstat:
 src/video_core/command_classes/codecs/codec.cpp | 224
 src/video_core/command_classes/codecs/codec.h   |  10
 src/video_core/command_classes/codecs/h264.cpp  |   8
 src/video_core/command_classes/vic.cpp          | 259
 src/video_core/command_classes/vic.h            |  20
 5 files changed, 301 insertions(+), 220 deletions(-)
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/command_classes/codecs/codec.cpp
index f798a0053..61966cbfe 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/command_classes/codecs/codec.cpp
@@ -5,6 +5,7 @@
#include <fstream>
#include <vector>
#include "common/assert.h"
+#include "common/settings.h"
#include "video_core/command_classes/codecs/codec.h"
#include "video_core/command_classes/codecs/h264.h"
#include "video_core/command_classes/codecs/vp9.h"
@@ -16,108 +17,146 @@ extern "C" {
}
namespace Tegra {
-#if defined(LIBVA_FOUND)
-// Hardware acceleration code from FFmpeg/doc/examples/hw_decode.c originally under MIT license
namespace {
-constexpr std::array<const char*, 2> VAAPI_DRIVERS = {
- "i915",
- "amdgpu",
-};
+constexpr AVPixelFormat PREFERRED_GPU_FMT = AV_PIX_FMT_NV12;
+constexpr AVPixelFormat PREFERRED_CPU_FMT = AV_PIX_FMT_YUV420P;
+
+void AVPacketDeleter(AVPacket* ptr) {
+ av_packet_free(&ptr);
+}
-AVPixelFormat GetHwFormat(AVCodecContext*, const AVPixelFormat* pix_fmts) {
+using AVPacketPtr = std::unique_ptr<AVPacket, decltype(&AVPacketDeleter)>;
+
+AVPixelFormat GetGpuFormat(AVCodecContext* av_codec_ctx, const AVPixelFormat* pix_fmts) {
for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
- if (*p == AV_PIX_FMT_VAAPI) {
- return AV_PIX_FMT_VAAPI;
+ if (*p == av_codec_ctx->pix_fmt) {
+ return av_codec_ctx->pix_fmt;
}
}
LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU");
- return *pix_fmts;
+ av_buffer_unref(&av_codec_ctx->hw_device_ctx);
+ av_codec_ctx->pix_fmt = PREFERRED_CPU_FMT;
+ return PREFERRED_CPU_FMT;
+}
+} // namespace
+
+void AVFrameDeleter(AVFrame* ptr) {
+ av_frame_free(&ptr);
}
-bool CreateVaapiHwdevice(AVBufferRef** av_hw_device) {
+Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
+ : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
+ vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
+
+Codec::~Codec() {
+ if (!initialized) {
+ return;
+ }
+ // Free libav memory
+ avcodec_free_context(&av_codec_ctx);
+ av_buffer_unref(&av_gpu_decoder);
+}
+
+bool Codec::CreateGpuAvDevice() {
+#if defined(LIBVA_FOUND)
+ static constexpr std::array<const char*, 3> VAAPI_DRIVERS = {
+ "i915",
+ "iHD",
+ "amdgpu",
+ };
AVDictionary* hwdevice_options = nullptr;
av_dict_set(&hwdevice_options, "connection_type", "drm", 0);
for (const auto& driver : VAAPI_DRIVERS) {
av_dict_set(&hwdevice_options, "kernel_driver", driver, 0);
- const int hwdevice_error = av_hwdevice_ctx_create(av_hw_device, AV_HWDEVICE_TYPE_VAAPI,
+ const int hwdevice_error = av_hwdevice_ctx_create(&av_gpu_decoder, AV_HWDEVICE_TYPE_VAAPI,
nullptr, hwdevice_options, 0);
if (hwdevice_error >= 0) {
LOG_INFO(Service_NVDRV, "Using VA-API with {}", driver);
av_dict_free(&hwdevice_options);
+ av_codec_ctx->pix_fmt = AV_PIX_FMT_VAAPI;
return true;
}
LOG_DEBUG(Service_NVDRV, "VA-API av_hwdevice_ctx_create failed {}", hwdevice_error);
}
LOG_DEBUG(Service_NVDRV, "VA-API av_hwdevice_ctx_create failed for all drivers");
av_dict_free(&hwdevice_options);
- return false;
-}
-} // namespace
#endif
-
-void AVFrameDeleter(AVFrame* ptr) {
- av_frame_free(&ptr);
+ static constexpr auto HW_CONFIG_METHOD = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX;
+ static constexpr std::array GPU_DECODER_TYPES{
+ AV_HWDEVICE_TYPE_CUDA,
+#ifdef _WIN32
+ AV_HWDEVICE_TYPE_D3D11VA,
+#else
+ AV_HWDEVICE_TYPE_VDPAU,
+#endif
+ };
+ for (const auto& type : GPU_DECODER_TYPES) {
+ const int hwdevice_res = av_hwdevice_ctx_create(&av_gpu_decoder, type, nullptr, nullptr, 0);
+ if (hwdevice_res < 0) {
+ LOG_DEBUG(Service_NVDRV, "{} av_hwdevice_ctx_create failed {}",
+ av_hwdevice_get_type_name(type), hwdevice_res);
+ continue;
+ }
+ for (int i = 0;; i++) {
+ const AVCodecHWConfig* config = avcodec_get_hw_config(av_codec, i);
+ if (!config) {
+ LOG_DEBUG(Service_NVDRV, "{} decoder does not support device type {}.",
+ av_codec->name, av_hwdevice_get_type_name(type));
+ break;
+ }
+ if (config->methods & HW_CONFIG_METHOD && config->device_type == type) {
+ av_codec_ctx->pix_fmt = config->pix_fmt;
+ LOG_INFO(Service_NVDRV, "Using {} GPU decoder", av_hwdevice_get_type_name(type));
+ return true;
+ }
+ }
+ }
+ return false;
}
-Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
- : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
- vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
-
-Codec::~Codec() {
- if (!initialized) {
- return;
- }
- // Free libav memory
- avcodec_send_packet(av_codec_ctx, nullptr);
- AVFrame* av_frame = av_frame_alloc();
- avcodec_receive_frame(av_codec_ctx, av_frame);
- avcodec_flush_buffers(av_codec_ctx);
- av_frame_free(&av_frame);
- avcodec_close(av_codec_ctx);
- av_buffer_unref(&av_hw_device);
+void Codec::InitializeAvCodecContext() {
+ av_codec_ctx = avcodec_alloc_context3(av_codec);
+ av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
}
-void Codec::InitializeHwdec() {
- // Prioritize integrated GPU to mitigate bandwidth bottlenecks
-#if defined(LIBVA_FOUND)
- if (CreateVaapiHwdevice(&av_hw_device)) {
- const auto hw_device_ctx = av_buffer_ref(av_hw_device);
- ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
- av_codec_ctx->hw_device_ctx = hw_device_ctx;
- av_codec_ctx->get_format = GetHwFormat;
+void Codec::InitializeGpuDecoder() {
+ if (!CreateGpuAvDevice()) {
+ av_buffer_unref(&av_gpu_decoder);
return;
}
-#endif
- // TODO more GPU accelerated decoders
+ auto* hw_device_ctx = av_buffer_ref(av_gpu_decoder);
+ ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
+ av_codec_ctx->hw_device_ctx = hw_device_ctx;
+ av_codec_ctx->get_format = GetGpuFormat;
}
void Codec::Initialize() {
- AVCodecID codec;
- switch (current_codec) {
- case NvdecCommon::VideoCodec::H264:
- codec = AV_CODEC_ID_H264;
- break;
- case NvdecCommon::VideoCodec::Vp9:
- codec = AV_CODEC_ID_VP9;
- break;
- default:
- UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
+ const AVCodecID codec = [&] {
+ switch (current_codec) {
+ case NvdecCommon::VideoCodec::H264:
+ return AV_CODEC_ID_H264;
+ case NvdecCommon::VideoCodec::Vp9:
+ return AV_CODEC_ID_VP9;
+ default:
+ UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
+ return AV_CODEC_ID_NONE;
+ }
+ }();
+ av_codec = avcodec_find_decoder(codec);
+
+ InitializeAvCodecContext();
+ if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::GPU) {
+ InitializeGpuDecoder();
+ }
+ if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) {
+ LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed with result {}", res);
+ avcodec_free_context(&av_codec_ctx);
+ av_buffer_unref(&av_gpu_decoder);
return;
}
- av_codec = avcodec_find_decoder(codec);
- av_codec_ctx = avcodec_alloc_context3(av_codec);
- av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
- InitializeHwdec();
if (!av_codec_ctx->hw_device_ctx) {
LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding");
}
- const auto av_error = avcodec_open2(av_codec_ctx, av_codec, nullptr);
- if (av_error < 0) {
- LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed.");
- avcodec_close(av_codec_ctx);
- av_buffer_unref(&av_hw_device);
- return;
- }
initialized = true;
}
@@ -133,6 +172,9 @@ void Codec::Decode() {
if (is_first_frame) {
Initialize();
}
+ if (!initialized) {
+ return;
+ }
bool vp9_hidden_frame = false;
std::vector<u8> frame_data;
if (current_codec == NvdecCommon::VideoCodec::H264) {
@@ -141,50 +183,48 @@ void Codec::Decode() {
frame_data = vp9_decoder->ComposeFrameHeader(state);
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
}
- AVPacket packet{};
- av_init_packet(&packet);
- packet.data = frame_data.data();
- packet.size = static_cast<s32>(frame_data.size());
- if (const int ret = avcodec_send_packet(av_codec_ctx, &packet); ret) {
- LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", ret);
+ AVPacketPtr packet{av_packet_alloc(), AVPacketDeleter};
+ if (!packet) {
+ LOG_ERROR(Service_NVDRV, "av_packet_alloc failed");
+ return;
+ }
+ packet->data = frame_data.data();
+ packet->size = static_cast<s32>(frame_data.size());
+ if (const int res = avcodec_send_packet(av_codec_ctx, packet.get()); res != 0) {
+ LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", res);
return;
}
// Only receive/store visible frames
if (vp9_hidden_frame) {
return;
}
- AVFrame* hw_frame = av_frame_alloc();
- AVFrame* sw_frame = hw_frame;
- ASSERT_MSG(hw_frame, "av_frame_alloc hw_frame failed");
- if (const int ret = avcodec_receive_frame(av_codec_ctx, hw_frame); ret) {
+ AVFramePtr initial_frame{av_frame_alloc(), AVFrameDeleter};
+ AVFramePtr final_frame{nullptr, AVFrameDeleter};
+ ASSERT_MSG(initial_frame, "av_frame_alloc initial_frame failed");
+ if (const int ret = avcodec_receive_frame(av_codec_ctx, initial_frame.get()); ret) {
LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret);
- av_frame_free(&hw_frame);
return;
}
- if (!hw_frame->width || !hw_frame->height) {
+ if (initial_frame->width == 0 || initial_frame->height == 0) {
LOG_WARNING(Service_NVDRV, "Zero width or height in frame");
- av_frame_free(&hw_frame);
return;
}
-#if defined(LIBVA_FOUND)
- // Hardware acceleration code from FFmpeg/doc/examples/hw_decode.c under MIT license
- if (hw_frame->format == AV_PIX_FMT_VAAPI) {
- sw_frame = av_frame_alloc();
- ASSERT_MSG(sw_frame, "av_frame_alloc sw_frame failed");
+ if (av_codec_ctx->hw_device_ctx) {
+ final_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
+ ASSERT_MSG(final_frame, "av_frame_alloc final_frame failed");
// Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp
// because Intel drivers crash unless using AV_PIX_FMT_NV12
- sw_frame->format = AV_PIX_FMT_NV12;
- const int transfer_data_ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);
- ASSERT_MSG(!transfer_data_ret, "av_hwframe_transfer_data error {}", transfer_data_ret);
- av_frame_free(&hw_frame);
+ final_frame->format = PREFERRED_GPU_FMT;
+ const int ret = av_hwframe_transfer_data(final_frame.get(), initial_frame.get(), 0);
+ ASSERT_MSG(!ret, "av_hwframe_transfer_data error {}", ret);
+ } else {
+ final_frame = std::move(initial_frame);
}
-#endif
- if (sw_frame->format != AV_PIX_FMT_YUV420P && sw_frame->format != AV_PIX_FMT_NV12) {
- UNIMPLEMENTED_MSG("Unexpected video format from host graphics: {}", sw_frame->format);
- av_frame_free(&sw_frame);
+ if (final_frame->format != PREFERRED_CPU_FMT && final_frame->format != PREFERRED_GPU_FMT) {
+ UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format);
return;
}
- av_frames.push(AVFramePtr{sw_frame, AVFrameDeleter});
+ av_frames.push(std::move(final_frame));
if (av_frames.size() > 10) {
LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
av_frames.pop();
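The manual av_frame_free calls on every early-return path disappear because this hunk adopts std::unique_ptr with custom libav deleters. A minimal, self-contained sketch of that idiom (helper names hypothetical):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
}
#include <memory>

// libav frees packets/frames through functions taking T**, so thin adapter
// functions make them usable as unique_ptr deleters.
void PacketDeleter(AVPacket* p) { av_packet_free(&p); }
void FrameDeleter(AVFrame* f) { av_frame_free(&f); }

using PacketPtr = std::unique_ptr<AVPacket, decltype(&PacketDeleter)>;
using FramePtr = std::unique_ptr<AVFrame, decltype(&FrameDeleter)>;

PacketPtr MakePacket() { return {av_packet_alloc(), PacketDeleter}; }
FramePtr MakeFrame() { return {av_frame_alloc(), FrameDeleter}; }

// Any early return now releases the object automatically, e.g.:
//   FramePtr frame = MakeFrame();
//   if (avcodec_receive_frame(ctx, frame.get()) != 0) return; // no leak
```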
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/command_classes/codecs/codec.h
index 71936203f..f9a80886f 100644
--- a/src/video_core/command_classes/codecs/codec.h
+++ b/src/video_core/command_classes/codecs/codec.h
@@ -5,6 +5,7 @@
#pragma once
#include <memory>
+#include <string_view>
#include <queue>
#include "common/common_types.h"
#include "video_core/command_classes/nvdec_common.h"
@@ -50,18 +51,23 @@ public:
/// Returns the value of current_codec
[[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const;
+
/// Return name of the current codec
[[nodiscard]] std::string_view GetCurrentCodecName() const;
private:
- void InitializeHwdec();
+ void InitializeAvCodecContext();
+
+ void InitializeGpuDecoder();
+
+ bool CreateGpuAvDevice();
bool initialized{};
NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
AVCodec* av_codec{nullptr};
- AVBufferRef* av_hw_device{nullptr};
AVCodecContext* av_codec_ctx{nullptr};
+ AVBufferRef* av_gpu_decoder{nullptr};
GPU& gpu;
const NvdecCommon::NvdecRegisters& state;
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/command_classes/codecs/h264.cpp
index 5fb6d45ee..5519c4705 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/command_classes/codecs/h264.cpp
@@ -20,6 +20,8 @@
#include <array>
#include <bit>
+
+#include "common/settings.h"
#include "video_core/command_classes/codecs/h264.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
@@ -95,7 +97,11 @@ const std::vector<u8>& H264::ComposeFrameHeader(const NvdecCommon::NvdecRegister
const s32 pic_height = context.h264_parameter_set.frame_height_in_map_units /
(context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
- writer.WriteUe(16);
+ // TODO (ameerj): Where do we get this number, it seems to be particular for each stream
+ const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
+ const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
+ const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
+ writer.WriteUe(max_num_ref_frames);
writer.WriteBit(false);
writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
writer.WriteUe(pic_height - 1);
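max_num_ref_frames is an SPS field stored as an unsigned Exp-Golomb code, which is what WriteUe() emits here. A minimal sketch of ue(v) coding, with a hypothetical MSB-first bit writer added for self-containment:

```cpp
#include <bit>
#include <cstdint>
#include <vector>

// Hypothetical MSB-first bit writer standing in for the encoder's writer.
struct BitWriter {
    std::vector<uint8_t> bytes;
    int bit_pos = 0;
    void WriteBit(bool bit) {
        if (bit_pos % 8 == 0) {
            bytes.push_back(0);
        }
        if (bit) {
            bytes.back() |= static_cast<uint8_t>(0x80 >> (bit_pos % 8));
        }
        ++bit_pos;
    }
};

// ue(v): code (value + 1) as its N significant bits preceded by N - 1 zeros.
void WriteUe(BitWriter& writer, uint32_t value) {
    const uint32_t coded = value + 1;
    const int bits = static_cast<int>(std::bit_width(coded));
    for (int i = 0; i < bits - 1; ++i) {
        writer.WriteBit(false); // leading zero prefix
    }
    for (int i = bits - 1; i >= 0; --i) {
        writer.WriteBit(((coded >> i) & 1) != 0); // the value itself
    }
}
// WriteUe(w, 16) emits 000010001; WriteUe(w, 6) emits 00111.
```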
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/command_classes/vic.cpp
index 0ee07f398..051616124 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/command_classes/vic.cpp
@@ -16,6 +16,7 @@ extern "C" {
}
#include "common/assert.h"
+#include "common/bit_field.h"
#include "common/logging/log.h"
#include "video_core/command_classes/nvdec.h"
@@ -26,6 +27,25 @@ extern "C" {
#include "video_core/textures/decoders.h"
namespace Tegra {
+namespace {
+enum class VideoPixelFormat : u64_le {
+ RGBA8 = 0x1f,
+ BGRA8 = 0x20,
+ RGBX8 = 0x23,
+ YUV420 = 0x44,
+};
+} // Anonymous namespace
+
+union VicConfig {
+ u64_le raw{};
+ BitField<0, 7, VideoPixelFormat> pixel_format;
+ BitField<7, 2, u64_le> chroma_loc_horiz;
+ BitField<9, 2, u64_le> chroma_loc_vert;
+ BitField<11, 4, u64_le> block_linear_kind;
+ BitField<15, 4, u64_le> block_linear_height_log2;
+ BitField<32, 14, u64_le> surface_width_minus1;
+ BitField<46, 14, u64_le> surface_height_minus1;
+};
Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
: gpu(gpu_),
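The BitField members above describe the layout of the 64-bit VIC config word. Expressed as plain shifts and masks (an equivalent sketch, not the emulator's code), the fields the Execute() path consumes decode as:

```cpp
#include <cstdint>

struct DecodedVicConfig {
    uint64_t pixel_format;             // bits [0, 7)
    uint64_t block_linear_kind;        // bits [11, 15)
    uint64_t block_linear_height_log2; // bits [15, 19)
    uint64_t surface_width;            // bits [32, 46), stored as width - 1
    uint64_t surface_height;           // bits [46, 60), stored as height - 1
};

DecodedVicConfig Decode(uint64_t raw) {
    const auto field = [raw](int pos, int size) {
        return (raw >> pos) & ((uint64_t{1} << size) - 1);
    };
    return DecodedVicConfig{
        .pixel_format = field(0, 7),
        .block_linear_kind = field(11, 4),
        .block_linear_height_log2 = field(15, 4),
        .surface_width = field(32, 14) + 1,
        .surface_height = field(46, 14) + 1,
    };
}
```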
@@ -65,134 +85,155 @@ void Vic::Execute() {
if (!frame) {
return;
}
- const auto pixel_format = static_cast<VideoPixelFormat>(config.pixel_format.Value());
- switch (pixel_format) {
+ const u64 surface_width = config.surface_width_minus1 + 1;
+ const u64 surface_height = config.surface_height_minus1 + 1;
+ if (static_cast<u64>(frame->width) != surface_width ||
+ static_cast<u64>(frame->height) != surface_height) {
+ // TODO: Properly support multiple video streams with differing frame dimensions
+ LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
+ frame->width, frame->height, surface_width, surface_height);
+ }
+ switch (config.pixel_format) {
+ case VideoPixelFormat::RGBA8:
case VideoPixelFormat::BGRA8:
- case VideoPixelFormat::RGBA8: {
- LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
+ case VideoPixelFormat::RGBX8:
+ WriteRGBFrame(frame, config);
+ break;
+ case VideoPixelFormat::YUV420:
+ WriteYUVFrame(frame, config);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
+ break;
+ }
+}
- if (scaler_ctx == nullptr || frame->width != scaler_width ||
- frame->height != scaler_height) {
- const AVPixelFormat target_format =
- (pixel_format == VideoPixelFormat::RGBA8) ? AV_PIX_FMT_RGBA : AV_PIX_FMT_BGRA;
+void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
+ LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
+
+ if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
+ const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
+ switch (pixel_format) {
+ case VideoPixelFormat::RGBA8:
+ return AV_PIX_FMT_RGBA;
+ case VideoPixelFormat::BGRA8:
+ return AV_PIX_FMT_BGRA;
+ case VideoPixelFormat::RGBX8:
+ return AV_PIX_FMT_RGB0;
+ default:
+ return AV_PIX_FMT_RGBA;
+ }
+ }();
+
+ sws_freeContext(scaler_ctx);
+ // Frames are decoded into either YUV420 or NV12 formats. Convert to desired RGB format
+ scaler_ctx = sws_getContext(frame->width, frame->height,
+ static_cast<AVPixelFormat>(frame->format), frame->width,
+ frame->height, target_format, 0, nullptr, nullptr, nullptr);
+ scaler_width = frame->width;
+ scaler_height = frame->height;
+ converted_frame_buffer.reset();
+ }
+ if (!converted_frame_buffer) {
+ const size_t frame_size = frame->width * frame->height * 4;
+ converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
+ }
+ const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
+ u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
+ sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
+ converted_stride.data());
+
+ // Use the minimum of surface/frame dimensions to avoid buffer overflow.
+ const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
+ const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
+ const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
+ const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
+ const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
+ if (blk_kind != 0) {
+ // swizzle pitch linear to block linear
+ const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
+ const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
+ luma_buffer.resize(size);
+ Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
+ converted_frame_buf_addr, block_height, 0, 0);
+
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+ } else {
+ // send pitch linear frame
+ const size_t linear_size = width * height * 4;
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+ linear_size);
+ }
+}
- sws_freeContext(scaler_ctx);
- scaler_ctx = nullptr;
+void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
+ LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
- // Frames are decoded into either YUV420 or NV12 formats. Convert to desired format
- scaler_ctx = sws_getContext(frame->width, frame->height,
- static_cast<AVPixelFormat>(frame->format), frame->width,
- frame->height, target_format, 0, nullptr, nullptr, nullptr);
+ const std::size_t surface_width = config.surface_width_minus1 + 1;
+ const std::size_t surface_height = config.surface_height_minus1 + 1;
+ const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
+ // Use the minimum of surface/frame dimensions to avoid buffer overflow.
+ const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
+ const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
- scaler_width = frame->width;
- scaler_height = frame->height;
- }
- // Get Converted frame
- const u32 width = static_cast<u32>(frame->width);
- const u32 height = static_cast<u32>(frame->height);
- const std::size_t linear_size = width * height * 4;
-
- // Only allocate frame_buffer once per stream, as the size is not expected to change
- if (!converted_frame_buffer) {
- converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(linear_size)), av_free};
+ const auto stride = static_cast<size_t>(frame->linesize[0]);
+
+ luma_buffer.resize(aligned_width * surface_height);
+ chroma_buffer.resize(aligned_width * surface_height / 2);
+
+ // Populate luma buffer
+ const u8* luma_src = frame->data[0];
+ for (std::size_t y = 0; y < frame_height; ++y) {
+ const std::size_t src = y * stride;
+ const std::size_t dst = y * aligned_width;
+ for (std::size_t x = 0; x < frame_width; ++x) {
+ luma_buffer[dst + x] = luma_src[src + x];
}
- const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
- u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
-
- sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height,
- &converted_frame_buf_addr, converted_stride.data());
-
- const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
- if (blk_kind != 0) {
- // swizzle pitch linear to block linear
- const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
- const auto size =
- Tegra::Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
- luma_buffer.resize(size);
- Tegra::Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
- converted_frame_buffer.get(), block_height, 0, 0);
-
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
- } else {
- // send pitch linear frame
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
- linear_size);
+ }
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+ luma_buffer.size());
+
+ // Chroma
+ const std::size_t half_height = frame_height / 2;
+ const auto half_stride = static_cast<size_t>(frame->linesize[1]);
+
+ switch (frame->format) {
+ case AV_PIX_FMT_YUV420P: {
+ // Frame from FFmpeg software
+ // Populate chroma buffer from both channels with interleaving.
+ const std::size_t half_width = frame_width / 2;
+ const u8* chroma_b_src = frame->data[1];
+ const u8* chroma_r_src = frame->data[2];
+ for (std::size_t y = 0; y < half_height; ++y) {
+ const std::size_t src = y * half_stride;
+ const std::size_t dst = y * aligned_width;
+
+ for (std::size_t x = 0; x < half_width; ++x) {
+ chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
+ chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
+ }
}
break;
}
- case VideoPixelFormat::Yuv420: {
- LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
-
- const std::size_t surface_width = config.surface_width_minus1 + 1;
- const std::size_t surface_height = config.surface_height_minus1 + 1;
- const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
- const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
- const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
-
- const auto stride = static_cast<size_t>(frame->linesize[0]);
-
- luma_buffer.resize(aligned_width * surface_height);
- chroma_buffer.resize(aligned_width * surface_height / 2);
-
- // Populate luma buffer
- const u8* luma_src = frame->data[0];
- for (std::size_t y = 0; y < frame_height; ++y) {
+ case AV_PIX_FMT_NV12: {
+ // Frame from VA-API hardware
+ // This is already interleaved so just copy
+ const u8* chroma_src = frame->data[1];
+ for (std::size_t y = 0; y < half_height; ++y) {
const std::size_t src = y * stride;
const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < frame_width; ++x) {
- luma_buffer[dst + x] = luma_src[src + x];
- }
- }
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
- luma_buffer.size());
-
- // Chroma
- const std::size_t half_height = frame_height / 2;
- const auto half_stride = static_cast<size_t>(frame->linesize[1]);
-
- switch (frame->format) {
- case AV_PIX_FMT_YUV420P: {
- // Frame from FFmpeg software
- // Populate chroma buffer from both channels with interleaving.
- const std::size_t half_width = frame_width / 2;
- const u8* chroma_b_src = frame->data[1];
- const u8* chroma_r_src = frame->data[2];
- for (std::size_t y = 0; y < half_height; ++y) {
- const std::size_t src = y * half_stride;
- const std::size_t dst = y * aligned_width;
-
- for (std::size_t x = 0; x < half_width; ++x) {
- chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
- chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
- }
+ chroma_buffer[dst + x] = chroma_src[src + x];
}
- break;
- }
- case AV_PIX_FMT_NV12: {
- // Frame from VA-API hardware
- // This is already interleaved so just copy
- const u8* chroma_src = frame->data[1];
- for (std::size_t y = 0; y < half_height; ++y) {
- const std::size_t src = y * stride;
- const std::size_t dst = y * aligned_width;
- for (std::size_t x = 0; x < frame_width; ++x) {
- chroma_buffer[dst + x] = chroma_src[src + x];
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
}
- gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
- chroma_buffer.size());
break;
}
default:
- UNIMPLEMENTED_MSG("Unknown video pixel format {}", config.pixel_format.Value());
+ UNREACHABLE();
break;
}
+ gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+ chroma_buffer.size());
}
} // namespace Tegra
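The chroma split in WriteYUVFrame comes down to plane layout: YUV420P keeps Cb and Cr in two separate half-resolution planes, while NV12 stores them pre-interleaved (CbCrCbCr...), so the software-decode path must interleave and the hardware path is a plain row copy. A self-contained sketch of the interleaving step (names and stride handling illustrative):

```cpp
#include <cstddef>
#include <cstdint>

// Interleave separate Cb/Cr planes (YUV420P) into one CbCr plane (NV12).
void InterleaveChroma(uint8_t* dst, const uint8_t* cb, const uint8_t* cr,
                      std::size_t half_width, std::size_t half_height,
                      std::size_t src_stride, std::size_t dst_stride) {
    for (std::size_t y = 0; y < half_height; ++y) {
        for (std::size_t x = 0; x < half_width; ++x) {
            dst[y * dst_stride + x * 2] = cb[y * src_stride + x];     // Cb
            dst[y * dst_stride + x * 2 + 1] = cr[y * src_stride + x]; // Cr
        }
    }
}
```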
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/command_classes/vic.h
index 74246e08c..6d4cdfd57 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/command_classes/vic.h
@@ -6,7 +6,6 @@
#include <memory>
#include <vector>
-#include "common/bit_field.h"
#include "common/common_types.h"
struct SwsContext;
@@ -14,6 +13,7 @@ struct SwsContext;
namespace Tegra {
class GPU;
class Nvdec;
+union VicConfig;
class Vic {
public:
@@ -27,6 +27,7 @@ public:
};
explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
+
~Vic();
/// Write to the device state.
@@ -35,22 +36,9 @@ public:
private:
void Execute();
- enum class VideoPixelFormat : u64_le {
- RGBA8 = 0x1f,
- BGRA8 = 0x20,
- Yuv420 = 0x44,
- };
+ void WriteRGBFrame(const AVFrame* frame, const VicConfig& config);
- union VicConfig {
- u64_le raw{};
- BitField<0, 7, u64_le> pixel_format;
- BitField<7, 2, u64_le> chroma_loc_horiz;
- BitField<9, 2, u64_le> chroma_loc_vert;
- BitField<11, 4, u64_le> block_linear_kind;
- BitField<15, 4, u64_le> block_linear_height_log2;
- BitField<32, 14, u64_le> surface_width_minus1;
- BitField<46, 14, u64_le> surface_height_minus1;
- };
+ void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
GPU& gpu;
std::shared_ptr<Tegra::Nvdec> nvdec_processor;
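The header-side counterpart of the refactor: dropping common/bit_field.h from vic.h works because VicConfig now only appears by reference there, so a forward declaration suffices and the full union stays private to vic.cpp. A sketch of the pattern (types abbreviated):

```cpp
// vic.h (sketch): declarations only, no bit_field.h dependency.
union VicConfig;   // full definition lives in vic.cpp
struct AVFrame;    // FFmpeg type, likewise only used through a pointer here

class Vic {
    void WriteRGBFrame(const AVFrame* frame, const VicConfig& config);
    void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
};
```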