Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/dma_pusher.cpp                              |  30
-rw-r--r--  src/video_core/dma_pusher.h                                |   1
-rw-r--r--  src/video_core/engines/fermi_2d.cpp                        |   6
-rw-r--r--  src/video_core/engines/fermi_2d.h                          |   3
-rw-r--r--  src/video_core/engines/kepler_compute.cpp                  |   7
-rw-r--r--  src/video_core/engines/kepler_compute.h                    |   3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp                   |   7
-rw-r--r--  src/video_core/engines/kepler_memory.h                     |   3
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp                      |  74
-rw-r--r--  src/video_core/engines/maxwell_3d.h                        |   7
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp                     |   7
-rw-r--r--  src/video_core/engines/maxwell_dma.h                       |   3
-rw-r--r--  src/video_core/engines/shader_bytecode.h                   |   3
-rw-r--r--  src/video_core/gpu.cpp                                     |  55
-rw-r--r--  src/video_core/gpu.h                                       |  10
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp           |  12
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h      |   2
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h                 |   5
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp       |  13
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h         |  13
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp           |  42
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h             |   6
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp |  11
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h   |   1
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp                 |  21
-rw-r--r--  src/video_core/shader/decode/arithmetic_half.cpp           |  51
-rw-r--r--  src/video_core/shader/decode/arithmetic_integer.cpp        |   4
27 files changed, 318 insertions(+), 82 deletions(-)
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 324dafdcd..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -71,16 +71,22 @@ bool DmaPusher::Step() {
gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
- for (const CommandHeader& command_header : command_headers) {
-
- // now, see if we're in the middle of a command
- if (dma_state.length_pending) {
- // Second word of long non-inc methods command - method count
- dma_state.length_pending = 0;
- dma_state.method_count = command_header.method_count_;
- } else if (dma_state.method_count) {
+ for (std::size_t index = 0; index < command_headers.size();) {
+ const CommandHeader& command_header = command_headers[index];
+
+ if (dma_state.method_count) {
// Data word of methods command
- CallMethod(command_header.argument);
+ if (dma_state.non_incrementing) {
+ const u32 max_write = static_cast<u32>(
+ std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
+ index);
+ CallMultiMethod(&command_header.argument, max_write);
+ dma_state.method_count -= max_write;
+ index += max_write;
+ continue;
+ } else {
+ CallMethod(command_header.argument);
+ }
if (!dma_state.non_incrementing) {
dma_state.method++;
@@ -120,6 +126,7 @@ bool DmaPusher::Step() {
break;
}
}
+ index++;
}
if (!non_main) {
@@ -140,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
}
+void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
+ gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
+}
+
} // namespace Tegra
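
Note: the batching in Step() above hinges on a single clamp — a non-incrementing method with method_count data words still pending may only consume the words remaining in the chunk fetched so far. A minimal standalone sketch of that computation (the helper name is made up for illustration):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // How many of the pending data words fit in one CallMultiMethod batch,
    // given the current index into a buffer of fetched command words.
    std::uint32_t MaxWrite(std::size_t index, std::uint32_t method_count,
                           std::size_t buffer_size) {
        return static_cast<std::uint32_t>(
            std::min<std::size_t>(index + method_count, buffer_size) - index);
    }

    // index = 5, method_count = 10, buffer_size = 8 -> 3: three words are
    // forwarded in one call now, the remaining seven once the next chunk of
    // the command list has been fetched.
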
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index d6188614a..6cef71306 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,7 @@ private:
void SetState(const CommandHeader& command_header);
void CallMethod(u32 argument) const;
+ void CallMultiMethod(const u32* base_start, u32 num_methods) const;
std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index bace6affb..8a47614d2 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -28,6 +28,12 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
const u32 line_a = src_2 - src_1;
const u32 line_b = dst_2 - dst_1;
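
Note: Fermi2D has no multi-word fast path, so its CallMultiMethod (like the Kepler and DMA engines below) just replays the burst through CallMethod, decrementing methods_pending so every synthesized call still reports how many words of the burst remain. Illustrative expansion:

    // amount = 3, methods_pending = 5:
    //   CallMethod({method, base_start[0], 0, 5});
    //   CallMethod({method, base_start[1], 0, 4});
    //   CallMethod({method, base_start[2], 0, 3});
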
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index dba342c70..939a5966d 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -39,6 +39,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
enum class Origin : u32 {
Center = 0,
Corner = 1,
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 368c75a66..00a12175f 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
ASSERT(cbuf_mask[regs.tex_cb_index]);
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index eeb79c56f..fe55fdfd0 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -202,6 +202,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
Texture::FullTextureInfo GetTexture(std::size_t offset) const;
/// Given a texture handle, returns the TSC and TIC entries.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 597872e43..586ff15dc 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
} // namespace Tegra::Engines
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 396fb6e86..bb26fb030 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -40,6 +40,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr size_t NUM_REGS = 0x7F;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 2824ed707..39e3b66a2 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -280,6 +280,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ // Methods after 0xE00 are special; they're actually triggers for some microcode that was
+ // uploaded to the GPU during initialization.
+ if (method >= MacroRegistersStart) {
+ // We're trying to execute a macro
+ if (executing_macro == 0) {
+ // A macro call must begin by writing the macro method's register, not its argument.
+ ASSERT_MSG((method % 2) == 0,
+ "Can't start macro execution by writing to the ARGS register");
+ executing_macro = method;
+ }
+
+ for (std::size_t i = 0; i < amount; i++) {
+ macro_params.push_back(base_start[i]);
+ }
+
+ // Call the macro when there are no more parameters in the command buffer
+ if (amount == methods_pending) {
+ CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+ macro_params.clear();
+ }
+ return;
+ }
+ switch (method) {
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
+ ProcessCBMultiData(method, base_start, amount);
+ break;
+ }
+ default: {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+ }
+ }
+}
+
void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
if (mme_draw.current_mode == MMEDrawMode::Undefined) {
if (mme_draw.gl_begin_consume) {
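
Note: for the macro path in CallMultiMethod above, the interesting condition is amount == methods_pending — the burst carries the last words of the command, so the accumulated macro_params can be flushed into the macro interpreter. An illustrative trace (method number made up):

    // Macro register 0xE20 (even, so it may start a macro), three parameters
    // arriving in a single non-incrementing burst:
    //   CallMultiMethod(0xE20, params, /*amount=*/3, /*methods_pending=*/3);
    // -> executing_macro = 0xE20, all three words are appended to
    //    macro_params, and because amount == methods_pending the macro is
    //    executed and the parameter list cleared.
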
@@ -570,6 +622,28 @@ void Maxwell3D::StartCBData(u32 method) {
ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
}
+void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
+ if (cb_data_state.current != method) {
+ if (cb_data_state.current != null_cb_data) {
+ FinishCBData();
+ }
+ constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
+ cb_data_state.start_pos = regs.const_buffer.cb_pos;
+ cb_data_state.id = method - first_cb_data;
+ cb_data_state.current = method;
+ cb_data_state.counter = 0;
+ }
+ const std::size_t id = cb_data_state.id;
+ const std::size_t size = amount;
+ std::size_t i = 0;
+ for (; i < size; i++) {
+ cb_data_state.buffer[id][cb_data_state.counter] = start_base[i];
+ cb_data_state.counter++;
+ }
+ // Increment the current buffer position.
+ regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
+}
+
void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
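
Note: ProcessCBMultiData is the batched counterpart of StartCBData/ProcessCBData — one burst of amount words is appended to the staging buffer for CB_DATA[id], and cb_pos advances by 4 bytes per 32-bit word, exactly as that many single-word writes would have advanced it. Worked numbers (illustrative):

    // Six words written to CB_DATA[2] with cb_pos = 0x40 on entry:
    //   buffer[2][counter .. counter + 5] receive the six words
    //   cb_pos ends at 0x40 + 4 * 6 = 0x58
    // which matches six consecutive ProcessCBData() calls.
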
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 59d5752d2..3dfba8197 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -1259,7 +1259,8 @@ public:
GPUVAddr LimitAddress() const {
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
- limit_low);
+ limit_low) +
+ 1;
}
} vertex_array_limit[NumVertexArrays];
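
Note: the limit registers hold the address of the last valid byte of a vertex array, so LimitAddress() now returns one past it, i.e. an exclusive end. Every consumer in this change switches from end - start + 1 to end - start, and an empty array becomes representable as end == start rather than tripping an assert:

    // limit_high:limit_low = address of the last byte (inclusive).
    // LimitAddress() = that address + 1 (exclusive end), hence:
    //   size = LimitAddress() - StartAddress(); // 0 for an empty array
    // replacing the old `end - start + 1` and its `ASSERT(end > start)`.
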
@@ -1358,6 +1359,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
/// Write the value to the register identified by method.
void CallMethodFromMME(const GPU::MethodCall& method_call);
@@ -1511,6 +1515,7 @@ private:
/// Handles a write to the CB_DATA[i] register.
void StartCBData(u32 method);
void ProcessCBData(u32 value);
+ void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
void FinishCBData();
/// Handles a write to the CB_BIND register.
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3bfed6ab8..6630005b0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
#undef MAXWELLDMA_REG_INDEX
}
+void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
void MaxwellDMA::HandleCopy() {
LOG_TRACE(HW_GPU, "Requested a DMA copy");
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4f40d1d1f..c43ed8194 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -35,6 +35,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x1D6;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 7231597d4..cde3a26b9 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -655,6 +655,7 @@ union Instruction {
}
constexpr Instruction(u64 value) : value{value} {}
+ constexpr Instruction(const Instruction& instr) : value(instr.value) {}
BitField<0, 8, Register> gpr0;
BitField<8, 8, Register> gpr8;
@@ -817,11 +818,9 @@ union Instruction {
BitField<32, 1, u64> saturate;
BitField<49, 2, HalfMerge> merge;
- BitField<43, 1, u64> negate_a;
BitField<44, 1, u64> abs_a;
BitField<47, 2, HalfType> type_a;
- BitField<31, 1, u64> negate_b;
BitField<30, 1, u64> abs_b;
BitField<28, 2, HalfType> type_b;
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 3b7572d61..b87fd873d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -9,6 +9,7 @@
#include "core/core_timing_util.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/kepler_memory.h"
@@ -154,7 +155,10 @@ u64 GPU::GetTicks() const {
constexpr u64 gpu_ticks_den = 625;
const u64 cpu_ticks = system.CoreTiming().GetTicks();
- const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ if (Settings::values.use_fast_gpu_time) {
+ nanoseconds /= 256;
+ }
const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
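
Note: the quotient/remainder split keeps the multiplication from overflowing 64 bits for large nanosecond counts, and use_fast_gpu_time simply pretends 256 times fewer nanoseconds have elapsed. A compact sketch of the same conversion, assuming gpu_ticks_num = 384 as in the surrounding function (384/625 of a tick per nanosecond corresponds to a 614.4 MHz GPU clock):

    // Overflow-safe floor(ns * num / den).
    u64 NsToGpuTicks(u64 ns) {
        constexpr u64 num = 384; // gpu_ticks_num, assumed from context
        constexpr u64 den = 625; // gpu_ticks_den
        return (ns / den) * num + ((ns % den) * num) / den;
    }
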
@@ -209,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) {
ASSERT(method_call.subchannel < bound_engines.size());
- if (ExecuteMethodOnEngine(method_call)) {
+ if (ExecuteMethodOnEngine(method_call.method)) {
CallEngineMethod(method_call);
} else {
CallPullerMethod(method_call);
}
}
-bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) {
- const auto method = static_cast<BufferMethods>(method_call.method);
- return method >= BufferMethods::NonPullerMethods;
+void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(
+ {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+ }
+ }
+}
+
+bool GPU::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
}
void GPU::CallPullerMethod(const MethodCall& method_call) {
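
Note: only engine methods take the multi-word fast path; anything below BufferMethods::NonPullerMethods is puller state (semaphore and fence handling and similar housekeeping) and is replayed value by value, which is semantically identical to the single-method path. Illustrative expansion:

    // Puller method, amount = 2, methods_pending = 2:
    //   CallPullerMethod({method, base_start[0], subchannel, 2});
    //   CallPullerMethod({method, base_start[1], subchannel, 1});
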
@@ -298,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) {
}
}
+void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
void GPU::ProcessBindMethod(const MethodCall& method_call) {
// Bind the current subchannel to the desired engine id.
LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 5e3eb94e9..dd51c95b7 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -155,6 +155,10 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ /// Calls a GPU multivalue method.
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Flush all current written commands into the host GPU for execution.
void FlushCommands();
/// Synchronizes CPU writes with Host GPU memory.
@@ -309,8 +313,12 @@ private:
/// Calls a GPU engine method.
void CallEngineMethod(const MethodCall& method_call);
+ /// Calls a GPU engine multivalue method.
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
+ bool ExecuteMethodOnEngine(u32 method);
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 4c16c89d2..6fe155bcc 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -186,8 +186,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
const GPUVAddr start = vertex_array.StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- const u64 size = end - start + 1;
+ ASSERT(end >= start);
+ const u64 size = end - start;
+ if (size == 0) {
+ glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
+ continue;
+ }
const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
vertex_array.stride);
@@ -311,8 +315,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const GPUVAddr start = regs.vertex_array[index].StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- size += end - start + 1;
+ size += end - start;
+ ASSERT(end >= start);
}
return size;
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index d4fd4d3f1..77188b862 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -129,7 +129,7 @@ struct FixedPipelineState {
auto& binding = bindings[index];
binding.raw = 0;
binding.enabled.Assign(enabled ? 1 : 0);
- binding.stride.Assign(stride);
+ binding.stride.Assign(static_cast<u16>(stride));
binding_divisors[index] = divisor;
}
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index a4d841e26..c8640762d 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -82,11 +82,6 @@ public:
return present_family;
}
- /// Returns true if the device is integrated with the host CPU.
- bool IsIntegrated() const {
- return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
- }
-
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
u32 GetApiVersion() const {
return properties.apiVersion;
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 6a9e658bf..b4c650a63 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -118,8 +118,7 @@ private:
};
VKMemoryManager::VKMemoryManager(const VKDevice& device)
- : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
- is_memory_unified{GetMemoryUnified(properties)} {}
+ : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {}
VKMemoryManager::~VKMemoryManager() = default;
@@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
return {};
}
-bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
- for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
- if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
- // Memory is considered unified when heaps are device local only.
- return false;
- }
- }
- return true;
-}
-
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
const vk::DeviceMemory& memory, u64 begin, u64 end)
: device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 5b6858e9b..1af88e3d4 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -40,11 +40,6 @@ public:
/// Commits memory required by the image and binds it.
VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
- /// Returns true if the memory allocations are done always in host visible and coherent memory.
- bool IsMemoryUnified() const {
- return is_memory_unified;
- }
-
private:
/// Allocates a chunk of memory.
bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
@@ -53,12 +48,8 @@ private:
VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties);
- /// Returns true if the device uses an unified memory model.
- static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
-
- const VKDevice& device; ///< Device handler.
- const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
- const bool is_memory_unified; ///< True if memory model is unified.
+ const VKDevice& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 4eafdc14d..c821b1229 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -883,8 +883,12 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
const GPUVAddr start{vertex_array.StartAddress()};
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
- ASSERT(end > start);
- const std::size_t size{end - start + 1};
+ ASSERT(end >= start);
+ const std::size_t size{end - start};
+ if (size == 0) {
+ buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
+ continue;
+ }
const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
buffer_bindings.AddVertexBinding(buffer, offset);
}
@@ -1039,8 +1043,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
const Tegra::Engines::ConstBufferInfo& buffer) {
if (!buffer.enabled) {
// Set values to zero to unbind buffers
- update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
- sizeof(float));
+ update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
return;
}
@@ -1063,7 +1066,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
if (size == 0) {
// Sometimes global memory pointers don't have a proper size. Upload a dummy entry
// because Vulkan doesn't like empty buffers.
- constexpr std::size_t dummy_size = 4;
+ // Note: Do *not* use DefaultBuffer() here; storage buffers can be written to, and that
+ // would corrupt the shared default buffer.
+ static constexpr std::size_t dummy_size = 4;
const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
return;
@@ -1228,7 +1233,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
DEBUG_ASSERT(end >= start);
- size += (end - start + 1) * regs.vertex_array[index].enable;
+ size += (end - start) * regs.vertex_array[index].enable;
}
return size;
}
@@ -1276,4 +1281,29 @@ RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions)
return params;
}
+VkBuffer RasterizerVulkan::DefaultBuffer() {
+ if (default_buffer) {
+ return *default_buffer;
+ }
+
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = DEFAULT_BUFFER_SIZE;
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ default_buffer = device.GetLogical().CreateBuffer(ci);
+ default_buffer_commit = memory_manager.Commit(default_buffer, false);
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
+ cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
+ });
+ return *default_buffer;
+}
+
} // namespace Vulkan
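
Note: DefaultBuffer() lazily creates a 16-byte buffer (DEFAULT_BUFFER_SIZE = 4 * sizeof(float)) and zero-fills it once via FillBuffer, so shaders reading from it see zeros. It backs the two read-only fallbacks added above, while SetupGlobalBuffer keeps its own dummy allocation because a storage buffer is writable and could clobber the shared zeros. Usage as in this change:

    buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);   // empty vertex array
    update_descriptor_queue.AddBuffer(DefaultBuffer(), 0,
                                      DEFAULT_BUFFER_SIZE); // disabled const buffer
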
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 2fa46b0cc..d41a7929e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -155,6 +155,7 @@ private:
using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
+ static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
void FlushWork();
@@ -247,6 +248,8 @@ private:
RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
+ VkBuffer DefaultBuffer();
+
Core::System& system;
Core::Frontend::EmuWindow& render_window;
VKScreenInfo& screen_info;
@@ -271,6 +274,9 @@ private:
VKFenceManager fence_manager;
VKQueryCache query_cache;
+ vk::Buffer default_buffer;
+ VKMemoryCommit default_buffer_commit;
+
std::array<View, Maxwell::NumRenderTargets> color_attachments;
View zeta_attachment;
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler)
- : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
- is_device_integrated{device.IsIntegrated()} {}
+ : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
VKStagingBufferPool::~VKStagingBufferPool() = default;
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true);
- if (!is_device_integrated) {
- ReleaseCache(false);
- }
+ ReleaseCache(false);
}
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
ci.size = 1ULL << log2;
ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
}
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
- return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+ return host_visible ? host_staging_buffers : device_staging_buffers;
}
void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..faf6418fd 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -71,7 +71,6 @@ private:
const VKDevice& device;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
- const bool is_device_integrated;
StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 539f3c974..7f5bc1404 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <exception>
#include <memory>
#include <optional>
@@ -16,6 +17,23 @@ namespace Vulkan::vk {
namespace {
+void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
+ std::stable_sort(devices.begin(), devices.end(), [&](auto lhs, auto rhs) {
+ // This will call Vulkan more than needed, but these calls are cheap.
+ const auto lhs_properties = vk::PhysicalDevice(lhs, dld).GetProperties();
+ const auto rhs_properties = vk::PhysicalDevice(rhs, dld).GetProperties();
+
+ // Prefer discrete GPUs, Nvidia over AMD, AMD over Intel, Intel over the rest.
+ const bool preferred =
+ (lhs_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
+ rhs_properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) ||
+ (lhs_properties.vendorID == 0x10DE && rhs_properties.vendorID != 0x10DE) ||
+ (lhs_properties.vendorID == 0x1002 && rhs_properties.vendorID != 0x1002) ||
+ (lhs_properties.vendorID == 0x8086 && rhs_properties.vendorID != 0x8086);
+ return preferred;
+ });
+}
+
template <typename T>
bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
VkInstance instance = nullptr) noexcept {
@@ -389,7 +407,8 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
return std::nullopt;
}
- return physical_devices;
+ SortPhysicalDevices(physical_devices, *dld);
+ return std::make_optional(std::move(physical_devices));
}
DebugCallback Instance::TryCreateDebugCallback(
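
Note: with the comparator returning preferred (returning its negation would both sort preferred devices last and make the comparator reflexive, which std::stable_sort forbids), preferred devices move to the front and equally ranked devices keep their enumeration order, so the default selection (index 0) lands on the most desirable GPU. The clauses assume at most one side matches a rule; an exotic pairing such as a discrete Intel GPU against an integrated Nvidia one would rank each above the other, which a strict weak ordering must not do. Illustrative outcome:

    // Enumeration order: { Intel iGPU (0x8086), AMD dGPU (0x1002),
    //                      Nvidia dGPU (0x10DE) }
    // After the stable sort: { Nvidia dGPU, AMD dGPU, Intel iGPU }
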
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index ee7d9a29d..a276aee44 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -19,22 +19,46 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
- opcode->get().GetId() == OpCode::Id::HADD2_R) {
+ bool negate_a = false;
+ bool negate_b = false;
+ bool absolute_a = false;
+ bool absolute_b = false;
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::HADD2_R:
if (instr.alu_half.ftz == 0) {
LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
}
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HADD2_C:
+ if (instr.alu_half.ftz == 0) {
+ LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
+ }
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 56) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_R:
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_C:
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
}
- const bool negate_a =
- opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
- const bool negate_b =
- opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
-
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
- op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a);
+ op_a = GetOperandAbsNegHalf(op_a, absolute_a, negate_a);
- auto [type_b, op_b] = [&]() -> std::tuple<HalfType, Node> {
+ auto [type_b, op_b] = [this, instr, opcode]() -> std::pair<HalfType, Node> {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HMUL2_C:
@@ -48,17 +72,16 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
}
}();
op_b = UnpackHalfFloat(op_b, type_b);
- // redeclaration to avoid a bug in clang with reusing local bindings in lambdas
- Node op_b_alt = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
+ op_b = GetOperandAbsNegHalf(op_b, absolute_b, negate_b);
- Node value = [&]() {
+ Node value = [this, opcode, op_a, op_b = op_b] {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HADD2_R:
- return Operation(OperationCode::HAdd, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_C:
case OpCode::Id::HMUL2_R:
- return Operation(OperationCode::HMul, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
return Immediate(0);
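
Note: the shared negate_a/negate_b bitfields were removed from shader_bytecode.h earlier in this change because the bit positions differ per encoding — HADD2_R reads negate_b from bit 31 and absolute_b from bit 30, while HADD2_C reads them from bits 56 and 54, so one BitField cannot describe both forms. The manual shifts are equivalent to a small helper (illustrative):

    constexpr bool Bit(u64 value, unsigned bit) {
        return ((value >> bit) & 1) != 0;
    }
    // HADD2_R: negate_b = Bit(instr.value, 31), absolute_b = Bit(instr.value, 30)
    // HADD2_C: negate_b = Bit(instr.value, 56), absolute_b = Bit(instr.value, 54)
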
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 0f4c3103a..9af8c606d 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -249,8 +249,8 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::LEA_IMM: {
const bool neg = instr.lea.imm.neg != 0;
- return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
- GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
Immediate(static_cast<u32>(instr.lea.imm.entry_b))};
}
case OpCode::Id::LEA_RZ: {