path: root/src/video_core/shader/decode
Diffstat (limited to 'src/video_core/shader/decode')
-rw-r--r--   src/video_core/shader/decode/memory.cpp  | 103
-rw-r--r--   src/video_core/shader/decode/texture.cpp |  16
2 files changed, 76 insertions(+), 43 deletions(-)
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index c934d0719..7591a715f 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -6,6 +6,7 @@
#include <vector>
#include <fmt/format.h>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
@@ -15,6 +16,8 @@
namespace VideoCommon::Shader {
+using Tegra::Shader::AtomicOp;
+using Tegra::Shader::AtomicType;
using Tegra::Shader::Attribute;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
@@ -22,34 +25,39 @@ using Tegra::Shader::Register;
namespace {
-u32 GetLdgMemorySize(Tegra::Shader::UniformType uniform_type) {
+bool IsUnaligned(Tegra::Shader::UniformType uniform_type) {
+ return uniform_type == Tegra::Shader::UniformType::UnsignedByte ||
+ uniform_type == Tegra::Shader::UniformType::UnsignedShort;
+}
+
+u32 GetUnalignedMask(Tegra::Shader::UniformType uniform_type) {
switch (uniform_type) {
case Tegra::Shader::UniformType::UnsignedByte:
- case Tegra::Shader::UniformType::Single:
- return 1;
- case Tegra::Shader::UniformType::Double:
- return 2;
- case Tegra::Shader::UniformType::Quad:
- case Tegra::Shader::UniformType::UnsignedQuad:
- return 4;
+ return 0b11;
+ case Tegra::Shader::UniformType::UnsignedShort:
+ return 0b10;
default:
- UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type));
- return 1;
+ UNREACHABLE();
+ return 0;
}
}
-u32 GetStgMemorySize(Tegra::Shader::UniformType uniform_type) {
+u32 GetMemorySize(Tegra::Shader::UniformType uniform_type) {
switch (uniform_type) {
+ case Tegra::Shader::UniformType::UnsignedByte:
+ return 8;
+ case Tegra::Shader::UniformType::UnsignedShort:
+ return 16;
case Tegra::Shader::UniformType::Single:
- return 1;
+ return 32;
case Tegra::Shader::UniformType::Double:
- return 2;
+ return 64;
case Tegra::Shader::UniformType::Quad:
case Tegra::Shader::UniformType::UnsignedQuad:
- return 4;
+ return 128;
default:
UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type));
- return 1;
+ return 32;
}
}
@@ -184,9 +192,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}();
const auto [real_address_base, base_address, descriptor] =
- TrackGlobalMemory(bb, instr, false);
+ TrackGlobalMemory(bb, instr, true, false);
- const u32 count = GetLdgMemorySize(type);
+ const u32 size = GetMemorySize(type);
+ const u32 count = Common::AlignUp(size, 32) / 32;
if (!real_address_base || !base_address) {
// Tracking failed, load zeroes.
for (u32 i = 0; i < count; ++i) {
@@ -200,14 +209,15 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset);
Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
- if (type == Tegra::Shader::UniformType::UnsignedByte) {
- // To handle unaligned loads get the byte used to dereferenced global memory
- // and extract that byte from the loaded uint32.
- Node byte = Operation(OperationCode::UBitwiseAnd, real_address, Immediate(3));
- byte = Operation(OperationCode::ULogicalShiftLeft, std::move(byte), Immediate(3));
+ // To handle unaligned loads get the bytes used to dereference global memory and extract
+ // those bytes from the loaded u32.
+ if (IsUnaligned(type)) {
+ Node mask = Immediate(GetUnalignedMask(type));
+ Node offset = Operation(OperationCode::UBitwiseAnd, real_address, std::move(mask));
+ offset = Operation(OperationCode::ULogicalShiftLeft, offset, Immediate(3));
- gmem = Operation(OperationCode::UBitfieldExtract, std::move(gmem), std::move(byte),
- Immediate(8));
+ gmem = Operation(OperationCode::UBitfieldExtract, std::move(gmem),
+ std::move(offset), Immediate(size));
}
SetTemporary(bb, i, gmem);
@@ -295,23 +305,53 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
}();
+ // For unaligned reads we have to read memory too.
+ const bool is_read = IsUnaligned(type);
const auto [real_address_base, base_address, descriptor] =
- TrackGlobalMemory(bb, instr, true);
+ TrackGlobalMemory(bb, instr, is_read, true);
if (!real_address_base || !base_address) {
// Tracking failed, skip the store.
break;
}
- const u32 count = GetStgMemorySize(type);
+ const u32 size = GetMemorySize(type);
+ const u32 count = Common::AlignUp(size, 32) / 32;
for (u32 i = 0; i < count; ++i) {
const Node it_offset = Immediate(i * 4);
const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset);
const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
- const Node value = GetRegister(instr.gpr0.Value() + i);
+ Node value = GetRegister(instr.gpr0.Value() + i);
+
+ if (IsUnaligned(type)) {
+ Node mask = Immediate(GetUnalignedMask(type));
+ Node offset = Operation(OperationCode::UBitwiseAnd, real_address, std::move(mask));
+ offset = Operation(OperationCode::ULogicalShiftLeft, offset, Immediate(3));
+
+ value = Operation(OperationCode::UBitfieldInsert, gmem, std::move(value), offset,
+ Immediate(size));
+ }
+
bb.push_back(Operation(OperationCode::Assign, gmem, value));
}
break;
}
+ case OpCode::Id::ATOMS: {
+ UNIMPLEMENTED_IF_MSG(instr.atoms.operation != AtomicOp::Add, "operation={}",
+ static_cast<int>(instr.atoms.operation.Value()));
+ UNIMPLEMENTED_IF_MSG(instr.atoms.type != AtomicType::U32, "type={}",
+ static_cast<int>(instr.atoms.type.Value()));
+
+ const s32 offset = instr.atoms.GetImmediateOffset();
+ Node address = GetRegister(instr.gpr8);
+ address = Operation(OperationCode::IAdd, std::move(address), Immediate(offset));
+
+ Node memory = GetSharedMemory(std::move(address));
+ Node data = GetRegister(instr.gpr20);
+
+ Node value = Operation(OperationCode::UAtomicAdd, std::move(memory), std::move(data));
+ SetRegister(bb, instr.gpr0, std::move(value));
+ break;
+ }
case OpCode::Id::AL2P: {
// Ignore al2p.direction since we don't care about it.
@@ -336,7 +376,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock& bb,
Instruction instr,
- bool is_write) {
+ bool is_read, bool is_write) {
const auto addr_register{GetRegister(instr.gmem.gpr)};
const auto immediate_offset{static_cast<u32>(instr.gmem.offset)};
@@ -351,11 +391,8 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock&
const GlobalMemoryBase descriptor{index, offset};
const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor);
auto& usage = entry->second;
- if (is_write) {
- usage.is_written = true;
- } else {
- usage.is_read = true;
- }
+ usage.is_written |= is_write;
+ usage.is_read |= is_read;
const auto real_address =
Operation(OperationCode::UAdd, NO_PRECISE, Immediate(immediate_offset), addr_register);
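Note on the memory.cpp change above: the new LDG/STG path works in bits rather than 32-bit words, derives the register count from Common::AlignUp(size, 32) / 32, and for 8/16-bit accesses uses the low address bits selected by GetUnalignedMask as a bit offset into the aligned word. The following is a minimal host-side sketch of the equivalent arithmetic; the helper names EmulateUnalignedLoad/EmulateUnalignedStore and the plain-u32 memory model are illustrative assumptions, not part of the shader IR.

#include <cstdint>

// Sketch only: models what the emitted IR computes for 8/16-bit global accesses.
// mask is 0b11 for bytes and 0b10 for shorts (GetUnalignedMask); size is the
// access width in bits (GetMemorySize) and is always < 32 on this path.
uint32_t EmulateUnalignedLoad(uint32_t loaded_word, uint32_t address, uint32_t mask,
                              uint32_t size) {
    const uint32_t bit_offset = (address & mask) * 8;  // byte offset -> bit offset
    const uint32_t field = (1u << size) - 1;
    return (loaded_word >> bit_offset) & field;        // UBitfieldExtract
}

uint32_t EmulateUnalignedStore(uint32_t existing_word, uint32_t value, uint32_t address,
                               uint32_t mask, uint32_t size) {
    const uint32_t bit_offset = (address & mask) * 8;
    const uint32_t field = ((1u << size) - 1) << bit_offset;
    // UBitfieldInsert: keep the surrounding bits, splice the new value in.
    return (existing_word & ~field) | ((value << bit_offset) & field);
}

For example, storing a byte at an address whose low bits are 0b01 gives bit_offset 8, so only bits 8..15 of the existing word are replaced; this is why the STG case now also flags the region as read (is_read) when the type is unaligned.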
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index cf99cc5be..0b567e39d 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -801,14 +801,10 @@ std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coord_count,
bool is_tld4) {
- const auto [coord_offsets, size, wrap_value,
- diff_value] = [is_tld4]() -> std::tuple<std::array<u32, 3>, u32, s32, s32> {
- if (is_tld4) {
- return {{0, 8, 16}, 6, 32, 64};
- } else {
- return {{0, 4, 8}, 4, 8, 16};
- }
- }();
+ const std::array coord_offsets = is_tld4 ? std::array{0U, 8U, 16U} : std::array{0U, 4U, 8U};
+ const u32 size = is_tld4 ? 6 : 4;
+ const s32 wrap_value = is_tld4 ? 32 : 8;
+ const s32 diff_value = is_tld4 ? 64 : 16;
const u32 mask = (1U << size) - 1;
std::vector<Node> aoffi;
@@ -821,7 +817,7 @@ std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coor
LOG_WARNING(HW_GPU,
"AOFFI constant folding failed, some hardware might have graphical issues");
for (std::size_t coord = 0; coord < coord_count; ++coord) {
- const Node value = BitfieldExtract(aoffi_reg, coord_offsets.at(coord), size);
+ const Node value = BitfieldExtract(aoffi_reg, coord_offsets[coord], size);
const Node condition =
Operation(OperationCode::LogicalIGreaterEqual, value, Immediate(wrap_value));
const Node negative = Operation(OperationCode::IAdd, value, Immediate(-diff_value));
@@ -831,7 +827,7 @@ std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coor
}
for (std::size_t coord = 0; coord < coord_count; ++coord) {
- s32 value = (*aoffi_immediate >> coord_offsets.at(coord)) & mask;
+ s32 value = (*aoffi_immediate >> coord_offsets[coord]) & mask;
if (value >= wrap_value) {
value -= diff_value;
}
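Note on the texture.cpp change above: GetAoffiCoordinates packs each coordinate offset into a bitfield of size bits (4 for regular texture fetches, 6 for TLD4) and sign-corrects any field value at or above wrap_value by subtracting diff_value. Below is a minimal sketch of the constant-folded decode under the assumption of a plain u32 immediate; the helper name DecodeAoffiImmediate is made up for illustration.

#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: mirrors the constant-folded branch of GetAoffiCoordinates.
std::vector<int32_t> DecodeAoffiImmediate(uint32_t aoffi, std::size_t coord_count, bool is_tld4) {
    const std::array<uint32_t, 3> coord_offsets =
        is_tld4 ? std::array<uint32_t, 3>{0, 8, 16} : std::array<uint32_t, 3>{0, 4, 8};
    const uint32_t size = is_tld4 ? 6 : 4;      // bits per coordinate field
    const int32_t wrap_value = is_tld4 ? 32 : 8;
    const int32_t diff_value = is_tld4 ? 64 : 16;
    const uint32_t mask = (1u << size) - 1;

    std::vector<int32_t> result;
    for (std::size_t coord = 0; coord < coord_count; ++coord) {
        int32_t value = static_cast<int32_t>((aoffi >> coord_offsets[coord]) & mask);
        if (value >= wrap_value) {
            value -= diff_value;                // two's-complement sign correction
        }
        result.push_back(value);
    }
    return result;
}

// Example: with is_tld4 == false, the immediate 0b1111'0001 decodes to {+1, -1}
// for a two-coordinate fetch.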