Diffstat (limited to 'src/video_core/shader/decode')
-rw-r--r--  src/video_core/shader/decode/arithmetic_half.cpp             15
-rw-r--r--  src/video_core/shader/decode/arithmetic_half_immediate.cpp   17
-rw-r--r--  src/video_core/shader/decode/half_set.cpp                    16
-rw-r--r--  src/video_core/shader/decode/half_set_predicate.cpp           8
-rw-r--r--  src/video_core/shader/decode/hfma2.cpp                       12
-rw-r--r--  src/video_core/shader/decode/memory.cpp                     118
-rw-r--r--  src/video_core/shader/decode/texture.cpp                    113
7 files changed, 205 insertions(+), 94 deletions(-)
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index baee89107..9467f9417 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -18,7 +18,9 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
opcode->get().GetId() == OpCode::Id::HADD2_R) {
- UNIMPLEMENTED_IF(instr.alu_half.ftz != 0);
+ if (instr.alu_half.ftz != 0) {
+ LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
+ }
}
UNIMPLEMENTED_IF_MSG(instr.alu_half.saturate != 0, "Half float saturation not implemented");
@@ -27,9 +29,8 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
const bool negate_b =
opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
- const Node op_a = GetOperandAbsNegHalf(GetRegister(instr.gpr8), instr.alu_half.abs_a, negate_a);
-
- // instr.alu_half.type_a
+ Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
+ op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a);
Node op_b = [&]() {
switch (opcode->get().GetId()) {
@@ -44,17 +45,17 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
return Immediate(0);
}
}();
+ op_b = UnpackHalfFloat(op_b, instr.alu_half.type_b);
op_b = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
Node value = [&]() {
- MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a, instr.alu_half.type_b}};
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HADD2_R:
- return Operation(OperationCode::HAdd, meta, op_a, op_b);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_C:
case OpCode::Id::HMUL2_R:
- return Operation(OperationCode::HMul, meta, op_a, op_b);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
return Immediate(0);
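For reference: the HADD2/HMUL2 family treats a 32-bit register as two packed f16 lanes, and the type_a/type_b fields pick which lanes feed the operation, which is what the UnpackHalfFloat calls above express in the IR. A minimal standalone sketch of that lane selection, assuming the H0_H1/F32/H0_H0/H1_H1 encoding; it is illustrative only, since the real helper emits IR nodes rather than computing values:

    #include <array>
    #include <cstdint>

    enum class HalfType : std::uint64_t { H0_H1 = 0, F32 = 1, H0_H0 = 2, H1_H1 = 3 };

    // Illustrative only: returns the two raw f16 bit patterns an instruction
    // with the given type field would consume from a 32-bit source register.
    std::array<std::uint16_t, 2> UnpackLanes(std::uint32_t reg, HalfType type) {
        const auto h0 = static_cast<std::uint16_t>(reg & 0xffffu); // low lane
        const auto h1 = static_cast<std::uint16_t>(reg >> 16);     // high lane
        switch (type) {
        case HalfType::H0_H0:
            return {h0, h0};  // replicate the low lane
        case HalfType::H1_H1:
            return {h1, h1};  // replicate the high lane
        case HalfType::F32:   // F32 reinterprets the register as one float; not modeled here
        case HalfType::H0_H1:
        default:
            return {h0, h1};  // use both lanes as stored
        }
    }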
diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
index c2164ba50..fbcd35b18 100644
--- a/src/video_core/shader/decode/arithmetic_half_immediate.cpp
+++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
@@ -17,34 +17,33 @@ u32 ShaderIR::DecodeArithmeticHalfImmediate(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) {
- UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0);
+ if (instr.alu_half_imm.ftz != 0) {
+ LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
+ }
} else {
UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None);
}
- UNIMPLEMENTED_IF_MSG(instr.alu_half_imm.saturate != 0,
- "Half float immediate saturation not implemented");
- Node op_a = GetRegister(instr.gpr8);
+ Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half_imm.type_a);
op_a = GetOperandAbsNegHalf(op_a, instr.alu_half_imm.abs_a, instr.alu_half_imm.negate_a);
const Node op_b = UnpackHalfImmediate(instr, true);
Node value = [&]() {
- MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a}};
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_IMM:
- return Operation(OperationCode::HAdd, meta, op_a, op_b);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_IMM:
- return Operation(OperationCode::HMul, meta, op_a, op_b);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNREACHABLE();
return Immediate(0);
}
}();
- value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half_imm.merge);
+ value = GetSaturatedHalfFloat(value, instr.alu_half_imm.saturate);
+ value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half_imm.merge);
SetRegister(bb, instr.gpr0, value);
-
return pc;
}
diff --git a/src/video_core/shader/decode/half_set.cpp b/src/video_core/shader/decode/half_set.cpp
index 748368555..1dd94bf9d 100644
--- a/src/video_core/shader/decode/half_set.cpp
+++ b/src/video_core/shader/decode/half_set.cpp
@@ -18,11 +18,13 @@ u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- UNIMPLEMENTED_IF(instr.hset2.ftz != 0);
+ if (instr.hset2.ftz != 0) {
+ LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
+ }
+
+ Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hset2.type_a);
+ op_a = GetOperandAbsNegHalf(op_a, instr.hset2.abs_a, instr.hset2.negate_a);
- // instr.hset2.type_a
- // instr.hset2.type_b
- Node op_a = GetRegister(instr.gpr8);
Node op_b = [&]() {
switch (opcode->get().GetId()) {
case OpCode::Id::HSET2_R:
@@ -32,14 +34,12 @@ u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) {
return Immediate(0);
}
}();
-
- op_a = GetOperandAbsNegHalf(op_a, instr.hset2.abs_a, instr.hset2.negate_a);
+ op_b = UnpackHalfFloat(op_b, instr.hset2.type_b);
op_b = GetOperandAbsNegHalf(op_b, instr.hset2.abs_b, instr.hset2.negate_b);
const Node second_pred = GetPredicate(instr.hset2.pred39, instr.hset2.neg_pred);
- MetaHalfArithmetic meta{false, {instr.hset2.type_a, instr.hset2.type_b}};
- const Node comparison_pair = GetPredicateComparisonHalf(instr.hset2.cond, meta, op_a, op_b);
+ const Node comparison_pair = GetPredicateComparisonHalf(instr.hset2.cond, op_a, op_b);
const OperationCode combiner = GetPredicateCombiner(instr.hset2.op);
diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp
index e68512692..6e59eb650 100644
--- a/src/video_core/shader/decode/half_set_predicate.cpp
+++ b/src/video_core/shader/decode/half_set_predicate.cpp
@@ -19,10 +19,10 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0);
- Node op_a = GetRegister(instr.gpr8);
+ Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hsetp2.type_a);
op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a);
- const Node op_b = [&]() {
+ Node op_b = [&]() {
switch (opcode->get().GetId()) {
case OpCode::Id::HSETP2_R:
return GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.abs_a,
@@ -32,6 +32,7 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
return Immediate(0);
}
}();
+ op_b = UnpackHalfFloat(op_b, instr.hsetp2.type_b);
// We can't use the constant predicate as destination.
ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex));
@@ -42,8 +43,7 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
const OperationCode pair_combiner =
instr.hsetp2.h_and ? OperationCode::LogicalAll2 : OperationCode::LogicalAny2;
- MetaHalfArithmetic meta = {false, {instr.hsetp2.type_a, instr.hsetp2.type_b}};
- const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, meta, op_a, op_b);
+ const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, op_a, op_b);
const Node first_pred = Operation(pair_combiner, comparison);
// Set the primary predicate to the result of Predicate OP SecondPredicate
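For reference: HSETP2 compares both packed lanes and then reduces the two per-lane results into a single predicate, with h_and selecting LogicalAll2 (AND) versus LogicalAny2 (OR). A minimal scalar sketch of that reduction, with a hypothetical helper name:

    // Illustrative reduction of a per-lane comparison pair into one predicate.
    bool CombineLanePredicates(bool lane0, bool lane1, bool h_and) {
        return h_and ? (lane0 && lane1)   // LogicalAll2: both lanes must pass
                     : (lane0 || lane1);  // LogicalAny2: either lane passing is enough
    }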
diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp
index 7a07c5ec6..5c1becce5 100644
--- a/src/video_core/shader/decode/hfma2.cpp
+++ b/src/video_core/shader/decode/hfma2.cpp
@@ -27,10 +27,6 @@ u32 ShaderIR::DecodeHfma2(NodeBlock& bb, u32 pc) {
}
constexpr auto identity = HalfType::H0_H1;
-
- const HalfType type_a = instr.hfma2.type_a;
- const Node op_a = GetRegister(instr.gpr8);
-
bool neg_b{}, neg_c{};
auto [saturate, type_b, op_b, type_c,
op_c] = [&]() -> std::tuple<bool, HalfType, Node, HalfType, Node> {
@@ -62,11 +58,11 @@ u32 ShaderIR::DecodeHfma2(NodeBlock& bb, u32 pc) {
}();
UNIMPLEMENTED_IF_MSG(saturate, "HFMA2 saturation is not implemented");
- op_b = GetOperandAbsNegHalf(op_b, false, neg_b);
- op_c = GetOperandAbsNegHalf(op_c, false, neg_c);
+ const Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hfma2.type_a);
+ op_b = GetOperandAbsNegHalf(UnpackHalfFloat(op_b, type_b), false, neg_b);
+ op_c = GetOperandAbsNegHalf(UnpackHalfFloat(op_c, type_c), false, neg_c);
- MetaHalfArithmetic meta{true, {type_a, type_b, type_c}};
- Node value = Operation(OperationCode::HFma, meta, op_a, op_b, op_c);
+ Node value = Operation(OperationCode::HFma, PRECISE, op_a, op_b, op_c);
value = HalfMerge(GetRegister(instr.gpr0), value, instr.hfma2.merge);
SetRegister(bb, instr.gpr0, value);
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index ea3c71eed..ea1092db1 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -8,6 +8,7 @@
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/logging/log.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/shader/shader_ir.h"
@@ -18,6 +19,23 @@ using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
+namespace {
+u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) {
+ switch (uniform_type) {
+ case Tegra::Shader::UniformType::Single:
+ return 1;
+ case Tegra::Shader::UniformType::Double:
+ return 2;
+ case Tegra::Shader::UniformType::Quad:
+ case Tegra::Shader::UniformType::UnsignedQuad:
+ return 4;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type));
+ return 1;
+ }
+}
+} // namespace
+
u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
@@ -85,8 +103,8 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::LD_L: {
- UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}",
- static_cast<u32>(instr.ld_l.unknown.Value()));
+ LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}",
+ static_cast<u64>(instr.ld_l.unknown.Value()));
const auto GetLmem = [&](s32 offset) {
ASSERT(offset % 4 == 0);
@@ -126,45 +144,15 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::LDG: {
- const u32 count = [&]() {
- switch (instr.ldg.type) {
- case Tegra::Shader::UniformType::Single:
- return 1;
- case Tegra::Shader::UniformType::Double:
- return 2;
- case Tegra::Shader::UniformType::Quad:
- case Tegra::Shader::UniformType::UnsignedQuad:
- return 4;
- default:
- UNIMPLEMENTED_MSG("Unimplemented LDG size!");
- return 1;
- }
- }();
-
- const Node addr_register = GetRegister(instr.gpr8);
- const Node base_address =
- TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size()));
- const auto cbuf = std::get_if<CbufNode>(base_address);
- ASSERT(cbuf != nullptr);
- const auto cbuf_offset_imm = std::get_if<ImmediateNode>(cbuf->GetOffset());
- ASSERT(cbuf_offset_imm != nullptr);
- const auto cbuf_offset = cbuf_offset_imm->GetValue();
-
- bb.push_back(Comment(
- fmt::format("Base address is c[0x{:x}][0x{:x}]", cbuf->GetIndex(), cbuf_offset)));
-
- const GlobalMemoryBase descriptor{cbuf->GetIndex(), cbuf_offset};
- used_global_memory_bases.insert(descriptor);
-
- const Node immediate_offset =
- Immediate(static_cast<u32>(instr.ldg.immediate_offset.Value()));
- const Node base_real_address =
- Operation(OperationCode::UAdd, NO_PRECISE, immediate_offset, addr_register);
+ const auto [real_address_base, base_address, descriptor] =
+ TrackAndGetGlobalMemory(bb, GetRegister(instr.gpr8),
+ static_cast<u32>(instr.ldg.immediate_offset.Value()), false);
+ const u32 count = GetUniformTypeElementsCount(instr.ldg.type);
for (u32 i = 0; i < count; ++i) {
const Node it_offset = Immediate(i * 4);
const Node real_address =
- Operation(OperationCode::UAdd, NO_PRECISE, base_real_address, it_offset);
+ Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset);
const Node gmem = StoreNode(GmemNode(real_address, base_address, descriptor));
SetTemporal(bb, i, gmem);
@@ -174,6 +162,28 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
break;
}
+ case OpCode::Id::STG: {
+ const auto [real_address_base, base_address, descriptor] =
+ TrackAndGetGlobalMemory(bb, GetRegister(instr.gpr8),
+ static_cast<u32>(instr.stg.immediate_offset.Value()), true);
+
+ // Encode in temporary registers like this: real_base_address, {registers_to_be_written...}
+ SetTemporal(bb, 0, real_address_base);
+
+ const u32 count = GetUniformTypeElementsCount(instr.stg.type);
+ for (u32 i = 0; i < count; ++i) {
+ SetTemporal(bb, i + 1, GetRegister(instr.gpr0.Value() + i));
+ }
+ for (u32 i = 0; i < count; ++i) {
+ const Node it_offset = Immediate(i * 4);
+ const Node real_address =
+ Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset);
+ const Node gmem = StoreNode(GmemNode(real_address, base_address, descriptor));
+
+ bb.push_back(Operation(OperationCode::Assign, gmem, GetTemporal(i + 1)));
+ }
+ break;
+ }
case OpCode::Id::ST_A: {
UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex,
"Indirect attribute loads are not supported");
@@ -205,8 +215,8 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::ST_L: {
- UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}",
- static_cast<u32>(instr.st_l.unknown.Value()));
+ LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}",
+ static_cast<u64>(instr.st_l.cache_management.Value()));
const auto GetLmemAddr = [&](s32 offset) {
ASSERT(offset % 4 == 0);
@@ -236,4 +246,34 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
return pc;
}
+std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackAndGetGlobalMemory(NodeBlock& bb,
+ Node addr_register,
+ u32 immediate_offset,
+ bool is_write) {
+ const Node base_address{
+ TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size()))};
+ const auto cbuf = std::get_if<CbufNode>(base_address);
+ ASSERT(cbuf != nullptr);
+ const auto cbuf_offset_imm = std::get_if<ImmediateNode>(cbuf->GetOffset());
+ ASSERT(cbuf_offset_imm != nullptr);
+ const auto cbuf_offset = cbuf_offset_imm->GetValue();
+
+ bb.push_back(
+ Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", cbuf->GetIndex(), cbuf_offset)));
+
+ const GlobalMemoryBase descriptor{cbuf->GetIndex(), cbuf_offset};
+ const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor);
+ auto& usage = entry->second;
+ if (is_write) {
+ usage.is_written = true;
+ } else {
+ usage.is_read = true;
+ }
+
+ const auto real_address =
+ Operation(OperationCode::UAdd, NO_PRECISE, Immediate(immediate_offset), addr_register);
+
+ return {real_address, base_address, descriptor};
+}
+
} // namespace VideoCommon::Shader
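For reference: TrackAndGetGlobalMemory records, per tracked (cbuf index, cbuf offset) base, whether the global memory region is read (LDG) and/or written (STG). A hedged sketch of how a backend could turn those flags into buffer qualifiers; the GlobalMemoryUsage struct name and the consumer function are assumptions, only the is_read/is_written members appear above:

    // Hypothetical consumer of the usage flags gathered above.
    struct GlobalMemoryUsage {
        bool is_read{};
        bool is_written{};
    };

    const char* BufferQualifier(const GlobalMemoryUsage& usage) {
        if (usage.is_read && usage.is_written) return "";          // read-write storage buffer
        if (usage.is_written)                  return "writeonly"; // touched only by STG
        return "readonly";                                         // touched only by LDG
    }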
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index a775b402b..fa65ac9a9 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -40,7 +40,7 @@ static std::size_t GetCoordCount(TextureType texture_type) {
u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
-
+ bool is_bindless = false;
switch (opcode->get().GetId()) {
case OpCode::Id::TEX: {
if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
@@ -54,7 +54,25 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
const auto process_mode = instr.tex.GetTextureProcessMode();
WriteTexInstructionFloat(
bb, instr,
- GetTexCode(instr, texture_type, process_mode, depth_compare, is_array, is_aoffi));
+ GetTexCode(instr, texture_type, process_mode, depth_compare, is_array, is_aoffi, {}));
+ break;
+ }
+ case OpCode::Id::TEX_B: {
+ UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+
+ if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete");
+ }
+
+ const TextureType texture_type{instr.tex_b.texture_type};
+ const bool is_array = instr.tex_b.array != 0;
+ const bool is_aoffi = instr.tex.UsesMiscMode(TextureMiscMode::AOFFI);
+ const bool depth_compare = instr.tex_b.UsesMiscMode(TextureMiscMode::DC);
+ const auto process_mode = instr.tex_b.GetTextureProcessMode();
+ WriteTexInstructionFloat(bb, instr,
+ GetTexCode(instr, texture_type, process_mode, depth_compare,
+ is_array, is_aoffi, {instr.gpr20}));
break;
}
case OpCode::Id::TEXS: {
@@ -134,6 +152,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
WriteTexsInstructionFloat(bb, instr, values);
break;
}
+ case OpCode::Id::TXQ_B:
+ is_bindless = true;
+ [[fallthrough]];
case OpCode::Id::TXQ: {
if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) {
LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete");
@@ -143,7 +164,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
// Sadly, not all texture instructions specify the type of texture their sampler
// uses. This must be fixed at a later instance.
const auto& sampler =
- GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
+ is_bindless
+ ? GetBindlessSampler(instr.gpr8, Tegra::Shader::TextureType::Texture2D, false,
+ false)
+ : GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
u32 indexer = 0;
switch (instr.txq.query_type) {
@@ -154,7 +178,8 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
}
MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element};
const Node value =
- Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8));
+ Operation(OperationCode::TextureQueryDimensions, meta,
+ GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0)));
SetTemporal(bb, indexer++, value);
}
for (u32 i = 0; i < indexer; ++i) {
@@ -168,6 +193,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
}
break;
}
+ case OpCode::Id::TMML_B:
+ is_bindless = true;
+ [[fallthrough]];
case OpCode::Id::TMML: {
UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
"NDV is not implemented");
@@ -178,7 +206,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
auto texture_type = instr.tmml.texture_type.Value();
const bool is_array = instr.tmml.array != 0;
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
+ const auto& sampler = is_bindless
+ ? GetBindlessSampler(instr.gpr20, texture_type, is_array, false)
+ : GetSampler(instr.sampler, texture_type, is_array, false);
std::vector<Node> coords;
@@ -199,17 +229,19 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
texture_type = TextureType::Texture2D;
}
-
+ u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
+ if (!instr.tmml.IsComponentEnabled(element)) {
+ continue;
+ }
auto params = coords;
MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element};
const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
- SetTemporal(bb, element, value);
+ SetTemporal(bb, indexer++, value);
}
- for (u32 element = 0; element < 2; ++element) {
- SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element));
+ for (u32 i = 0; i < indexer; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
}
-
break;
}
case OpCode::Id::TLDS: {
@@ -254,6 +286,34 @@ const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, Textu
return *used_samplers.emplace(entry).first;
}
+const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, TextureType type,
+ bool is_array, bool is_shadow) {
+ const Node sampler_register = GetRegister(reg);
+ const Node base_sampler =
+ TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size()));
+ const auto cbuf = std::get_if<CbufNode>(base_sampler);
+ const auto cbuf_offset_imm = std::get_if<ImmediateNode>(cbuf->GetOffset());
+ ASSERT(cbuf_offset_imm != nullptr);
+ const auto cbuf_offset = cbuf_offset_imm->GetValue();
+ const auto cbuf_index = cbuf->GetIndex();
+ const u64 cbuf_key = (cbuf_index << 32) | cbuf_offset;
+
+ // If this sampler has already been used, return the existing mapping.
+ const auto itr =
+ std::find_if(used_samplers.begin(), used_samplers.end(),
+ [&](const Sampler& entry) { return entry.GetOffset() == cbuf_key; });
+ if (itr != used_samplers.end()) {
+ ASSERT(itr->GetType() == type && itr->IsArray() == is_array &&
+ itr->IsShadow() == is_shadow);
+ return *itr;
+ }
+
+ // Otherwise create a new mapping for this sampler
+ const std::size_t next_index = used_samplers.size();
+ const Sampler entry{cbuf_index, cbuf_offset, next_index, type, is_array, is_shadow};
+ return *used_samplers.emplace(entry).first;
+}
+
void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
u32 dest_elem = 0;
for (u32 elem = 0; elem < 4; ++elem) {
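For reference: the bindless path keys samplers by the tracked const buffer location rather than by an instruction field, packing the cbuf index into the high 32 bits of the key and the byte offset into the low 32 bits. A minimal sketch of that packing, illustrative only and widened before the shift to keep all 64 bits:

    #include <cstdint>

    // Same packing scheme as the cbuf_key computed in GetBindlessSampler.
    std::uint64_t MakeBindlessSamplerKey(std::uint32_t cbuf_index, std::uint32_t cbuf_offset) {
        return (static_cast<std::uint64_t>(cbuf_index) << 32) | cbuf_offset;
    }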
@@ -326,22 +386,27 @@ void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
TextureProcessMode process_mode, std::vector<Node> coords,
Node array, Node depth_compare, u32 bias_offset,
- std::vector<Node> aoffi) {
+ std::vector<Node> aoffi,
+ std::optional<Tegra::Shader::Register> bindless_reg) {
const bool is_array = array;
const bool is_shadow = depth_compare;
+ const bool is_bindless = bindless_reg.has_value();
UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
(texture_type == TextureType::TextureCube && is_array && is_shadow),
"This method is not supported.");
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);
+ const auto& sampler = is_bindless
+ ? GetBindlessSampler(*bindless_reg, texture_type, is_array, is_shadow)
+ : GetSampler(instr.sampler, texture_type, is_array, is_shadow);
const bool lod_needed = process_mode == TextureProcessMode::LZ ||
process_mode == TextureProcessMode::LL ||
process_mode == TextureProcessMode::LLA;
- // LOD selection (either via bias or explicit textureLod) not supported in GL for
- // sampler2DArrayShadow and samplerCubeArrayShadow.
+ // LOD selection (either via bias or explicit textureLod) not
+ // supported in GL for sampler2DArrayShadow and
+ // samplerCubeArrayShadow.
const bool gl_lod_supported =
!((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
(texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));
@@ -359,8 +424,9 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
lod = Immediate(0.0f);
break;
case TextureProcessMode::LB:
- // If present, lod or bias are always stored in the register indexed by the gpr20
- // field with an offset depending on the usage of the other registers
+ // If present, lod or bias are always stored in the register
+ // indexed by the gpr20 field with an offset depending on the
+ // usage of the other registers
bias = GetRegister(instr.gpr20.Value() + bias_offset);
break;
case TextureProcessMode::LL:
@@ -384,11 +450,18 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
TextureProcessMode process_mode, bool depth_compare, bool is_array,
- bool is_aoffi) {
+ bool is_aoffi, std::optional<Tegra::Shader::Register> bindless_reg) {
const bool lod_bias_enabled{
(process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ)};
+ const bool is_bindless = bindless_reg.has_value();
+
u64 parameter_register = instr.gpr20.Value();
+ if (is_bindless) {
+ ++parameter_register;
+ }
+
+ const u32 bias_lod_offset = (is_bindless ? 1 : 0);
if (lod_bias_enabled) {
++parameter_register;
}
@@ -423,7 +496,8 @@ Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
dc = GetRegister(parameter_register++);
}
- return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0, aoffi);
+ return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_lod_offset,
+ aoffi, bindless_reg);
}
Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
@@ -459,7 +533,8 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
dc = GetRegister(depth_register);
}
- return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset, {});
+ return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset, {},
+ {});
}
Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,