Diffstat (limited to 'src/shader_recompiler')
-rw-r--r--  src/shader_recompiler/CMakeLists.txt                                 |   1
-rw-r--r--  src/shader_recompiler/backend/bindings.h                             |   2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_context.cpp                 |   4
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.cpp                   |   3
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.h                     |   2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_image.cpp             |  18
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_instructions.h        |   5
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp           |   8
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp   |   4
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_context.cpp                  |   3
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp     |   4
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_image.cpp               |  16
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_instructions.h          |   5
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp             |   8
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.cpp                 |  86
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.h                   |  17
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.h                     |  13
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp   |  12
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp             |  54
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h        |   5
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp           |   8
-rw-r--r--  src/shader_recompiler/frontend/ir/basic_block.cpp                    |   5
-rw-r--r--  src/shader_recompiler/frontend/ir/basic_block.h                      |   3
-rw-r--r--  src/shader_recompiler/frontend/ir/ir_emitter.cpp                     |  16
-rw-r--r--  src/shader_recompiler/frontend/ir/ir_emitter.h                       |   7
-rw-r--r--  src/shader_recompiler/frontend/ir/microinstruction.cpp               |  11
-rw-r--r--  src/shader_recompiler/frontend/ir/opcodes.inc                        |   6
-rw-r--r--  src/shader_recompiler/frontend/ir/value.h                            |   2
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate_program.cpp         |   4
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp            |   5
-rw-r--r--  src/shader_recompiler/ir_opt/passes.h                                |   1
-rw-r--r--  src/shader_recompiler/ir_opt/rescaling_pass.cpp                      | 327
-rw-r--r--  src/shader_recompiler/shader_info.h                                  |  10
33 files changed, 665 insertions(+), 10 deletions(-)
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index b5b7e5e83..bc3df80c8 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -221,6 +221,7 @@ add_library(shader_recompiler STATIC
ir_opt/lower_fp16_to_fp32.cpp
ir_opt/lower_int64_to_int32.cpp
ir_opt/passes.h
+ ir_opt/rescaling_pass.cpp
ir_opt/ssa_rewrite_pass.cpp
ir_opt/texture_pass.cpp
ir_opt/verification_pass.cpp
diff --git a/src/shader_recompiler/backend/bindings.h b/src/shader_recompiler/backend/bindings.h
index 35503000c..669702553 100644
--- a/src/shader_recompiler/backend/bindings.h
+++ b/src/shader_recompiler/backend/bindings.h
@@ -14,6 +14,8 @@ struct Bindings {
u32 storage_buffer{};
u32 texture{};
u32 image{};
+ u32 texture_scaling_index{};
+ u32 image_scaling_index{};
};
} // namespace Shader::Backend
diff --git a/src/shader_recompiler/backend/glasm/emit_context.cpp b/src/shader_recompiler/backend/glasm/emit_context.cpp
index 069c019ad..8fd459dfe 100644
--- a/src/shader_recompiler/backend/glasm/emit_context.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_context.cpp
@@ -6,6 +6,7 @@
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
+#include "shader_recompiler/backend/glasm/emit_glasm.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
@@ -55,7 +56,8 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
}
if (!runtime_info.glasm_use_storage_buffers) {
if (const size_t num = info.storage_buffers_descriptors.size(); num > 0) {
- Add("PARAM c[{}]={{program.local[0..{}]}};", num, num - 1);
+ const size_t index{num + PROGRAM_LOCAL_PARAMETER_STORAGE_BUFFER_BASE};
+ Add("PARAM c[{}]={{program.local[0..{}]}};", index, index - 1);
}
}
stage = program.stage;
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index 4ce1c4f54..004658546 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -448,6 +448,9 @@ std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info, I
header += fmt::format("SHARED_MEMORY {};", program.shared_memory_size);
header += fmt::format("SHARED shared_mem[]={{program.sharedmem}};");
}
+ if (program.info.uses_rescaling_uniform) {
+ header += "PARAM scaling[1]={program.local[0..0]};";
+ }
header += "TEMP ";
for (size_t index = 0; index < ctx.reg_alloc.NumUsedRegisters(); ++index) {
header += fmt::format("R{},", index);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.h b/src/shader_recompiler/backend/glasm/emit_glasm.h
index bcb55f062..292655acb 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.h
@@ -13,6 +13,8 @@
namespace Shader::Backend::GLASM {
+constexpr u32 PROGRAM_LOCAL_PARAMETER_STORAGE_BUFFER_BASE = 1;
+
[[nodiscard]] std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
index 09e3a9b82..d325d31c7 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
@@ -608,6 +608,24 @@ void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Re
ctx.Add("STOREIM.{} {},{},{},{};", format, image, color, coord, type);
}
+void EmitIsTextureScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index) {
+ if (!index.IsImmediate()) {
+ throw NotImplementedException("Non-constant texture rescaling");
+ }
+ ctx.Add("AND.U RC.x,scaling[0].x,{};"
+ "SNE.S {},RC.x,0;",
+ 1u << index.U32(), ctx.reg_alloc.Define(inst));
+}
+
+void EmitIsImageScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index) {
+ if (!index.IsImmediate()) {
+ throw NotImplementedException("Non-constant texture rescaling");
+ }
+ ctx.Add("AND.U RC.x,scaling[0].y,{};"
+ "SNE.S {},RC.x,0;",
+ 1u << index.U32(), ctx.reg_alloc.Define(inst));
+}
+
void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "ADD.U32");
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
index 12afda43b..1f343bff5 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
@@ -72,6 +72,7 @@ void EmitInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
+void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset);
void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
@@ -303,6 +304,8 @@ void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitISub32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitISub64(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
+void EmitSDiv32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
+void EmitUDiv32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b);
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
@@ -553,6 +556,8 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord);
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
Register color);
+void EmitIsTextureScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index);
+void EmitIsImageScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index);
void EmitBindlessImageAtomicIAdd32(EmitContext&);
void EmitBindlessImageAtomicSMin32(EmitContext&);
void EmitBindlessImageAtomicUMin32(EmitContext&);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp
index f55c26b76..8aa494a4d 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp
@@ -90,6 +90,14 @@ void EmitIMul32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("MUL.S {}.x,{},{};", inst, a, b);
}
+void EmitSDiv32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
+ ctx.Add("DIV.S {}.x,{},{};", inst, a, b);
+}
+
+void EmitUDiv32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b) {
+ ctx.Add("DIV.U {}.x,{},{};", inst, a, b);
+}
+
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
if (value.type != Type::Register && static_cast<s32>(value.imm_u32) < 0) {
ctx.Add("MOV.S {},{};", inst, -static_cast<s32>(value.imm_u32));
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
index e537f6073..681aeda8d 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
@@ -210,6 +210,10 @@ void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.F {}.x,y_direction[0].w;", inst);
}
+void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
+ ctx.Add("MOV.F {}.x,scaling[0].z;", inst);
+}
+
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,0;", inst);
}
diff --git a/src/shader_recompiler/backend/glsl/emit_context.cpp b/src/shader_recompiler/backend/glsl/emit_context.cpp
index 4e6f2c0fe..97bd59302 100644
--- a/src/shader_recompiler/backend/glsl/emit_context.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_context.cpp
@@ -393,6 +393,9 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
DefineGenericOutput(index, program.invocations);
}
}
+ if (info.uses_rescaling_uniform) {
+ header += "layout(location=0) uniform vec4 scaling;";
+ }
DefineConstantBuffers(bindings);
DefineStorageBuffers(bindings);
SetupImages(bindings);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 170db269a..4c26f3829 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -445,6 +445,10 @@ void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
ctx.AddF32("{}=gl_FrontMaterial.ambient.a;", inst);
}
+void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
+ ctx.AddF32("{}=scaling.z;", inst);
+}
+
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset) {
ctx.AddU32("{}=lmem[{}];", inst, word_offset);
}
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index 447eb8e0a..2f78d0267 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -612,6 +612,22 @@ void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value
value);
}
+void EmitIsTextureScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index) {
+ if (!index.IsImmediate()) {
+ throw NotImplementedException("Non-constant texture rescaling");
+ }
+ const u32 image_index{index.U32()};
+ ctx.AddU1("{}=(ftou(scaling.x)&{})!=0;", inst, 1u << image_index);
+}
+
+void EmitIsImageScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index) {
+ if (!index.IsImmediate()) {
+ throw NotImplementedException("Non-constant texture rescaling");
+ }
+ const u32 image_index{index.U32()};
+ ctx.AddU1("{}=(ftou(scaling.y)&{})!=0;", inst, 1u << image_index);
+}
+
void EmitBindlessImageSampleImplicitLod(EmitContext&) {
NotImplemented();
}
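Note on the non-unified path: the GLSL backend above (and the GLASM scaling[] parameter earlier) consumes a single vec4 named "scaling", with the texture-rescaled bitmask in .x, the image-rescaled bitmask in .y (both reinterpreted to integers via ftou), and the resolution down factor in .z. A minimal host-side packing sketch, assuming C++20 std::bit_cast; the function name is illustrative and not part of this change:

#include <array>
#include <bit>
#include <cstdint>

// Illustrative packing for the "scaling" vec4 / scaling[0] parameter consumed above.
std::array<float, 4> PackScalingUniform(std::uint32_t texture_mask, std::uint32_t image_mask,
                                        float down_factor) {
    return {
        std::bit_cast<float>(texture_mask), // read back as ftou(scaling.x) & (1u << index)
        std::bit_cast<float>(image_mask),   // read back as ftou(scaling.y) & (1u << index)
        down_factor,                        // read back by EmitResolutionDownFactor (scaling.z)
        0.0f,                               // unused
    };
}
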
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
index 5936d086f..f86502e4c 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
@@ -85,6 +85,7 @@ void EmitInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
+void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset);
void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
@@ -362,6 +363,8 @@ void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::strin
void EmitISub32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitISub64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
+void EmitSDiv32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
+void EmitUDiv32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
@@ -627,6 +630,8 @@ void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords);
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view color);
+void EmitIsTextureScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index);
+void EmitIsImageScaled(EmitContext& ctx, IR::Inst& inst, const IR::Value& index);
void EmitBindlessImageAtomicIAdd32(EmitContext&);
void EmitBindlessImageAtomicSMin32(EmitContext&);
void EmitBindlessImageAtomicUMin32(EmitContext&);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
index 38419f88f..88c1d4c5e 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
@@ -78,6 +78,14 @@ void EmitIMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::strin
ctx.AddU32("{}=uint({}*{});", inst, a, b);
}
+void EmitSDiv32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
+ ctx.AddU32("{}=uint(int({})/int({}));", inst, a, b);
+}
+
+void EmitUDiv32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
+ ctx.AddU32("{}={}/{};", inst, a, b);
+}
+
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=uint(-({}));", inst, value);
}
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 3c84e6466..723455462 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -7,11 +7,14 @@
#include <climits>
#include <string_view>
+#include <boost/container/static_vector.hpp>
+
#include <fmt/format.h>
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "shader_recompiler/backend/spirv/emit_context.h"
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
namespace Shader::Backend::SPIRV {
namespace {
@@ -474,8 +477,9 @@ void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_vie
EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_info_,
IR::Program& program, Bindings& bindings)
- : Sirit::Module(profile_.supported_spirv), profile{profile_},
- runtime_info{runtime_info_}, stage{program.stage} {
+ : Sirit::Module(profile_.supported_spirv), profile{profile_}, runtime_info{runtime_info_},
+ stage{program.stage}, texture_rescaling_index{bindings.texture_scaling_index},
+ image_rescaling_index{bindings.image_scaling_index} {
const bool is_unified{profile.unified_descriptor_binding};
u32& uniform_binding{is_unified ? bindings.unified : bindings.uniform_buffer};
u32& storage_binding{is_unified ? bindings.unified : bindings.storage_buffer};
@@ -492,10 +496,11 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf
DefineStorageBuffers(program.info, storage_binding);
DefineTextureBuffers(program.info, texture_binding);
DefineImageBuffers(program.info, image_binding);
- DefineTextures(program.info, texture_binding);
- DefineImages(program.info, image_binding);
+ DefineTextures(program.info, texture_binding, bindings.texture_scaling_index);
+ DefineImages(program.info, image_binding, bindings.image_scaling_index);
DefineAttributeMemAccess(program.info);
DefineGlobalMemoryFunctions(program.info);
+ DefineRescalingInput(program.info);
}
EmitContext::~EmitContext() = default;
@@ -996,6 +1001,73 @@ void EmitContext::DefineGlobalMemoryFunctions(const Info& info) {
define(&StorageDefinitions::U32x4, storage_types.U32x4, U32[4], sizeof(u32[4]));
}
+void EmitContext::DefineRescalingInput(const Info& info) {
+ if (!info.uses_rescaling_uniform) {
+ return;
+ }
+ if (profile.unified_descriptor_binding) {
+ DefineRescalingInputPushConstant();
+ } else {
+ DefineRescalingInputUniformConstant();
+ }
+}
+
+void EmitContext::DefineRescalingInputPushConstant() {
+ boost::container::static_vector<Id, 3> members{};
+ u32 member_index{0};
+
+ rescaling_textures_type = TypeArray(U32[1], Const(4u));
+ Decorate(rescaling_textures_type, spv::Decoration::ArrayStride, 4u);
+ members.push_back(rescaling_textures_type);
+ rescaling_textures_member_index = member_index++;
+
+ rescaling_images_type = TypeArray(U32[1], Const(NUM_IMAGE_SCALING_WORDS));
+ Decorate(rescaling_images_type, spv::Decoration::ArrayStride, 4u);
+ members.push_back(rescaling_images_type);
+ rescaling_images_member_index = member_index++;
+
+ if (stage != Stage::Compute) {
+ members.push_back(F32[1]);
+ rescaling_downfactor_member_index = member_index++;
+ }
+ const Id push_constant_struct{TypeStruct(std::span(members.data(), members.size()))};
+ Decorate(push_constant_struct, spv::Decoration::Block);
+ Name(push_constant_struct, "ResolutionInfo");
+
+ MemberDecorate(push_constant_struct, rescaling_textures_member_index, spv::Decoration::Offset,
+ static_cast<u32>(offsetof(RescalingLayout, rescaling_textures)));
+ MemberName(push_constant_struct, rescaling_textures_member_index, "rescaling_textures");
+
+ MemberDecorate(push_constant_struct, rescaling_images_member_index, spv::Decoration::Offset,
+ static_cast<u32>(offsetof(RescalingLayout, rescaling_images)));
+ MemberName(push_constant_struct, rescaling_images_member_index, "rescaling_images");
+
+ if (stage != Stage::Compute) {
+ MemberDecorate(push_constant_struct, rescaling_downfactor_member_index,
+ spv::Decoration::Offset,
+ static_cast<u32>(offsetof(RescalingLayout, down_factor)));
+ MemberName(push_constant_struct, rescaling_downfactor_member_index, "down_factor");
+ }
+ const Id pointer_type{TypePointer(spv::StorageClass::PushConstant, push_constant_struct)};
+ rescaling_push_constants = AddGlobalVariable(pointer_type, spv::StorageClass::PushConstant);
+ Name(rescaling_push_constants, "rescaling_push_constants");
+
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(rescaling_push_constants);
+ }
+}
+
+void EmitContext::DefineRescalingInputUniformConstant() {
+ const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, F32[4])};
+ rescaling_uniform_constant =
+ AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant);
+ Decorate(rescaling_uniform_constant, spv::Decoration::Location, 0u);
+
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(rescaling_uniform_constant);
+ }
+}
+
void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
if (info.constant_buffer_descriptors.empty()) {
return;
@@ -1184,7 +1256,7 @@ void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
}
}
-void EmitContext::DefineTextures(const Info& info, u32& binding) {
+void EmitContext::DefineTextures(const Info& info, u32& binding, u32& scaling_index) {
textures.reserve(info.texture_descriptors.size());
for (const TextureDescriptor& desc : info.texture_descriptors) {
const Id image_type{ImageType(*this, desc)};
@@ -1206,13 +1278,14 @@ void EmitContext::DefineTextures(const Info& info, u32& binding) {
interfaces.push_back(id);
}
++binding;
+ ++scaling_index;
}
if (info.uses_atomic_image_u32) {
image_u32 = TypePointer(spv::StorageClass::Image, U32[1]);
}
}
-void EmitContext::DefineImages(const Info& info, u32& binding) {
+void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_index) {
images.reserve(info.image_descriptors.size());
for (const ImageDescriptor& desc : info.image_descriptors) {
if (desc.count != 1) {
@@ -1233,6 +1306,7 @@ void EmitContext::DefineImages(const Info& info, u32& binding) {
interfaces.push_back(id);
}
++binding;
+ ++scaling_index;
}
}
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index 112c52382..63f8185d9 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -238,6 +238,16 @@ public:
Id indexed_load_func{};
Id indexed_store_func{};
+ Id rescaling_uniform_constant{};
+ Id rescaling_push_constants{};
+ Id rescaling_textures_type{};
+ Id rescaling_images_type{};
+ u32 rescaling_textures_member_index{};
+ u32 rescaling_images_member_index{};
+ u32 rescaling_downfactor_member_index{};
+ u32 texture_rescaling_index{};
+ u32 image_rescaling_index{};
+
Id local_memory{};
Id shared_memory_u8{};
@@ -310,10 +320,13 @@ private:
void DefineStorageBuffers(const Info& info, u32& binding);
void DefineTextureBuffers(const Info& info, u32& binding);
void DefineImageBuffers(const Info& info, u32& binding);
- void DefineTextures(const Info& info, u32& binding);
- void DefineImages(const Info& info, u32& binding);
+ void DefineTextures(const Info& info, u32& binding, u32& scaling_index);
+ void DefineImages(const Info& info, u32& binding, u32& scaling_index);
void DefineAttributeMemAccess(const Info& info);
void DefineGlobalMemoryFunctions(const Info& info);
+ void DefineRescalingInput(const Info& info);
+ void DefineRescalingInputPushConstant();
+ void DefineRescalingInputUniformConstant();
void DefineInputs(const IR::Program& program);
void DefineOutputs(const IR::Program& program);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
index db0c935fe..4b25534ce 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -16,6 +16,19 @@
namespace Shader::Backend::SPIRV {
+constexpr u32 NUM_TEXTURE_SCALING_WORDS = 4;
+constexpr u32 NUM_IMAGE_SCALING_WORDS = 2;
+constexpr u32 NUM_TEXTURE_AND_IMAGE_SCALING_WORDS =
+ NUM_TEXTURE_SCALING_WORDS + NUM_IMAGE_SCALING_WORDS;
+
+struct RescalingLayout {
+ alignas(16) std::array<u32, NUM_TEXTURE_SCALING_WORDS> rescaling_textures;
+ alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images;
+ alignas(16) u32 down_factor;
+};
+constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures);
+constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor);
+
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);
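On the unified-descriptor (Vulkan) path, DefineRescalingInputPushConstant in emit_context.cpp mirrors the RescalingLayout struct above, so the host side is expected to push a block with this exact layout. A minimal sketch of how such a block might be filled, assuming the definitions above; MakeRescalingData and the is_scaled spans are illustrative names, not part of this change:

#include <bit>
#include <cstddef>
#include <span>

// Illustrative only: build the push-constant block consumed by the SPIR-V backend.
// Bit i of each word array matches IsScaled() in emit_spirv_image.cpp:
// word = i / 32, mask = 1u << (i % 32).
RescalingLayout MakeRescalingData(std::span<const bool> texture_is_scaled,
                                  std::span<const bool> image_is_scaled, float down_factor) {
    RescalingLayout data{};
    for (std::size_t i = 0; i < texture_is_scaled.size(); ++i) {
        if (texture_is_scaled[i]) {
            data.rescaling_textures[i / 32] |= 1u << (i % 32);
        }
    }
    for (std::size_t i = 0; i < image_is_scaled.size(); ++i) {
        if (image_is_scaled[i]) {
            data.rescaling_images[i / 32] |= 1u << (i % 32);
        }
    }
    // The shader reads this member as a 32-bit float at offsetof(RescalingLayout, down_factor).
    data.down_factor = std::bit_cast<u32>(down_factor);
    return data;
}
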
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index d3a93d5f4..bac683ae1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -526,6 +526,18 @@ Id EmitYDirection(EmitContext& ctx) {
return ctx.Const(ctx.runtime_info.y_negate ? -1.0f : 1.0f);
}
+Id EmitResolutionDownFactor(EmitContext& ctx) {
+ if (ctx.profile.unified_descriptor_binding) {
+ const Id pointer_type{ctx.TypePointer(spv::StorageClass::PushConstant, ctx.F32[1])};
+ const Id index{ctx.Const(ctx.rescaling_downfactor_member_index)};
+ const Id pointer{ctx.OpAccessChain(pointer_type, ctx.rescaling_push_constants, index)};
+ return ctx.OpLoad(ctx.F32[1], pointer);
+ } else {
+ const Id composite{ctx.OpLoad(ctx.F32[4], ctx.rescaling_uniform_constant)};
+ return ctx.OpCompositeExtract(ctx.F32[1], composite, 2u);
+ }
+}
+
Id EmitLoadLocal(EmitContext& ctx, Id word_offset) {
const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
return ctx.OpLoad(ctx.U32[1], pointer);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 1d5364309..4d168a96d 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -224,6 +224,36 @@ Id Emit(MethodPtrType sparse_ptr, MethodPtrType non_sparse_ptr, EmitContext& ctx
Decorate(ctx, inst, sample);
return ctx.OpCompositeExtract(result_type, sample, 1U);
}
+
+Id IsScaled(EmitContext& ctx, const IR::Value& index, Id member_index, u32 base_index) {
+ const Id push_constant_u32{ctx.TypePointer(spv::StorageClass::PushConstant, ctx.U32[1])};
+ Id bit{};
+ if (index.IsImmediate()) {
+ // Use BitwiseAnd instead of BitfieldExtract for better codegen on Nvidia OpenGL.
+ // LOP32I.NZ is used to set the predicate rather than BFE+ISETP.
+ const u32 index_value{index.U32() + base_index};
+ const Id word_index{ctx.Const(index_value / 32)};
+ const Id bit_index_mask{ctx.Const(1u << (index_value % 32))};
+ const Id pointer{ctx.OpAccessChain(push_constant_u32, ctx.rescaling_push_constants,
+ member_index, word_index)};
+ const Id word{ctx.OpLoad(ctx.U32[1], pointer)};
+ bit = ctx.OpBitwiseAnd(ctx.U32[1], word, bit_index_mask);
+ } else {
+ Id index_value{ctx.Def(index)};
+ if (base_index != 0) {
+ index_value = ctx.OpIAdd(ctx.U32[1], index_value, ctx.Const(base_index));
+ }
+ const Id bit_index{ctx.OpBitwiseAnd(ctx.U32[1], index_value, ctx.Const(31u))};
+ bit = ctx.OpBitFieldUExtract(ctx.U32[1], index_value, bit_index, ctx.Const(1u));
+ }
+ return ctx.OpINotEqual(ctx.U1, bit, ctx.u32_zero_value);
+}
+
+Id BitTest(EmitContext& ctx, Id mask, Id bit) {
+ const Id shifted{ctx.OpShiftRightLogical(ctx.U32[1], mask, bit)};
+ const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))};
+ return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value);
+}
} // Anonymous namespace
Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
@@ -470,4 +500,28 @@ void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
ctx.OpImageWrite(Image(ctx, index, info), coords, color);
}
+Id EmitIsTextureScaled(EmitContext& ctx, const IR::Value& index) {
+ if (ctx.profile.unified_descriptor_binding) {
+ const Id member_index{ctx.Const(ctx.rescaling_textures_member_index)};
+ return IsScaled(ctx, index, member_index, ctx.texture_rescaling_index);
+ } else {
+ const Id composite{ctx.OpLoad(ctx.F32[4], ctx.rescaling_uniform_constant)};
+ const Id mask_f32{ctx.OpCompositeExtract(ctx.F32[1], composite, 0u)};
+ const Id mask{ctx.OpBitcast(ctx.U32[1], mask_f32)};
+ return BitTest(ctx, mask, ctx.Def(index));
+ }
+}
+
+Id EmitIsImageScaled(EmitContext& ctx, const IR::Value& index) {
+ if (ctx.profile.unified_descriptor_binding) {
+ const Id member_index{ctx.Const(ctx.rescaling_images_member_index)};
+ return IsScaled(ctx, index, member_index, ctx.image_rescaling_index);
+ } else {
+ const Id composite{ctx.OpLoad(ctx.F32[4], ctx.rescaling_uniform_constant)};
+ const Id mask_f32{ctx.OpCompositeExtract(ctx.F32[1], composite, 1u)};
+ const Id mask{ctx.OpBitcast(ctx.U32[1], mask_f32)};
+ return BitTest(ctx, mask, ctx.Def(index));
+ }
+}
+
} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index c9db1c164..6cd22dd3e 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -75,6 +75,7 @@ Id EmitInvocationId(EmitContext& ctx);
Id EmitSampleId(EmitContext& ctx);
Id EmitIsHelperInvocation(EmitContext& ctx);
Id EmitYDirection(EmitContext& ctx);
+Id EmitResolutionDownFactor(EmitContext& ctx);
Id EmitLoadLocal(EmitContext& ctx, Id word_offset);
void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value);
Id EmitUndefU1(EmitContext& ctx);
@@ -283,6 +284,8 @@ Id EmitIAdd64(EmitContext& ctx, Id a, Id b);
Id EmitISub32(EmitContext& ctx, Id a, Id b);
Id EmitISub64(EmitContext& ctx, Id a, Id b);
Id EmitIMul32(EmitContext& ctx, Id a, Id b);
+Id EmitSDiv32(EmitContext& ctx, Id a, Id b);
+Id EmitUDiv32(EmitContext& ctx, Id a, Id b);
Id EmitINeg32(EmitContext& ctx, Id value);
Id EmitINeg64(EmitContext& ctx, Id value);
Id EmitIAbs32(EmitContext& ctx, Id value);
@@ -510,6 +513,8 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
Id derivates, Id offset, Id lod_clamp);
Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color);
+Id EmitIsTextureScaled(EmitContext& ctx, const IR::Value& index);
+Id EmitIsImageScaled(EmitContext& ctx, const IR::Value& index);
Id EmitBindlessImageAtomicIAdd32(EmitContext&);
Id EmitBindlessImageAtomicSMin32(EmitContext&);
Id EmitBindlessImageAtomicUMin32(EmitContext&);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
index 3501d7495..50277eec3 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
@@ -72,6 +72,14 @@ Id EmitIMul32(EmitContext& ctx, Id a, Id b) {
return ctx.OpIMul(ctx.U32[1], a, b);
}
+Id EmitSDiv32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpSDiv(ctx.U32[1], a, b);
+}
+
+Id EmitUDiv32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpUDiv(ctx.U32[1], a, b);
+}
+
Id EmitINeg32(EmitContext& ctx, Id value) {
return ctx.OpSNegate(ctx.U32[1], value);
}
diff --git a/src/shader_recompiler/frontend/ir/basic_block.cpp b/src/shader_recompiler/frontend/ir/basic_block.cpp
index 7c08b25ce..974efa4a0 100644
--- a/src/shader_recompiler/frontend/ir/basic_block.cpp
+++ b/src/shader_recompiler/frontend/ir/basic_block.cpp
@@ -22,6 +22,11 @@ void Block::AppendNewInst(Opcode op, std::initializer_list<Value> args) {
PrependNewInst(end(), op, args);
}
+Block::iterator Block::PrependNewInst(iterator insertion_point, const Inst& base_inst) {
+ Inst* const inst{inst_pool->Create(base_inst)};
+ return instructions.insert(insertion_point, *inst);
+}
+
Block::iterator Block::PrependNewInst(iterator insertion_point, Opcode op,
std::initializer_list<Value> args, u32 flags) {
Inst* const inst{inst_pool->Create(op, flags)};
diff --git a/src/shader_recompiler/frontend/ir/basic_block.h b/src/shader_recompiler/frontend/ir/basic_block.h
index 9ce1ed07e..fbfe98266 100644
--- a/src/shader_recompiler/frontend/ir/basic_block.h
+++ b/src/shader_recompiler/frontend/ir/basic_block.h
@@ -40,6 +40,9 @@ public:
/// Appends a new instruction to the end of this basic block.
void AppendNewInst(Opcode op, std::initializer_list<Value> args);
+ /// Prepends a copy of an instruction to this basic block before the insertion point.
+ iterator PrependNewInst(iterator insertion_point, const Inst& base_inst);
+
/// Prepends a new instruction to this basic block before the insertion point.
iterator PrependNewInst(iterator insertion_point, Opcode op,
std::initializer_list<Value> args = {}, u32 flags = 0);
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index 13159a68d..356f889ac 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -375,6 +375,10 @@ F32 IREmitter::YDirection() {
return Inst<F32>(Opcode::YDirection);
}
+F32 IREmitter::ResolutionDownFactor() {
+ return Inst<F32>(Opcode::ResolutionDownFactor);
+}
+
U32 IREmitter::LaneId() {
return Inst<U32>(Opcode::LaneId);
}
@@ -1141,6 +1145,10 @@ U32 IREmitter::IMul(const U32& a, const U32& b) {
return Inst<U32>(Opcode::IMul32, a, b);
}
+U32 IREmitter::IDiv(const U32& a, const U32& b, bool is_signed) {
+ return Inst<U32>(is_signed ? Opcode::SDiv32 : Opcode::UDiv32, a, b);
+}
+
U32U64 IREmitter::INeg(const U32U64& value) {
switch (value.Type()) {
case Type::U32:
@@ -1938,6 +1946,14 @@ Value IREmitter::ImageAtomicExchange(const Value& handle, const Value& coords, c
return Inst(op, Flags{info}, handle, coords, value);
}
+U1 IREmitter::IsTextureScaled(const U32& index) {
+ return Inst<U1>(Opcode::IsTextureScaled, index);
+}
+
+U1 IREmitter::IsImageScaled(const U32& index) {
+ return Inst<U1>(Opcode::IsImageScaled, index);
+}
+
U1 IREmitter::VoteAll(const U1& value) {
return Inst<U1>(Opcode::VoteAll, value);
}
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h
index 1b89ca5a0..13eefa88b 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.h
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.h
@@ -102,6 +102,8 @@ public:
[[nodiscard]] U1 IsHelperInvocation();
[[nodiscard]] F32 YDirection();
+ [[nodiscard]] F32 ResolutionDownFactor();
+
[[nodiscard]] U32 LaneId();
[[nodiscard]] U32 LoadGlobalU8(const U64& address);
@@ -207,6 +209,7 @@ public:
[[nodiscard]] U32U64 IAdd(const U32U64& a, const U32U64& b);
[[nodiscard]] U32U64 ISub(const U32U64& a, const U32U64& b);
[[nodiscard]] U32 IMul(const U32& a, const U32& b);
+ [[nodiscard]] U32 IDiv(const U32& a, const U32& b, bool is_signed = false);
[[nodiscard]] U32U64 INeg(const U32U64& value);
[[nodiscard]] U32 IAbs(const U32& value);
[[nodiscard]] U32U64 ShiftLeftLogical(const U32U64& base, const U32& shift);
@@ -356,6 +359,10 @@ public:
TextureInstInfo info);
[[nodiscard]] Value ImageAtomicExchange(const Value& handle, const Value& coords,
const Value& value, TextureInstInfo info);
+
+ [[nodiscard]] U1 IsTextureScaled(const U32& index);
+ [[nodiscard]] U1 IsImageScaled(const U32& index);
+
[[nodiscard]] U1 VoteAll(const U1& value);
[[nodiscard]] U1 VoteAny(const U1& value);
[[nodiscard]] U1 VoteEqual(const U1& value);
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 30b470bdd..97e2bf6af 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -47,6 +47,17 @@ Inst::Inst(IR::Opcode op_, u32 flags_) noexcept : op{op_}, flags{flags_} {
}
}
+Inst::Inst(const Inst& base) : op{base.op}, flags{base.flags} {
+ if (base.op == Opcode::Phi) {
+ throw NotImplementedException("Copying phi node");
+ }
+ std::construct_at(&args);
+ const size_t num_args{base.NumArgs()};
+ for (size_t index = 0; index < num_args; ++index) {
+ SetArg(index, base.Arg(index));
+ }
+}
+
Inst::~Inst() {
if (op == Opcode::Phi) {
std::destroy_at(&phi_args);
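The copy constructor above, together with the new Block::PrependNewInst(iterator, const Inst&) overload, supports the clone-then-rewrite pattern used by the rescaling pass further down: duplicate an instruction in place, then redirect every use of the original to a value derived from the copy. Condensed from VisitMark in rescaling_pass.cpp (this same change), assuming block and inst are in scope inside a visitor:

// Clone the instruction, then replace all uses of the original with a patched value.
const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
IR::IREmitter ir{block, it};
const IR::F32 cloned{&*block.PrependNewInst(it, inst)}; // copy keeps opcode, flags and args
const IR::F32 up_factor{ir.FPRecip(ir.ResolutionDownFactor())};
inst.ReplaceUsesWith(ir.FPMul(cloned, up_factor)); // original now yields the up-scaled value
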
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index d91098c80..6929919df 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -62,6 +62,7 @@ OPCODE(InvocationId, U32,
OPCODE(SampleId, U32, )
OPCODE(IsHelperInvocation, U1, )
OPCODE(YDirection, F32, )
+OPCODE(ResolutionDownFactor, F32, )
// Undefined
OPCODE(UndefU1, U1, )
@@ -286,6 +287,8 @@ OPCODE(IAdd64, U64, U64,
OPCODE(ISub32, U32, U32, U32, )
OPCODE(ISub64, U64, U64, U64, )
OPCODE(IMul32, U32, U32, U32, )
+OPCODE(SDiv32, U32, U32, U32, )
+OPCODE(UDiv32, U32, U32, U32, )
OPCODE(INeg32, U32, U32, )
OPCODE(INeg64, U64, U64, )
OPCODE(IAbs32, U32, U32, )
@@ -490,6 +493,9 @@ OPCODE(ImageGradient, F32x4, Opaq
OPCODE(ImageRead, U32x4, Opaque, Opaque, )
OPCODE(ImageWrite, Void, Opaque, Opaque, U32x4, )
+OPCODE(IsTextureScaled, U1, U32, )
+OPCODE(IsImageScaled, U1, U32, )
+
// Atomic Image operations
OPCODE(BindlessImageAtomicIAdd32, U32, U32, Opaque, U32, )
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index 6c9ef6bdd..947579852 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -116,10 +116,10 @@ public:
class Inst : public boost::intrusive::list_base_hook<> {
public:
explicit Inst(IR::Opcode op_, u32 flags_) noexcept;
+ explicit Inst(const Inst& base);
~Inst();
Inst& operator=(const Inst&) = delete;
- Inst(const Inst&) = delete;
Inst& operator=(Inst&&) = delete;
Inst(Inst&&) = delete;
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
index 2fc542f0e..267ebe4af 100644
--- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
@@ -179,6 +179,10 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
Optimization::TexturePass(env, program);
Optimization::ConstantPropagationPass(program);
+
+ if (Settings::values.resolution_info.active) {
+ Optimization::RescalingPass(program);
+ }
Optimization::DeadCodeEliminationPass(program);
if (Settings::values.renderer_debug) {
Optimization::VerificationPass(program);
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index f69e1c9cc..1e476d83d 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -430,6 +430,11 @@ void VisitUsages(Info& info, IR::Inst& inst) {
case IR::Opcode::IsHelperInvocation:
info.uses_is_helper_invocation = true;
break;
+ case IR::Opcode::ResolutionDownFactor:
+ case IR::Opcode::IsTextureScaled:
+ case IR::Opcode::IsImageScaled:
+ info.uses_rescaling_uniform = true;
+ break;
case IR::Opcode::LaneId:
info.uses_subgroup_invocation_id = true;
break;
diff --git a/src/shader_recompiler/ir_opt/passes.h b/src/shader_recompiler/ir_opt/passes.h
index 2f89b1ea0..f877c7ba0 100644
--- a/src/shader_recompiler/ir_opt/passes.h
+++ b/src/shader_recompiler/ir_opt/passes.h
@@ -19,6 +19,7 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program);
void IdentityRemovalPass(IR::Program& program);
void LowerFp16ToFp32(IR::Program& program);
void LowerInt64ToInt32(IR::Program& program);
+void RescalingPass(IR::Program& program);
void SsaRewritePass(IR::Program& program);
void TexturePass(Environment& env, IR::Program& program);
void VerificationPass(const IR::Program& program);
diff --git a/src/shader_recompiler/ir_opt/rescaling_pass.cpp b/src/shader_recompiler/ir_opt/rescaling_pass.cpp
new file mode 100644
index 000000000..c28500dd1
--- /dev/null
+++ b/src/shader_recompiler/ir_opt/rescaling_pass.cpp
@@ -0,0 +1,327 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/alignment.h"
+#include "common/settings.h"
+#include "shader_recompiler/environment.h"
+#include "shader_recompiler/frontend/ir/ir_emitter.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+#include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/frontend/ir/value.h"
+#include "shader_recompiler/ir_opt/passes.h"
+#include "shader_recompiler/shader_info.h"
+
+namespace Shader::Optimization {
+namespace {
+[[nodiscard]] bool IsTextureTypeRescalable(TextureType type) {
+ switch (type) {
+ case TextureType::Color2D:
+ case TextureType::ColorArray2D:
+ return true;
+ case TextureType::Color1D:
+ case TextureType::ColorArray1D:
+ case TextureType::Color3D:
+ case TextureType::ColorCube:
+ case TextureType::ColorArrayCube:
+ case TextureType::Buffer:
+ break;
+ }
+ return false;
+}
+
+void VisitMark(IR::Block& block, IR::Inst& inst) {
+ switch (inst.GetOpcode()) {
+ case IR::Opcode::ShuffleIndex:
+ case IR::Opcode::ShuffleUp:
+ case IR::Opcode::ShuffleDown:
+ case IR::Opcode::ShuffleButterfly: {
+ const IR::Value shfl_arg{inst.Arg(0)};
+ if (shfl_arg.IsImmediate()) {
+ break;
+ }
+ const IR::Inst* const arg_inst{shfl_arg.InstRecursive()};
+ if (arg_inst->GetOpcode() != IR::Opcode::BitCastU32F32) {
+ break;
+ }
+ const IR::Value bitcast_arg{arg_inst->Arg(0)};
+ if (bitcast_arg.IsImmediate()) {
+ break;
+ }
+ IR::Inst* const bitcast_inst{bitcast_arg.InstRecursive()};
+ bool must_patch_outside = false;
+ if (bitcast_inst->GetOpcode() == IR::Opcode::GetAttribute) {
+ const IR::Attribute attr{bitcast_inst->Arg(0).Attribute()};
+ switch (attr) {
+ case IR::Attribute::PositionX:
+ case IR::Attribute::PositionY:
+ bitcast_inst->SetFlags<u32>(0xDEADBEEF);
+ must_patch_outside = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (must_patch_outside) {
+ const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const IR::F32 new_inst{&*block.PrependNewInst(it, inst)};
+ const IR::F32 up_factor{ir.FPRecip(ir.ResolutionDownFactor())};
+ const IR::Value converted{ir.FPMul(new_inst, up_factor)};
+ inst.ReplaceUsesWith(converted);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+void PatchFragCoord(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const IR::F32 down_factor{ir.ResolutionDownFactor()};
+ const IR::F32 frag_coord{ir.GetAttribute(inst.Arg(0).Attribute())};
+ const IR::F32 downscaled_frag_coord{ir.FPMul(frag_coord, down_factor)};
+ inst.ReplaceUsesWith(downscaled_frag_coord);
+}
+
+void PatchPointSize(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const IR::F32 point_value{inst.Arg(1)};
+ const IR::F32 up_factor{ir.FPRecip(ir.ResolutionDownFactor())};
+ const IR::F32 upscaled_point_value{ir.FPMul(point_value, up_factor)};
+ inst.SetArg(1, upscaled_point_value);
+}
+
+[[nodiscard]] IR::U32 Scale(IR::IREmitter& ir, const IR::U1& is_scaled, const IR::U32& value) {
+ IR::U32 scaled_value{value};
+ if (const u32 up_scale = Settings::values.resolution_info.up_scale; up_scale != 1) {
+ scaled_value = ir.IMul(scaled_value, ir.Imm32(up_scale));
+ }
+ if (const u32 down_shift = Settings::values.resolution_info.down_shift; down_shift != 0) {
+ scaled_value = ir.ShiftRightArithmetic(scaled_value, ir.Imm32(down_shift));
+ }
+ return IR::U32{ir.Select(is_scaled, scaled_value, value)};
+}
+
+[[nodiscard]] IR::U32 SubScale(IR::IREmitter& ir, const IR::U1& is_scaled, const IR::U32& value,
+ const IR::Attribute attrib) {
+ const IR::F32 up_factor{ir.Imm32(Settings::values.resolution_info.up_factor)};
+ const IR::F32 base{ir.FPMul(ir.ConvertUToF(32, 32, value), up_factor)};
+ const IR::F32 frag_coord{ir.GetAttribute(attrib)};
+ const IR::F32 down_factor{ir.Imm32(Settings::values.resolution_info.down_factor)};
+ const IR::F32 floor{ir.FPMul(up_factor, ir.FPFloor(ir.FPMul(frag_coord, down_factor)))};
+ const IR::F16F32F64 deviation{ir.FPAdd(base, ir.FPAdd(frag_coord, ir.FPNeg(floor)))};
+ return IR::U32{ir.Select(is_scaled, ir.ConvertFToU(32, deviation), value)};
+}
+
+[[nodiscard]] IR::U32 DownScale(IR::IREmitter& ir, const IR::U1& is_scaled, const IR::U32& value) {
+ IR::U32 scaled_value{value};
+ if (const u32 down_shift = Settings::values.resolution_info.down_shift; down_shift != 0) {
+ scaled_value = ir.ShiftLeftLogical(scaled_value, ir.Imm32(down_shift));
+ }
+ if (const u32 up_scale = Settings::values.resolution_info.up_scale; up_scale != 1) {
+ scaled_value = ir.IDiv(scaled_value, ir.Imm32(up_scale));
+ }
+ return IR::U32{ir.Select(is_scaled, scaled_value, value)};
+}
+
+void PatchImageQueryDimensions(IR::Block& block, IR::Inst& inst) {
+ const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ const IR::U1 is_scaled{ir.IsTextureScaled(ir.Imm32(info.descriptor_index))};
+ switch (info.type) {
+ case TextureType::Color2D:
+ case TextureType::ColorArray2D: {
+ const IR::Value new_inst{&*block.PrependNewInst(it, inst)};
+ const IR::U32 width{DownScale(ir, is_scaled, IR::U32{ir.CompositeExtract(new_inst, 0)})};
+ const IR::U32 height{DownScale(ir, is_scaled, IR::U32{ir.CompositeExtract(new_inst, 1)})};
+ const IR::Value replacement{ir.CompositeConstruct(
+ width, height, ir.CompositeExtract(new_inst, 2), ir.CompositeExtract(new_inst, 3))};
+ inst.ReplaceUsesWith(replacement);
+ break;
+ }
+ case TextureType::Color1D:
+ case TextureType::ColorArray1D:
+ case TextureType::Color3D:
+ case TextureType::ColorCube:
+ case TextureType::ColorArrayCube:
+ case TextureType::Buffer:
+ // Nothing to patch here
+ break;
+ }
+}
+
+void ScaleIntegerComposite(IR::IREmitter& ir, IR::Inst& inst, const IR::U1& is_scaled,
+ size_t index) {
+ const IR::Value composite{inst.Arg(index)};
+ if (composite.IsEmpty()) {
+ return;
+ }
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ const IR::U32 x{Scale(ir, is_scaled, IR::U32{ir.CompositeExtract(composite, 0)})};
+ const IR::U32 y{Scale(ir, is_scaled, IR::U32{ir.CompositeExtract(composite, 1)})};
+ switch (info.type) {
+ case TextureType::Color2D:
+ inst.SetArg(index, ir.CompositeConstruct(x, y));
+ break;
+ case TextureType::ColorArray2D: {
+ const IR::U32 z{ir.CompositeExtract(composite, 2)};
+ inst.SetArg(index, ir.CompositeConstruct(x, y, z));
+ break;
+ }
+ case TextureType::Color1D:
+ case TextureType::ColorArray1D:
+ case TextureType::Color3D:
+ case TextureType::ColorCube:
+ case TextureType::ColorArrayCube:
+ case TextureType::Buffer:
+ // Nothing to patch here
+ break;
+ }
+}
+
+void SubScaleCoord(IR::IREmitter& ir, IR::Inst& inst, const IR::U1& is_scaled) {
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ const IR::Value coord{inst.Arg(1)};
+ const IR::U32 coord_x{ir.CompositeExtract(coord, 0)};
+ const IR::U32 coord_y{ir.CompositeExtract(coord, 1)};
+
+ const IR::U32 scaled_x{SubScale(ir, is_scaled, coord_x, IR::Attribute::PositionX)};
+ const IR::U32 scaled_y{SubScale(ir, is_scaled, coord_y, IR::Attribute::PositionY)};
+ switch (info.type) {
+ case TextureType::Color2D:
+ inst.SetArg(1, ir.CompositeConstruct(scaled_x, scaled_y));
+ break;
+ case TextureType::ColorArray2D: {
+ const IR::U32 z{ir.CompositeExtract(coord, 2)};
+ inst.SetArg(1, ir.CompositeConstruct(scaled_x, scaled_y, z));
+ break;
+ }
+ case TextureType::Color1D:
+ case TextureType::ColorArray1D:
+ case TextureType::Color3D:
+ case TextureType::ColorCube:
+ case TextureType::ColorArrayCube:
+ case TextureType::Buffer:
+ // Nothing to patch here
+ break;
+ }
+}
+
+void SubScaleImageFetch(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ if (!IsTextureTypeRescalable(info.type)) {
+ return;
+ }
+ const IR::U1 is_scaled{ir.IsTextureScaled(ir.Imm32(info.descriptor_index))};
+ SubScaleCoord(ir, inst, is_scaled);
+ // Scale ImageFetch offset
+ ScaleIntegerComposite(ir, inst, is_scaled, 2);
+}
+
+void SubScaleImageRead(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ if (!IsTextureTypeRescalable(info.type)) {
+ return;
+ }
+ const IR::U1 is_scaled{ir.IsImageScaled(ir.Imm32(info.descriptor_index))};
+ SubScaleCoord(ir, inst, is_scaled);
+}
+
+void PatchImageFetch(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ if (!IsTextureTypeRescalable(info.type)) {
+ return;
+ }
+ const IR::U1 is_scaled{ir.IsTextureScaled(ir.Imm32(info.descriptor_index))};
+ ScaleIntegerComposite(ir, inst, is_scaled, 1);
+ // Scale ImageFetch offset
+ ScaleIntegerComposite(ir, inst, is_scaled, 2);
+}
+
+void PatchImageRead(IR::Block& block, IR::Inst& inst) {
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ const auto info{inst.Flags<IR::TextureInstInfo>()};
+ if (!IsTextureTypeRescalable(info.type)) {
+ return;
+ }
+ const IR::U1 is_scaled{ir.IsImageScaled(ir.Imm32(info.descriptor_index))};
+ ScaleIntegerComposite(ir, inst, is_scaled, 1);
+}
+
+void Visit(const IR::Program& program, IR::Block& block, IR::Inst& inst) {
+ const bool is_fragment_shader{program.stage == Stage::Fragment};
+ switch (inst.GetOpcode()) {
+ case IR::Opcode::GetAttribute: {
+ const IR::Attribute attr{inst.Arg(0).Attribute()};
+ switch (attr) {
+ case IR::Attribute::PositionX:
+ case IR::Attribute::PositionY:
+ if (is_fragment_shader && inst.Flags<u32>() != 0xDEADBEEF) {
+ PatchFragCoord(block, inst);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case IR::Opcode::SetAttribute: {
+ const IR::Attribute attr{inst.Arg(0).Attribute()};
+ switch (attr) {
+ case IR::Attribute::PointSize:
+ if (inst.Flags<u32>() != 0xDEADBEEF) {
+ PatchPointSize(block, inst);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case IR::Opcode::ImageQueryDimensions:
+ PatchImageQueryDimensions(block, inst);
+ break;
+ case IR::Opcode::ImageFetch:
+ if (is_fragment_shader) {
+ SubScaleImageFetch(block, inst);
+ } else {
+ PatchImageFetch(block, inst);
+ }
+ break;
+ case IR::Opcode::ImageRead:
+ if (is_fragment_shader) {
+ SubScaleImageRead(block, inst);
+ } else {
+ PatchImageRead(block, inst);
+ }
+ break;
+ default:
+ break;
+ }
+}
+} // Anonymous namespace
+
+void RescalingPass(IR::Program& program) {
+ const bool is_fragment_shader{program.stage == Stage::Fragment};
+ if (is_fragment_shader) {
+ for (IR::Block* const block : program.post_order_blocks) {
+ for (IR::Inst& inst : block->Instructions()) {
+ VisitMark(*block, inst);
+ }
+ }
+ }
+ for (IR::Block* const block : program.post_order_blocks) {
+ for (IR::Inst& inst : block->Instructions()) {
+ Visit(program, *block, inst);
+ }
+ }
+}
+
+} // namespace Shader::Optimization
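For reference, Scale above computes (value * up_scale) >> down_shift and DownScale applies the reverse transform, (value << down_shift) / up_scale, subject to integer rounding. A small worked example, assuming an illustrative 1.5x factor (up_scale = 3, down_shift = 1) rather than any particular setting:

#include <cstdint>

// Worked example of the Scale/DownScale arithmetic (up_scale = 3, down_shift = 1, i.e. 1.5x).
constexpr std::uint32_t up_scale = 3;
constexpr std::uint32_t down_shift = 1;
constexpr std::uint32_t coord = 100;
constexpr std::uint32_t scaled = (coord * up_scale) >> down_shift;    // 150
constexpr std::uint32_t unscaled = (scaled << down_shift) / up_scale; // back to 100
static_assert(scaled == 150 && unscaled == 100);
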
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index 4ef4dbd40..9f375c30e 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -172,6 +172,7 @@ struct Info {
bool uses_global_memory{};
bool uses_atomic_image_u32{};
bool uses_shadow_lod{};
+ bool uses_rescaling_uniform{};
IR::Type used_constant_buffer_types{};
IR::Type used_storage_buffer_types{};
@@ -190,4 +191,13 @@ struct Info {
ImageDescriptors image_descriptors;
};
+template <typename Descriptors>
+u32 NumDescriptors(const Descriptors& descriptors) {
+ u32 num{};
+ for (const auto& desc : descriptors) {
+ num += desc.count;
+ }
+ return num;
+}
+
} // namespace Shader
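
NumDescriptors simply sums the count field of whichever descriptor array it is given. A brief usage sketch; the variable names are illustrative:

// Illustrative: total number of bindings behind each descriptor array of an Info.
const u32 num_texture_bindings{NumDescriptors(info.texture_descriptors)};
const u32 num_image_bindings{NumDescriptors(info.image_descriptors)};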