author     Morph <39850852+Morph1984@users.noreply.github.com>  2021-09-12 19:53:29 +0200
committer  GitHub <noreply@github.com>                          2021-09-12 19:53:29 +0200
commit     9248442bb2759c071b565a10e959883980ff09d6 (patch)
tree       1058129dc40331f7f688d32467da17214a2dc013 /src/shader_recompiler/backend
parent     Merge pull request #6975 from ogniK5377/acc-async-ctx (diff)
parent     emit_glsl_warp: Fix shuffle ops for 64-thread warp sizes (diff)
Diffstat (limited to 'src/shader_recompiler/backend')
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_warp.cpp    122
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp   41
2 files changed, 109 insertions, 54 deletions
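
Note: the second parent ("emit_glsl_warp: Fix shuffle ops for 64-thread warp sizes") targets hosts whose hardware subgroup is wider than the guest GPU's 32-thread warp (profile.warp_size_potentially_larger_than_guest). With GL_ARB_shader_ballot, ballotARB returns a 64-bit mask packed into a uvec2, so each invocation has to read the 32-bit half that contains its own bit (gl_SubGroupInvocationARB >> 5) instead of always taking component .x. The following is a minimal standalone GLSL sketch of that indexing, not the emitter's literal output; the buffer layout and names are illustrative:

    #version 450
    #extension GL_ARB_shader_ballot : require
    #extension GL_ARB_gpu_shader_int64 : require
    layout(local_size_x = 64) in;
    layout(std430, binding = 0) buffer Output { uint masks[]; };

    void main() {
        // ballotARB yields a 64-bit mask; uvec2(...) splits it into two 32-bit halves.
        uvec2 ballot = uvec2(ballotARB(true));
        // Invocations 0..31 read half 0, invocations 32..63 read half 1.
        uint warp_mask = ballot[gl_SubGroupInvocationARB >> 5];
        // The guest-visible lane id is gl_SubGroupInvocationARB & 31u (position in the partition).
        masks[gl_SubGroupInvocationARB] = warp_mask;
    }

This is the same reasoning behind EmitLaneId masking with 31u and the SubgroupMask helpers indexing the uvec2 mask instead of hardcoding .x, as seen in the hunks below.
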
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_warp.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_warp.cpp
index a982dd8a2..cd285e2c8 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_warp.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_warp.cpp
@@ -11,6 +11,8 @@
namespace Shader::Backend::GLSL {
namespace {
+constexpr char THREAD_ID[]{"gl_SubGroupInvocationARB"};
+
void SetInBoundsFlag(EmitContext& ctx, IR::Inst& inst) {
IR::Inst* const in_bounds{inst.GetAssociatedPseudoOperation(IR::Opcode::GetInBoundsFromOp)};
if (!in_bounds) {
@@ -43,84 +45,100 @@ void UseShuffleNv(EmitContext& ctx, IR::Inst& inst, std::string_view shfl_op,
ctx.AddU32("{}={}({},{},{},shfl_in_bounds);", inst, shfl_op, value, index, width);
SetInBoundsFlag(ctx, inst);
}
+
+std::string_view BallotIndex(EmitContext& ctx) {
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ".x";
+ }
+ return "[gl_SubGroupInvocationARB>>5]";
+}
+
+std::string GetMask(EmitContext& ctx, std::string_view mask) {
+ const auto ballot_index{BallotIndex(ctx)};
+ return fmt::format("uint(uvec2({}){})", mask, ballot_index);
+}
} // Anonymous namespace
void EmitLaneId(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=gl_SubGroupInvocationARB&31u;", inst);
+ ctx.AddU32("{}={}&31u;", inst, THREAD_ID);
}
void EmitVoteAll(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=allInvocationsEqualARB({});", inst, pred);
- } else {
- const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB]")};
- const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB]", pred)};
- ctx.AddU1("{}=({}&{})=={};", inst, ballot, active_mask, active_mask);
+ return;
}
+ const auto ballot_index{BallotIndex(ctx)};
+ const auto active_mask{fmt::format("uvec2(ballotARB(true)){}", ballot_index)};
+ const auto ballot{fmt::format("uvec2(ballotARB({})){}", pred, ballot_index)};
+ ctx.AddU1("{}=({}&{})=={};", inst, ballot, active_mask, active_mask);
}
void EmitVoteAny(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=anyInvocationARB({});", inst, pred);
- } else {
- const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB]")};
- const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB]", pred)};
- ctx.AddU1("{}=({}&{})!=0u;", inst, ballot, active_mask, active_mask);
+ return;
}
+ const auto ballot_index{BallotIndex(ctx)};
+ const auto active_mask{fmt::format("uvec2(ballotARB(true)){}", ballot_index)};
+ const auto ballot{fmt::format("uvec2(ballotARB({})){}", pred, ballot_index)};
+ ctx.AddU1("{}=({}&{})!=0u;", inst, ballot, active_mask, active_mask);
}
void EmitVoteEqual(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=allInvocationsEqualARB({});", inst, pred);
- } else {
- const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB]")};
- const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB]", pred)};
- const auto value{fmt::format("({}^{})", ballot, active_mask)};
- ctx.AddU1("{}=({}==0)||({}=={});", inst, value, value, active_mask);
+ return;
}
+ const auto ballot_index{BallotIndex(ctx)};
+ const auto active_mask{fmt::format("uvec2(ballotARB(true)){}", ballot_index)};
+ const auto ballot{fmt::format("uvec2(ballotARB({})){}", pred, ballot_index)};
+ const auto value{fmt::format("({}^{})", ballot, active_mask)};
+ ctx.AddU1("{}=({}==0)||({}=={});", inst, value, value, active_mask);
}
void EmitSubgroupBallot(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
- if (!ctx.profile.warp_size_potentially_larger_than_guest) {
- ctx.AddU32("{}=uvec2(ballotARB({})).x;", inst, pred);
- } else {
- ctx.AddU32("{}=uvec2(ballotARB({}))[gl_SubGroupInvocationARB];", inst, pred);
- }
+ const auto ballot_index{BallotIndex(ctx)};
+ ctx.AddU32("{}=uvec2(ballotARB({})){};", inst, pred, ballot_index);
}
void EmitSubgroupEqMask(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=uint(gl_SubGroupEqMaskARB.x);", inst);
+ ctx.AddU32("{}={};", inst, GetMask(ctx, "gl_SubGroupEqMaskARB"));
}
void EmitSubgroupLtMask(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=uint(gl_SubGroupLtMaskARB.x);", inst);
+ ctx.AddU32("{}={};", inst, GetMask(ctx, "gl_SubGroupLtMaskARB"));
}
void EmitSubgroupLeMask(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=uint(gl_SubGroupLeMaskARB.x);", inst);
+ ctx.AddU32("{}={};", inst, GetMask(ctx, "gl_SubGroupLeMaskARB"));
}
void EmitSubgroupGtMask(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=uint(gl_SubGroupGtMaskARB.x);", inst);
+ ctx.AddU32("{}={};", inst, GetMask(ctx, "gl_SubGroupGtMaskARB"));
}
void EmitSubgroupGeMask(EmitContext& ctx, IR::Inst& inst) {
- ctx.AddU32("{}=uint(gl_SubGroupGeMaskARB.x);", inst);
+ ctx.AddU32("{}={};", inst, GetMask(ctx, "gl_SubGroupGeMaskARB"));
}
void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, std::string_view value,
- std::string_view index, std::string_view clamp,
- std::string_view segmentation_mask) {
+ std::string_view index, std::string_view clamp, std::string_view seg_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
- UseShuffleNv(ctx, inst, "shuffleNV", value, index, clamp, segmentation_mask);
+ UseShuffleNv(ctx, inst, "shuffleNV", value, index, clamp, seg_mask);
return;
}
- const auto not_seg_mask{fmt::format("(~{})", segmentation_mask)};
- const auto thread_id{"gl_SubGroupInvocationARB"};
- const auto min_thread_id{ComputeMinThreadId(thread_id, segmentation_mask)};
- const auto max_thread_id{ComputeMaxThreadId(min_thread_id, clamp, not_seg_mask)};
+ const bool big_warp{ctx.profile.warp_size_potentially_larger_than_guest};
+ const auto is_upper_partition{"int(gl_SubGroupInvocationARB)>=32"};
+ const auto upper_index{fmt::format("{}?{}+32:{}", is_upper_partition, index, index)};
+ const auto upper_clamp{fmt::format("{}?{}+32:{}", is_upper_partition, clamp, clamp)};
+
+ const auto not_seg_mask{fmt::format("(~{})", seg_mask)};
+ const auto min_thread_id{ComputeMinThreadId(THREAD_ID, seg_mask)};
+ const auto max_thread_id{
+ ComputeMaxThreadId(min_thread_id, big_warp ? upper_clamp : clamp, not_seg_mask)};
- const auto lhs{fmt::format("({}&{})", index, not_seg_mask)};
+ const auto lhs{fmt::format("({}&{})", big_warp ? upper_index : index, not_seg_mask)};
const auto src_thread_id{fmt::format("({})|({})", lhs, min_thread_id)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
@@ -128,29 +146,34 @@ void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, std::string_view value,
}
void EmitShuffleUp(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view index,
- std::string_view clamp, std::string_view segmentation_mask) {
+ std::string_view clamp, std::string_view seg_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
- UseShuffleNv(ctx, inst, "shuffleUpNV", value, index, clamp, segmentation_mask);
+ UseShuffleNv(ctx, inst, "shuffleUpNV", value, index, clamp, seg_mask);
return;
}
- const auto thread_id{"gl_SubGroupInvocationARB"};
- const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
- const auto src_thread_id{fmt::format("({}-{})", thread_id, index)};
+ const bool big_warp{ctx.profile.warp_size_potentially_larger_than_guest};
+ const auto is_upper_partition{"int(gl_SubGroupInvocationARB)>=32"};
+ const auto upper_clamp{fmt::format("{}?{}+32:{}", is_upper_partition, clamp, clamp)};
+
+ const auto max_thread_id{GetMaxThreadId(THREAD_ID, big_warp ? upper_clamp : clamp, seg_mask)};
+ const auto src_thread_id{fmt::format("({}-{})", THREAD_ID, index)};
ctx.Add("shfl_in_bounds=int({})>=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
}
void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, std::string_view value,
- std::string_view index, std::string_view clamp,
- std::string_view segmentation_mask) {
+ std::string_view index, std::string_view clamp, std::string_view seg_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
- UseShuffleNv(ctx, inst, "shuffleDownNV", value, index, clamp, segmentation_mask);
+ UseShuffleNv(ctx, inst, "shuffleDownNV", value, index, clamp, seg_mask);
return;
}
- const auto thread_id{"gl_SubGroupInvocationARB"};
- const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
- const auto src_thread_id{fmt::format("({}+{})", thread_id, index)};
+ const bool big_warp{ctx.profile.warp_size_potentially_larger_than_guest};
+ const auto is_upper_partition{"int(gl_SubGroupInvocationARB)>=32"};
+ const auto upper_clamp{fmt::format("{}?{}+32:{}", is_upper_partition, clamp, clamp)};
+
+ const auto max_thread_id{GetMaxThreadId(THREAD_ID, big_warp ? upper_clamp : clamp, seg_mask)};
+ const auto src_thread_id{fmt::format("({}+{})", THREAD_ID, index)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
@@ -158,14 +181,17 @@ void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, std::string_view value,
void EmitShuffleButterfly(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
- std::string_view segmentation_mask) {
+ std::string_view seg_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
- UseShuffleNv(ctx, inst, "shuffleXorNV", value, index, clamp, segmentation_mask);
+ UseShuffleNv(ctx, inst, "shuffleXorNV", value, index, clamp, seg_mask);
return;
}
- const auto thread_id{"gl_SubGroupInvocationARB"};
- const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
- const auto src_thread_id{fmt::format("({}^{})", thread_id, index)};
+ const bool big_warp{ctx.profile.warp_size_potentially_larger_than_guest};
+ const auto is_upper_partition{"int(gl_SubGroupInvocationARB)>=32"};
+ const auto upper_clamp{fmt::format("{}?{}+32:{}", is_upper_partition, clamp, clamp)};
+
+ const auto max_thread_id{GetMaxThreadId(THREAD_ID, big_warp ? upper_clamp : clamp, seg_mask)};
+ const auto src_thread_id{fmt::format("({}^{})", THREAD_ID, index)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
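
For the shuffle helpers, the guest supplies its index and clamp relative to a 32-thread warp, so on hosts with potentially 64-thread warps the emitter now biases those values by 32 for invocations in the upper half before building the source-lane expression and the in-bounds check (for indexed shuffles the index is biased the same way as the clamp). A rough GLSL sketch of the expressions emitted for a shuffle-up on such a host; shfl_index, shfl_clamp, max_thread_id and value stand in for IR operands and helper results, so this is a hedged outline rather than the literal emitted string:

    bool upper_partition = int(gl_SubGroupInvocationARB) >= 32;
    uint eff_clamp = upper_partition ? shfl_clamp + 32u : shfl_clamp;  // keep the clamp in this half
    uint src_thread_id = gl_SubGroupInvocationARB - shfl_index;        // shuffle-up source lane
    // max_thread_id is derived from eff_clamp and the segmentation mask by GetMaxThreadId.
    bool shfl_in_bounds = int(src_thread_id) >= int(max_thread_id);
    uint result = shfl_in_bounds ? readInvocationARB(value, src_thread_id) : value;
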
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
index 78b1e1ba7..cef52c56e 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
@@ -7,8 +7,13 @@
namespace Shader::Backend::SPIRV {
namespace {
+Id GetThreadId(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id);
+}
+
Id WarpExtract(EmitContext& ctx, Id value) {
- const Id local_index{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id thread_id{GetThreadId(ctx)};
+ const Id local_index{ctx.OpShiftRightArithmetic(ctx.U32[1], thread_id, ctx.Const(5U))};
return ctx.OpVectorExtractDynamic(ctx.U32[1], value, local_index);
}
@@ -48,10 +53,17 @@ Id SelectValue(EmitContext& ctx, Id in_range, Id value, Id src_thread_id) {
return ctx.OpSelect(ctx.U32[1], in_range,
ctx.OpSubgroupReadInvocationKHR(ctx.U32[1], value, src_thread_id), value);
}
+
+Id GetUpperClamp(EmitContext& ctx, Id invocation_id, Id clamp) {
+ const Id thirty_two{ctx.Const(32u)};
+ const Id is_upper_partition{ctx.OpSGreaterThanEqual(ctx.U1, invocation_id, thirty_two)};
+ const Id upper_clamp{ctx.OpIAdd(ctx.U32[1], thirty_two, clamp)};
+ return ctx.OpSelect(ctx.U32[1], is_upper_partition, upper_clamp, clamp);
+}
} // Anonymous namespace
Id EmitLaneId(EmitContext& ctx) {
- const Id id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id id{GetThreadId(ctx)};
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return id;
}
@@ -123,7 +135,15 @@ Id EmitSubgroupGeMask(EmitContext& ctx) {
Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
const Id not_seg_mask{ctx.OpNot(ctx.U32[1], segmentation_mask)};
- const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id thread_id{GetThreadId(ctx)};
+ if (ctx.profile.warp_size_potentially_larger_than_guest) {
+ const Id thirty_two{ctx.Const(32u)};
+ const Id is_upper_partition{ctx.OpSGreaterThanEqual(ctx.U1, thread_id, thirty_two)};
+ const Id upper_index{ctx.OpIAdd(ctx.U32[1], thirty_two, index)};
+ const Id upper_clamp{ctx.OpIAdd(ctx.U32[1], thirty_two, clamp)};
+ index = ctx.OpSelect(ctx.U32[1], is_upper_partition, upper_index, index);
+ clamp = ctx.OpSelect(ctx.U32[1], is_upper_partition, upper_clamp, clamp);
+ }
const Id min_thread_id{ComputeMinThreadId(ctx, thread_id, segmentation_mask)};
const Id max_thread_id{ComputeMaxThreadId(ctx, min_thread_id, clamp, not_seg_mask)};
@@ -137,7 +157,10 @@ Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id cla
Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
- const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id thread_id{GetThreadId(ctx)};
+ if (ctx.profile.warp_size_potentially_larger_than_guest) {
+ clamp = GetUpperClamp(ctx, thread_id, clamp);
+ }
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
const Id src_thread_id{ctx.OpISub(ctx.U32[1], thread_id, index)};
const Id in_range{ctx.OpSGreaterThanEqual(ctx.U1, src_thread_id, max_thread_id)};
@@ -148,7 +171,10 @@ Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
- const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id thread_id{GetThreadId(ctx)};
+ if (ctx.profile.warp_size_potentially_larger_than_guest) {
+ clamp = GetUpperClamp(ctx, thread_id, clamp);
+ }
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
const Id src_thread_id{ctx.OpIAdd(ctx.U32[1], thread_id, index)};
const Id in_range{ctx.OpSLessThanEqual(ctx.U1, src_thread_id, max_thread_id)};
@@ -159,7 +185,10 @@ Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clam
Id EmitShuffleButterfly(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
- const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id thread_id{GetThreadId(ctx)};
+ if (ctx.profile.warp_size_potentially_larger_than_guest) {
+ clamp = GetUpperClamp(ctx, thread_id, clamp);
+ }
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
const Id src_thread_id{ctx.OpBitwiseXor(ctx.U32[1], thread_id, index)};
const Id in_range{ctx.OpSLessThanEqual(ctx.U1, src_thread_id, max_thread_id)};
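
The SPIR-V backend mirrors the GLSL change: WarpExtract now indexes the ballot vector with the invocation id shifted right by 5 (selecting the 32-bit half), and GetUpperClamp adds 32 to the clamp for invocations in the upper partition before the existing max-thread-id and in-range checks run. Expressed as GLSL-style pseudocode for what the new SPIR-V ops compute (a sketch assuming thread_id holds the loaded gl_SubGroupInvocationARB; not literal backend output):

    uint half_index = thread_id >> 5;                        // WarpExtract: which ballot half to read
    bool upper_partition = int(thread_id) >= 32;             // OpSGreaterThanEqual against 32
    uint eff_clamp = upper_partition ? clamp + 32u : clamp;  // GetUpperClamp / EmitShuffleIndex biasing
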