Diffstat:
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp     | 142
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_instructions.h |  31
2 files changed, 173 insertions(+), 0 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index dc377b053..a409a7ab3 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -105,6 +105,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_vi
            pointer_offset, value, pointer_offset, value);
}
+void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
+                                  std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, pointer_offset, pointer_offset);
+    ctx.Add("smem[{}>>2]={}.x;smem[({}+4)>>2]={}.y;", pointer_offset, value, pointer_offset, value);
+}
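
For reference, a minimal sketch of the GLSL this expands to, assuming the allocator names the result `r`, the pointer offset expression is `off`, and the value operand is `val` (all three names are illustrative; the real ones come from the variable allocator and the caller):

    // Read the old 64-bit value as two 32-bit words of shared memory.
    uvec2 r = uvec2(smem[off >> 2], smem[(off + 4) >> 2]);
    // Write the replacement word by word. Another invocation can touch
    // smem between the read above and these writes, hence the warning.
    smem[off >> 2] = val.x;
    smem[(off + 4) >> 2] = val.y;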
+
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                             const IR::Value& offset, std::string_view value) {
    ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
@@ -265,6 +272,97 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
+void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;", ctx.stage_name, binding.U32(),
+            ctx.var_alloc.Consume(offset), value, ctx.stage_name, binding.U32(),
+            ctx.var_alloc.Consume(offset), value);
+}
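
A sketch of the emitted GLSL under the same illustrative naming, plus `cs_ssbo0` for a compute-stage SSBO at binding 0 (also illustrative):

    // Capture the old pair of words.
    uvec2 r = uvec2(cs_ssbo0[off >> 2], cs_ssbo0[(off >> 2) + 1]);
    // Per-word += : no atomicity, and no carry propagates from the low
    // word into the high word, so this is not a true 64-bit addition.
    cs_ssbo0[off >> 2] += val.x;
    cs_ssbo0[(off >> 2) + 1] += val.y;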
+
+void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int "
+            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
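
This read-then-loop shape repeats for the UMin/SMax/UMax emitters below; only the comparison changes. Expanded with the same illustrative names, the signed-min loop looks like:

    // Old value, reinterpreted as two signed words.
    ivec2 r = ivec2(cs_ssbo0[off >> 2], cs_ssbo0[(off >> 2) + 1]);
    // Each word is clamped independently; the pair is never compared
    // as a single signed 64-bit quantity.
    for (int i = 0; i < 2; ++i) {
        cs_ssbo0[(off >> 2) + i] = uint(min(int(cs_ssbo0[(off >> 2) + i]), int(val[i])));
    }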
+
+void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int i=0;i<2;++i){{ "
+            "{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],{}[i]);}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int "
+            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],{}[i]);}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicAnd({}_ssbo{}[{}>>2],{}.x),atomicAnd({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
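
Unlike the read-modify-write fallbacks above, the bitwise group issues a genuine atomicAnd/atomicOr/atomicXor per 32-bit half, which is why this log message says "32x2" rather than "non-atomic". Because these operators act on each bit independently, the final memory contents match a true 64-bit atomic; only the returned old pair can be torn. With the same illustrative names:

    // Each half is individually atomic; the pair as a whole is not,
    // so r may combine words sampled around another writer.
    uvec2 r = uvec2(atomicAnd(cs_ssbo0[off >> 2], val.x),
                    atomicAnd(cs_ssbo0[(off >> 2) + 1], val.y));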
+
+void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                             const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicOr({}_ssbo{}[{}>>2],{}.x),atomicOr({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicXor({}_ssbo{}[{}>>2],{}.x),atomicXor({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                                   const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, falling back to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicExchange({}_ssbo{}[{}>>2],{}.x),atomicExchange({}_ssbo{}[({}>>2)+"
+                 "1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
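
Exchange decomposes the same way: each word swap is atomic on its own, but a concurrent exchanger can land between the two calls, so the returned pair may mix words from two different writers. A sketch with the same illustrative names:

    uvec2 r = uvec2(atomicExchange(cs_ssbo0[off >> 2], val.x),
                    atomicExchange(cs_ssbo0[(off >> 2) + 1], val.y));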
+
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                             const IR::Value& offset, std::string_view value) {
    SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");
@@ -388,6 +486,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
    throw NotImplementedException("GLSL Instruction");
}
+void EmitGlobalAtomicIAdd32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicSMin32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicUMin32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicSMax32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicUMax32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicInc32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicDec32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicAnd32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicOr32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicXor32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicExchange32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
void EmitGlobalAtomicAddF32(EmitContext&) {
    throw NotImplementedException("GLSL Instruction");
}
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
index 6cabbc717..704baddc9 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
@@ -442,6 +442,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
                                std::string_view value);
void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                std::string_view value);
+void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
+                                  std::string_view value);
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                             const IR::Value& offset, std::string_view value);
void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -482,6 +484,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
                            const IR::Value& offset, std::string_view value);
void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                                 const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                             const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                                   const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                             const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -518,6 +538,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
void EmitGlobalAtomicOr64(EmitContext& ctx);
void EmitGlobalAtomicXor64(EmitContext& ctx);
void EmitGlobalAtomicExchange64(EmitContext& ctx);
+void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicInc32x2(EmitContext& ctx);
+void EmitGlobalAtomicDec32x2(EmitContext& ctx);
+void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
+void EmitGlobalAtomicOr32x2(EmitContext& ctx);
+void EmitGlobalAtomicXor32x2(EmitContext& ctx);
+void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
void EmitGlobalAtomicAddF32(EmitContext& ctx);
void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
void EmitGlobalAtomicAddF32x2(EmitContext& ctx);