From ae4e452759573d145738688d9284077934e61ae4 Mon Sep 17 00:00:00 2001
From: ameerj <52414509+ameerj@users.noreply.github.com>
Date: Mon, 14 Jun 2021 11:32:28 -0400
Subject: glsl: Add Shader_GLSL logging

---
 .../backend/glsl/emit_glsl_atomic.cpp          | 13 ++++++------
 .../backend/glsl/emit_glsl_context_get_set.cpp | 23 ++++++++++++---------
 .../backend/glsl/emit_glsl_image.cpp           | 24 ++++++++++++----------
 3 files changed, 32 insertions(+), 28 deletions(-)

(limited to 'src')

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 850eee1e1..9152ace98 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
                pointer_offset);
     ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
@@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val
 
 void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
               binding.U32(), ctx.var_alloc.Consume(offset));
@@ -220,8 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
-
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 3eeccfb3c..0d1e5ed7f 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -42,7 +42,7 @@ void GetCbuf(EmitContext& ctx, std::string_view ret, const IR::Value& binding,
     const s32 signed_offset{static_cast<s32>(offset.U32())};
     static constexpr u32 cbuf_size{4096 * 16};
     if (signed_offset < 0 || offset.U32() > cbuf_size) {
-        // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+        LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
         ctx.Add("{}=0u;", ret);
         return;
     }
@@ -144,7 +144,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
     const u32 u32_offset{offset.U32()};
     const s32 signed_offset{static_cast<s32>(offset.U32())};
     if (signed_offset < 0 || u32_offset > cbuf_size) {
-        // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+        LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
         ctx.AddU32x2("{}=uvec2(0u);", inst);
         return;
     }
@@ -184,7 +184,7 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
     }
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         ctx.AddF32("{}=0.f;", inst);
         return;
     }
@@ -257,7 +258,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     const char swizzle{"xyzw"[element]};
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         return;
     }
     if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture7Q) {
@@ -269,8 +271,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::Layer:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport layer but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport layer but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_Layer=ftoi({});", value);
@@ -278,16 +280,17 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::ViewportIndex:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport index but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport index but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_ViewportIndex=ftoi({});", value);
         break;
     case IR::Attribute::ViewportMask:
         if (ctx.stage != Stage::Geometry && !ctx.profile.support_viewport_mask) {
-            // LOG_WARNING(..., "Shader stores viewport mask but device does not support viewport
-            // mask extension");
+            LOG_WARNING(
+                Shader_GLSL,
+                "Shader stores viewport mask but device does not support viewport mask extension");
             break;
         }
         ctx.Add("gl_ViewportMask[0]=ftoi({});", value);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index c6b3df9c9..447eb8e0a 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -96,7 +96,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
     }
     const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
     if (!has_var_aoffi) {
-        // LOG_WARNING("Device does not support variable texture offsets, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
     }
     const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
     switch (offset.Type()) {
@@ -116,7 +116,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
 std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
     const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
     if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-        // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
         return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
     }
     const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -152,7 +152,7 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -196,7 +196,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -239,9 +239,10 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
                         ctx.stage != Stage::Fragment && needs_shadow_ext};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
Using textureGrad fallback"); if (info.type == TextureType::ColorArrayCube) { - // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing"); + LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing"); ctx.AddF32("{}=0.0f;", inst); return; } @@ -291,9 +292,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR:: const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext}; const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; if (use_grad) { - // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback"); + LOG_WARNING(Shader_GLSL, + "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback"); if (info.type == TextureType::ColorArrayCube) { - // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing"); + LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing"); ctx.AddF32("{}=0.0f;", inst); return; } @@ -329,7 +331,7 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, const auto sparse_inst{PrepareSparse(inst)}; const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; if (sparse_inst && !supports_sparse) { - // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING"); ctx.AddU1("{}=true;", *sparse_inst); } if (!sparse_inst || !supports_sparse) { @@ -376,7 +378,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde const auto sparse_inst{PrepareSparse(inst)}; const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; if (sparse_inst && !supports_sparse) { - // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING"); ctx.AddU1("{}=true;", *sparse_inst); } if (!sparse_inst || !supports_sparse) { @@ -426,7 +428,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; if (sparse_inst && !supports_sparse) { - // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING"); ctx.AddU1("{}=true;", *sparse_inst); } if (!sparse_inst || !supports_sparse) { -- cgit v1.2.3