From f9c6101f55cd059979254f24de55db72ae3717a4 Mon Sep 17 00:00:00 2001 From: Cedric Guillemet <1312968+CedricGuillemet@users.noreply.github.com> Date: Thu, 19 Mar 2026 18:14:30 +0100 Subject: [PATCH 1/3] Expose more functions --- spirv_hlsl.cpp | 1103 ++++++++++++++++++++++-------------------------- 1 file changed, 505 insertions(+), 598 deletions(-) diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp index f5144ab8c..ef7b5d6af 100644 --- a/spirv_hlsl.cpp +++ b/spirv_hlsl.cpp @@ -31,7 +31,10 @@ using namespace spv; using namespace SPIRV_CROSS_NAMESPACE; using namespace std; -#define SPIRV_CROSS_INVALID_CALL() assert(false); +void InvalidCall() { + throw std::runtime_error("Invalid call"); +} +#define SPIRV_CROSS_INVALID_CALL() InvalidCall(); enum ExtraSubExpressionType { @@ -4721,16 +4724,12 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction) case OpFMod: { -#ifndef SPIRV_CROSS_WEBMIN if (!requires_op_fmod) { requires_op_fmod = true; force_recompile(); } CompilerGLSL_emit_instruction(instruction); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; } @@ -4814,12 +4813,8 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction) case OpFwidth: case OpFwidthCoarse: case OpFwidthFine: -#ifndef SPIRV_CROSS_WEBMIN HLSL_UFOP(fwidth); register_control_dependent_expression(ops[1]); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; case OpLogicalNot: @@ -5075,7 +5070,6 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction) case OpImageQuerySizeLod: { -#ifndef SPIRV_CROSS_WEBMIN auto result_type = ops[0]; auto id = ops[1]; @@ -5089,9 +5083,6 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction) auto &restype = get(ops[0]); expr = bitcast_expression(restype, SPIRType::UInt, expr); emit_op(result_type, id, expr, true); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; } @@ -12959,7 +12950,6 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) // ALU case OpIsNan: -#ifndef 
SPIRV_CROSS_WEBMIN if (!is_legacy()) GLSL_UFOP(isnan); else @@ -12971,13 +12961,9 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) else emit_binary_op(ops[0], ops[1], ops[2], ops[2], "!="); } -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; case OpIsInf: -#ifndef SPIRV_CROSS_WEBMIN if (!is_legacy()) GLSL_UFOP(isinf); else @@ -13015,9 +13001,6 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) inherit_expression_dependencies(result_id, operand); } -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; case OpSNegate: @@ -13049,12 +13032,8 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) case OpISub: { -#ifndef SPIRV_CROSS_WEBMIN auto type = get(ops[0]).basetype; GLSL_BOP_CAST(-, type); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; } @@ -13420,7 +13399,6 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) case OpLogicalOr: { -#ifndef SPIRV_CROSS_WEBMIN // No vector variant in GLSL for logical OR. auto result_type = ops[0]; auto id = ops[1]; @@ -13430,15 +13408,11 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "||", false, SPIRType::Unknown); else GLSL_BOP(||); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; } case OpLogicalAnd: { -#ifndef SPIRV_CROSS_WEBMIN // No vector variant in GLSL for logical AND. 
auto result_type = ops[0]; auto id = ops[1]; @@ -13448,9 +13422,6 @@ void CompilerHLSL::CompilerGLSL_emit_instruction(const Instruction &instruction) emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "&&", false, SPIRType::Unknown); else GLSL_BOP(&&); -#else - SPIRV_CROSS_INVALID_CALL(); -#endif break; } @@ -16149,148 +16120,503 @@ bool CompilerHLSL::should_suppress_usage_tracking(uint32_t id) const return !expression_is_forwarded(id) || expression_suppresses_usage_tracking(id); } -#ifndef SPIRV_CROSS_WEBMIN -CompilerHLSL::ShaderSubgroupSupportHelper::Result::Result() + +CompilerHLSL::BitcastType CompilerHLSL::get_bitcast_type(uint32_t result_type, uint32_t op0) { - for (auto &weight : weights) - weight = 0; + auto &rslt_type = get(result_type); + auto &expr_type = expression_type(op0); - // Make sure KHR_shader_subgroup extensions are always prefered. - const uint32_t big_num = FeatureCount; - weights[KHR_shader_subgroup_ballot] = big_num; - weights[KHR_shader_subgroup_basic] = big_num; - weights[KHR_shader_subgroup_vote] = big_num; - weights[KHR_shader_subgroup_arithmetic] = big_num; + if (rslt_type.basetype == SPIRType::BaseType::UInt64 && expr_type.basetype == SPIRType::BaseType::UInt && + expr_type.vecsize == 2) + return BitcastType::TypePackUint2x32; + else if (rslt_type.basetype == SPIRType::BaseType::UInt && rslt_type.vecsize == 2 && + expr_type.basetype == SPIRType::BaseType::UInt64) + return BitcastType::TypeUnpackUint64; + + return BitcastType::TypeNormal; } -string CompilerHLSL::image_type_hlsl_legacy(const SPIRType &type, uint32_t /*id*/) +string CompilerHLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) { - auto &imagetype = get(type.image.type); - string res; - - switch (imagetype.basetype) + if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64) + return type_to_glsl(out_type); 
+ else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float) + return "asuint"; + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) + return "asint"; + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) + return "asfloat"; + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) + return "asfloat"; + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to Int64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to UInt64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) { - case SPIRType::Int: - res = "i"; - break; - case SPIRType::UInt: - res = "u"; - break; - default: - break; + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile(); + } + return "spvUnpackFloat2x16"; } - - if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) - return res + "subpassInput" + (type.image.ms ? "MS" : ""); - - // If we're emulating subpassInput with samplers, force sampler2D - // so we don't have to specify format. 
- if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2) { - // Sampler buffers are always declared as samplerBuffer even though they might be separate images in the SPIR-V. - if (type.image.dim == DimBuffer && type.image.sampled == 1) - res += "sampler"; - else - res += type.image.sampled == 2 ? "image" : "texture"; + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile(); + } + return "spvPackFloat2x16"; + } + else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Half) + { + if (hlsl_options.shader_model < 40) + SPIRV_CROSS_THROW("Half to UShort requires Shader Model 4."); + return "(" + type_to_glsl(out_type) + ")f32tof16"; + } + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UShort) + { + if (hlsl_options.shader_model < 40) + SPIRV_CROSS_THROW("UShort to Half requires Shader Model 4."); + return "(" + type_to_glsl(out_type) + ")f16tof32"; } else - res += "sampler"; + return ""; +} + +void CompilerHLSL::require_texture_query_variant(uint32_t var_id) +{ + if (const auto *var = maybe_get_backing_variable(var_id)) + var_id = var->self; + + auto &type = expression_type(var_id); + bool uav = type.image.sampled == 2; + if (hlsl_options.nonwritable_uav_texture_as_srv && has_decoration(var_id, DecorationNonWritable)) + uav = false; + uint32_t bit = 0; switch (type.image.dim) { case Dim1D: - res += "1D"; + bit = type.image.arrayed ? Query1DArray : Query1D; break; + case Dim2D: - res += "2D"; + if (type.image.ms) + bit = type.image.arrayed ? Query2DMSArray : Query2DMS; + else + bit = type.image.arrayed ? Query2DArray : Query2D; break; + case Dim3D: - res += "3D"; + bit = Query3D; break; + case DimCube: - res += "CUBE"; + bit = type.image.arrayed ? 
QueryCubeArray : QueryCube; break; case DimBuffer: - res += "Buffer"; + bit = QueryBuffer; break; - case DimSubpassData: - res += "2D"; - break; default: - SPIRV_CROSS_THROW("Only 1D, 2D, 3D, Buffer, InputTarget and Cube textures supported."); + SPIRV_CROSS_THROW("Unsupported query type."); } - if (type.image.ms) - res += "MS"; - if (type.image.arrayed) - res += "Array"; - - return res; -} + switch (get(type.image.type).basetype) + { + case SPIRType::Float: + bit += QueryTypeFloat; + break; -string CompilerHLSL::image_type_hlsl(const SPIRType &type, uint32_t id) -{ - if (hlsl_options.shader_model <= 30) - return image_type_hlsl_legacy(type, id); - else - return image_type_hlsl_modern(type, id); -} + case SPIRType::Int: + bit += QueryTypeInt; + break; -std::string CompilerHLSL::to_initializer_expression(const SPIRVariable &var) -{ - // We cannot emit static const initializer for block constants for practical reasons, - // so just inline the initializer. - // FIXME: There is a theoretical problem here if someone tries to composite extract - // into this initializer since we don't declare it properly, but that is somewhat non-sensical. 
- auto &type = get(var.basetype); - bool is_block = has_decoration(type.self, DecorationBlock); - auto *c = maybe_get(var.initializer); - if (is_block && c) - return constant_expression(*c); - else - return CompilerGLSL_to_initializer_expression(var); -} + case SPIRType::UInt: + bit += QueryTypeUInt; + break; -void CompilerHLSL::emit_interface_block_member_in_struct(const SPIRVariable &var, uint32_t member_index, - uint32_t location, - std::unordered_set &active_locations) -{ - auto &execution = get_entry_point(); - auto type = get(var.basetype); - auto semantic = to_semantic(location, execution.model, var.storage); - auto mbr_name = join(to_name(type.self), "_", to_member_name(type, member_index)); - auto &mbr_type = get(type.member_types[member_index]); + default: + SPIRV_CROSS_THROW("Unsupported query type."); + } - statement(to_interpolation_qualifiers(get_member_decoration_bitset(type.self, member_index)), - type_to_glsl(mbr_type), - " ", mbr_name, type_to_array_glsl(mbr_type), - " : ", semantic, ";"); + auto norm_state = image_format_to_normalized_state(type.image.format); + auto &variant = uav ? required_texture_size_variants + .uav[uint32_t(norm_state)][image_format_to_components(type.image.format) - 1] : + required_texture_size_variants.srv; - // Structs and arrays should consume more locations. 
- uint32_t consumed_locations = type_to_consumed_locations(mbr_type); - for (uint32_t i = 0; i < consumed_locations; i++) - active_locations.insert(location + i); + uint64_t mask = 1ull << bit; + if ((variant & mask) == 0) + { + force_recompile(); + variant |= mask; + } } -void CompilerHLSL::set_hlsl_aux_buffer_binding(HLSLAuxBinding binding, uint32_t register_index, uint32_t register_space) +std::string CompilerHLSL::bitcast_expression(SPIRType::BaseType target_type, uint32_t arg) { - if (binding == HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE) + auto expr = to_expression(arg); + auto &src_type = expression_type(arg); + if (src_type.basetype != target_type) { - base_vertex_info.explicit_binding = true; - base_vertex_info.register_space = register_space; - base_vertex_info.register_index = register_index; + auto target = src_type; + target.basetype = target_type; + expr = join(bitcast_glsl_op(target, src_type), "(", expr, ")"); } -} -void CompilerHLSL::unset_hlsl_aux_buffer_binding(HLSLAuxBinding binding) -{ - if (binding == HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE) - base_vertex_info.explicit_binding = false; + return expr; } -bool CompilerHLSL::is_hlsl_aux_buffer_binding_used(HLSLAuxBinding binding) const +std::string CompilerHLSL::bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, + const std::string &expr) +{ + if (target_type.basetype == expr_type) + return expr; + + auto src_type = target_type; + src_type.basetype = expr_type; + return join(bitcast_glsl_op(target_type, src_type), "(", expr, ")"); +} + +void CompilerHLSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type) +{ + string cast_op0, cast_op1; + auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type); + auto &out_type = get(result_type); + + // Special case boolean outputs since relational opcodes output 
booleans instead of int/uint. + string expr; + if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerHLSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, SPIRType::BaseType input_type) +{ + auto &out_type = get(result_type); + auto expected_type = out_type; + expected_type.basetype = input_type; + string cast_op0 = + expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); + string cast_op1 = + expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1); + string cast_op2 = + expression_type(op2).basetype != input_type ? bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2); + + string expr; + if (out_type.basetype != input_type) + { + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void CompilerHLSL::emit_emulated_ahyper_op(uint32_t result_type, uint32_t id, uint32_t op0, GLSLstd450 op) +{ + const char *one = backend.float_literal_suffix ? 
"1.0f" : "1.0"; + std::string expr; + bool forward = should_forward(op0); + + switch (op) + { + case GLSLstd450Asinh: + expr = join("log(", to_enclosed_expression(op0), " + sqrt(", to_enclosed_expression(op0), " * ", + to_enclosed_expression(op0), " + ", one, "))"); + emit_op(result_type, id, expr, forward); + break; + + case GLSLstd450Acosh: + expr = join("log(", to_enclosed_expression(op0), " + sqrt(", to_enclosed_expression(op0), " * ", + to_enclosed_expression(op0), " - ", one, "))"); + break; + + case GLSLstd450Atanh: + expr = join("log((", one, " + ", to_enclosed_expression(op0), + ") / " + "(", + one, " - ", to_enclosed_expression(op0), ")) * 0.5", backend.float_literal_suffix ? "f" : ""); + break; + + default: + SPIRV_CROSS_THROW("Invalid op."); + } + + emit_op(result_type, id, expr, forward); + inherit_expression_dependencies(id, op0); +} + +void CompilerHLSL::emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, bool negate, SPIRType::BaseType expected_type) +{ + auto &type0 = expression_type(op0); + auto &type1 = expression_type(op1); + + SPIRType target_type0 = type0; + SPIRType target_type1 = type1; + target_type0.basetype = expected_type; + target_type1.basetype = expected_type; + target_type0.vecsize = 1; + target_type1.vecsize = 1; + + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + // Make sure to call to_expression multiple times to ensure + // that these expressions are properly flushed to temporaries if needed. 
+ if (negate) + expr += "!("; + + if (expected_type != SPIRType::Unknown && type0.basetype != expected_type) + expr += bitcast_expression(target_type0, type0.basetype, to_extract_component_expression(op0, i)); + else + expr += to_extract_component_expression(op0, i); + + expr += ' '; + expr += op; + expr += ' '; + + if (expected_type != SPIRType::Unknown && type1.basetype != expected_type) + expr += bitcast_expression(target_type1, type1.basetype, to_extract_component_expression(op1, i)); + else + expr += to_extract_component_expression(op1, i); + + if (negate) + expr += ")"; + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +string CompilerHLSL::to_extract_component_expression(uint32_t id, uint32_t index) +{ + auto expr = to_enclosed_expression(id); + if (has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked)) + return join(expr, "[", index, "]"); + else + return join(expr, ".", index_to_swizzle(index)); +} + +void CompilerHLSL::emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op) +{ + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + // Make sure to call to_expression multiple times to ensure + // that these expressions are properly flushed to temporaries if needed. + expr += op; + expr += to_extract_component_expression(operand, i); + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(operand)); + + inherit_expression_dependencies(result_id, operand); +} + +void CompilerHLSL::emit_while_loop_initializers(const SPIRBlock &block) +{ + // While loops do not take initializers, so declare all of them outside. 
+ for (auto &loop_var : block.loop_variables) + { + auto &var = get(loop_var); + statement(variable_decl(var), ";"); + } +} + +#ifndef SPIRV_CROSS_WEBMIN +CompilerHLSL::ShaderSubgroupSupportHelper::Result::Result() +{ + for (auto &weight : weights) + weight = 0; + + // Make sure KHR_shader_subgroup extensions are always prefered. + const uint32_t big_num = FeatureCount; + weights[KHR_shader_subgroup_ballot] = big_num; + weights[KHR_shader_subgroup_basic] = big_num; + weights[KHR_shader_subgroup_vote] = big_num; + weights[KHR_shader_subgroup_arithmetic] = big_num; +} + +string CompilerHLSL::image_type_hlsl_legacy(const SPIRType &type, uint32_t /*id*/) +{ + auto &imagetype = get(type.image.type); + string res; + + switch (imagetype.basetype) + { + case SPIRType::Int: + res = "i"; + break; + case SPIRType::UInt: + res = "u"; + break; + default: + break; + } + + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) + return res + "subpassInput" + (type.image.ms ? "MS" : ""); + + // If we're emulating subpassInput with samplers, force sampler2D + // so we don't have to specify format. + if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + { + // Sampler buffers are always declared as samplerBuffer even though they might be separate images in the SPIR-V. + if (type.image.dim == DimBuffer && type.image.sampled == 1) + res += "sampler"; + else + res += type.image.sampled == 2 ? 
"image" : "texture"; + } + else + res += "sampler"; + + switch (type.image.dim) + { + case Dim1D: + res += "1D"; + break; + case Dim2D: + res += "2D"; + break; + case Dim3D: + res += "3D"; + break; + case DimCube: + res += "CUBE"; + break; + + case DimBuffer: + res += "Buffer"; + break; + + case DimSubpassData: + res += "2D"; + break; + default: + SPIRV_CROSS_THROW("Only 1D, 2D, 3D, Buffer, InputTarget and Cube textures supported."); + } + + if (type.image.ms) + res += "MS"; + if (type.image.arrayed) + res += "Array"; + + return res; +} + +string CompilerHLSL::image_type_hlsl(const SPIRType &type, uint32_t id) +{ + if (hlsl_options.shader_model <= 30) + return image_type_hlsl_legacy(type, id); + else + return image_type_hlsl_modern(type, id); +} + +std::string CompilerHLSL::to_initializer_expression(const SPIRVariable &var) +{ + // We cannot emit static const initializer for block constants for practical reasons, + // so just inline the initializer. + // FIXME: There is a theoretical problem here if someone tries to composite extract + // into this initializer since we don't declare it properly, but that is somewhat non-sensical. 
+ auto &type = get(var.basetype); + bool is_block = has_decoration(type.self, DecorationBlock); + auto *c = maybe_get(var.initializer); + if (is_block && c) + return constant_expression(*c); + else + return CompilerGLSL_to_initializer_expression(var); +} + +void CompilerHLSL::emit_interface_block_member_in_struct(const SPIRVariable &var, uint32_t member_index, + uint32_t location, + std::unordered_set &active_locations) +{ + auto &execution = get_entry_point(); + auto type = get(var.basetype); + auto semantic = to_semantic(location, execution.model, var.storage); + auto mbr_name = join(to_name(type.self), "_", to_member_name(type, member_index)); + auto &mbr_type = get(type.member_types[member_index]); + + statement(to_interpolation_qualifiers(get_member_decoration_bitset(type.self, member_index)), + type_to_glsl(mbr_type), + " ", mbr_name, type_to_array_glsl(mbr_type), + " : ", semantic, ";"); + + // Structs and arrays should consume more locations. + uint32_t consumed_locations = type_to_consumed_locations(mbr_type); + for (uint32_t i = 0; i < consumed_locations; i++) + active_locations.insert(location + i); +} + +void CompilerHLSL::set_hlsl_aux_buffer_binding(HLSLAuxBinding binding, uint32_t register_index, uint32_t register_space) +{ + if (binding == HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE) + { + base_vertex_info.explicit_binding = true; + base_vertex_info.register_space = register_space; + base_vertex_info.register_index = register_index; + } +} + +void CompilerHLSL::unset_hlsl_aux_buffer_binding(HLSLAuxBinding binding) +{ + if (binding == HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE) + base_vertex_info.explicit_binding = false; +} + +bool CompilerHLSL::is_hlsl_aux_buffer_binding_used(HLSLAuxBinding binding) const { if (binding == HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE) return base_vertex_info.used; @@ -17461,77 +17787,6 @@ case OpGroupNonUniform##op: \ register_control_dependent_expression(id); } -void CompilerHLSL::require_texture_query_variant(uint32_t var_id) -{ - 
if (const auto *var = maybe_get_backing_variable(var_id)) - var_id = var->self; - - auto &type = expression_type(var_id); - bool uav = type.image.sampled == 2; - if (hlsl_options.nonwritable_uav_texture_as_srv && has_decoration(var_id, DecorationNonWritable)) - uav = false; - - uint32_t bit = 0; - switch (type.image.dim) - { - case Dim1D: - bit = type.image.arrayed ? Query1DArray : Query1D; - break; - - case Dim2D: - if (type.image.ms) - bit = type.image.arrayed ? Query2DMSArray : Query2DMS; - else - bit = type.image.arrayed ? Query2DArray : Query2D; - break; - - case Dim3D: - bit = Query3D; - break; - - case DimCube: - bit = type.image.arrayed ? QueryCubeArray : QueryCube; - break; - - case DimBuffer: - bit = QueryBuffer; - break; - - default: - SPIRV_CROSS_THROW("Unsupported query type."); - } - - switch (get(type.image.type).basetype) - { - case SPIRType::Float: - bit += QueryTypeFloat; - break; - - case SPIRType::Int: - bit += QueryTypeInt; - break; - - case SPIRType::UInt: - bit += QueryTypeUInt; - break; - - default: - SPIRV_CROSS_THROW("Unsupported query type."); - } - - auto norm_state = image_format_to_normalized_state(type.image.format); - auto &variant = uav ? 
required_texture_size_variants - .uav[uint32_t(norm_state)][image_format_to_components(type.image.format) - 1] : - required_texture_size_variants.srv; - - uint64_t mask = 1ull << bit; - if ((variant & mask) == 0) - { - force_recompile(); - variant |= mask; - } -} - VariableID CompilerHLSL::remap_num_workgroups_builtin() { update_active_builtins(); @@ -18067,48 +18322,6 @@ string CompilerHLSL::to_combined_image_sampler(VariableID image_id, VariableID s } } -string CompilerHLSL::to_extract_component_expression(uint32_t id, uint32_t index) -{ - auto expr = to_enclosed_expression(id); - if (has_extended_decoration(id, SPIRVCrossDecorationPhysicalTypePacked)) - return join(expr, "[", index, "]"); - else - return join(expr, ".", index_to_swizzle(index)); -} - -void CompilerHLSL::emit_emulated_ahyper_op(uint32_t result_type, uint32_t id, uint32_t op0, GLSLstd450 op) -{ - const char *one = backend.float_literal_suffix ? "1.0f" : "1.0"; - std::string expr; - bool forward = should_forward(op0); - - switch (op) - { - case GLSLstd450Asinh: - expr = join("log(", to_enclosed_expression(op0), " + sqrt(", - to_enclosed_expression(op0), " * ", to_enclosed_expression(op0), " + ", one, "))"); - emit_op(result_type, id, expr, forward); - break; - - case GLSLstd450Acosh: - expr = join("log(", to_enclosed_expression(op0), " + sqrt(", - to_enclosed_expression(op0), " * ", to_enclosed_expression(op0), " - ", one, "))"); - break; - - case GLSLstd450Atanh: - expr = join("log((", one, " + ", to_enclosed_expression(op0), ") / " - "(", one, " - ", to_enclosed_expression(op0), ")) * 0.5", - backend.float_literal_suffix ? 
"f" : ""); - break; - - default: - SPIRV_CROSS_THROW("Invalid op."); - } - - emit_op(result_type, id, expr, forward); - inherit_expression_dependencies(id, op0); -} - void CompilerHLSL::convert_non_uniform_expression(string &expr, uint32_t ptr_id) { if (*backend.nonuniform_qualifier == '\0') @@ -18302,153 +18515,57 @@ std::pair CompilerHLSL::flattened_access_chain_offset( "4-component vector. " "Likely culprit here is a row-major matrix being accessed dynamically. " "This cannot be flattened. Try using std140 layout instead."); - } - - expr += to_enclosed_expression(index, false); - expr += " * "; - expr += convert_to_string(indexing_stride / word_stride); - expr += " + "; - } - - type = &get(type->parent_type); - } - // Vector -> Scalar - else if (type->vecsize > 1) - { - auto *constant = maybe_get(index); - if (constant) - { - index = evaluate_constant_u32(index); - offset += index * (row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8)); - } - else - { - uint32_t indexing_stride = row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8); - - // Dynamic array access. - if (indexing_stride % word_stride) - { - SPIRV_CROSS_THROW("Stride for dynamic vector indexing must be divisible by the " - "size of a 4-component vector. 
" - "This cannot be flattened in legacy targets."); - } - - expr += to_enclosed_expression(index, false); - expr += " * "; - expr += convert_to_string(indexing_stride / word_stride); - expr += " + "; - } - - type = &get(type->parent_type); - } - else - SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); - } - - if (need_transpose) - *need_transpose = row_major_matrix_needs_conversion; - if (out_matrix_stride) - *out_matrix_stride = matrix_stride; - if (out_array_stride) - *out_array_stride = array_stride; - - return std::make_pair(expr, offset); -} - -std::string CompilerHLSL::bitcast_expression(SPIRType::BaseType target_type, uint32_t arg) -{ - auto expr = to_expression(arg); - auto &src_type = expression_type(arg); - if (src_type.basetype != target_type) - { - auto target = src_type; - target.basetype = target_type; - expr = join(bitcast_glsl_op(target, src_type), "(", expr, ")"); - } - - return expr; -} - -std::string CompilerHLSL::bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, - const std::string &expr) -{ - if (target_type.basetype == expr_type) - return expr; - - auto src_type = target_type; - src_type.basetype = expr_type; - return join(bitcast_glsl_op(target_type, src_type), "(", expr, ")"); -} - -void CompilerHLSL::emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op) -{ - auto &type = get(result_type); - auto expr = type_to_glsl_constructor(type); - expr += '('; - for (uint32_t i = 0; i < type.vecsize; i++) - { - // Make sure to call to_expression multiple times to ensure - // that these expressions are properly flushed to temporaries if needed. 
- expr += op; - expr += to_extract_component_expression(operand, i); - - if (i + 1 < type.vecsize) - expr += ", "; - } - expr += ')'; - emit_op(result_type, result_id, expr, should_forward(operand)); - - inherit_expression_dependencies(result_id, operand); -} - -void CompilerHLSL::emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, - const char *op, bool negate, SPIRType::BaseType expected_type) -{ - auto &type0 = expression_type(op0); - auto &type1 = expression_type(op1); - - SPIRType target_type0 = type0; - SPIRType target_type1 = type1; - target_type0.basetype = expected_type; - target_type1.basetype = expected_type; - target_type0.vecsize = 1; - target_type1.vecsize = 1; - - auto &type = get(result_type); - auto expr = type_to_glsl_constructor(type); - expr += '('; - for (uint32_t i = 0; i < type.vecsize; i++) - { - // Make sure to call to_expression multiple times to ensure - // that these expressions are properly flushed to temporaries if needed. - if (negate) - expr += "!("; - - if (expected_type != SPIRType::Unknown && type0.basetype != expected_type) - expr += bitcast_expression(target_type0, type0.basetype, to_extract_component_expression(op0, i)); - else - expr += to_extract_component_expression(op0, i); + } - expr += ' '; - expr += op; - expr += ' '; + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } - if (expected_type != SPIRType::Unknown && type1.basetype != expected_type) - expr += bitcast_expression(target_type1, type1.basetype, to_extract_component_expression(op1, i)); - else - expr += to_extract_component_expression(op1, i); + type = &get(type->parent_type); + } + // Vector -> Scalar + else if (type->vecsize > 1) + { + auto *constant = maybe_get(index); + if (constant) + { + index = evaluate_constant_u32(index); + offset += index * (row_major_matrix_needs_conversion ? 
matrix_stride : (type->width / 8)); + } + else + { + uint32_t indexing_stride = row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8); - if (negate) - expr += ")"; + // Dynamic array access. + if (indexing_stride % word_stride) + { + SPIRV_CROSS_THROW("Stride for dynamic vector indexing must be divisible by the " + "size of a 4-component vector. " + "This cannot be flattened in legacy targets."); + } - if (i + 1 < type.vecsize) - expr += ", "; + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } + + type = &get(type->parent_type); + } + else + SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); } - expr += ')'; - emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); - inherit_expression_dependencies(result_id, op0); - inherit_expression_dependencies(result_id, op1); + if (need_transpose) + *need_transpose = row_major_matrix_needs_conversion; + if (out_matrix_stride) + *out_matrix_stride = matrix_stride; + if (out_array_stride) + *out_array_stride = array_stride; + + return std::make_pair(expr, offset); } uint32_t CompilerHLSL::mask_relevant_memory_semantics(uint32_t semantics) @@ -18487,17 +18604,6 @@ void CompilerHLSL::emit_line_directive(uint32_t file_id, uint32_t line_literal) } } -void CompilerHLSL::emit_while_loop_initializers(const SPIRBlock &block) -{ - // While loops do not take initializers, so declare all of them outside. 
- for (auto &loop_var : block.loop_variables) - { - auto &var = get(loop_var); - statement(variable_decl(var), ";"); - } -} - - string CompilerHLSL::address_of_expression(const std::string &expr) { if (expr.size() > 3 && expr[0] == '(' && expr[1] == '*' && expr.back() == ')') @@ -19358,65 +19464,6 @@ void CompilerHLSL::request_workaround_wrapper_overload(TypeID id) } } -void CompilerHLSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, - const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type) -{ - string cast_op0, cast_op1; - auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type); - auto &out_type = get(result_type); - - // Special case boolean outputs since relational opcodes output booleans instead of int/uint. - string expr; - if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) - { - expected_type.basetype = input_type; - expr = bitcast_glsl_op(out_type, expected_type); - expr += '('; - expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); - expr += ')'; - } - else - { - expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); - } - - emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); - inherit_expression_dependencies(result_id, op0); - inherit_expression_dependencies(result_id, op1); -} - -void CompilerHLSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, - uint32_t op2, const char *op, SPIRType::BaseType input_type) -{ - auto &out_type = get(result_type); - auto expected_type = out_type; - expected_type.basetype = input_type; - string cast_op0 = - expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); - string cast_op1 = - expression_type(op1).basetype != input_type ? 
bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1); - string cast_op2 = - expression_type(op2).basetype != input_type ? bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2); - - string expr; - if (out_type.basetype != input_type) - { - expr = bitcast_glsl_op(out_type, expected_type); - expr += '('; - expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); - expr += ')'; - } - else - { - expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); - } - - emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); - inherit_expression_dependencies(result_id, op0); - inherit_expression_dependencies(result_id, op1); - inherit_expression_dependencies(result_id, op2); -} - void CompilerHLSL::emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op) { // Need to emulate this call. @@ -20188,82 +20235,6 @@ void CompilerHLSL::emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t id, } #else -CompilerHLSL::BitcastType CompilerHLSL::get_bitcast_type(uint32_t result_type, uint32_t op0) -{ - auto &rslt_type = get(result_type); - auto &expr_type = expression_type(op0); - - if (rslt_type.basetype == SPIRType::BaseType::UInt64 && expr_type.basetype == SPIRType::BaseType::UInt && - expr_type.vecsize == 2) - return BitcastType::TypePackUint2x32; - else if (rslt_type.basetype == SPIRType::BaseType::UInt && rslt_type.vecsize == 2 && - expr_type.basetype == SPIRType::BaseType::UInt64) - return BitcastType::TypeUnpackUint64; - - return BitcastType::TypeNormal; -} - - -string CompilerHLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) -{ - if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int) - return type_to_glsl(out_type); - else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64) - return type_to_glsl(out_type); - else if (out_type.basetype == SPIRType::UInt && in_type.basetype == 
SPIRType::Float) - return "asuint"; - else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt) - return type_to_glsl(out_type); - else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64) - return type_to_glsl(out_type); - else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) - return "asint"; - else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) - return "asfloat"; - else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) - return "asfloat"; - else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) - SPIRV_CROSS_THROW("Double to Int64 is not supported in HLSL."); - else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) - SPIRV_CROSS_THROW("Double to UInt64 is not supported in HLSL."); - else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) - return "asdouble"; - else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) - return "asdouble"; - else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) - { - if (!requires_explicit_fp16_packing) - { - requires_explicit_fp16_packing = true; - force_recompile(); - } - return "spvUnpackFloat2x16"; - } - else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2) - { - if (!requires_explicit_fp16_packing) - { - requires_explicit_fp16_packing = true; - force_recompile(); - } - return "spvPackFloat2x16"; - } - else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Half) - { - if (hlsl_options.shader_model < 40) - SPIRV_CROSS_THROW("Half to UShort requires Shader Model 4."); - return "(" + type_to_glsl(out_type) + ")f32tof16"; - } - else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UShort) - { - if 
(hlsl_options.shader_model < 40) - SPIRV_CROSS_THROW("UShort to Half requires Shader Model 4."); - return "(" + type_to_glsl(out_type) + ")f16tof32"; - } - else - return ""; -} - CompilerHLSL::ShaderSubgroupSupportHelper::Result::Result() { SPIRV_CROSS_INVALID_CALL(); @@ -20409,12 +20380,6 @@ void CompilerHLSL::emit_subgroup_op(const Instruction &) SPIRV_CROSS_THROW("Invalid call."); } -void CompilerHLSL::require_texture_query_variant(uint32_t) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - VariableID CompilerHLSL::remap_num_workgroups_builtin() { SPIRV_CROSS_INVALID_CALL(); @@ -20499,18 +20464,6 @@ string CompilerHLSL::to_combined_image_sampler(VariableID , VariableID) SPIRV_CROSS_THROW("Invalid call."); } -string CompilerHLSL::to_extract_component_expression(uint32_t, uint32_t) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - -void CompilerHLSL::emit_emulated_ahyper_op(uint32_t, uint32_t, uint32_t, GLSLstd450) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - void CompilerHLSL::convert_non_uniform_expression(string &, uint32_t) { SPIRV_CROSS_INVALID_CALL(); @@ -20531,32 +20484,6 @@ std::pair CompilerHLSL::flattened_access_chain_offset( SPIRV_CROSS_THROW("Invalid call."); } -std::string CompilerHLSL::bitcast_expression(SPIRType::BaseType, uint32_t) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - -std::string CompilerHLSL::bitcast_expression(const SPIRType &, SPIRType::BaseType, - const std::string &) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - -void CompilerHLSL::emit_unrolled_unary_op(uint32_t, uint32_t, uint32_t, const char *) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - -void CompilerHLSL::emit_unrolled_binary_op(uint32_t, uint32_t, uint32_t, uint32_t, - const char *, bool, SPIRType::BaseType) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - uint32_t 
CompilerHLSL::mask_relevant_memory_semantics(uint32_t) { SPIRV_CROSS_INVALID_CALL(); @@ -20575,12 +20502,6 @@ void CompilerHLSL::emit_line_directive(uint32_t, uint32_t) SPIRV_CROSS_THROW("Invalid call."); } -void CompilerHLSL::emit_while_loop_initializers(const SPIRBlock &) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - string CompilerHLSL::address_of_expression(const std::string &) { SPIRV_CROSS_INVALID_CALL(); @@ -20788,20 +20709,6 @@ void CompilerHLSL::request_workaround_wrapper_overload(TypeID) SPIRV_CROSS_THROW("Invalid call."); } -void CompilerHLSL::emit_binary_func_op_cast(uint32_t, uint32_t, uint32_t, uint32_t, - const char *, SPIRType::BaseType, bool) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - -void CompilerHLSL::emit_trinary_func_op_cast(uint32_t, uint32_t, uint32_t, uint32_t, - uint32_t, const char *, SPIRType::BaseType) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} - void CompilerHLSL::emit_nminmax_op(uint32_t, uint32_t, uint32_t, uint32_t, GLSLstd450) { SPIRV_CROSS_INVALID_CALL(); From 8b737344f3523d46bf4558a00431768e59c1e1b8 Mon Sep 17 00:00:00 2001 From: Cedric Guillemet <1312968+CedricGuillemet@users.noreply.github.com> Date: Fri, 20 Mar 2026 09:24:03 +0100 Subject: [PATCH 2/3] revert InvalidCall change --- spirv_hlsl.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp index ef7b5d6af..cf460d453 100644 --- a/spirv_hlsl.cpp +++ b/spirv_hlsl.cpp @@ -31,10 +31,7 @@ using namespace spv; using namespace SPIRV_CROSS_NAMESPACE; using namespace std; -void InvalidCall() { - throw std::runtime_error("Invalid call"); -} -#define SPIRV_CROSS_INVALID_CALL() InvalidCall(); +#define SPIRV_CROSS_INVALID_CALL() assert(false); enum ExtraSubExpressionType { From 80a01c2bfff6c846b2757a0e1bbf29afbf25edbb Mon Sep 17 00:00:00 2001 From: Cedric Guillemet <1312968+CedricGuillemet@users.noreply.github.com> Date: Fri, 20 Mar 2026 
14:38:34 +0100 Subject: [PATCH 3/3] msl missing functions --- spirv_msl.cpp | 653 ++++++++++++++++++++++++-------------------------- 1 file changed, 307 insertions(+), 346 deletions(-) diff --git a/spirv_msl.cpp b/spirv_msl.cpp index dd6029187..c887633ae 100644 --- a/spirv_msl.cpp +++ b/spirv_msl.cpp @@ -24275,55 +24275,274 @@ bool CompilerMSL::optimize_read_modify_write(const SPIRType &type, const string return true; } -#ifndef SPIRV_CROSS_WEBMIN +bool CompilerMSL::emit_complex_bitcast(uint32_t, uint32_t, uint32_t) +{ + // This is handled from the outside where we deal with PtrToU/UToPtr and friends. + return false; +} -void CompilerMSL::store_flattened_struct(const string &basename, uint32_t rhs_id, const SPIRType &type, - const SmallVector &indices) +string CompilerMSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) { - SmallVector sub_indices = indices; - sub_indices.push_back(0); + if (out_type.basetype == in_type.basetype) + return ""; - auto *member_type = &type; - for (auto &index : indices) - member_type = &get(member_type->member_types[index]); + assert(out_type.basetype != SPIRType::Boolean); + assert(in_type.basetype != SPIRType::Boolean); - for (uint32_t i = 0; i < uint32_t(member_type->member_types.size()); i++) + bool integral_cast = type_is_integral(out_type) && type_is_integral(in_type) && (out_type.vecsize == in_type.vecsize); + bool same_size_cast = (out_type.width * out_type.vecsize) == (in_type.width * in_type.vecsize); + + // Bitcasting can only be used between types of the same overall size. + // And always formally cast between integers, because it's trivial, and also + // because Metal can internally cast the results of some integer ops to a larger + // size (eg. short shift right becomes int), which means chaining integer ops + // together may introduce size variations that SPIR-V doesn't know about. 
+ if (same_size_cast && !integral_cast) + return "as_type<" + type_to_glsl(out_type) + ">"; + else + return type_to_glsl(out_type); +} + +const char *CompilerMSL::descriptor_address_space(uint32_t id, StorageClass storage, const char *plain_address_space) const +{ + if (msl_options.argument_buffers) { - sub_indices.back() = i; - auto lhs = join(basename, "_", to_member_name(*member_type, i)); - ParsedIR::sanitize_underscores(lhs); + bool storage_class_is_descriptor = storage == StorageClassUniform || + storage == StorageClassStorageBuffer || + storage == StorageClassUniformConstant; - if (get(member_type->member_types[i]).basetype == SPIRType::Struct) + uint32_t desc_set = get_decoration(id, DecorationDescriptorSet); + if (storage_class_is_descriptor && descriptor_set_is_argument_buffer(desc_set)) { - store_flattened_struct(lhs, rhs_id, type, sub_indices); + // An awkward case where we need to emit *more* address space declarations (yay!). + // An example is where we pass down an array of buffer pointers to leaf functions. + // It's a constant array containing pointers to constants. + // The pointer array is always constant however. E.g. + // device SSBO * constant (&array)[N]. + // const device SSBO * constant (&array)[N]. + // constant SSBO * constant (&array)[N]. + // However, this only matters for argument buffers, since for MSL 1.0 style codegen, + // we emit the buffer array on stack instead, and that seems to work just fine apparently. + + // If the argument was marked as being in device address space, any pointer to member would + // be const device, not constant. + if (argument_buffer_device_storage_mask & (1u << desc_set)) + return "const device"; + else + return "constant"; } + } + + return plain_address_space; +} + +bool CompilerMSL::emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id, + StorageClass lhs_storage, StorageClass rhs_storage) +{ + // Allow Metal to use the array template to make arrays a value type. 
+ // This, however, cannot be used for threadgroup address specifiers, so consider the custom array copy as fallback. + bool lhs_is_thread_storage = storage_class_array_is_thread(lhs_storage); + bool rhs_is_thread_storage = storage_class_array_is_thread(rhs_storage); + + bool lhs_is_array_template = lhs_is_thread_storage; + bool rhs_is_array_template = rhs_is_thread_storage; + + // Special considerations for stage IO variables. + // If the variable is actually backed by non-user visible device storage, we use array templates for those. + // + // Another special consideration is given to thread local variables which happen to have Offset decorations + // applied to them. Block-like types do not use array templates, so we need to force POD path if we detect + // these scenarios. This check isn't perfect since it would be technically possible to mix and match these things, + // and for a fully correct solution we might have to track array template state through access chains as well, + // but for all reasonable use cases, this should suffice. + // This special case should also only apply to Function/Private storage classes. + // We should not check backing variable for temporaries. 
+ auto *lhs_var = maybe_get_backing_variable(lhs_id); + if (lhs_var && lhs_storage == StorageClassStorageBuffer && storage_class_array_is_thread(lhs_var->storage)) + lhs_is_array_template = true; + else if (lhs_var && (lhs_storage == StorageClassFunction || lhs_storage == StorageClassPrivate) && + type_is_block_like(get(lhs_var->basetype))) + lhs_is_array_template = false; + + auto *rhs_var = maybe_get_backing_variable(rhs_id); + if (rhs_var && rhs_storage == StorageClassStorageBuffer && storage_class_array_is_thread(rhs_var->storage)) + rhs_is_array_template = true; + else if (rhs_var && (rhs_storage == StorageClassFunction || rhs_storage == StorageClassPrivate) && + type_is_block_like(get(rhs_var->basetype))) + rhs_is_array_template = false; + + // If threadgroup storage qualifiers are *not* used: + // Avoid spvCopy* wrapper functions; Otherwise, spvUnsafeArray<> template cannot be used with that storage qualifier. + if (lhs_is_array_template && rhs_is_array_template && !using_builtin_array()) + { + // Fall back to normal copy path. + return false; + } + else + { + // Ensure the LHS variable has been declared + if (lhs_var) + flush_variable_declaration(lhs_var->self); + + string lhs; + if (expr) + lhs = expr; else + lhs = to_expression(lhs_id); + + // Assignment from an array initializer is fine. + auto &type = expression_type(rhs_id); + auto *var = maybe_get_backing_variable(rhs_id); + + // Unfortunately, we cannot template on address space in MSL, + // so explicit address space redirection it is ... 
+ bool is_constant = false; + if (ir.ids[rhs_id].get_type() == TypeConstant) { - auto rhs = to_expression(rhs_id) + to_multi_member_reference(type, sub_indices); - statement(lhs, " = ", rhs, ";"); + is_constant = true; + } + else if (var && var->remapped_variable && var->statically_assigned && + ir.ids[var->static_expression].get_type() == TypeConstant) + { + is_constant = true; + } + else if (rhs_storage == StorageClassUniform || rhs_storage == StorageClassUniformConstant) + { + is_constant = true; + } + + // For the case where we have OpLoad triggering an array copy, + // we cannot easily detect this case ahead of time since it's + // context dependent. We might have to force a recompile here + // if this is the only use of array copies in our shader. + if (type.array.size() > 1) + { + if (type.array.size() > kArrayCopyMultidimMax) + SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays."); + auto func = static_cast(SPVFuncImplArrayCopyMultidimBase + type.array.size()); + add_spv_func_and_recompile(func); } + else + add_spv_func_and_recompile(SPVFuncImplArrayCopy); + + const char *tag = nullptr; + if (lhs_is_thread_storage && is_constant) + tag = "FromConstantToStack"; + else if (lhs_storage == StorageClassWorkgroup && is_constant) + tag = "FromConstantToThreadGroup"; + else if (lhs_is_thread_storage && rhs_is_thread_storage) + tag = "FromStackToStack"; + else if (lhs_storage == StorageClassWorkgroup && rhs_is_thread_storage) + tag = "FromStackToThreadGroup"; + else if (lhs_is_thread_storage && rhs_storage == StorageClassWorkgroup) + tag = "FromThreadGroupToStack"; + else if (lhs_storage == StorageClassWorkgroup && rhs_storage == StorageClassWorkgroup) + tag = "FromThreadGroupToThreadGroup"; + else if (lhs_storage == StorageClassStorageBuffer && rhs_storage == StorageClassStorageBuffer) + tag = "FromDeviceToDevice"; + else if (lhs_storage == StorageClassStorageBuffer && is_constant) + tag = "FromConstantToDevice"; + else if (lhs_storage == 
StorageClassStorageBuffer && rhs_storage == StorageClassWorkgroup)
+ tag = "FromThreadGroupToDevice";
+ else if (lhs_storage == StorageClassStorageBuffer && rhs_is_thread_storage)
+ tag = "FromStackToDevice";
+ else if (lhs_storage == StorageClassWorkgroup && rhs_storage == StorageClassStorageBuffer)
+ tag = "FromDeviceToThreadGroup";
+ else if (lhs_is_thread_storage && rhs_storage == StorageClassStorageBuffer)
+ tag = "FromDeviceToStack";
+ else
+ SPIRV_CROSS_THROW("Unknown storage class used for copying arrays.");
+
+ // Pass internal array of spvUnsafeArray<> into wrapper functions
+ if (lhs_is_array_template && rhs_is_array_template && !msl_options.force_native_arrays)
+ statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ".elements, ", to_expression(rhs_id), ".elements);");
+ else if (lhs_is_array_template && !msl_options.force_native_arrays)
+ statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ".elements, ", to_expression(rhs_id), ");");
+ else if (rhs_is_array_template && !msl_options.force_native_arrays)
+ statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ".elements);");
+ else
+ statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");");
 }
+
+ return true;
 }

-string CompilerMSL::to_multi_member_reference(const SPIRType &type, const SmallVector &indices)
+void CompilerMSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
+ const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type)
 {
- string ret;
- auto *member_type = &type;
- for (auto &index : indices)
+ string cast_op0, cast_op1;
+ auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type);
+ auto &out_type = get(result_type);
+
+ // Special case boolean outputs since relational opcodes output booleans instead of int/uint. 
+ string expr; + if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) { - ret += join(".", to_member_name(*member_type, index)); - member_type = &get(member_type->member_types[index]); + expected_type.basetype = input_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + expr += ')'; } - return ret; + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); } -void CompilerMSL::store_flattened_struct(uint32_t lhs_id, uint32_t value) +void CompilerMSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, SPIRType::BaseType input_type) { - auto &type = expression_type(lhs_id); - auto basename = to_flattened_access_chain_expression(lhs_id); - store_flattened_struct(basename, value, type, {}); + auto &out_type = get(result_type); + auto expected_type = out_type; + expected_type.basetype = input_type; + string cast_op0 = + expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); + string cast_op1 = + expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1); + string cast_op2 = + expression_type(op2).basetype != input_type ? 
bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2); + + string expr; + if (out_type.basetype != input_type) + { + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void CompilerMSL::emit_while_loop_initializers(const SPIRBlock &block) +{ + // While loops do not take initializers, so declare all of them outside. + for (auto &loop_var : block.loop_variables) + { + auto &var = get(loop_var); + statement(variable_decl(var), ";"); + } } +void CompilerMSL::end_scope_decl(const string &decl) +{ + if (!indent) + SPIRV_CROSS_THROW("Popping empty indent stack."); + indent--; + statement("} ", decl, ";"); +} string CompilerMSL::to_extract_constant_composite_expression(uint32_t result_type, const SPIRConstant &c, const uint32_t *chain, uint32_t length) @@ -24363,6 +24582,57 @@ string CompilerMSL::to_extract_constant_composite_expression(uint32_t result_typ return constant_expression(tmp); } +#ifndef SPIRV_CROSS_WEBMIN + +void CompilerMSL::store_flattened_struct(const string &basename, uint32_t rhs_id, const SPIRType &type, + const SmallVector &indices) +{ + SmallVector sub_indices = indices; + sub_indices.push_back(0); + + auto *member_type = &type; + for (auto &index : indices) + member_type = &get(member_type->member_types[index]); + + for (uint32_t i = 0; i < uint32_t(member_type->member_types.size()); i++) + { + sub_indices.back() = i; + auto lhs = join(basename, "_", to_member_name(*member_type, i)); + ParsedIR::sanitize_underscores(lhs); + + if (get(member_type->member_types[i]).basetype == 
SPIRType::Struct) + { + store_flattened_struct(lhs, rhs_id, type, sub_indices); + } + else + { + auto rhs = to_expression(rhs_id) + to_multi_member_reference(type, sub_indices); + statement(lhs, " = ", rhs, ";"); + } + } +} + +string CompilerMSL::to_multi_member_reference(const SPIRType &type, const SmallVector &indices) +{ + string ret; + auto *member_type = &type; + for (auto &index : indices) + { + ret += join(".", to_member_name(*member_type, index)); + member_type = &get(member_type->member_types[index]); + } + return ret; +} + +void CompilerMSL::store_flattened_struct(uint32_t lhs_id, uint32_t value) +{ + auto &type = expression_type(lhs_id); + auto basename = to_flattened_access_chain_expression(lhs_id); + store_flattened_struct(basename, value, type, {}); +} + + + void CompilerMSL::emit_copy_logical_type(uint32_t lhs_id, uint32_t lhs_type_id, uint32_t rhs_id, uint32_t rhs_type_id, SmallVector chain) { @@ -25734,79 +26004,22 @@ void CompilerMSL::emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op else if (expression_type(op0).vecsize > 1) { // If the number doesn't equal itself, it must be NaN - emit_binary_func_op(btype_id, left_nan_id, op0, op0, "notEqual"); - emit_binary_func_op(btype_id, right_nan_id, op1, op1, "notEqual"); - } - else - { - emit_binary_op(btype_id, left_nan_id, op0, op0, "!="); - emit_binary_op(btype_id, right_nan_id, op1, op1, "!="); - } - emit_binary_func_op(result_type, tmp_id, op0, op1, op == GLSLstd450NMin ? 
"min" : "max"); - emit_mix_op(result_type, mixed_first_id, tmp_id, op1, left_nan_id); - emit_mix_op(result_type, id, mixed_first_id, op0, right_nan_id); -} - - -void CompilerMSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, - const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type) -{ - string cast_op0, cast_op1; - auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type); - auto &out_type = get(result_type); - - // Special case boolean outputs since relational opcodes output booleans instead of int/uint. - string expr; - if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) - { - expected_type.basetype = input_type; - expr = bitcast_glsl_op(out_type, expected_type); - expr += '('; - expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); - expr += ')'; - } - else - { - expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); - } - - emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); - inherit_expression_dependencies(result_id, op0); - inherit_expression_dependencies(result_id, op1); -} - -void CompilerMSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, - uint32_t op2, const char *op, SPIRType::BaseType input_type) -{ - auto &out_type = get(result_type); - auto expected_type = out_type; - expected_type.basetype = input_type; - string cast_op0 = - expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0); - string cast_op1 = - expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1); - string cast_op2 = - expression_type(op2).basetype != input_type ? 
bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2); - - string expr; - if (out_type.basetype != input_type) - { - expr = bitcast_glsl_op(out_type, expected_type); - expr += '('; - expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); - expr += ')'; + emit_binary_func_op(btype_id, left_nan_id, op0, op0, "notEqual"); + emit_binary_func_op(btype_id, right_nan_id, op1, op1, "notEqual"); } else { - expr += join(op, "(", cast_op0, ", ", cast_op1, ", ", cast_op2, ")"); + emit_binary_op(btype_id, left_nan_id, op0, op0, "!="); + emit_binary_op(btype_id, right_nan_id, op1, op1, "!="); } - - emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1) && should_forward(op2)); - inherit_expression_dependencies(result_id, op0); - inherit_expression_dependencies(result_id, op1); - inherit_expression_dependencies(result_id, op2); + emit_binary_func_op(result_type, tmp_id, op0, op1, op == GLSLstd450NMin ? "min" : "max"); + emit_mix_op(result_type, mixed_first_id, tmp_id, op1, left_nan_id); + emit_mix_op(result_type, id, mixed_first_id, op0, right_nan_id); } + + + void CompilerMSL::emit_emulated_ahyper_op(uint32_t result_type, uint32_t id, uint32_t op0, GLSLstd450 op) { const char *one = backend.float_literal_suffix ? "1.0f" : "1.0"; @@ -26105,15 +26318,6 @@ void CompilerMSL::forward_relaxed_precision(uint32_t dst_id, const uint32_t *arg set_decoration(dst_id, DecorationRelaxedPrecision); } -void CompilerMSL::emit_while_loop_initializers(const SPIRBlock &block) -{ - // While loops do not take initializers, so declare all of them outside. 
- for (auto &loop_var : block.loop_variables)
- {
- auto &var = get(loop_var);
- statement(variable_decl(var), ";");
- }
-}


void CompilerMSL::emit_line_directive(uint32_t file_id, uint32_t line_literal)
@@ -27062,12 +27266,5 @@ uint32_t CompilerMSL::get_declared_member_location(const SPIRVariable &var, uint
}

-void CompilerMSL::end_scope_decl(const string &decl)
-{
- if (!indent)
- SPIRV_CROSS_THROW("Popping empty indent stack.");
- indent--;
- statement("} ", decl, ";");
-}

void CompilerMSL::end_scope(const string &trailer)
@@ -28120,33 +28318,7 @@ case OpGroupNonUniform##op: \
 register_control_dependent_expression(id);
}

-string CompilerMSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type)
-{
- if (out_type.basetype == in_type.basetype)
- return "";
-
- assert(out_type.basetype != SPIRType::Boolean);
- assert(in_type.basetype != SPIRType::Boolean);
-
- bool integral_cast = type_is_integral(out_type) && type_is_integral(in_type) && (out_type.vecsize == in_type.vecsize);
- bool same_size_cast = (out_type.width * out_type.vecsize) == (in_type.width * in_type.vecsize);
-
- // Bitcasting can only be used between types of the same overall size.
- // And always formally cast between integers, because it's trivial, and also
- // because Metal can internally cast the results of some integer ops to a larger
- // size (eg. short shift right becomes int), which means chaining integer ops
- // together may introduce size variations that SPIR-V doesn't know about. 
- return false; -} string CompilerMSL::constant_op_expression(const SPIRConstantOp &cop) { @@ -28168,38 +28340,6 @@ bool CompilerMSL::type_is_pointer_to_pointer(const SPIRType &type) const return type.pointer_depth > parent_type.pointer_depth && type_is_pointer(parent_type); } -const char *CompilerMSL::descriptor_address_space(uint32_t id, StorageClass storage, const char *plain_address_space) const -{ - if (msl_options.argument_buffers) - { - bool storage_class_is_descriptor = storage == StorageClassUniform || - storage == StorageClassStorageBuffer || - storage == StorageClassUniformConstant; - - uint32_t desc_set = get_decoration(id, DecorationDescriptorSet); - if (storage_class_is_descriptor && descriptor_set_is_argument_buffer(desc_set)) - { - // An awkward case where we need to emit *more* address space declarations (yay!). - // An example is where we pass down an array of buffer pointers to leaf functions. - // It's a constant array containing pointers to constants. - // The pointer array is always constant however. E.g. - // device SSBO * constant (&array)[N]. - // const device SSBO * constant (&array)[N]. - // constant SSBO * constant (&array)[N]. - // However, this only matters for argument buffers, since for MSL 1.0 style codegen, - // we emit the buffer array on stack instead, and that seems to work just fine apparently. - - // If the argument was marked as being in device address space, any pointer to member would - // be const device, not constant. 
- if (argument_buffer_device_storage_mask & (1u << desc_set)) - return "const device"; - else - return "constant"; - } - } - - return plain_address_space; -} string CompilerMSL::entry_point_args_argument_buffer(bool append_comma) { @@ -28717,136 +28857,6 @@ void CompilerMSL::emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uin flush_all_active_variables(); } -bool CompilerMSL::emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id, - StorageClass lhs_storage, StorageClass rhs_storage) -{ - // Allow Metal to use the array template to make arrays a value type. - // This, however, cannot be used for threadgroup address specifiers, so consider the custom array copy as fallback. - bool lhs_is_thread_storage = storage_class_array_is_thread(lhs_storage); - bool rhs_is_thread_storage = storage_class_array_is_thread(rhs_storage); - - bool lhs_is_array_template = lhs_is_thread_storage; - bool rhs_is_array_template = rhs_is_thread_storage; - - // Special considerations for stage IO variables. - // If the variable is actually backed by non-user visible device storage, we use array templates for those. - // - // Another special consideration is given to thread local variables which happen to have Offset decorations - // applied to them. Block-like types do not use array templates, so we need to force POD path if we detect - // these scenarios. This check isn't perfect since it would be technically possible to mix and match these things, - // and for a fully correct solution we might have to track array template state through access chains as well, - // but for all reasonable use cases, this should suffice. - // This special case should also only apply to Function/Private storage classes. - // We should not check backing variable for temporaries. 
- auto *lhs_var = maybe_get_backing_variable(lhs_id); - if (lhs_var && lhs_storage == StorageClassStorageBuffer && storage_class_array_is_thread(lhs_var->storage)) - lhs_is_array_template = true; - else if (lhs_var && (lhs_storage == StorageClassFunction || lhs_storage == StorageClassPrivate) && - type_is_block_like(get(lhs_var->basetype))) - lhs_is_array_template = false; - - auto *rhs_var = maybe_get_backing_variable(rhs_id); - if (rhs_var && rhs_storage == StorageClassStorageBuffer && storage_class_array_is_thread(rhs_var->storage)) - rhs_is_array_template = true; - else if (rhs_var && (rhs_storage == StorageClassFunction || rhs_storage == StorageClassPrivate) && - type_is_block_like(get(rhs_var->basetype))) - rhs_is_array_template = false; - - // If threadgroup storage qualifiers are *not* used: - // Avoid spvCopy* wrapper functions; Otherwise, spvUnsafeArray<> template cannot be used with that storage qualifier. - if (lhs_is_array_template && rhs_is_array_template && !using_builtin_array()) - { - // Fall back to normal copy path. - return false; - } - else - { - // Ensure the LHS variable has been declared - if (lhs_var) - flush_variable_declaration(lhs_var->self); - - string lhs; - if (expr) - lhs = expr; - else - lhs = to_expression(lhs_id); - - // Assignment from an array initializer is fine. - auto &type = expression_type(rhs_id); - auto *var = maybe_get_backing_variable(rhs_id); - - // Unfortunately, we cannot template on address space in MSL, - // so explicit address space redirection it is ... 
- bool is_constant = false; - if (ir.ids[rhs_id].get_type() == TypeConstant) - { - is_constant = true; - } - else if (var && var->remapped_variable && var->statically_assigned && - ir.ids[var->static_expression].get_type() == TypeConstant) - { - is_constant = true; - } - else if (rhs_storage == StorageClassUniform || rhs_storage == StorageClassUniformConstant) - { - is_constant = true; - } - - // For the case where we have OpLoad triggering an array copy, - // we cannot easily detect this case ahead of time since it's - // context dependent. We might have to force a recompile here - // if this is the only use of array copies in our shader. - if (type.array.size() > 1) - { - if (type.array.size() > kArrayCopyMultidimMax) - SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays."); - auto func = static_cast(SPVFuncImplArrayCopyMultidimBase + type.array.size()); - add_spv_func_and_recompile(func); - } - else - add_spv_func_and_recompile(SPVFuncImplArrayCopy); - - const char *tag = nullptr; - if (lhs_is_thread_storage && is_constant) - tag = "FromConstantToStack"; - else if (lhs_storage == StorageClassWorkgroup && is_constant) - tag = "FromConstantToThreadGroup"; - else if (lhs_is_thread_storage && rhs_is_thread_storage) - tag = "FromStackToStack"; - else if (lhs_storage == StorageClassWorkgroup && rhs_is_thread_storage) - tag = "FromStackToThreadGroup"; - else if (lhs_is_thread_storage && rhs_storage == StorageClassWorkgroup) - tag = "FromThreadGroupToStack"; - else if (lhs_storage == StorageClassWorkgroup && rhs_storage == StorageClassWorkgroup) - tag = "FromThreadGroupToThreadGroup"; - else if (lhs_storage == StorageClassStorageBuffer && rhs_storage == StorageClassStorageBuffer) - tag = "FromDeviceToDevice"; - else if (lhs_storage == StorageClassStorageBuffer && is_constant) - tag = "FromConstantToDevice"; - else if (lhs_storage == StorageClassStorageBuffer && rhs_storage == StorageClassWorkgroup) - tag = "FromThreadGroupToDevice"; - else if 
(lhs_storage == StorageClassStorageBuffer && rhs_is_thread_storage) - tag = "FromStackToDevice"; - else if (lhs_storage == StorageClassWorkgroup && rhs_storage == StorageClassStorageBuffer) - tag = "FromDeviceToThreadGroup"; - else if (lhs_is_thread_storage && rhs_storage == StorageClassStorageBuffer) - tag = "FromDeviceToStack"; - else - SPIRV_CROSS_THROW("Unknown storage class used for copying arrays."); - - // Pass internal array of spvUnsafeArray<> into wrapper functions - if (lhs_is_array_template && rhs_is_array_template && !msl_options.force_native_arrays) - statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ".elements, ", to_expression(rhs_id), ".elements);"); - if (lhs_is_array_template && !msl_options.force_native_arrays) - statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ".elements, ", to_expression(rhs_id), ");"); - else if (rhs_is_array_template && !msl_options.force_native_arrays) - statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ".elements);"); - else - statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");"); - } - - return true; -} uint32_t CompilerMSL::get_physical_tess_level_array_size(spv::BuiltIn builtin) const { @@ -30437,12 +30447,6 @@ void CompilerMSL::store_flattened_struct(uint32_t, uint32_t) } -string CompilerMSL::to_extract_constant_composite_expression(uint32_t, const SPIRConstant &, - const uint32_t *, uint32_t ) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} void CompilerMSL::emit_copy_logical_type(uint32_t, uint32_t, uint32_t, uint32_t, SmallVector) @@ -30731,19 +30735,7 @@ void CompilerMSL::emit_nminmax_op(uint32_t, uint32_t, uint32_t, uint32_t, GLSLst } -void CompilerMSL::emit_binary_func_op_cast(uint32_t, uint32_t, uint32_t, uint32_t, - const char *, SPIRType::BaseType, bool) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} -void CompilerMSL::emit_trinary_func_op_cast(uint32_t, 
uint32_t, uint32_t, uint32_t, - uint32_t, const char *, SPIRType::BaseType) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} void CompilerMSL::emit_emulated_ahyper_op(uint32_t, uint32_t, uint32_t, GLSLstd450) { @@ -30803,11 +30795,6 @@ void CompilerMSL::forward_relaxed_precision(uint32_t, const uint32_t *, uint32_t SPIRV_CROSS_THROW("Invalid call."); } -void CompilerMSL::emit_while_loop_initializers(const SPIRBlock &) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} void CompilerMSL::emit_line_directive(uint32_t, uint32_t) @@ -31052,11 +31039,6 @@ uint32_t CompilerMSL::get_declared_member_location(const SPIRVariable &, uint32_ } -void CompilerMSL::end_scope_decl(const string &) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} void CompilerMSL::end_scope(const string &) { @@ -31219,17 +31201,7 @@ void CompilerMSL::emit_subgroup_op(const Instruction &) SPIRV_CROSS_THROW("Invalid call."); } -string CompilerMSL::bitcast_glsl_op(const SPIRType &, const SPIRType &) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} -bool CompilerMSL::emit_complex_bitcast(uint32_t, uint32_t, uint32_t) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} string CompilerMSL::constant_op_expression(const SPIRConstantOp &) { @@ -31243,11 +31215,6 @@ bool CompilerMSL::type_is_pointer_to_pointer(const SPIRType &) const SPIRV_CROSS_THROW("Invalid call."); } -const char *CompilerMSL::descriptor_address_space(uint32_t, StorageClass, const char *) const -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} string CompilerMSL::entry_point_args_argument_buffer(bool) { @@ -31331,12 +31298,6 @@ void CompilerMSL::emit_barrier(uint32_t, uint32_t, uint32_t) SPIRV_CROSS_THROW("Invalid call."); } -bool CompilerMSL::emit_array_copy(const char *, uint32_t, uint32_t, - StorageClass, StorageClass) -{ - SPIRV_CROSS_INVALID_CALL(); - SPIRV_CROSS_THROW("Invalid call."); -} 
uint32_t CompilerMSL::get_physical_tess_level_array_size(spv::BuiltIn) const {