2 * Copyright 2018-2021 Arm Limited
3 * SPDX-License-Identifier: Apache-2.0 OR MIT
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
19 * At your option, you may choose to accept this material under either:
20 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
21 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
24 #include "spirv_parser.hpp"
30 namespace SPIRV_CROSS_NAMESPACE
32 Parser::Parser(vector
<uint32_t> spirv
)
34 ir
.spirv
= std::move(spirv
);
37 Parser::Parser(const uint32_t *spirv_data
, size_t word_count
)
39 ir
.spirv
= vector
<uint32_t>(spirv_data
, spirv_data
+ word_count
);
42 static bool decoration_is_string(Decoration decoration
)
46 case DecorationHlslSemanticGOOGLE
:
// Reverses the byte order of a 32-bit word (used when the SPIR-V magic
// number indicates the module was produced with the opposite endianness).
static inline uint32_t swap_endian(uint32_t v)
{
	uint32_t byte0 = (v >> 24) & 0x000000ffu;
	uint32_t byte1 = (v >> 8) & 0x0000ff00u;
	uint32_t byte2 = (v << 8) & 0x00ff0000u;
	uint32_t byte3 = (v << 24) & 0xff000000u;
	return byte0 | byte1 | byte2 | byte3;
}
// Checks that the version word in the SPIR-V header names a release this
// parser understands: SPIR-V 1.0 through 1.6 (encoded as 0x1MM00), plus
// the pre-release value 99.
static bool is_valid_spirv_version(uint32_t version)
{
	// Allow v99 since it tends to just work.
	if (version == 99)
		return true;

	// Accept 0x10000 (1.0) through 0x10600 (1.6); low byte must be zero.
	uint32_t major = version >> 16;
	uint32_t minor = (version >> 8) & 0xffu;
	uint32_t low = version & 0xffu;
	return major == 1 && minor <= 6 && low == 0;
}
81 auto &spirv
= ir
.spirv
;
83 auto len
= spirv
.size();
85 SPIRV_CROSS_THROW("SPIRV file too small.");
87 auto s
= spirv
.data();
89 // Endian-swap if we need to.
90 if (s
[0] == swap_endian(MagicNumber
))
91 transform(begin(spirv
), end(spirv
), begin(spirv
), [](uint32_t c
) { return swap_endian(c
); });
93 if (s
[0] != MagicNumber
|| !is_valid_spirv_version(s
[1]))
94 SPIRV_CROSS_THROW("Invalid SPIRV format.");
96 uint32_t bound
= s
[3];
98 const uint32_t MaximumNumberOfIDs
= 0x3fffff;
99 if (bound
> MaximumNumberOfIDs
)
100 SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");
102 ir
.set_id_bounds(bound
);
106 SmallVector
<Instruction
> instructions
;
109 Instruction instr
= {};
110 instr
.op
= spirv
[offset
] & 0xffff;
111 instr
.count
= (spirv
[offset
] >> 16) & 0xffff;
113 if (instr
.count
== 0)
114 SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");
116 instr
.offset
= offset
+ 1;
117 instr
.length
= instr
.count
- 1;
119 offset
+= instr
.count
;
121 if (offset
> spirv
.size())
122 SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");
124 instructions
.push_back(instr
);
127 for (auto &i
: instructions
)
130 for (auto &fixup
: forward_pointer_fixups
)
132 auto &target
= get
<SPIRType
>(fixup
.first
);
133 auto &source
= get
<SPIRType
>(fixup
.second
);
134 target
.member_types
= source
.member_types
;
135 target
.basetype
= source
.basetype
;
136 target
.self
= source
.self
;
138 forward_pointer_fixups
.clear();
140 if (current_function
)
141 SPIRV_CROSS_THROW("Function was not terminated.");
143 SPIRV_CROSS_THROW("Block was not terminated.");
144 if (ir
.default_entry_point
== 0)
145 SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
148 const uint32_t *Parser::stream(const Instruction
&instr
) const
150 // If we're not going to use any arguments, just return nullptr.
151 // We want to avoid case where we return an out of range pointer
152 // that trips debug assertions on some platforms.
156 if (instr
.offset
+ instr
.length
> ir
.spirv
.size())
157 SPIRV_CROSS_THROW("Compiler::stream() out of range.");
158 return &ir
.spirv
[instr
.offset
];
161 static string
extract_string(const vector
<uint32_t> &spirv
, uint32_t offset
)
164 for (uint32_t i
= offset
; i
< spirv
.size(); i
++)
166 uint32_t w
= spirv
[i
];
168 for (uint32_t j
= 0; j
< 4; j
++, w
>>= 8)
177 SPIRV_CROSS_THROW("String was not terminated before EOF");
180 void Parser::parse(const Instruction
&instruction
)
182 auto *ops
= stream(instruction
);
183 auto op
= static_cast<Op
>(instruction
.op
);
184 uint32_t length
= instruction
.length
;
186 // HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
187 // Instead of failing hard, just ignore it.
188 if (ignore_trailing_block_opcodes
)
190 ignore_trailing_block_opcodes
= false;
191 if (op
== OpReturn
|| op
== OpBranch
|| op
== OpUnreachable
)
197 case OpSourceContinued
:
198 case OpSourceExtension
:
200 case OpModuleProcessed
:
205 set
<SPIRString
>(ops
[0], extract_string(ir
.spirv
, instruction
.offset
+ 1));
210 ir
.addressing_model
= static_cast<AddressingModel
>(ops
[0]);
211 ir
.memory_model
= static_cast<MemoryModel
>(ops
[1]);
216 auto lang
= static_cast<SourceLanguage
>(ops
[0]);
219 case SourceLanguageESSL
:
221 ir
.source
.version
= ops
[1];
222 ir
.source
.known
= true;
223 ir
.source
.hlsl
= false;
226 case SourceLanguageGLSL
:
227 ir
.source
.es
= false;
228 ir
.source
.version
= ops
[1];
229 ir
.source
.known
= true;
230 ir
.source
.hlsl
= false;
233 case SourceLanguageHLSL
:
234 // For purposes of cross-compiling, this is GLSL 450.
235 ir
.source
.es
= false;
236 ir
.source
.version
= 450;
237 ir
.source
.known
= true;
238 ir
.source
.hlsl
= true;
242 ir
.source
.known
= false;
250 uint32_t result_type
= ops
[0];
251 uint32_t id
= ops
[1];
252 set
<SPIRUndef
>(id
, result_type
);
254 current_block
->ops
.push_back(instruction
);
260 uint32_t cap
= ops
[0];
261 if (cap
== CapabilityKernel
)
262 SPIRV_CROSS_THROW("Kernel capability not supported.");
264 ir
.declared_capabilities
.push_back(static_cast<Capability
>(ops
[0]));
270 auto ext
= extract_string(ir
.spirv
, instruction
.offset
);
271 ir
.declared_extensions
.push_back(std::move(ext
));
275 case OpExtInstImport
:
277 uint32_t id
= ops
[0];
279 SPIRExtension::Extension spirv_ext
= SPIRExtension::Unsupported
;
281 auto ext
= extract_string(ir
.spirv
, instruction
.offset
+ 1);
282 if (ext
== "GLSL.std.450")
283 spirv_ext
= SPIRExtension::GLSL
;
284 else if (ext
== "DebugInfo")
285 spirv_ext
= SPIRExtension::SPV_debug_info
;
286 else if (ext
== "SPV_AMD_shader_ballot")
287 spirv_ext
= SPIRExtension::SPV_AMD_shader_ballot
;
288 else if (ext
== "SPV_AMD_shader_explicit_vertex_parameter")
289 spirv_ext
= SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter
;
290 else if (ext
== "SPV_AMD_shader_trinary_minmax")
291 spirv_ext
= SPIRExtension::SPV_AMD_shader_trinary_minmax
;
292 else if (ext
== "SPV_AMD_gcn_shader")
293 spirv_ext
= SPIRExtension::SPV_AMD_gcn_shader
;
294 else if (ext
== "NonSemantic.DebugPrintf")
295 spirv_ext
= SPIRExtension::NonSemanticDebugPrintf
;
296 else if (ext
== "NonSemantic.Shader.DebugInfo.100")
297 spirv_ext
= SPIRExtension::NonSemanticShaderDebugInfo
;
298 else if (ext
.find("NonSemantic.") == 0)
299 spirv_ext
= SPIRExtension::NonSemanticGeneric
;
301 set
<SPIRExtension
>(id
, spirv_ext
);
302 // Other SPIR-V extensions which have ExtInstrs are currently not supported.
309 // The SPIR-V debug information extended instructions might come at global scope.
312 current_block
->ops
.push_back(instruction
);
315 const auto *type
= maybe_get
<SPIRType
>(ops
[0]);
317 ir
.load_type_width
.insert({ ops
[1], type
->width
});
326 ir
.entry_points
.insert(make_pair(ops
[1], SPIREntryPoint(ops
[1], static_cast<ExecutionModel
>(ops
[0]),
327 extract_string(ir
.spirv
, instruction
.offset
+ 2))));
328 auto &e
= itr
.first
->second
;
330 // Strings need nul-terminator and consume the whole word.
331 uint32_t strlen_words
= uint32_t((e
.name
.size() + 1 + 3) >> 2);
333 for (uint32_t i
= strlen_words
+ 2; i
< instruction
.length
; i
++)
334 e
.interface_variables
.push_back(ops
[i
]);
336 // Set the name of the entry point in case OpName is not provided later.
337 ir
.set_name(ops
[1], e
.name
);
339 // If we don't have an entry, make the first one our "default".
340 if (!ir
.default_entry_point
)
341 ir
.default_entry_point
= ops
[1];
345 case OpExecutionMode
:
347 auto &execution
= ir
.entry_points
[ops
[0]];
348 auto mode
= static_cast<ExecutionMode
>(ops
[1]);
349 execution
.flags
.set(mode
);
353 case ExecutionModeInvocations
:
354 execution
.invocations
= ops
[2];
357 case ExecutionModeLocalSize
:
358 execution
.workgroup_size
.x
= ops
[2];
359 execution
.workgroup_size
.y
= ops
[3];
360 execution
.workgroup_size
.z
= ops
[4];
363 case ExecutionModeOutputVertices
:
364 execution
.output_vertices
= ops
[2];
367 case ExecutionModeOutputPrimitivesEXT
:
368 execution
.output_primitives
= ops
[2];
377 case OpExecutionModeId
:
379 auto &execution
= ir
.entry_points
[ops
[0]];
380 auto mode
= static_cast<ExecutionMode
>(ops
[1]);
381 execution
.flags
.set(mode
);
383 if (mode
== ExecutionModeLocalSizeId
)
385 execution
.workgroup_size
.id_x
= ops
[2];
386 execution
.workgroup_size
.id_y
= ops
[3];
387 execution
.workgroup_size
.id_z
= ops
[4];
395 uint32_t id
= ops
[0];
396 ir
.set_name(id
, extract_string(ir
.spirv
, instruction
.offset
+ 1));
402 uint32_t id
= ops
[0];
403 uint32_t member
= ops
[1];
404 ir
.set_member_name(id
, member
, extract_string(ir
.spirv
, instruction
.offset
+ 2));
408 case OpDecorationGroup
:
410 // Noop, this simply means an ID should be a collector of decorations.
411 // The meta array is already a flat array of decorations which will contain the relevant decorations.
415 case OpGroupDecorate
:
417 uint32_t group_id
= ops
[0];
418 auto &decorations
= ir
.meta
[group_id
].decoration
;
419 auto &flags
= decorations
.decoration_flags
;
421 // Copies decorations from one ID to another. Only copy decorations which are set in the group,
422 // i.e., we cannot just copy the meta structure directly.
423 for (uint32_t i
= 1; i
< length
; i
++)
425 uint32_t target
= ops
[i
];
426 flags
.for_each_bit([&](uint32_t bit
) {
427 auto decoration
= static_cast<Decoration
>(bit
);
429 if (decoration_is_string(decoration
))
431 ir
.set_decoration_string(target
, decoration
, ir
.get_decoration_string(group_id
, decoration
));
435 ir
.meta
[target
].decoration_word_offset
[decoration
] =
436 ir
.meta
[group_id
].decoration_word_offset
[decoration
];
437 ir
.set_decoration(target
, decoration
, ir
.get_decoration(group_id
, decoration
));
444 case OpGroupMemberDecorate
:
446 uint32_t group_id
= ops
[0];
447 auto &flags
= ir
.meta
[group_id
].decoration
.decoration_flags
;
449 // Copies decorations from one ID to another. Only copy decorations which are set in the group,
450 // i.e., we cannot just copy the meta structure directly.
451 for (uint32_t i
= 1; i
+ 1 < length
; i
+= 2)
453 uint32_t target
= ops
[i
+ 0];
454 uint32_t index
= ops
[i
+ 1];
455 flags
.for_each_bit([&](uint32_t bit
) {
456 auto decoration
= static_cast<Decoration
>(bit
);
458 if (decoration_is_string(decoration
))
459 ir
.set_member_decoration_string(target
, index
, decoration
,
460 ir
.get_decoration_string(group_id
, decoration
));
462 ir
.set_member_decoration(target
, index
, decoration
, ir
.get_decoration(group_id
, decoration
));
471 // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
472 // so merge decorate and decorate-id here.
473 uint32_t id
= ops
[0];
475 auto decoration
= static_cast<Decoration
>(ops
[1]);
478 ir
.meta
[id
].decoration_word_offset
[decoration
] = uint32_t(&ops
[2] - ir
.spirv
.data());
479 ir
.set_decoration(id
, decoration
, ops
[2]);
482 ir
.set_decoration(id
, decoration
);
487 case OpDecorateStringGOOGLE
:
489 uint32_t id
= ops
[0];
490 auto decoration
= static_cast<Decoration
>(ops
[1]);
491 ir
.set_decoration_string(id
, decoration
, extract_string(ir
.spirv
, instruction
.offset
+ 2));
495 case OpMemberDecorate
:
497 uint32_t id
= ops
[0];
498 uint32_t member
= ops
[1];
499 auto decoration
= static_cast<Decoration
>(ops
[2]);
501 ir
.set_member_decoration(id
, member
, decoration
, ops
[3]);
503 ir
.set_member_decoration(id
, member
, decoration
);
507 case OpMemberDecorateStringGOOGLE
:
509 uint32_t id
= ops
[0];
510 uint32_t member
= ops
[1];
511 auto decoration
= static_cast<Decoration
>(ops
[2]);
512 ir
.set_member_decoration_string(id
, member
, decoration
, extract_string(ir
.spirv
, instruction
.offset
+ 3));
516 // Build up basic types.
519 uint32_t id
= ops
[0];
520 auto &type
= set
<SPIRType
>(id
, op
);
521 type
.basetype
= SPIRType::Void
;
527 uint32_t id
= ops
[0];
528 auto &type
= set
<SPIRType
>(id
, op
);
529 type
.basetype
= SPIRType::Boolean
;
536 uint32_t id
= ops
[0];
537 uint32_t width
= ops
[1];
538 auto &type
= set
<SPIRType
>(id
, op
);
540 type
.basetype
= SPIRType::Double
;
541 else if (width
== 32)
542 type
.basetype
= SPIRType::Float
;
543 else if (width
== 16)
544 type
.basetype
= SPIRType::Half
;
546 SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
553 uint32_t id
= ops
[0];
554 uint32_t width
= ops
[1];
555 bool signedness
= ops
[2] != 0;
556 auto &type
= set
<SPIRType
>(id
, op
);
557 type
.basetype
= signedness
? to_signed_basetype(width
) : to_unsigned_basetype(width
);
562 // Build composite types by "inheriting".
563 // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
564 // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
567 uint32_t id
= ops
[0];
568 uint32_t vecsize
= ops
[2];
570 auto &base
= get
<SPIRType
>(ops
[1]);
571 auto &vecbase
= set
<SPIRType
>(id
, base
);
574 vecbase
.vecsize
= vecsize
;
576 vecbase
.parent_type
= ops
[1];
582 uint32_t id
= ops
[0];
583 uint32_t colcount
= ops
[2];
585 auto &base
= get
<SPIRType
>(ops
[1]);
586 auto &matrixbase
= set
<SPIRType
>(id
, base
);
589 matrixbase
.columns
= colcount
;
590 matrixbase
.self
= id
;
591 matrixbase
.parent_type
= ops
[1];
597 uint32_t id
= ops
[0];
598 uint32_t tid
= ops
[1];
599 auto &base
= get
<SPIRType
>(tid
);
600 auto &arraybase
= set
<SPIRType
>(id
, base
);
603 arraybase
.parent_type
= tid
;
605 uint32_t cid
= ops
[2];
606 ir
.mark_used_as_array_length(cid
);
607 auto *c
= maybe_get
<SPIRConstant
>(cid
);
608 bool literal
= c
&& !c
->specialization
;
610 // We're copying type information into Array types, so we'll need a fixup for any physical pointer
612 if (base
.forward_pointer
)
613 forward_pointer_fixups
.push_back({ id
, tid
});
615 arraybase
.array_size_literal
.push_back(literal
);
616 arraybase
.array
.push_back(literal
? c
->scalar() : cid
);
618 // .self resolves down to non-array/non-pointer type.
619 arraybase
.self
= base
.self
;
623 case OpTypeRuntimeArray
:
625 uint32_t id
= ops
[0];
627 auto &base
= get
<SPIRType
>(ops
[1]);
628 auto &arraybase
= set
<SPIRType
>(id
, base
);
630 // We're copying type information into Array types, so we'll need a fixup for any physical pointer
632 if (base
.forward_pointer
)
633 forward_pointer_fixups
.push_back({ id
, ops
[1] });
636 arraybase
.array
.push_back(0);
637 arraybase
.array_size_literal
.push_back(true);
638 arraybase
.parent_type
= ops
[1];
640 // .self resolves down to non-array/non-pointer type.
641 arraybase
.self
= base
.self
;
647 uint32_t id
= ops
[0];
648 auto &type
= set
<SPIRType
>(id
, op
);
649 type
.basetype
= SPIRType::Image
;
650 type
.image
.type
= ops
[1];
651 type
.image
.dim
= static_cast<Dim
>(ops
[2]);
652 type
.image
.depth
= ops
[3] == 1;
653 type
.image
.arrayed
= ops
[4] != 0;
654 type
.image
.ms
= ops
[5] != 0;
655 type
.image
.sampled
= ops
[6];
656 type
.image
.format
= static_cast<ImageFormat
>(ops
[7]);
657 type
.image
.access
= (length
>= 9) ? static_cast<AccessQualifier
>(ops
[8]) : AccessQualifierMax
;
661 case OpTypeSampledImage
:
663 uint32_t id
= ops
[0];
664 uint32_t imagetype
= ops
[1];
665 auto &type
= set
<SPIRType
>(id
, op
);
666 type
= get
<SPIRType
>(imagetype
);
667 type
.basetype
= SPIRType::SampledImage
;
674 uint32_t id
= ops
[0];
675 auto &type
= set
<SPIRType
>(id
, op
);
676 type
.basetype
= SPIRType::Sampler
;
682 uint32_t id
= ops
[0];
684 // Very rarely, we might receive a FunctionPrototype here.
685 // We won't be able to compile it, but we shouldn't crash when parsing.
686 // We should be able to reflect.
687 auto *base
= maybe_get
<SPIRType
>(ops
[2]);
688 auto &ptrbase
= set
<SPIRType
>(id
, op
);
696 ptrbase
.pointer
= true;
697 ptrbase
.pointer_depth
++;
698 ptrbase
.storage
= static_cast<StorageClass
>(ops
[1]);
700 if (ptrbase
.storage
== StorageClassAtomicCounter
)
701 ptrbase
.basetype
= SPIRType::AtomicCounter
;
703 if (base
&& base
->forward_pointer
)
704 forward_pointer_fixups
.push_back({ id
, ops
[2] });
706 ptrbase
.parent_type
= ops
[2];
708 // Do NOT set ptrbase.self!
712 case OpTypeForwardPointer
:
714 uint32_t id
= ops
[0];
715 auto &ptrbase
= set
<SPIRType
>(id
, op
);
716 ptrbase
.pointer
= true;
717 ptrbase
.pointer_depth
++;
718 ptrbase
.storage
= static_cast<StorageClass
>(ops
[1]);
719 ptrbase
.forward_pointer
= true;
721 if (ptrbase
.storage
== StorageClassAtomicCounter
)
722 ptrbase
.basetype
= SPIRType::AtomicCounter
;
729 uint32_t id
= ops
[0];
730 auto &type
= set
<SPIRType
>(id
, op
);
731 type
.basetype
= SPIRType::Struct
;
732 for (uint32_t i
= 1; i
< length
; i
++)
733 type
.member_types
.push_back(ops
[i
]);
735 // Check if we have seen this struct type before, with just different
738 // Add workaround for issue #17 as well by looking at OpName for the struct
739 // types, which we shouldn't normally do.
740 // We should not normally have to consider type aliases like this to begin with
741 // however ... glslang issues #304, #307 cover this.
743 // For stripped names, never consider struct type aliasing.
744 // We risk declaring the same struct multiple times, but type-punning is not allowed
746 bool consider_aliasing
= !ir
.get_name(type
.self
).empty();
747 if (consider_aliasing
)
749 for (auto &other
: global_struct_cache
)
751 if (ir
.get_name(type
.self
) == ir
.get_name(other
) &&
752 types_are_logically_equivalent(type
, get
<SPIRType
>(other
)))
754 type
.type_alias
= other
;
759 if (type
.type_alias
== TypeID(0))
760 global_struct_cache
.push_back(id
);
767 uint32_t id
= ops
[0];
768 uint32_t ret
= ops
[1];
770 auto &func
= set
<SPIRFunctionPrototype
>(id
, ret
);
771 for (uint32_t i
= 2; i
< length
; i
++)
772 func
.parameter_types
.push_back(ops
[i
]);
776 case OpTypeAccelerationStructureKHR
:
778 uint32_t id
= ops
[0];
779 auto &type
= set
<SPIRType
>(id
, op
);
780 type
.basetype
= SPIRType::AccelerationStructure
;
784 case OpTypeRayQueryKHR
:
786 uint32_t id
= ops
[0];
787 auto &type
= set
<SPIRType
>(id
, op
);
788 type
.basetype
= SPIRType::RayQuery
;
792 // Variable declaration
793 // All variables are essentially pointers with a storage qualifier.
796 uint32_t type
= ops
[0];
797 uint32_t id
= ops
[1];
798 auto storage
= static_cast<StorageClass
>(ops
[2]);
799 uint32_t initializer
= length
== 4 ? ops
[3] : 0;
801 if (storage
== StorageClassFunction
)
803 if (!current_function
)
804 SPIRV_CROSS_THROW("No function currently in scope");
805 current_function
->add_local_variable(id
);
808 set
<SPIRVariable
>(id
, type
, storage
, initializer
);
813 // OpPhi is a fairly magical opcode.
814 // It selects temporary variables based on which parent block we *came from*.
815 // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
816 // variable to emulate SSA Phi.
819 if (!current_function
)
820 SPIRV_CROSS_THROW("No function currently in scope");
822 SPIRV_CROSS_THROW("No block currently in scope");
824 uint32_t result_type
= ops
[0];
825 uint32_t id
= ops
[1];
827 // Instead of a temporary, create a new function-wide temporary with this ID instead.
828 auto &var
= set
<SPIRVariable
>(id
, result_type
, spv::StorageClassFunction
);
829 var
.phi_variable
= true;
831 current_function
->add_local_variable(id
);
833 for (uint32_t i
= 2; i
+ 2 <= length
; i
+= 2)
834 current_block
->phi_variables
.push_back({ ops
[i
], ops
[i
+ 1], id
});
842 uint32_t id
= ops
[1];
843 auto &type
= get
<SPIRType
>(ops
[0]);
846 set
<SPIRConstant
>(id
, ops
[0], ops
[2] | (uint64_t(ops
[3]) << 32), op
== OpSpecConstant
);
848 set
<SPIRConstant
>(id
, ops
[0], ops
[2], op
== OpSpecConstant
);
852 case OpSpecConstantFalse
:
853 case OpConstantFalse
:
855 uint32_t id
= ops
[1];
856 set
<SPIRConstant
>(id
, ops
[0], uint32_t(0), op
== OpSpecConstantFalse
);
860 case OpSpecConstantTrue
:
863 uint32_t id
= ops
[1];
864 set
<SPIRConstant
>(id
, ops
[0], uint32_t(1), op
== OpSpecConstantTrue
);
870 uint32_t id
= ops
[1];
871 uint32_t type
= ops
[0];
872 ir
.make_constant_null(id
, type
, true);
876 case OpSpecConstantComposite
:
877 case OpConstantComposite
:
879 uint32_t id
= ops
[1];
880 uint32_t type
= ops
[0];
882 auto &ctype
= get
<SPIRType
>(type
);
884 // We can have constants which are structs and arrays.
885 // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
887 if (ctype
.basetype
== SPIRType::Struct
|| !ctype
.array
.empty())
889 set
<SPIRConstant
>(id
, type
, ops
+ 2, length
- 2, op
== OpSpecConstantComposite
);
893 uint32_t elements
= length
- 2;
895 SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");
897 SPIRConstant remapped_constant_ops
[4];
898 const SPIRConstant
*c
[4];
899 for (uint32_t i
= 0; i
< elements
; i
++)
901 // Specialization constants operations can also be part of this.
902 // We do not know their value, so any attempt to query SPIRConstant later
903 // will fail. We can only propagate the ID of the expression and use to_expression on it.
904 auto *constant_op
= maybe_get
<SPIRConstantOp
>(ops
[2 + i
]);
905 auto *undef_op
= maybe_get
<SPIRUndef
>(ops
[2 + i
]);
908 if (op
== OpConstantComposite
)
909 SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");
911 remapped_constant_ops
[i
].make_null(get
<SPIRType
>(constant_op
->basetype
));
912 remapped_constant_ops
[i
].self
= constant_op
->self
;
913 remapped_constant_ops
[i
].constant_type
= constant_op
->basetype
;
914 remapped_constant_ops
[i
].specialization
= true;
915 c
[i
] = &remapped_constant_ops
[i
];
919 // Undefined, just pick 0.
920 remapped_constant_ops
[i
].make_null(get
<SPIRType
>(undef_op
->basetype
));
921 remapped_constant_ops
[i
].constant_type
= undef_op
->basetype
;
922 c
[i
] = &remapped_constant_ops
[i
];
925 c
[i
] = &get
<SPIRConstant
>(ops
[2 + i
]);
927 set
<SPIRConstant
>(id
, type
, c
, elements
, op
== OpSpecConstantComposite
);
935 uint32_t res
= ops
[0];
936 uint32_t id
= ops
[1];
938 uint32_t type
= ops
[3];
940 if (current_function
)
941 SPIRV_CROSS_THROW("Must end a function before starting a new one!");
943 current_function
= &set
<SPIRFunction
>(id
, res
, type
);
947 case OpFunctionParameter
:
949 uint32_t type
= ops
[0];
950 uint32_t id
= ops
[1];
952 if (!current_function
)
953 SPIRV_CROSS_THROW("Must be in a function!");
955 current_function
->add_parameter(type
, id
);
956 set
<SPIRVariable
>(id
, type
, StorageClassFunction
);
964 // Very specific error message, but seems to come up quite often.
966 "Cannot end a function before ending the current block.\n"
967 "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
969 current_function
= nullptr;
976 // OpLabel always starts a block.
977 if (!current_function
)
978 SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");
980 uint32_t id
= ops
[0];
982 current_function
->blocks
.push_back(id
);
983 if (!current_function
->entry_block
)
984 current_function
->entry_block
= id
;
987 SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");
989 current_block
= &set
<SPIRBlock
>(id
);
993 // Branch instructions end blocks.
997 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
999 uint32_t target
= ops
[0];
1000 current_block
->terminator
= SPIRBlock::Direct
;
1001 current_block
->next_block
= target
;
1002 current_block
= nullptr;
1006 case OpBranchConditional
:
1009 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1011 current_block
->condition
= ops
[0];
1012 current_block
->true_block
= ops
[1];
1013 current_block
->false_block
= ops
[2];
1015 current_block
->terminator
= SPIRBlock::Select
;
1017 if (current_block
->true_block
== current_block
->false_block
)
1019 // Bogus conditional, translate to a direct branch.
1020 // Avoids some ugly edge cases later when analyzing CFGs.
1022 // There are some super jank cases where the merge block is different from the true/false,
1023 // and later branches can "break" out of the selection construct this way.
1024 // This is complete nonsense, but CTS hits this case.
1025 // In this scenario, we should see the selection construct as more of a Switch with one default case.
1026 // The problem here is that this breaks any attempt to break out of outer switch statements,
1027 // but it's theoretically solvable if this ever comes up using the ladder breaking system ...
1029 if (current_block
->true_block
!= current_block
->next_block
&&
1030 current_block
->merge
== SPIRBlock::MergeSelection
)
1032 uint32_t ids
= ir
.increase_bound_by(2);
1034 auto &type
= set
<SPIRType
>(ids
, OpTypeInt
);
1035 type
.basetype
= SPIRType::Int
;
1037 auto &c
= set
<SPIRConstant
>(ids
+ 1, ids
);
1039 current_block
->condition
= c
.self
;
1040 current_block
->default_block
= current_block
->true_block
;
1041 current_block
->terminator
= SPIRBlock::MultiSelect
;
1042 ir
.block_meta
[current_block
->next_block
] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT
;
1043 ir
.block_meta
[current_block
->next_block
] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT
;
1047 // Collapse loops if we have to.
1048 bool collapsed_loop
= current_block
->true_block
== current_block
->merge_block
&&
1049 current_block
->merge
== SPIRBlock::MergeLoop
;
1053 ir
.block_meta
[current_block
->merge_block
] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT
;
1054 ir
.block_meta
[current_block
->continue_block
] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT
;
1057 current_block
->next_block
= current_block
->true_block
;
1058 current_block
->condition
= 0;
1059 current_block
->true_block
= 0;
1060 current_block
->false_block
= 0;
1061 current_block
->merge_block
= 0;
1062 current_block
->merge
= SPIRBlock::MergeNone
;
1063 current_block
->terminator
= SPIRBlock::Direct
;
1067 current_block
= nullptr;
1074 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1076 current_block
->terminator
= SPIRBlock::MultiSelect
;
1078 current_block
->condition
= ops
[0];
1079 current_block
->default_block
= ops
[1];
1081 uint32_t remaining_ops
= length
- 2;
1082 if ((remaining_ops
% 2) == 0)
1084 for (uint32_t i
= 2; i
+ 2 <= length
; i
+= 2)
1085 current_block
->cases_32bit
.push_back({ ops
[i
], ops
[i
+ 1] });
1088 if ((remaining_ops
% 3) == 0)
1090 for (uint32_t i
= 2; i
+ 3 <= length
; i
+= 3)
1092 uint64_t value
= (static_cast<uint64_t>(ops
[i
+ 1]) << 32) | ops
[i
];
1093 current_block
->cases_64bit
.push_back({ value
, ops
[i
+ 2] });
1097 // If we jump to next block, make it break instead since we're inside a switch case block at that point.
1098 ir
.block_meta
[current_block
->next_block
] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT
;
1100 current_block
= nullptr;
1105 case OpTerminateInvocation
:
1108 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1109 current_block
->terminator
= SPIRBlock::Kill
;
1110 current_block
= nullptr;
1114 case OpTerminateRayKHR
:
1115 // NV variant is not a terminator.
1117 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1118 current_block
->terminator
= SPIRBlock::TerminateRay
;
1119 current_block
= nullptr;
1122 case OpIgnoreIntersectionKHR
:
1123 // NV variant is not a terminator.
1125 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1126 current_block
->terminator
= SPIRBlock::IgnoreIntersection
;
1127 current_block
= nullptr;
1130 case OpEmitMeshTasksEXT
:
1132 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1133 current_block
->terminator
= SPIRBlock::EmitMeshTasks
;
1134 for (uint32_t i
= 0; i
< 3; i
++)
1135 current_block
->mesh
.groups
[i
] = ops
[i
];
1136 current_block
->mesh
.payload
= length
>= 4 ? ops
[3] : 0;
1137 current_block
= nullptr;
1138 // Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
1139 ignore_trailing_block_opcodes
= true;
1145 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1146 current_block
->terminator
= SPIRBlock::Return
;
1147 current_block
= nullptr;
1154 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1155 current_block
->terminator
= SPIRBlock::Return
;
1156 current_block
->return_value
= ops
[0];
1157 current_block
= nullptr;
1164 SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1165 current_block
->terminator
= SPIRBlock::Unreachable
;
1166 current_block
= nullptr;
1170 case OpSelectionMerge
:
1173 SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
1175 current_block
->next_block
= ops
[0];
1176 current_block
->merge
= SPIRBlock::MergeSelection
;
1177 ir
.block_meta
[current_block
->next_block
] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT
;
1181 if (ops
[1] & SelectionControlFlattenMask
)
1182 current_block
->hint
= SPIRBlock::HintFlatten
;
1183 else if (ops
[1] & SelectionControlDontFlattenMask
)
1184 current_block
->hint
= SPIRBlock::HintDontFlatten
;
1192 SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
1194 current_block
->merge_block
= ops
[0];
1195 current_block
->continue_block
= ops
[1];
1196 current_block
->merge
= SPIRBlock::MergeLoop
;
1198 ir
.block_meta
[current_block
->self
] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT
;
1199 ir
.block_meta
[current_block
->merge_block
] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT
;
1201 ir
.continue_block_to_loop_header
[current_block
->continue_block
] = BlockID(current_block
->self
);
1203 // Don't add loop headers to continue blocks,
1204 // which would make it impossible branch into the loop header since
1205 // they are treated as continues.
1206 if (current_block
->continue_block
!= BlockID(current_block
->self
))
1207 ir
.block_meta
[current_block
->continue_block
] |= ParsedIR::BLOCK_META_CONTINUE_BIT
;
1211 if (ops
[2] & LoopControlUnrollMask
)
1212 current_block
->hint
= SPIRBlock::HintUnroll
;
1213 else if (ops
[2] & LoopControlDontUnrollMask
)
1214 current_block
->hint
= SPIRBlock::HintDontUnroll
;
1219 case OpSpecConstantOp
:
1222 SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");
1224 uint32_t result_type
= ops
[0];
1225 uint32_t id
= ops
[1];
1226 auto spec_op
= static_cast<Op
>(ops
[2]);
1228 set
<SPIRConstantOp
>(id
, result_type
, spec_op
, ops
+ 3, length
- 3);
1234 // OpLine might come at global scope, but we don't care about those since they will not be declared in any
1235 // meaningful correct order.
1236 // Ignore all OpLine directives which live outside a function.
1238 current_block
->ops
.push_back(instruction
);
1240 // Line directives may arrive before first OpLabel.
1241 // Treat this as the line of the function declaration,
1242 // so warnings for arguments can propagate properly.
1243 if (current_function
)
1245 // Store the first one we find and emit it before creating the function prototype.
1246 if (current_function
->entry_line
.file_id
== 0)
1248 current_function
->entry_line
.file_id
= ops
[0];
1249 current_function
->entry_line
.line_literal
= ops
[1];
1257 // OpNoLine might come at global scope.
1259 current_block
->ops
.push_back(instruction
);
1268 const auto *type
= maybe_get
<SPIRType
>(ops
[0]);
1270 ir
.load_type_width
.insert({ ops
[1], type
->width
});
1274 SPIRV_CROSS_THROW("Currently no block to insert opcode.");
1276 current_block
->ops
.push_back(instruction
);
1282 bool Parser::types_are_logically_equivalent(const SPIRType
&a
, const SPIRType
&b
) const
1284 if (a
.basetype
!= b
.basetype
)
1286 if (a
.width
!= b
.width
)
1288 if (a
.vecsize
!= b
.vecsize
)
1290 if (a
.columns
!= b
.columns
)
1292 if (a
.array
.size() != b
.array
.size())
1295 size_t array_count
= a
.array
.size();
1296 if (array_count
&& memcmp(a
.array
.data(), b
.array
.data(), array_count
* sizeof(uint32_t)) != 0)
1299 if (a
.basetype
== SPIRType::Image
|| a
.basetype
== SPIRType::SampledImage
)
1301 if (memcmp(&a
.image
, &b
.image
, sizeof(SPIRType::Image
)) != 0)
1305 if (a
.member_types
.size() != b
.member_types
.size())
1308 size_t member_types
= a
.member_types
.size();
1309 for (size_t i
= 0; i
< member_types
; i
++)
1311 if (!types_are_logically_equivalent(get
<SPIRType
>(a
.member_types
[i
]), get
<SPIRType
>(b
.member_types
[i
])))
1318 bool Parser::variable_storage_is_aliased(const SPIRVariable
&v
) const
1320 auto &type
= get
<SPIRType
>(v
.basetype
);
1322 auto *type_meta
= ir
.find_meta(type
.self
);
1324 bool ssbo
= v
.storage
== StorageClassStorageBuffer
||
1325 (type_meta
&& type_meta
->decoration
.decoration_flags
.get(DecorationBufferBlock
));
1326 bool image
= type
.basetype
== SPIRType::Image
;
1327 bool counter
= type
.basetype
== SPIRType::AtomicCounter
;
1331 is_restrict
= ir
.get_buffer_block_flags(v
).get(DecorationRestrict
);
1333 is_restrict
= ir
.has_decoration(v
.self
, DecorationRestrict
);
1335 return !is_restrict
&& (ssbo
|| image
|| counter
);
1337 } // namespace SPIRV_CROSS_NAMESPACE