// NOTE: removed stray language-detection metadata line (non-Rust text accidentally prepended to this file).
/*!
Frontend for [SPIR-V][spv] (Standard Portable Intermediate Representation).
## ID lookups
Our IR links to everything with `Handle`, while SPIR-V uses IDs.
In order to keep track of the associations, the parser has many lookup tables.
These map `spv::Word` into a specific IR handle, plus potentially a bit of
extra info, such as the related SPIR-V type ID.
TODO: would be nice to find ways that avoid looking up as much
## Inputs/Outputs
We create a private variable for each input/output. The relevant inputs are
populated at the start of an entry point. The outputs are saved at the end.
The function associated with an entry point is wrapped in another function,
such that we can handle any `Return` statements without problems.
## Row-major matrices
We don't handle them natively, since the IR only expects column majority.
Instead, we detect when such matrix is accessed in the `OpAccessChain`,
and we generate a parallel expression that loads the value, but transposed.
This value then gets used instead of `OpLoad` result later on.
[spv]:
https://www.khronos.org/registry/SPIR-V/
*/
mod convert;
mod error;
mod function;
mod image;
mod null;
use convert::*;
pub use error::Error;
use function::*;
use crate::{
arena::{Arena, Handle, UniqueArena},
proc::{Alignment, Layouter},
FastHashMap, FastHashSet, FastIndexMap,
};
use petgraph::graphmap::GraphMap;
use std::{convert::TryInto, mem, num::NonZeroU32, path::PathBuf};
use super::atomic_upgrade::Upgrades;
/// SPIR-V capabilities this frontend knows how to handle.
///
/// NOTE(review): presumably checked against `OpCapability` declarations when
/// [`Options::strict_capabilities`] is set — the check itself is outside this
/// chunk; confirm at the use site.
pub const SUPPORTED_CAPABILITIES: &[spirv::Capability] = &[
    spirv::Capability::Shader,
    spirv::Capability::VulkanMemoryModel,
    spirv::Capability::ClipDistance,
    spirv::Capability::CullDistance,
    spirv::Capability::SampleRateShading,
    spirv::Capability::DerivativeControl,
    spirv::Capability::Matrix,
    spirv::Capability::ImageQuery,
    spirv::Capability::Sampled1D,
    spirv::Capability::Image1D,
    spirv::Capability::SampledCubeArray,
    spirv::Capability::ImageCubeArray,
    spirv::Capability::StorageImageExtendedFormats,
    spirv::Capability::Int8,
    spirv::Capability::Int16,
    spirv::Capability::Int64,
    spirv::Capability::Int64Atomics,
    spirv::Capability::Float16,
    spirv::Capability::AtomicFloat32AddEXT,
    spirv::Capability::Float64,
    spirv::Capability::Geometry,
    spirv::Capability::MultiView,
    // tricky ones
    spirv::Capability::UniformBufferArrayDynamicIndexing,
    spirv::Capability::StorageBufferArrayDynamicIndexing,
];
/// SPIR-V extensions (from `OpExtension`) this frontend accepts.
pub const SUPPORTED_EXTENSIONS: &[&str] = &[
    "SPV_KHR_storage_buffer_storage_class",
    "SPV_KHR_vulkan_memory_model",
    "SPV_KHR_multiview",
    "SPV_EXT_shader_atomic_float_add",
];
/// Extended instruction sets (presumably from `OpExtInstImport`) this frontend accepts.
pub const SUPPORTED_EXT_SETS: &[&str] = &["GLSL.std.450"];
/// A decoded SPIR-V instruction header: opcode plus word count.
#[derive(Copy, Clone)]
pub struct Instruction {
    /// The instruction's opcode.
    op: spirv::Op,
    /// Total number of words in the instruction, including the opcode word
    /// (per the SPIR-V physical layout).
    wc: u16,
}
impl Instruction {
const fn expect(self, count: u16) -> Result<(), Error> {
if self.wc == count {
Ok(())
} else {
Err(Error::InvalidOperandCount(self.op, self.wc))
}
}
fn expect_at_least(self, count: u16) -> Result<u16, Error> {
self.wc
.checked_sub(count)
.ok_or(Error::InvalidOperandCount(self.op, self.wc))
}
}
impl crate::TypeInner {
    /// Whether this type may be used with a comparison sampler: a single-sampled
    /// float image, a sampler, or a binding array whose base type qualifies.
    fn can_comparison_sample(&self, module: &crate::Module) -> bool {
        match *self {
            crate::TypeInner::Image {
                class:
                    crate::ImageClass::Sampled {
                        kind: crate::ScalarKind::Float,
                        multi: false,
                    },
                ..
            }
            | crate::TypeInner::Sampler { .. } => true,
            // Binding arrays qualify iff their element type does.
            crate::TypeInner::BindingArray { base, .. } => {
                module.types[base].inner.can_comparison_sample(module)
            }
            _ => false,
        }
    }
}
/// Logical sections of a SPIR-V module, in the order the spec requires them
/// to appear.
///
/// NOTE(review): the `PartialOrd` derive suggests the parser compares states
/// to enforce section ordering — the comparison itself is outside this chunk.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
pub enum ModuleState {
    Empty,
    Capability,
    Extension,
    ExtInstImport,
    MemoryModel,
    EntryPoint,
    ExecutionMode,
    Source,
    Name,
    ModuleProcessed,
    Annotation,
    Type,
    Function,
}
/// Convenience trait for id-keyed lookup tables: fetch by SPIR-V id,
/// turning a miss into an `Error`.
trait LookupHelper {
    type Target;
    fn lookup(&self, key: spirv::Word) -> Result<&Self::Target, Error>;
}
impl<T> LookupHelper for FastHashMap<spirv::Word, T> {
type Target = T;
fn lookup(&self, key: spirv::Word) -> Result<&T, Error> {
self.get(&key).ok_or(Error::InvalidId(key))
}
}
impl crate::ImageDimension {
const fn required_coordinate_size(&self) -> Option<crate::VectorSize> {
match *self {
crate::ImageDimension::D1 => None,
crate::ImageDimension::D2 => Some(crate::VectorSize::Bi),
crate::ImageDimension::D3 => Some(crate::VectorSize::Tri),
crate::ImageDimension::Cube => Some(crate::VectorSize::Tri),
}
}
}
/// Index of a member within a SPIR-V struct type.
type MemberIndex = u32;
bitflags::bitflags! {
    /// Access-restricting decorations (`NonReadable`/`NonWritable`) collected
    /// while parsing; see [`DecorationFlags::to_storage_access`].
    #[derive(Clone, Copy, Debug, Default)]
    struct DecorationFlags: u32 {
        const NON_READABLE = 0x1;
        const NON_WRITABLE = 0x2;
    }
}
impl DecorationFlags {
    /// Translate the decoration flags into IR storage access: full LOAD|STORE
    /// access, minus whatever the `NonReadable`/`NonWritable` flags forbid.
    fn to_storage_access(self) -> crate::StorageAccess {
        let mut access = crate::StorageAccess::empty();
        if !self.contains(DecorationFlags::NON_READABLE) {
            access |= crate::StorageAccess::LOAD;
        }
        if !self.contains(DecorationFlags::NON_WRITABLE) {
            access |= crate::StorageAccess::STORE;
        }
        access
    }
}
/// Matrix majority declared by the `ColMajor`/`RowMajor` decorations.
#[derive(Debug, PartialEq)]
enum Majority {
    Column,
    Row,
}
/// All decorations accumulated for a single SPIR-V id (or struct member).
///
/// Populated incrementally by `Frontend::next_decoration`.
#[derive(Debug, Default)]
struct Decoration {
    // Debug name — presumably from `OpName`/`OpMemberName`; set outside this chunk.
    name: Option<String>,
    // Raw `BuiltIn` decoration value, if any.
    built_in: Option<spirv::Word>,
    // `Location` decoration for inter-stage I/O.
    location: Option<spirv::Word>,
    // `DescriptorSet` decoration (resource bind group).
    desc_set: Option<spirv::Word>,
    // `Binding` decoration (index within the descriptor set).
    desc_index: Option<spirv::Word>,
    // `SpecId` decoration for specialization constants.
    specialization_constant_id: Option<spirv::Word>,
    // Set by the legacy `BufferBlock` decoration (pre-1.3 storage buffers).
    storage_buffer: bool,
    // `Offset` decoration, in bytes (struct members).
    offset: Option<spirv::Word>,
    // `ArrayStride` decoration, in bytes.
    array_stride: Option<NonZeroU32>,
    // `MatrixStride` decoration, in bytes.
    matrix_stride: Option<NonZeroU32>,
    // `ColMajor`/`RowMajor` decoration.
    matrix_major: Option<Majority>,
    // `Invariant` decoration.
    invariant: bool,
    // Derived from `NoPerspective`/`Flat` decorations.
    interpolation: Option<crate::Interpolation>,
    // Derived from `Centroid`/`Sample` decorations.
    sampling: Option<crate::Sampling>,
    // `NonReadable`/`NonWritable` access flags.
    flags: DecorationFlags,
}
impl Decoration {
fn debug_name(&self) -> &str {
match self.name {
Some(ref name) => name.as_str(),
None => "?",
}
}
const fn resource_binding(&self) -> Option<crate::ResourceBinding> {
match *self {
Decoration {
desc_set: Some(group),
desc_index: Some(binding),
..
} => Some(crate::ResourceBinding { group, binding }),
_ => None,
}
}
fn io_binding(&self) -> Result<crate::Binding, Error> {
match *self {
Decoration {
built_in: Some(built_in),
location: None,
invariant,
..
} => Ok(crate::Binding::BuiltIn(map_builtin(built_in, invariant)?)),
Decoration {
built_in: None,
location: Some(location),
interpolation,
sampling,
..
} => Ok(crate::Binding::Location {
location,
interpolation,
sampling,
second_blend_source: false,
}),
_ => Err(Error::MissingDecoration(spirv::Decoration::Location)),
}
}
}
/// The parameter/return type ids of an `OpTypeFunction`.
#[derive(Debug)]
struct LookupFunctionType {
    parameter_type_ids: Vec<spirv::Word>,
    return_type_id: spirv::Word,
}
/// A parsed function: its IR handle plus per-parameter sampling usage.
struct LookupFunction {
    handle: Handle<crate::Function>,
    parameters_sampling: Vec<image::SamplingFlags>,
}
/// Data collected from `OpEntryPoint` and its execution modes.
#[derive(Debug)]
struct EntryPoint {
    stage: crate::ShaderStage,
    name: String,
    early_depth_test: Option<crate::EarlyDepthTest>,
    workgroup_size: [u32; 3],
    /// Ids of the interface variables listed by `OpEntryPoint`.
    variable_ids: Vec<spirv::Word>,
}
/// A parsed type: its IR handle plus the SPIR-V id of its base type
/// (element/pointee), when it has one.
#[derive(Clone, Debug)]
struct LookupType {
    handle: Handle<crate::Type>,
    base_id: Option<spirv::Word>,
}
/// Either a fully-known constant or a specialization override.
#[derive(Debug)]
enum Constant {
    Constant(Handle<crate::Constant>),
    Override(Handle<crate::Override>),
}
impl Constant {
    /// Wrap this constant in the corresponding IR expression variant.
    const fn to_expr(&self) -> crate::Expression {
        match *self {
            Self::Constant(handle) => crate::Expression::Constant(handle),
            Self::Override(handle) => crate::Expression::Override(handle),
        }
    }
}
/// A parsed constant together with its SPIR-V type id.
#[derive(Debug)]
struct LookupConstant {
    inner: Constant,
    type_id: spirv::Word,
}
/// Classification of an `OpVariable`: a plain global, or an entry-point
/// input/output (which the frontend models as private globals — see the
/// module docs).
#[derive(Debug)]
enum Variable {
    Global,
    Input(crate::FunctionArgument),
    Output(crate::FunctionResult),
}
/// A parsed variable: its classification, IR handle, and SPIR-V type id.
#[derive(Debug)]
struct LookupVariable {
    inner: Variable,
    handle: Handle<crate::GlobalVariable>,
    type_id: spirv::Word,
}
/// Information about SPIR-V result ids, stored in `Frontend::lookup_expression`.
#[derive(Clone, Debug)]
struct LookupExpression {
    /// The `Expression` constructed for this result.
    ///
    /// Note that, while a SPIR-V result id can be used in any block dominated
    /// by its definition, a Naga `Expression` is only in scope for the rest of
    /// its subtree. `Frontend::get_expr_handle` takes care of spilling the result
    /// to a `LocalVariable` which can then be used anywhere.
    handle: Handle<crate::Expression>,
    /// The SPIR-V type of this result.
    type_id: spirv::Word,
    /// The label id of the block that defines this expression.
    ///
    /// This is zero for globals, constants, and function parameters, since they
    /// originate outside any function's block.
    block_id: spirv::Word,
}
/// Per-member info for struct types: the member's SPIR-V type id and its
/// matrix majority.
#[derive(Debug)]
struct LookupMember {
    type_id: spirv::Word,
    // This is true for either matrices, or arrays of matrices (yikes).
    row_major: bool,
}
/// Replacement value for an `OpLoad` of a row-major matrix (see the
/// "Row-major matrices" section of the module docs).
#[derive(Clone, Debug)]
enum LookupLoadOverride {
    /// For arrays of matrices, we track them but don't load them yet.
    Pending,
    /// For matrices, vectors, and scalars, we pre-load the data.
    Loaded(Handle<crate::Expression>),
}
/// Like an IR address space, but keeping entry-point inputs/outputs distinct,
/// since the frontend lowers them to private globals.
#[derive(PartialEq)]
enum ExtendedClass {
    Global(crate::AddressSpace),
    Input,
    Output,
}
/// Configuration for the SPIR-V frontend.
#[derive(Clone, Debug)]
pub struct Options {
    /// The IR coordinate space matches all the APIs except SPIR-V,
    /// so by default we flip the Y coordinate of the `BuiltIn::Position`.
    /// This flag can be used to avoid this.
    pub adjust_coordinate_space: bool,
    /// Only allow shaders with the known set of capabilities.
    pub strict_capabilities: bool,
    /// If set, dump each function's `BlockContext` to files under this prefix
    /// for debugging.
    pub block_ctx_dump_prefix: Option<PathBuf>,
}
impl Default for Options {
fn default() -> Self {
Options {
adjust_coordinate_space: true,
strict_capabilities: false,
block_ctx_dump_prefix: None,
}
}
}
/// An index into the `BlockContext::bodies` table.
type BodyIndex = usize;
/// An intermediate representation of a Naga [`Statement`].
///
/// `Body` and `BodyFragment` values form a tree: the `BodyIndex` fields of the
/// variants are indices of the child `Body` values in [`BlockContext::bodies`].
/// The `lower` function assembles the final `Statement` tree from this `Body`
/// tree. See [`BlockContext`] for details.
///
/// [`Statement`]: crate::Statement
#[derive(Debug)]
enum BodyFragment {
    /// A run of control-flow-free statements, stored in
    /// `BlockContext::blocks` under this SPIR-V label id.
    BlockId(spirv::Word),
    If {
        condition: Handle<crate::Expression>,
        accept: BodyIndex,
        reject: BodyIndex,
    },
    Loop {
        /// The body of the loop. Its [`Body::parent`] is the block containing
        /// this `Loop` fragment.
        body: BodyIndex,
        /// The loop's continuing block. This is a grandchild: its
        /// [`Body::parent`] is the loop body block, whose index is above.
        continuing: BodyIndex,
        /// If the SPIR-V loop's back-edge branch is conditional, this is the
        /// expression that must be `false` for the back-edge to be taken, with
        /// `true` being for the "loop merge" (which breaks out of the loop).
        break_if: Option<Handle<crate::Expression>>,
    },
    Switch {
        selector: Handle<crate::Expression>,
        /// `(literal, body)` pairs for each case.
        cases: Vec<(i32, BodyIndex)>,
        default: BodyIndex,
    },
    Break,
    Continue,
}
/// An intermediate representation of a Naga [`Block`].
///
/// This will be assembled into a `Block` once we've added spills for phi nodes
/// and out-of-scope expressions. See [`BlockContext`] for details.
///
/// [`Block`]: crate::Block
#[derive(Debug)]
struct Body {
    /// The index of the direct parent of this body
    parent: usize,
    /// The fragments that make up this body, in order.
    data: Vec<BodyFragment>,
}
impl Body {
/// Creates a new empty `Body` with the specified `parent`
pub const fn with_parent(parent: usize) -> Self {
Body {
parent,
data: Vec::new(),
}
}
}
/// A phi node to be resolved into local-variable stores after parsing.
#[derive(Debug)]
struct PhiExpression {
    /// The local variable used for the phi node
    local: Handle<crate::LocalVariable>,
    /// List of (expression, block) source pairs: in each predecessor block,
    /// the given expression's value is stored to `local`.
    expressions: Vec<(spirv::Word, spirv::Word)>,
}
/// What role a SPIR-V merge/continue block plays in its enclosing
/// structured-control-flow construct.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MergeBlockInformation {
    LoopMerge,
    LoopContinue,
    SelectionMerge,
    SwitchMerge,
}
/// Fragments of Naga IR, to be assembled into `Statements` once data flow is
/// resolved.
///
/// We can't build a Naga `Statement` tree directly from SPIR-V blocks for three
/// main reasons:
///
/// - We parse a function's SPIR-V blocks in the order they appear in the file.
///   Within a function, SPIR-V requires that a block must precede any blocks it
///   structurally dominates, but doesn't say much else about the order in which
///   they must appear. So while we know we'll see control flow header blocks
///   before their child constructs and merge blocks, those children and the
///   merge blocks may appear in any order - perhaps even intermingled with
///   children of other constructs.
///
/// - A SPIR-V expression can be used in any SPIR-V block dominated by its
///   definition, whereas Naga expressions are scoped to the rest of their
///   subtree. This means that discovering an expression use later in the
///   function retroactively requires us to have spilled that expression into a
///   local variable back before we left its scope. (The docs for
///   [`Frontend::get_expr_handle`] explain this in more detail.)
///
/// - We translate SPIR-V OpPhi expressions as Naga local variables in which we
///   store the appropriate value before jumping to the OpPhi's block.
///
/// All these cases require us to go back and amend previously generated Naga IR
/// based on things we discover later. But modifying old blocks in arbitrary
/// spots in a `Statement` tree is awkward.
///
/// Instead, as we iterate through the function's body, we accumulate
/// control-flow-free fragments of Naga IR in the [`blocks`] table, while
/// building a skeleton of the Naga `Statement` tree in [`bodies`]. We note any
/// spills and temporaries we must introduce in [`phis`].
///
/// Finally, once we've processed the entire function, we add temporaries and
/// spills to the fragmentary `Blocks` as directed by `phis`, and assemble them
/// into the final Naga `Statement` tree as directed by `bodies`.
///
/// [`blocks`]: BlockContext::blocks
/// [`bodies`]: BlockContext::bodies
/// [`phis`]: BlockContext::phis
/// [`lower`]: function::lower
#[derive(Debug)]
struct BlockContext<'function> {
    /// Phi nodes encountered when parsing the function, used to generate spills
    /// to local variables.
    phis: Vec<PhiExpression>,
    /// Fragments of control-flow-free Naga IR.
    ///
    /// These will be stitched together into a proper [`Statement`] tree according
    /// to `bodies`, once parsing is complete.
    ///
    /// [`Statement`]: crate::Statement
    blocks: FastHashMap<spirv::Word, crate::Block>,
    /// Map from each SPIR-V block's label id to the index of the [`Body`] in
    /// [`bodies`] the block should append its contents to.
    ///
    /// Since each statement in a Naga [`Block`] dominates the next, we are sure
    /// to encounter their SPIR-V blocks in order. Thus, by having this table
    /// map a SPIR-V structured control flow construct's merge block to the same
    /// body index as its header block, when we encounter the merge block, we
    /// will simply pick up building the [`Body`] where the header left off.
    ///
    /// A function's first block is special: it is the only block we encounter
    /// without having seen its label mentioned in advance. (It's simply the
    /// first `OpLabel` after the `OpFunction`.) We thus assume that any block
    /// missing an entry here must be the first block, which always has body
    /// index zero.
    ///
    /// [`bodies`]: BlockContext::bodies
    /// [`Block`]: crate::Block
    body_for_label: FastHashMap<spirv::Word, BodyIndex>,
    /// SPIR-V metadata about merge/continue blocks.
    mergers: FastHashMap<spirv::Word, MergeBlockInformation>,
    /// A table of `Body` values, each representing a block in the final IR.
    ///
    /// The first element is always the function's top-level block.
    bodies: Vec<Body>,
    /// The module we're building.
    module: &'function mut crate::Module,
    /// Id of the function currently being processed
    function_id: spirv::Word,
    /// Expression arena of the function currently being processed
    expressions: &'function mut Arena<crate::Expression>,
    /// Local variables arena of the function currently being processed
    local_arena: &'function mut Arena<crate::LocalVariable>,
    /// Arguments of the function currently being processed
    arguments: &'function [crate::FunctionArgument],
    /// Metadata about the usage of function parameters as sampling objects
    parameter_sampling: &'function mut [image::SamplingFlags],
}
/// Which type anchors the sign adjustment in
/// `parse_expr_binary_op_sign_adjusted`: operands are converted to match
/// either the result's type or the first operand's type.
enum SignAnchor {
    /// Match the instruction's result type (arithmetic ops).
    Result,
    /// Match the first operand's type (boolean-result ops like comparisons).
    Operand,
}
/// SPIR-V parser state: the word stream plus the many id-lookup tables the
/// module docs describe.
pub struct Frontend<I> {
    /// Iterator yielding the module's words.
    data: I,
    /// Byte offset of the next word to read; each word advances it by 4.
    /// Used to build `Span`s.
    data_offset: usize,
    /// Which logical module section we are currently parsing.
    state: ModuleState,
    /// Computes type layouts (alignment/size) for the module.
    layouter: Layouter,
    /// Scratch buffer reused when decoding string literals (see `next_string`).
    temp_bytes: Vec<u8>,
    /// Id of the imported GLSL.std.450 instruction set, if any
    /// (presumably recorded at `OpExtInstImport` — set outside this chunk).
    ext_glsl_id: Option<spirv::Word>,
    /// Decorations seen before the id they apply to was defined.
    future_decor: FastHashMap<spirv::Word, Decoration>,
    /// Like `future_decor`, but keyed by (struct id, member index).
    future_member_decor: FastHashMap<(spirv::Word, MemberIndex), Decoration>,
    /// Per-member info for parsed struct types.
    lookup_member: FastHashMap<(Handle<crate::Type>, MemberIndex), LookupMember>,
    /// Sampling usage recorded per global variable.
    handle_sampling: FastHashMap<Handle<crate::GlobalVariable>, image::SamplingFlags>,
    /// A record of what is accessed by [`Atomic`] statements we've
    /// generated, so we can upgrade the types of their operands.
    ///
    /// [`Atomic`]: crate::Statement::Atomic
    upgrade_atomics: Upgrades,
    /// Parsed types, by SPIR-V id.
    lookup_type: FastHashMap<spirv::Word, LookupType>,
    /// The id of `OpTypeVoid`, if one was declared.
    lookup_void_type: Option<spirv::Word>,
    /// Types decorated as storage buffers, with their access flags.
    lookup_storage_buffer_types: FastHashMap<Handle<crate::Type>, crate::StorageAccess>,
    /// Parsed constants, by SPIR-V id.
    lookup_constant: FastHashMap<spirv::Word, LookupConstant>,
    /// Parsed variables, by SPIR-V id.
    lookup_variable: FastHashMap<spirv::Word, LookupVariable>,
    /// Expressions generated for SPIR-V result ids.
    lookup_expression: FastHashMap<spirv::Word, LookupExpression>,
    // Load overrides are used to work around row-major matrices
    lookup_load_override: FastHashMap<spirv::Word, LookupLoadOverride>,
    /// `OpSampledImage` results, by SPIR-V id.
    lookup_sampled_image: FastHashMap<spirv::Word, image::LookupSampledImage>,
    /// Parsed `OpTypeFunction`s, by SPIR-V id.
    lookup_function_type: FastHashMap<spirv::Word, LookupFunctionType>,
    /// Parsed functions, by SPIR-V id.
    lookup_function: FastHashMap<spirv::Word, LookupFunction>,
    /// Entry points declared by `OpEntryPoint`, keyed by function id.
    lookup_entry_point: FastHashMap<spirv::Word, EntryPoint>,
    // When parsing functions, each entry point function gets an entry here so that additional
    // processing for them can be performed after all function parsing.
    deferred_entry_points: Vec<(EntryPoint, spirv::Word)>,
    // Note: each `OpFunctionCall` gets a single entry here, indexed by the
    // dummy `Handle<crate::Function>` of the call site.
    deferred_function_calls: Vec<spirv::Word>,
    /// Placeholder functions used as call targets until real handles exist.
    dummy_functions: Arena<crate::Function>,
    // Graph of all function calls through the module.
    // It's used to sort the functions (as nodes) topologically,
    // so that in the IR any called function is already known.
    function_call_graph: GraphMap<spirv::Word, (), petgraph::Directed>,
    /// User-supplied parsing options.
    options: Options,
    /// Maps for a switch from a case target to the respective body and associated literals that
    /// use that target block id.
    ///
    /// Used to preserve allocations between instruction parsing.
    switch_cases: FastIndexMap<spirv::Word, (BodyIndex, Vec<i32>)>,
    /// Tracks access to gl_PerVertex's builtins, it is used to cull unused
    /// builtins since initializing those can affect performance and the mere
    /// presence of some of these builtins might cause backends to error since
    /// they might be unsupported.
    ///
    /// The problematic builtins are: PointSize, ClipDistance and CullDistance.
    ///
    /// glslang declares those by default even though they are never written to
    /// (see <https://github.com/KhronosGroup/glslang/issues/1868>)
    gl_per_vertex_builtin_access: FastHashSet<crate::BuiltIn>,
}
impl<I: Iterator<Item = u32>> Frontend<I> {
/// Create a frontend reading SPIR-V words from `data`, configured by `options`.
pub fn new(data: I, options: &Options) -> Self {
    Frontend {
        data,
        data_offset: 0,
        state: ModuleState::Empty,
        layouter: Layouter::default(),
        temp_bytes: Vec::new(),
        ext_glsl_id: None,
        future_decor: FastHashMap::default(),
        future_member_decor: FastHashMap::default(),
        handle_sampling: FastHashMap::default(),
        lookup_member: FastHashMap::default(),
        upgrade_atomics: Default::default(),
        lookup_type: FastHashMap::default(),
        lookup_void_type: None,
        lookup_storage_buffer_types: FastHashMap::default(),
        lookup_constant: FastHashMap::default(),
        lookup_variable: FastHashMap::default(),
        lookup_expression: FastHashMap::default(),
        lookup_load_override: FastHashMap::default(),
        lookup_sampled_image: FastHashMap::default(),
        lookup_function_type: FastHashMap::default(),
        lookup_function: FastHashMap::default(),
        lookup_entry_point: FastHashMap::default(),
        deferred_entry_points: Vec::default(),
        deferred_function_calls: Vec::default(),
        dummy_functions: Arena::new(),
        function_call_graph: GraphMap::new(),
        options: options.clone(),
        switch_cases: FastIndexMap::default(),
        gl_per_vertex_builtin_access: FastHashSet::default(),
    }
}
/// Span covering everything read since byte offset `from`.
fn span_from(&self, from: usize) -> crate::Span {
    crate::Span::from(from..self.data_offset)
}
/// Like [`Self::span_from`], but also covering the instruction's opcode
/// word: `from` is taken just *after* the opcode word was consumed, so we
/// back up by one 4-byte word to include it.
fn span_from_with_op(&self, from: usize) -> crate::Span {
    crate::Span::from((from - 4)..self.data_offset)
}
fn next(&mut self) -> Result<u32, Error> {
if let Some(res) = self.data.next() {
self.data_offset += 4;
Ok(res)
} else {
Err(Error::IncompleteData)
}
}
fn next_inst(&mut self) -> Result<Instruction, Error> {
let word = self.next()?;
let (wc, opcode) = ((word >> 16) as u16, (word & 0xffff) as u16);
if wc == 0 {
return Err(Error::InvalidWordCount);
}
let op = spirv::Op::from_u32(opcode as u32).ok_or(Error::UnknownInstruction(opcode))?;
Ok(Instruction { op, wc })
}
/// Decode a nul-terminated UTF-8 string literal from the word stream.
///
/// `count` is the number of operand words still available. Returns the
/// decoded string together with the number of operand words left unread.
fn next_string(&mut self, mut count: u16) -> Result<(String, u16), Error> {
    self.temp_bytes.clear();
    loop {
        // Running out of operand words before the terminating NUL is malformed.
        if count == 0 {
            return Err(Error::BadString);
        }
        count -= 1;
        // SPIR-V packs string bytes four per word, little-endian.
        let chars = self.next()?.to_le_bytes();
        let pos = chars.iter().position(|&c| c == 0).unwrap_or(4);
        self.temp_bytes.extend_from_slice(&chars[..pos]);
        // pos < 4 means a NUL terminator appeared in this word; otherwise
        // the string continues into the next word.
        if pos < 4 {
            break;
        }
    }
    std::str::from_utf8(&self.temp_bytes)
        .map(|s| (s.to_owned(), count))
        .map_err(|_| Error::BadString)
}
/// Parse one decoration operand of an `OpDecorate`-style instruction into
/// `dec`.
///
/// `base_words` is the number of operand words that precede the decoration
/// operand itself (it differs between `OpDecorate` and `OpMemberDecorate`),
/// used to validate the instruction's word count.
fn next_decoration(
    &mut self,
    inst: Instruction,
    base_words: u16,
    dec: &mut Decoration,
) -> Result<(), Error> {
    let raw = self.next()?;
    let dec_typed = spirv::Decoration::from_u32(raw).ok_or(Error::InvalidDecoration(raw))?;
    log::trace!("\t\t{}: {:?}", dec.debug_name(), dec_typed);
    match dec_typed {
        spirv::Decoration::BuiltIn => {
            inst.expect(base_words + 2)?;
            dec.built_in = Some(self.next()?);
        }
        spirv::Decoration::Location => {
            inst.expect(base_words + 2)?;
            dec.location = Some(self.next()?);
        }
        spirv::Decoration::DescriptorSet => {
            inst.expect(base_words + 2)?;
            dec.desc_set = Some(self.next()?);
        }
        spirv::Decoration::Binding => {
            inst.expect(base_words + 2)?;
            dec.desc_index = Some(self.next()?);
        }
        // Legacy (pre-SPIR-V 1.3) marker for storage buffers.
        spirv::Decoration::BufferBlock => {
            dec.storage_buffer = true;
        }
        spirv::Decoration::Offset => {
            inst.expect(base_words + 2)?;
            dec.offset = Some(self.next()?);
        }
        spirv::Decoration::ArrayStride => {
            inst.expect(base_words + 2)?;
            dec.array_stride = NonZeroU32::new(self.next()?);
        }
        spirv::Decoration::MatrixStride => {
            inst.expect(base_words + 2)?;
            dec.matrix_stride = NonZeroU32::new(self.next()?);
        }
        spirv::Decoration::Invariant => {
            dec.invariant = true;
        }
        spirv::Decoration::NoPerspective => {
            dec.interpolation = Some(crate::Interpolation::Linear);
        }
        spirv::Decoration::Flat => {
            dec.interpolation = Some(crate::Interpolation::Flat);
        }
        spirv::Decoration::Centroid => {
            dec.sampling = Some(crate::Sampling::Centroid);
        }
        spirv::Decoration::Sample => {
            dec.sampling = Some(crate::Sampling::Sample);
        }
        spirv::Decoration::NonReadable => {
            dec.flags |= DecorationFlags::NON_READABLE;
        }
        spirv::Decoration::NonWritable => {
            dec.flags |= DecorationFlags::NON_WRITABLE;
        }
        spirv::Decoration::ColMajor => {
            dec.matrix_major = Some(Majority::Column);
        }
        spirv::Decoration::RowMajor => {
            dec.matrix_major = Some(Majority::Row);
        }
        spirv::Decoration::SpecId => {
            // NOTE(review): unlike the operand-carrying arms above, this one
            // does not call `inst.expect(base_words + 2)` first — confirm
            // whether that's intentional.
            dec.specialization_constant_id = Some(self.next()?);
        }
        other => {
            log::warn!("Unknown decoration {:?}", other);
            // Skip the remaining operand words of a decoration we don't know.
            for _ in base_words + 1..inst.wc {
                let _var = self.next()?;
            }
        }
    }
    Ok(())
}
/// Return the Naga [`Expression`] to use in `body_idx` to refer to the SPIR-V result `id`.
///
/// Ideally, we would just have a map from each SPIR-V instruction id to the
/// [`Handle`] for the Naga [`Expression`] we generated for it.
/// Unfortunately, SPIR-V and Naga IR are different enough that such a
/// straightforward relationship isn't possible.
///
/// In SPIR-V, an instruction's result id can be used by any instruction
/// dominated by that instruction. In Naga, an [`Expression`] is only in
/// scope for the remainder of its [`Block`]. In pseudocode:
///
/// ```ignore
/// loop {
///     a = f();
///     g(a);
///     break;
/// }
/// h(a);
/// ```
///
/// Suppose the calls to `f`, `g`, and `h` are SPIR-V instructions. In
/// SPIR-V, both the `g` and `h` instructions are allowed to refer to `a`,
/// because the loop body, including `f`, dominates both of them.
///
/// But if `a` is a Naga [`Expression`], its scope ends at the end of the
/// block it's evaluated in: the loop body. Thus, while the [`Expression`]
/// we generate for `g` can refer to `a`, the one we generate for `h`
/// cannot.
///
/// Instead, the SPIR-V front end must generate Naga IR like this:
///
/// ```ignore
/// var temp; // INTRODUCED
/// loop {
///     a = f();
///     g(a);
///     temp = a; // INTRODUCED
/// }
/// h(temp); // ADJUSTED
/// ```
///
/// In other words, where `a` is in scope, [`Expression`]s can refer to it
/// directly; but once it is out of scope, we need to spill it to a
/// temporary and refer to that instead.
///
/// Given a SPIR-V expression `id` and the index `body_idx` of the [body]
/// that wants to refer to it:
///
/// - If the Naga [`Expression`] we generated for `id` is in scope in
///   `body_idx`, then we simply return its `Handle<Expression>`.
///
/// - Otherwise, introduce a new [`LocalVariable`], and add an entry to
///   [`BlockContext::phis`] to arrange for `id`'s value to be spilled to
///   it. Then emit a fresh [`Load`] of that temporary variable for use in
///   `body_idx`'s block, and return its `Handle`.
///
/// The SPIR-V domination rule ensures that the introduced [`LocalVariable`]
/// will always have been initialized before it is used.
///
/// `lookup` must be the [`LookupExpression`] for `id`.
///
/// `body_idx` argument must be the index of the [`Body`] that hopes to use
/// `id`'s [`Expression`].
///
/// [`Expression`]: crate::Expression
/// [`Handle`]: crate::Handle
/// [`Block`]: crate::Block
/// [body]: BlockContext::bodies
/// [`LocalVariable`]: crate::LocalVariable
/// [`Load`]: crate::Expression::Load
fn get_expr_handle(
    &self,
    id: spirv::Word,
    lookup: &LookupExpression,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    body_idx: BodyIndex,
) -> Handle<crate::Expression> {
    // What `Body` was `id` defined in?
    let expr_body_idx = ctx
        .body_for_label
        .get(&lookup.block_id)
        .copied()
        .unwrap_or(0);
    // Don't need to do a load/store if the expression is in the main body
    // or if the expression is in the same body as where the query was
    // requested. The body_idx might actually not be the final one if a loop
    // or conditional occurs but in those cases we know that the new body
    // will be a subscope of the body that was passed so we can still reuse
    // the handle and not issue a load/store.
    if is_parent(body_idx, expr_body_idx, ctx) {
        lookup.handle
    } else {
        // Add a temporary variable of the same type which will be used to
        // store the original expression and used in the current block
        let ty = self.lookup_type[&lookup.type_id].handle;
        let local = ctx.local_arena.append(
            crate::LocalVariable {
                name: None,
                ty,
                init: None,
            },
            crate::Span::default(),
        );
        // Flush pending expressions before inserting the pointer expression,
        // then resume emitting afterwards.
        block.extend(emitter.finish(ctx.expressions));
        let pointer = ctx.expressions.append(
            crate::Expression::LocalVariable(local),
            crate::Span::default(),
        );
        emitter.start(ctx.expressions);
        let expr = ctx
            .expressions
            .append(crate::Expression::Load { pointer }, crate::Span::default());
        // Add a slightly odd entry to the phi table, so that while `id`'s
        // `Expression` is still in scope, the usual phi processing will
        // spill its value to `local`, where we can find it later.
        //
        // This pretends that the block in which `id` is defined is the
        // predecessor of some other block with a phi in it that cites id as
        // one of its sources, and uses `local` as its variable. There is no
        // such phi, but nobody needs to know that.
        ctx.phis.push(PhiExpression {
            local,
            expressions: vec![(id, lookup.block_id)],
        });
        expr
    }
}
/// Parse a SPIR-V unary-operator instruction's operands and record the
/// resulting `Unary` expression under its result id.
fn parse_expr_unary_op(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::UnaryOperator,
) -> Result<(), Error> {
    let start = self.data_offset;
    // Operand order: result type, result id, operand id.
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let operand_id = self.next()?;
    let operand_lexp = self.lookup_expression.lookup(operand_id)?;
    let operand =
        self.get_expr_handle(operand_id, operand_lexp, ctx, emitter, block, body_idx);
    let span = self.span_from_with_op(start);
    let handle = ctx
        .expressions
        .append(crate::Expression::Unary { op, expr: operand }, span);
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle,
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// Parse a SPIR-V binary-operator instruction's operands and record the
/// resulting `Binary` expression under its result id.
fn parse_expr_binary_op(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::BinaryOperator,
) -> Result<(), Error> {
    let start = self.data_offset;
    // Operand order: result type, result id, left operand id, right operand id.
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let left_id = self.next()?;
    let right_id = self.next()?;
    let left_lexp = self.lookup_expression.lookup(left_id)?;
    let left = self.get_expr_handle(left_id, left_lexp, ctx, emitter, block, body_idx);
    let right_lexp = self.lookup_expression.lookup(right_id)?;
    let right = self.get_expr_handle(right_id, right_lexp, ctx, emitter, block, body_idx);
    let span = self.span_from_with_op(start);
    let handle = ctx
        .expressions
        .append(crate::Expression::Binary { op, left, right }, span);
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle,
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// A more complicated version of the unary op,
/// where we force the operand to have the same type as the result.
fn parse_expr_unary_op_sign_adjusted(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::UnaryOperator,
) -> Result<(), Error> {
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let p1_id = self.next()?;
    let span = self.span_from_with_op(start);
    let p1_lexp = self.lookup_expression.lookup(p1_id)?;
    let left = self.get_expr_handle(p1_id, p1_lexp, ctx, emitter, block, body_idx);
    let result_lookup_ty = self.lookup_type.lookup(result_type_id)?;
    // The scalar kind the operand must be converted to.
    let kind = ctx.module.types[result_lookup_ty.handle]
        .inner
        .scalar_kind()
        .unwrap();
    let expr = crate::Expression::Unary {
        op,
        // Insert a bitcast (`As` with `convert: None`) only when the
        // operand's type id differs from the result's.
        expr: if p1_lexp.type_id == result_type_id {
            left
        } else {
            ctx.expressions.append(
                crate::Expression::As {
                    expr: left,
                    kind,
                    convert: None,
                },
                span,
            )
        },
    };
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle: ctx.expressions.append(expr, span),
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// A more complicated version of the binary op,
/// where we force the operand to have the same type as the result.
/// This is mostly needed for "i++" and "i--" coming from GLSL.
#[allow(clippy::too_many_arguments)]
fn parse_expr_binary_op_sign_adjusted(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::BinaryOperator,
    // For arithmetic operations, we need the sign of operands to match the result.
    // For boolean operations, however, the operands need to match the signs, but
    // result is always different - a boolean.
    anchor: SignAnchor,
) -> Result<(), Error> {
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let p1_id = self.next()?;
    let p2_id = self.next()?;
    let span = self.span_from_with_op(start);
    let p1_lexp = self.lookup_expression.lookup(p1_id)?;
    let left = self.get_expr_handle(p1_id, p1_lexp, ctx, emitter, block, body_idx);
    let p2_lexp = self.lookup_expression.lookup(p2_id)?;
    let right = self.get_expr_handle(p2_id, p2_lexp, ctx, emitter, block, body_idx);
    // Which type id the operands must agree with (see `SignAnchor`).
    let expected_type_id = match anchor {
        SignAnchor::Result => result_type_id,
        SignAnchor::Operand => p1_lexp.type_id,
    };
    let expected_lookup_ty = self.lookup_type.lookup(expected_type_id)?;
    let kind = ctx.module.types[expected_lookup_ty.handle]
        .inner
        .scalar_kind()
        .unwrap();
    let expr = crate::Expression::Binary {
        op,
        // Bitcast each operand whose type id differs from the expected one.
        left: if p1_lexp.type_id == expected_type_id {
            left
        } else {
            ctx.expressions.append(
                crate::Expression::As {
                    expr: left,
                    kind,
                    convert: None,
                },
                span,
            )
        },
        right: if p2_lexp.type_id == expected_type_id {
            right
        } else {
            ctx.expressions.append(
                crate::Expression::As {
                    expr: right,
                    kind,
                    convert: None,
                },
                span,
            )
        },
    };
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle: ctx.expressions.append(expr, span),
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// A version of the binary op where one or both of the arguments might need to be casted to a
/// specific integer kind (unsigned or signed), used for operations like OpINotEqual or
/// OpUGreaterThan.
#[allow(clippy::too_many_arguments)]
fn parse_expr_int_comparison(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::BinaryOperator,
    kind: crate::ScalarKind,
) -> Result<(), Error> {
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let p1_id = self.next()?;
    let p2_id = self.next()?;
    let span = self.span_from_with_op(start);
    let p1_lexp = self.lookup_expression.lookup(p1_id)?;
    let left = self.get_expr_handle(p1_id, p1_lexp, ctx, emitter, block, body_idx);
    // Scalar kind each operand actually has, to compare against `kind`.
    let p1_lookup_ty = self.lookup_type.lookup(p1_lexp.type_id)?;
    let p1_kind = ctx.module.types[p1_lookup_ty.handle]
        .inner
        .scalar_kind()
        .unwrap();
    let p2_lexp = self.lookup_expression.lookup(p2_id)?;
    let right = self.get_expr_handle(p2_id, p2_lexp, ctx, emitter, block, body_idx);
    let p2_lookup_ty = self.lookup_type.lookup(p2_lexp.type_id)?;
    let p2_kind = ctx.module.types[p2_lookup_ty.handle]
        .inner
        .scalar_kind()
        .unwrap();
    let expr = crate::Expression::Binary {
        op,
        // Bitcast each operand whose scalar kind doesn't match the requested one.
        left: if p1_kind == kind {
            left
        } else {
            ctx.expressions.append(
                crate::Expression::As {
                    expr: left,
                    kind,
                    convert: None,
                },
                span,
            )
        },
        right: if p2_kind == kind {
            right
        } else {
            ctx.expressions.append(
                crate::Expression::As {
                    expr: right,
                    kind,
                    convert: None,
                },
                span,
            )
        },
    };
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle: ctx.expressions.append(expr, span),
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// Parse a bit-shift binary instruction.
///
/// The second operand (the shift amount) is always bitcast to `Uint`,
/// since that is what the IR expects for shifts.
fn parse_expr_shift_op(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    op: crate::BinaryOperator,
) -> Result<(), Error> {
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let p1_id = self.next()?;
    let p2_id = self.next()?;
    let span = self.span_from_with_op(start);

    let p1_lexp = self.lookup_expression.lookup(p1_id)?;
    let left = self.get_expr_handle(p1_id, p1_lexp, ctx, emitter, block, body_idx);
    let p2_lexp = self.lookup_expression.lookup(p2_id)?;
    let shift_amount = self.get_expr_handle(p2_id, p2_lexp, ctx, emitter, block, body_idx);

    // convert the shift to Uint
    let right = ctx.expressions.append(
        crate::Expression::As {
            expr: shift_amount,
            kind: crate::ScalarKind::Uint,
            convert: None,
        },
        span,
    );

    let handle = ctx
        .expressions
        .append(crate::Expression::Binary { op, left, right }, span);
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle,
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// Parse a derivative instruction (`OpDPdx` and friends) into a
/// `Derivative` expression with the given axis and control.
fn parse_expr_derivative(
    &mut self,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    (axis, ctrl): (crate::DerivativeAxis, crate::DerivativeControl),
) -> Result<(), Error> {
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let arg_id = self.next()?;

    // Resolve the single operand into a Naga expression handle.
    let arg_lexp = self.lookup_expression.lookup(arg_id)?;
    let arg_handle = self.get_expr_handle(arg_id, arg_lexp, ctx, emitter, block, body_idx);

    let derivative = crate::Expression::Derivative {
        axis,
        ctrl,
        expr: arg_handle,
    };
    let span = self.span_from_with_op(start);
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle: ctx.expressions.append(derivative, span),
            type_id: result_type_id,
            block_id,
        },
    );
    Ok(())
}
/// Build an expression equal to `root_expr`, except that the component
/// addressed by the `selections` index path is replaced with `object_expr`
/// (the work behind `OpCompositeInsert`).
///
/// Works recursively: at each level, every component of the current
/// composite is extracted with `AccessIndex`, the selected one is
/// substituted by recursing with the remaining `selections`, and the
/// pieces are reassembled into a `Compose` expression.
#[allow(clippy::too_many_arguments)]
fn insert_composite(
    &self,
    root_expr: Handle<crate::Expression>,
    root_type_id: spirv::Word,
    object_expr: Handle<crate::Expression>,
    selections: &[spirv::Word],
    type_arena: &UniqueArena<crate::Type>,
    expressions: &mut Arena<crate::Expression>,
    span: crate::Span,
) -> Result<Handle<crate::Expression>, Error> {
    // Base case: an empty index path means the whole value is replaced.
    let selection = match selections.first() {
        Some(&index) => index,
        None => return Ok(object_expr),
    };

    let root_span = expressions.get_span(root_expr);
    let root_lookup = self.lookup_type.lookup(root_type_id)?;

    // Determine how many components the composite has, and the SPIR-V type
    // id of the component we're descending into.
    let (count, child_type_id) = match type_arena[root_lookup.handle].inner {
        crate::TypeInner::Struct { ref members, .. } => {
            // Struct members may have distinct types, so the child's type
            // comes from the per-member lookup table.
            let child_member = self
                .lookup_member
                .get(&(root_lookup.handle, selection))
                .ok_or(Error::InvalidAccessType(root_type_id))?;
            (members.len(), child_member.type_id)
        }
        crate::TypeInner::Array { size, .. } => {
            let size = match size {
                crate::ArraySize::Constant(size) => size.get(),
                crate::ArraySize::Pending(_) => {
                    unreachable!();
                }
                // A runtime sized array is not a composite type
                crate::ArraySize::Dynamic => {
                    return Err(Error::InvalidAccessType(root_type_id))
                }
            };
            let child_type_id = root_lookup
                .base_id
                .ok_or(Error::InvalidAccessType(root_type_id))?;
            (size as usize, child_type_id)
        }
        crate::TypeInner::Vector { size, .. }
        | crate::TypeInner::Matrix { columns: size, .. } => {
            // Vectors and matrices are homogeneous; the child type is the
            // lookup's base type.
            let child_type_id = root_lookup
                .base_id
                .ok_or(Error::InvalidAccessType(root_type_id))?;
            (size as usize, child_type_id)
        }
        _ => return Err(Error::InvalidAccessType(root_type_id)),
    };

    // Extract every component of the original composite. The component on
    // the modified path gets the instruction's span; untouched ones keep
    // the root expression's span.
    let mut components = Vec::with_capacity(count);
    for index in 0..count as u32 {
        let expr = expressions.append(
            crate::Expression::AccessIndex {
                base: root_expr,
                index,
            },
            if index == selection { span } else { root_span },
        );
        components.push(expr);
    }
    // Recurse into the selected component with the rest of the index path.
    components[selection as usize] = self.insert_composite(
        components[selection as usize],
        child_type_id,
        object_expr,
        &selections[1..],
        type_arena,
        expressions,
        span,
    )?;

    // Reassemble the composite with the updated component.
    Ok(expressions.append(
        crate::Expression::Compose {
            ty: root_lookup.handle,
            components,
        },
        span,
    ))
}
/// Return the Naga [`Expression`] for `pointer_id`, and its referent [`Type`].
///
/// Return a [`Handle`] for a Naga [`Expression`] that holds the value of
/// the SPIR-V instruction `pointer_id`, along with the [`Type`] to which it
/// is a pointer.
///
/// This may entail spilling `pointer_id`'s value to a temporary:
/// see [`get_expr_handle`]'s documentation.
///
/// [`Expression`]: crate::Expression
/// [`Type`]: crate::Type
/// [`Handle`]: crate::Handle
/// [`get_expr_handle`]: Frontend::get_expr_handle
fn get_exp_and_base_ty_handles(
    &self,
    pointer_id: spirv::Word,
    ctx: &mut BlockContext,
    emitter: &mut crate::proc::Emitter,
    block: &mut crate::Block,
    body_idx: usize,
) -> Result<(Handle<crate::Expression>, Handle<crate::Type>), Error> {
    log::trace!("\t\t\tlooking up pointer expr {:?}", pointer_id);
    let lexp = self.lookup_expression.lookup(pointer_id)?;
    let p_lexp_handle = self.get_expr_handle(pointer_id, lexp, ctx, emitter, block, body_idx);
    let p_lexp_ty_id = lexp.type_id;

    log::trace!("\t\t\tlooking up pointer type {:?}", pointer_id);
    let p_ty = self.lookup_type.lookup(p_lexp_ty_id)?;
    // The lookup for a pointer records the pointee as its base type.
    let p_ty_base_id = p_ty.base_id.ok_or(Error::InvalidAccessType(p_lexp_ty_id))?;

    log::trace!(
        "\t\t\tlooking up pointer base type {:?} of {:?}",
        p_ty_base_id,
        p_ty
    );
    let p_base_ty = self.lookup_type.lookup(p_ty_base_id)?;
    Ok((p_lexp_handle, p_base_ty.handle))
}
/// Parse an atomic instruction that takes a value operand (e.g. the
/// `OpAtomic*` arithmetic/exchange family), producing an `AtomicResult`
/// expression and an `Atomic` statement.
#[allow(clippy::too_many_arguments)]
fn parse_atomic_expr_with_value(
    &mut self,
    inst: Instruction,
    emitter: &mut crate::proc::Emitter,
    ctx: &mut BlockContext,
    block: &mut crate::Block,
    block_id: spirv::Word,
    body_idx: usize,
    atomic_function: crate::AtomicFunction,
) -> Result<(), Error> {
    inst.expect(7)?;
    let start = self.data_offset;
    let result_type_id = self.next()?;
    let result_id = self.next()?;
    let pointer_id = self.next()?;
    let _scope_id = self.next()?;
    let _memory_semantics_id = self.next()?;
    let value_id = self.next()?;
    let span = self.span_from_with_op(start);

    let (p_lexp_handle, p_base_ty_handle) =
        self.get_exp_and_base_ty_handles(pointer_id, ctx, emitter, block, body_idx)?;

    log::trace!("\t\t\tlooking up value expr {value_id:?}");
    let v_lexp_handle = self.lookup_expression.lookup(value_id)?.handle;

    // Flush pending expressions: the atomic op itself is emitted as a statement.
    block.extend(emitter.finish(ctx.expressions));

    // The result placeholder; the `Atomic` statement below fills it in.
    let r_lexp_handle = ctx.expressions.append(
        crate::Expression::AtomicResult {
            ty: p_base_ty_handle,
            comparison: false,
        },
        span,
    );
    self.lookup_expression.insert(
        result_id,
        LookupExpression {
            handle: r_lexp_handle,
            type_id: result_type_id,
            block_id,
        },
    );
    emitter.start(ctx.expressions);

    // The statement performing the atomic operation itself.
    block.push(
        crate::Statement::Atomic {
            pointer: p_lexp_handle,
            fun: atomic_function,
            value: v_lexp_handle,
            result: Some(r_lexp_handle),
        },
        span,
    );

    // Store any associated global variables so we can upgrade their types later
    self.record_atomic_access(ctx, p_lexp_handle)?;
    Ok(())
}
/// Add the next SPIR-V block's contents to `block_ctx`.
///
/// Except for the function's entry block, `block_id` should be the label of
/// a block we've seen mentioned before, with an entry in
/// `block_ctx.body_for_label` to tell us which `Body` it contributes to.
fn next_block(&mut self, block_id: spirv::Word, ctx: &mut BlockContext) -> Result<(), Error> {
// Extend `body` with the correct form for a branch to `target`.
fn merger(body: &mut Body, target: &MergeBlockInformation) {
body.data.push(match *target {
MergeBlockInformation::LoopContinue => BodyFragment::Continue,
MergeBlockInformation::LoopMerge | MergeBlockInformation::SwitchMerge => {
BodyFragment::Break
}
// Finishing a selection merge means just falling off the end of
// the `accept` or `reject` block of the `If` statement.
MergeBlockInformation::SelectionMerge => return,
})
}
let mut emitter = crate::proc::Emitter::default();
emitter.start(ctx.expressions);
// Find the `Body` to which this block contributes.
//
// If this is some SPIR-V structured control flow construct's merge
// block, then `body_idx` will refer to the same `Body` as the header,
// so that we simply pick up accumulating the `Body` where the header
// left off. Each of the statements in a block dominates the next, so
// we're sure to encounter their SPIR-V blocks in order, ensuring that
// the `Body` will be assembled in the proper order.
//
// Note that, unlike every other kind of SPIR-V block, we don't know the
// function's first block's label in advance. Thus, we assume that if
// this block has no entry in `ctx.body_for_label`, it must be the
// function's first block. This always has body index zero.
let mut body_idx = *ctx.body_for_label.entry(block_id).or_default();
// The Naga IR block this call builds. This will end up as
// `ctx.blocks[&block_id]`, and `ctx.bodies[body_idx]` will refer to it
// via a `BodyFragment::BlockId`.
let mut block = crate::Block::new();
// Stores the merge block as defined by a `OpSelectionMerge` otherwise is `None`
//
// This is used in `OpSwitch` to promote the `MergeBlockInformation` from
// `SelectionMerge` to `SwitchMerge` to allow `Break`s this isn't desirable for
// `LoopMerge`s because otherwise `Continue`s wouldn't be allowed
let mut selection_merge_block = None;
macro_rules! get_expr_handle {
($id:expr, $lexp:expr) => {
self.get_expr_handle($id, $lexp, ctx, &mut emitter, &mut block, body_idx)
};
}
macro_rules! parse_expr_op {
($op:expr, BINARY) => {
self.parse_expr_binary_op(ctx, &mut emitter, &mut block, block_id, body_idx, $op)
};
($op:expr, SHIFT) => {
self.parse_expr_shift_op(ctx, &mut emitter, &mut block, block_id, body_idx, $op)
};
($op:expr, UNARY) => {
self.parse_expr_unary_op(ctx, &mut emitter, &mut block, block_id, body_idx, $op)
};
($axis:expr, $ctrl:expr, DERIVATIVE) => {
self.parse_expr_derivative(
ctx,
&mut emitter,
&mut block,
block_id,
body_idx,
($axis, $ctrl),
)
};
}
let terminator = loop {
use spirv::Op;
let start = self.data_offset;
let inst = self.next_inst()?;
let span = crate::Span::from(start..(start + 4 * (inst.wc as usize)));
log::debug!("\t\t{:?} [{}]", inst.op, inst.wc);
match inst.op {
Op::Line => {
inst.expect(4)?;
let _file_id = self.next()?;
let _row_id = self.next()?;
let _col_id = self.next()?;
}
Op::NoLine => inst.expect(1)?,
Op::Undef => {
inst.expect(3)?;
let type_id = self.next()?;
let id = self.next()?;
let type_lookup = self.lookup_type.lookup(type_id)?;
let ty = type_lookup.handle;
self.lookup_expression.insert(
id,
LookupExpression {
handle: ctx
.expressions
.append(crate::Expression::ZeroValue(ty), span),
type_id,
block_id,
},
);
}
Op::Variable => {
inst.expect_at_least(4)?;
block.extend(emitter.finish(ctx.expressions));
let result_type_id = self.next()?;
let result_id = self.next()?;
let _storage_class = self.next()?;
let init = if inst.wc > 4 {
inst.expect(5)?;
let init_id = self.next()?;
let lconst = self.lookup_constant.lookup(init_id)?;
Some(ctx.expressions.append(lconst.inner.to_expr(), span))
} else {
None
};
let name = self
.future_decor
.remove(&result_id)
.and_then(|decor| decor.name);
if let Some(ref name) = name {
log::debug!("\t\t\tid={} name={}", result_id, name);
}
let lookup_ty = self.lookup_type.lookup(result_type_id)?;
let var_handle = ctx.local_arena.append(
crate::LocalVariable {
name,
ty: match ctx.module.types[lookup_ty.handle].inner {
crate::TypeInner::Pointer { base, .. } => base,
_ => lookup_ty.handle,
},
init,
},
span,
);
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx
.expressions
.append(crate::Expression::LocalVariable(var_handle), span),
type_id: result_type_id,
block_id,
},
);
emitter.start(ctx.expressions);
}
Op::Phi => {
inst.expect_at_least(3)?;
block.extend(emitter.finish(ctx.expressions));
let result_type_id = self.next()?;
let result_id = self.next()?;
let name = format!("phi_{result_id}");
let local = ctx.local_arena.append(
crate::LocalVariable {
name: Some(name),
ty: self.lookup_type.lookup(result_type_id)?.handle,
init: None,
},
self.span_from(start),
);
let pointer = ctx
.expressions
.append(crate::Expression::LocalVariable(local), span);
let in_count = (inst.wc - 3) / 2;
let mut phi = PhiExpression {
local,
expressions: Vec::with_capacity(in_count as usize),
};
for _ in 0..in_count {
let expr = self.next()?;
let block = self.next()?;
phi.expressions.push((expr, block));
}
ctx.phis.push(phi);
emitter.start(ctx.expressions);
// Associate the lookup with an actual value, which is emitted
// into the current block.
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx
.expressions
.append(crate::Expression::Load { pointer }, span),
type_id: result_type_id,
block_id,
},
);
}
Op::AccessChain | Op::InBoundsAccessChain => {
struct AccessExpression {
base_handle: Handle<crate::Expression>,
type_id: spirv::Word,
load_override: Option<LookupLoadOverride>,
}
inst.expect_at_least(4)?;
let result_type_id = self.next()?;
let result_id = self.next()?;
let base_id = self.next()?;
log::trace!("\t\t\tlooking up expr {:?}", base_id);
let mut acex = {
let lexp = self.lookup_expression.lookup(base_id)?;
let lty = self.lookup_type.lookup(lexp.type_id)?;
// HACK `OpAccessChain` and `OpInBoundsAccessChain`
// require for the result type to be a pointer, but if
// we're given a pointer to an image / sampler, it will
// be *already* dereferenced, since we do that early
// during `parse_type_pointer()`.
//
// This can happen only through `BindingArray`, since
// that's the only case where one can obtain a pointer
// to an image / sampler, and so let's match on that:
let dereference = match ctx.module.types[lty.handle].inner {
crate::TypeInner::BindingArray { .. } => false,
_ => true,
};
let type_id = if dereference {
lty.base_id.ok_or(Error::InvalidAccessType(lexp.type_id))?
} else {
lexp.type_id
};
AccessExpression {
base_handle: get_expr_handle!(base_id, lexp),
type_id,
load_override: self.lookup_load_override.get(&base_id).cloned(),
}
};
for _ in 4..inst.wc {
let access_id = self.next()?;
log::trace!("\t\t\tlooking up index expr {:?}", access_id);
let index_expr = self.lookup_expression.lookup(access_id)?.clone();
let index_expr_handle = get_expr_handle!(access_id, &index_expr);
let index_expr_data = &ctx.expressions[index_expr.handle];
let index_maybe = match *index_expr_data {
crate::Expression::Constant(const_handle) => Some(
ctx.gctx()
.eval_expr_to_u32(ctx.module.constants[const_handle].init)
.map_err(|_| {
Error::InvalidAccess(crate::Expression::Constant(
const_handle,
))
})?,
),
_ => None,
};
log::trace!("\t\t\tlooking up type {:?}", acex.type_id);
let type_lookup = self.lookup_type.lookup(acex.type_id)?;
let ty = &ctx.module.types[type_lookup.handle];
acex = match ty.inner {
// can only index a struct with a constant
crate::TypeInner::Struct { ref members, .. } => {
let index = index_maybe
.ok_or_else(|| Error::InvalidAccess(index_expr_data.clone()))?;
let lookup_member = self
.lookup_member
.get(&(type_lookup.handle, index))
.ok_or(Error::InvalidAccessType(acex.type_id))?;
let base_handle = ctx.expressions.append(
crate::Expression::AccessIndex {
base: acex.base_handle,
index,
},
span,
);
if let Some(crate::Binding::BuiltIn(built_in)) =
members[index as usize].binding
{
self.gl_per_vertex_builtin_access.insert(built_in);
}
AccessExpression {
base_handle,
type_id: lookup_member.type_id,
load_override: if lookup_member.row_major {
debug_assert!(acex.load_override.is_none());
let sub_type_lookup =
self.lookup_type.lookup(lookup_member.type_id)?;
Some(match ctx.module.types[sub_type_lookup.handle].inner {
// load it transposed, to match column major expectations
crate::TypeInner::Matrix { .. } => {
let loaded = ctx.expressions.append(
crate::Expression::Load {
--> --------------------
--> maximum size reached
--> --------------------