//! Backend writer for MSL (Metal Shading Language).
use super::{sampler as sm, Error, LocationMode, Options, PipelineOptions, TranslationInfo};
use crate::{
    arena::{Handle, HandleSet},
    back::{self, Baked},
    proc::index,
    proc::{self, NameKey, TypeResolution},
    valid, FastHashMap, FastHashSet,
};
#[cfg(test)]
use std::ptr;
use std::{
    fmt::{Display, Error as FmtError, Formatter, Write},
    iter,
};
/// Shorthand result used internally by the backend
type BackendResult = Result<(), Error>;
/// Namespace prefix for core MSL types and functions.
const NAMESPACE: &str = "metal";
// The name of the array member of the Metal struct types we generate to
// represent Naga `Array` types. See the comments in `Writer::write_type_defs`
// for details.
const WRAPPED_ARRAY_FIELD: &str = "inner";
// This is a hack: we need to pass a pointer to an atomic,
// but generally the backend isn't putting "&" in front of every pointer.
// Some more general handling of pointers is needed to be implemented here.
const ATOMIC_REFERENCE: &str = "&";
/// Namespace prefix for MSL ray-tracing types.
const RT_NAMESPACE: &str = "metal::raytracing";
/// Name of the generated struct representing Naga's `RayQuery` type.
const RAY_QUERY_TYPE: &str = "_RayQuery";
/// Field of the ray-query struct holding the MSL intersector object.
const RAY_QUERY_FIELD_INTERSECTOR: &str = "intersector";
/// Field of the ray-query struct holding the latest intersection result.
const RAY_QUERY_FIELD_INTERSECTION: &str = "intersection";
/// Field of the ray-query struct tracking whether a result is available.
const RAY_QUERY_FIELD_READY: &str = "ready";
/// Name of the emitted helper that converts intersection types.
const RAY_QUERY_FUN_MAP_INTERSECTION: &str = "_map_intersection_type";
/// Name of the emitted atomic compare-exchange helper function.
pub(crate) const ATOMIC_COMP_EXCH_FUNCTION: &str = "naga_atomic_compare_exchange_weak_explicit";
/// Name of the emitted `modf` helper function.
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
/// Name of the emitted `frexp` helper function.
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
/// For some reason, Metal does not let you have `metal::texture<..>*` as a buffer argument.
/// However, if you put that texture inside a struct, everything is totally fine. This
/// baffles me to no end.
///
/// As such, we wrap all argument buffers in a struct that has a single generic `<T>` field.
/// This allows `NagaArgumentBufferWrapper<metal::texture<..>>*` to work. The astute among
/// you have noticed that this should be exactly the same to the compiler, and you're correct.
pub(crate) const ARGUMENT_BUFFER_WRAPPER_STRUCT: &str = "NagaArgumentBufferWrapper";
/// Write the Metal name for a Naga numeric type: scalar, vector, or matrix.
///
/// The `sizes` slice determines whether this function writes a
/// scalar, vector, or matrix type:
///
/// - An empty slice produces a scalar type.
/// - A one-element slice produces a vector type.
/// - A two element slice `[ROWS COLUMNS]` produces a matrix of the given size.
fn put_numeric_type(
    out: &mut impl Write,
    scalar: crate::Scalar,
    sizes: &[crate::VectorSize],
) -> Result<(), FmtError> {
    let base = scalar.to_msl_name();
    match *sizes {
        // Scalar: just the element type, no namespace prefix.
        [] => write!(out, "{base}"),
        // Vector: namespace-qualified element name plus component count.
        [rows] => write!(out, "{}::{}{}", NAMESPACE, base, back::vector_size_str(rows)),
        // Matrix: MSL spells these as `COLUMNSxROWS`.
        [rows, columns] => write!(
            out,
            "{}::{}{}x{}",
            NAMESPACE,
            base,
            back::vector_size_str(columns),
            back::vector_size_str(rows),
        ),
        // Longer slices are never passed by callers; write nothing.
        _ => Ok(()),
    }
}
const fn scalar_is_int(scalar: crate::Scalar) -> bool {
use crate::ScalarKind::*;
match scalar.kind {
Sint | Uint | AbstractInt | Bool => true,
Float | AbstractFloat => false,
}
}
/// Prefix for cached clamped level-of-detail values for `ImageLoad` expressions.
const CLAMPED_LOD_LOAD_PREFIX: &str = "clamped_lod_e";
/// Wrapper for identifier names for clamped level-of-detail values
///
/// Values of this type implement [`std::fmt::Display`], formatting as
/// the name of the variable used to hold the cached clamped
/// level-of-detail value for an `ImageLoad` expression.
struct ClampedLod(Handle<crate::Expression>);
impl Display for ClampedLod {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Renders as the prefix followed by the expression's index.
        self.0.write_prefixed(f, CLAMPED_LOD_LOAD_PREFIX)
    }
}
/// Wrapper for generating `struct _mslBufferSizes` member names for
/// runtime-sized array lengths.
///
/// On Metal, `wgpu_hal` passes the element counts for all runtime-sized arrays
/// as an argument to the entry point. This argument's type in the MSL is
/// `struct _mslBufferSizes`, a Naga-synthesized struct with a `uint` member for
/// each global variable containing a runtime-sized array.
///
/// If `global` is a [`Handle`] for a [`GlobalVariable`] that contains a
/// runtime-sized array, then the value `ArraySize(global)` implements
/// [`std::fmt::Display`], formatting as the name of the struct member carrying
/// the number of elements in that runtime-sized array.
///
/// [`GlobalVariable`]: crate::GlobalVariable
struct ArraySizeMember(Handle<crate::GlobalVariable>);
impl Display for ArraySizeMember {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Renders as `size` followed by the global variable's index.
        self.0.write_prefixed(f, "size")
    }
}
/// Everything needed to write a Naga type as MSL.
///
/// The `Display` implementation does the actual formatting.
struct TypeContext<'a> {
    /// The type to be written.
    handle: Handle<crate::Type>,
    gctx: proc::GlobalCtx<'a>,
    /// Pre-assigned names for module items, used when a type has an alias.
    names: &'a FastHashMap<NameKey, String>,
    /// Storage access flags; selects the access qualifier for storage textures.
    access: crate::StorageAccess,
    binding: Option<&'a super::ResolvedBinding>,
    /// When true, spell out the full type even if it has a named alias
    /// (used at the alias's own definition site).
    first_time: bool,
}
impl TypeContext<'_> {
    /// The scalar component of this type, if any.
    fn scalar(&self) -> Option<crate::Scalar> {
        let ty = &self.gctx.types[self.handle];
        ty.inner.scalar()
    }
    /// Component count for a vertex input: 1 for scalars, N for vectors.
    ///
    /// Callers must only invoke this for scalar or vector types.
    fn vertex_input_dimension(&self) -> u32 {
        let ty = &self.gctx.types[self.handle];
        match ty.inner {
            crate::TypeInner::Scalar(_) => 1,
            crate::TypeInner::Vector { size, .. } => size as u32,
            _ => unreachable!(),
        }
    }
}
impl Display for TypeContext<'_> {
    fn fmt(&self, out: &mut Formatter<'_>) -> Result<(), FmtError> {
        let ty = &self.gctx.types[self.handle];
        // Aliased types are written by name, except at the alias's own
        // definition site (`first_time`), where the full type is spelled out.
        if ty.needs_alias() && !self.first_time {
            let name = &self.names[&NameKey::Type(self.handle)];
            return write!(out, "{name}");
        }
        match ty.inner {
            crate::TypeInner::Scalar(scalar) => put_numeric_type(out, scalar, &[]),
            crate::TypeInner::Atomic(scalar) => {
                write!(out, "{}::atomic_{}", NAMESPACE, scalar.to_msl_name())
            }
            crate::TypeInner::Vector { size, scalar } => put_numeric_type(out, scalar, &[size]),
            crate::TypeInner::Matrix { columns, rows, .. } => {
                // NOTE(review): the matrix element scalar is ignored and `f32`
                // is always written — presumably other widths never reach this
                // point; confirm against validation.
                put_numeric_type(out, crate::Scalar::F32, &[rows, columns])
            }
            crate::TypeInner::Pointer { base, space } => {
                let sub = Self {
                    handle: base,
                    first_time: false,
                    ..*self
                };
                let space_name = match space.to_msl_name() {
                    Some(name) => name,
                    // No MSL spelling for this address space; write nothing.
                    None => return Ok(()),
                };
                write!(out, "{space_name} {sub}&")
            }
            crate::TypeInner::ValuePointer {
                size,
                scalar,
                space,
            } => {
                match space.to_msl_name() {
                    Some(name) => write!(out, "{name} ")?,
                    // No MSL spelling for this address space; write nothing.
                    None => return Ok(()),
                };
                match size {
                    Some(rows) => put_numeric_type(out, scalar, &[rows])?,
                    None => put_numeric_type(out, scalar, &[])?,
                };
                write!(out, "&")
            }
            crate::TypeInner::Array { base, .. } => {
                let sub = Self {
                    handle: base,
                    first_time: false,
                    ..*self
                };
                // Array lengths go at the end of the type definition,
                // so just print the element type here.
                write!(out, "{sub}")
            }
            // Structs always need an alias, so the shortcut above handles them.
            crate::TypeInner::Struct { .. } => unreachable!(),
            crate::TypeInner::Image {
                dim,
                arrayed,
                class,
            } => {
                let dim_str = match dim {
                    crate::ImageDimension::D1 => "1d",
                    crate::ImageDimension::D2 => "2d",
                    crate::ImageDimension::D3 => "3d",
                    crate::ImageDimension::Cube => "cube",
                };
                let (texture_str, msaa_str, scalar, access) = match class {
                    crate::ImageClass::Sampled { kind, multi } => {
                        // Multisampled textures are read, not sampled.
                        let (msaa_str, access) = if multi {
                            ("_ms", "read")
                        } else {
                            ("", "sample")
                        };
                        let scalar = crate::Scalar { kind, width: 4 };
                        ("texture", msaa_str, scalar, access)
                    }
                    crate::ImageClass::Depth { multi } => {
                        let (msaa_str, access) = if multi {
                            ("_ms", "read")
                        } else {
                            ("", "sample")
                        };
                        let scalar = crate::Scalar {
                            kind: crate::ScalarKind::Float,
                            width: 4,
                        };
                        ("depth", msaa_str, scalar, access)
                    }
                    crate::ImageClass::Storage { format, .. } => {
                        // Choose the access qualifier from the global's
                        // declared storage access flags.
                        let access = if self
                            .access
                            .contains(crate::StorageAccess::LOAD | crate::StorageAccess::STORE)
                        {
                            "read_write"
                        } else if self.access.contains(crate::StorageAccess::STORE) {
                            "write"
                        } else if self.access.contains(crate::StorageAccess::LOAD) {
                            "read"
                        } else {
                            // A storage image with no declared access would have
                            // been rejected by validation.
                            log::warn!(
                                "Storage access for {:?} (name '{}'): {:?}",
                                self.handle,
                                ty.name.as_deref().unwrap_or_default(),
                                self.access
                            );
                            unreachable!("module is not valid");
                        };
                        ("texture", "", format.into(), access)
                    }
                };
                let base_name = scalar.to_msl_name();
                let array_str = if arrayed { "_array" } else { "" };
                write!(
                    out,
                    "{NAMESPACE}::{texture_str}{dim_str}{msaa_str}{array_str}<{base_name}, {NAMESPACE}::access::{access}>",
                )
            }
            // MSL uses a single sampler type; the comparison flag is not
            // part of the type's spelling.
            crate::TypeInner::Sampler { comparison: _ } => {
                write!(out, "{NAMESPACE}::sampler")
            }
            crate::TypeInner::AccelerationStructure => {
                write!(out, "{RT_NAMESPACE}::instance_acceleration_structure")
            }
            crate::TypeInner::RayQuery => {
                write!(out, "{RAY_QUERY_TYPE}")
            }
            crate::TypeInner::BindingArray { base, .. } => {
                // See `ARGUMENT_BUFFER_WRAPPER_STRUCT` for why the wrapper
                // struct is needed here.
                let base_tyname = Self {
                    handle: base,
                    first_time: false,
                    ..*self
                };
                write!(
                    out,
                    "constant {ARGUMENT_BUFFER_WRAPPER_STRUCT}<{base_tyname}>*"
                )
            }
        }
    }
}
/// A global variable paired with everything needed to write its declaration.
struct TypedGlobalVariable<'a> {
    module: &'a crate::Module,
    names: &'a FastHashMap<NameKey, String>,
    handle: Handle<crate::GlobalVariable>,
    /// How the functions use this global (read/write), from validation.
    usage: valid::GlobalUse,
    binding: Option<&'a super::ResolvedBinding>,
    /// When true, declare the variable as a reference (`&`) in its space.
    reference: bool,
}
impl TypedGlobalVariable<'_> {
    /// Write the MSL declaration: `[SPACE] TYPE [const][&] NAME`.
    fn try_fmt<W: Write>(&self, out: &mut W) -> BackendResult {
        let var = &self.module.global_variables[self.handle];
        let name = &self.names[&NameKey::GlobalVariable(self.handle)];
        // Determine the effective storage access: directly from a `Storage`
        // address space, or from the image class for (binding arrays of)
        // storage images; anything else gets the default flags.
        let storage_access = match var.space {
            crate::AddressSpace::Storage { access } => access,
            _ => match self.module.types[var.ty].inner {
                crate::TypeInner::Image {
                    class: crate::ImageClass::Storage { access, .. },
                    ..
                } => access,
                crate::TypeInner::BindingArray { base, .. } => {
                    match self.module.types[base].inner {
                        crate::TypeInner::Image {
                            class: crate::ImageClass::Storage { access, .. },
                            ..
                        } => access,
                        _ => crate::StorageAccess::default(),
                    }
                }
                _ => crate::StorageAccess::default(),
            },
        };
        let ty_name = TypeContext {
            handle: var.ty,
            gctx: self.module.to_ctx(),
            names: self.names,
            access: storage_access,
            binding: self.binding,
            first_time: false,
        };
        let (space, access, reference) = match var.space.to_msl_name() {
            Some(space) if self.reference => {
                // A global the functions never write can be declared `const`.
                let access = if var.space.needs_access_qualifier()
                    && !self.usage.intersects(valid::GlobalUse::WRITE)
                {
                    "const"
                } else {
                    ""
                };
                (space, access, "&")
            }
            _ => ("", "", ""),
        };
        Ok(write!(
            out,
            "{}{}{}{}{}{} {}",
            space,
            if space.is_empty() { "" } else { " " },
            ty_name,
            if access.is_empty() { "" } else { " " },
            access,
            reference,
            name,
        )?)
    }
}
/// Translator from Naga IR to MSL source text.
pub struct Writer<W> {
    /// Destination for the generated MSL.
    out: W,
    /// Names assigned to module items (types, globals, functions, ...).
    names: FastHashMap<NameKey, String>,
    /// Expressions that carry a user-provided name.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Generates unique identifiers for emitted declarations.
    namer: proc::Namer,
    #[cfg(test)]
    put_expression_stack_pointers: FastHashSet<*const ()>,
    #[cfg(test)]
    put_block_stack_pointers: FastHashSet<*const ()>,
    /// Set of (struct type, struct field index) denoting which fields require
    /// padding inserted **before** them (i.e. between fields at index - 1 and index)
    struct_member_pads: FastHashSet<(Handle<crate::Type>, u32)>,
    /// Name of the force-bounded-loop macro.
    ///
    /// See `emit_force_bounded_loop_macro` for details.
    ///
    /// Empty until the macro has been emitted.
    force_bounded_loop_macro_name: String,
}
impl crate::Scalar {
    /// The MSL spelling of this scalar type.
    ///
    /// Panics (via `unreachable!`) on abstract kinds and on integer widths
    /// other than 4 and 8 bytes.
    fn to_msl_name(self) -> &'static str {
        use crate::ScalarKind as Sk;
        match (self.kind, self.width) {
            (Sk::Float, _) => "float",
            (Sk::Sint, 4) => "int",
            (Sk::Uint, 4) => "uint",
            (Sk::Sint, 8) => "long",
            (Sk::Uint, 8) => "ulong",
            (Sk::Bool, _) => "bool",
            (Sk::AbstractInt | Sk::AbstractFloat, _) => {
                unreachable!("Found Abstract scalar kind")
            }
            _ => unreachable!("Unsupported scalar kind: {:?}", self),
        }
    }
}
/// The comma separator to place between list items: `","` when another
/// item follows, `""` otherwise.
const fn separate(need_separator: bool) -> &'static str {
    match need_separator {
        true => ",",
        false => "",
    }
}
/// Decide whether struct member `index` must be stored as a packed vector.
///
/// Returns the member's scalar type when it is a 3-component vector of
/// 4-byte scalars packed tightly against the next member (or the end of the
/// struct); such members need MSL's `packed_*` representation. Otherwise
/// returns `None`.
fn should_pack_struct_member(
    members: &[crate::StructMember],
    span: u32,
    index: usize,
    module: &crate::Module,
) -> Option<crate::Scalar> {
    let member = &members[index];
    let ty_inner = &module.types[member.ty].inner;
    // Offset just past this member's data.
    let end_offset = member.offset + ty_inner.size(module.to_ctx());
    // Where the next member begins, or the struct's total span for the last one.
    let next_offset = members.get(index + 1).map_or(span, |next| next.offset);
    if next_offset != end_offset {
        // There is padding after this member, so the natural (aligned)
        // vector layout fits; no packing required.
        return None;
    }
    match *ty_inner {
        crate::TypeInner::Vector {
            size: crate::VectorSize::Tri,
            scalar: scalar @ crate::Scalar { width: 4, .. },
        } => Some(scalar),
        _ => None,
    }
}
/// Whether `ty` contains a runtime-sized array whose length must be passed in.
///
/// True for a dynamically sized array itself, and for a struct whose final
/// member is one.
fn needs_array_length(ty: Handle<crate::Type>, arena: &crate::UniqueArena<crate::Type>) -> bool {
    match arena[ty].inner {
        crate::TypeInner::Struct { ref members, .. } => match members.last() {
            Some(member) => matches!(
                arena[member.ty].inner,
                crate::TypeInner::Array {
                    size: crate::ArraySize::Dynamic,
                    ..
                }
            ),
            None => false,
        },
        crate::TypeInner::Array {
            size: crate::ArraySize::Dynamic,
            ..
        } => true,
        _ => false,
    }
}
impl crate::AddressSpace {
    /// Returns true if global variables in this address space are
    /// passed in function arguments. These arguments need to be
    /// passed through any functions called from the entry point.
    const fn needs_pass_through(&self) -> bool {
        match *self {
            // Function-local variables live inside the function itself.
            Self::Function => false,
            Self::Uniform
            | Self::Storage { .. }
            | Self::Private
            | Self::WorkGroup
            | Self::PushConstant
            | Self::Handle => true,
        }
    }
    /// Returns true if the address space may need a "const" qualifier.
    const fn needs_access_qualifier(&self) -> bool {
        match *self {
            //Note: we are ignoring the storage access here, and instead
            // rely on the actual use of a global by functions. This means we
            // may end up with "const" even if the binding is read-write,
            // and that should be OK.
            Self::Storage { .. } => true,
            // These should always be read-write.
            Self::Private | Self::WorkGroup => false,
            // These translate to `constant` address space, no need for qualifiers.
            Self::Uniform | Self::PushConstant => false,
            // Not applicable.
            Self::Handle | Self::Function => false,
        }
    }
    /// The MSL address-space keyword, or `None` when there is no spelling.
    const fn to_msl_name(self) -> Option<&'static str> {
        match self {
            Self::Uniform | Self::PushConstant => Some("constant"),
            Self::Storage { .. } => Some("device"),
            Self::Private | Self::Function => Some("thread"),
            Self::WorkGroup => Some("threadgroup"),
            Self::Handle => None,
        }
    }
}
impl crate::Type {
    // Returns `true` if we need to emit an alias for this type.
    const fn needs_alias(&self) -> bool {
        use crate::TypeInner as Ti;
        match self.inner {
            // composite types are better to be aliased, regardless of the name
            Ti::Struct { .. } | Ti::Array { .. } => true,
            // value types are concise enough, we only alias them if they are named
            Ti::Scalar(_)
            | Ti::Vector { .. }
            | Ti::Matrix { .. }
            | Ti::Atomic(_)
            | Ti::Pointer { .. }
            | Ti::ValuePointer { .. } => self.name.is_some(),
            // handle types may be different, depending on the global var access, so we always inline them
            Ti::Image { .. }
            | Ti::Sampler { .. }
            | Ti::AccelerationStructure
            | Ti::RayQuery
            | Ti::BindingArray { .. } => false,
        }
    }
}
/// Where a function being written came from: the module's function arena,
/// or its entry-point list.
enum FunctionOrigin {
    Handle(Handle<crate::Function>),
    EntryPoint(proc::EntryPointIndex),
}
/// A level of detail argument.
///
/// When [`BoundsCheckPolicy::Restrict`] applies to an [`ImageLoad`] access, we
/// save the clamped level of detail in a temporary variable whose name is based
/// on the handle of the `ImageLoad` expression. But for other policies, we just
/// use the expression directly.
///
/// [`BoundsCheckPolicy::Restrict`]: index::BoundsCheckPolicy::Restrict
/// [`ImageLoad`]: crate::Expression::ImageLoad
#[derive(Clone, Copy)]
enum LevelOfDetail {
    /// Use the expression's value directly.
    Direct(Handle<crate::Expression>),
    /// Use the cached clamped variable named after this `ImageLoad` expression.
    Restricted(Handle<crate::Expression>),
}
/// Values needed to select a particular texel for [`ImageLoad`] and [`ImageStore`].
///
/// When this is used in code paths unconcerned with the `Restrict` bounds check
/// policy, the `LevelOfDetail` enum introduces an unneeded match, since `level`
/// will always be either `None` or `Some(Direct(_))`. But this turns out not to
/// be too awkward. If that changes, we can revisit.
///
/// [`ImageLoad`]: crate::Expression::ImageLoad
/// [`ImageStore`]: crate::Statement::ImageStore
struct TexelAddress {
    coordinate: Handle<crate::Expression>,
    /// Array layer, for arrayed images.
    array_index: Option<Handle<crate::Expression>>,
    /// Sample index, for multisampled images.
    sample: Option<Handle<crate::Expression>>,
    /// Mip level; `None` for images without mipmaps.
    level: Option<LevelOfDetail>,
}
/// State shared while writing the expressions of a single function.
struct ExpressionContext<'a> {
    /// The function whose expressions are being written.
    function: &'a crate::Function,
    /// Whether `function` is a plain function or an entry point.
    origin: FunctionOrigin,
    /// Validator analysis for `function` (resolved types, usage).
    info: &'a valid::FunctionInfo,
    module: &'a crate::Module,
    mod_info: &'a valid::ModuleInfo,
    pipeline_options: &'a PipelineOptions,
    /// Target MSL language version.
    lang_version: (u8, u8),
    /// Bounds-check policies chosen by the caller.
    policies: index::BoundsCheckPolicies,
    /// The set of expressions used as indices in `ReadZeroSkipWrite`-policy
    /// accesses. These may need to be cached in temporary variables. See
    /// `index::find_checked_indexes` for details.
    guarded_indices: HandleSet<crate::Expression>,
    /// See [`Writer::emit_force_bounded_loop_macro`] for details.
    force_loop_bounding: bool,
}
impl<'a> ExpressionContext<'a> {
    /// Look up the resolved Naga type of expression `handle`.
    fn resolve_type(&self, handle: Handle<crate::Expression>) -> &'a crate::TypeInner {
        self.info[handle].ty.inner_with(&self.module.types)
    }
    /// Return true if calls to `image`'s `read` and `write` methods should supply a level of detail.
    ///
    /// Only mipmapped images need to specify a level of detail. Since 1D
    /// textures cannot have mipmaps, MSL requires that the level argument to
    /// texture1d queries and accesses must be a constexpr 0. It's easiest
    /// just to omit the level entirely for 1D textures.
    fn image_needs_lod(&self, image: Handle<crate::Expression>) -> bool {
        match *self.resolve_type(image) {
            crate::TypeInner::Image { dim, class, .. } => {
                class.is_mipmapped() && dim != crate::ImageDimension::D1
            }
            _ => false,
        }
    }
    /// Pick the bounds-check policy that applies to accesses through `pointer`.
    fn choose_bounds_check_policy(
        &self,
        pointer: Handle<crate::Expression>,
    ) -> index::BoundsCheckPolicy {
        self.policies
            .choose_policy(pointer, &self.module.types, self.info)
    }
    /// Determine whether `base[index]` needs a runtime bounds check, and if
    /// so, against what length.
    fn access_needs_check(
        &self,
        base: Handle<crate::Expression>,
        index: index::GuardedIndex,
    ) -> Option<index::IndexableLength> {
        index::access_needs_check(
            base,
            index,
            self.module,
            &self.function.expressions,
            self.info,
        )
    }
    /// If `expr_handle` is an `AccessIndex` into a struct member stored as a
    /// packed vector, return that member's scalar type.
    fn get_packed_vec_kind(&self, expr_handle: Handle<crate::Expression>) -> Option<crate::Scalar> {
        let crate::Expression::AccessIndex { base, index } = self.function.expressions[expr_handle]
        else {
            return None;
        };
        // Look through a pointer to the underlying composite type.
        let mut ty = self.resolve_type(base);
        if let crate::TypeInner::Pointer { base, .. } = *ty {
            ty = &self.module.types[base].inner;
        }
        match *ty {
            crate::TypeInner::Struct {
                ref members, span, ..
            } => should_pack_struct_member(members, span, index as usize, self.module),
            _ => None,
        }
    }
}
/// State shared while writing the statements of a single function.
struct StatementContext<'a> {
    expression: ExpressionContext<'a>,
    // NOTE(review): presumably the name of the generated output struct for an
    // entry point, when one exists — confirm at its use site (not in view).
    result_struct: Option<&'a str>,
}
impl<W: Write> Writer<W> {
/// Creates a new `Writer` instance.
pub fn new(out: W) -> Self {
    Writer {
        out,
        names: FastHashMap::default(),
        named_expressions: Default::default(),
        need_bake_expressions: Default::default(),
        namer: proc::Namer::default(),
        #[cfg(test)]
        put_expression_stack_pointers: Default::default(),
        #[cfg(test)]
        put_block_stack_pointers: Default::default(),
        struct_member_pads: FastHashSet::default(),
        // Empty means the bounded-loop macro has not been emitted yet.
        force_bounded_loop_macro_name: String::default(),
    }
}
/// Finishes writing and returns the output.
///
/// Consumes the writer; call once after all code has been written.
// See https://github.com/rust-lang/rust-clippy/issues/4979.
#[allow(clippy::missing_const_for_fn)]
pub fn finish(self) -> W {
    self.out
}
/// Define a macro to invoke at the bottom of each loop body, to
/// defeat MSL infinite loop reasoning.
///
/// If we haven't done so already, emit the definition of a preprocessor
/// macro to be invoked at the end of each loop body in the generated MSL,
/// to ensure that the MSL compiler's optimizations do not remove bounds
/// checks.
///
/// Only the first call to this function for a given module actually causes
/// the macro definition to be written. Subsequent loops can simply use the
/// prior macro definition, since macros aren't block-scoped.
///
/// # What is this trying to solve?
///
/// In Metal Shading Language, an infinite loop has undefined behavior.
/// (This rule is inherited from C++14.) This means that, if the MSL
/// compiler determines that a given loop will never exit, it may assume
/// that it is never reached. It may thus assume that any conditions
/// sufficient to cause the loop to be reached must be false. Like many
/// optimizing compilers, MSL uses this kind of analysis to establish limits
/// on the range of values variables involved in those conditions might
/// hold.
///
/// For example, suppose the MSL compiler sees the code:
///
/// ```ignore
/// if (i >= 10) {
/// while (true) { }
/// }
/// ```
///
/// It will recognize that the `while` loop will never terminate, conclude
/// that it must be unreachable, and thus infer that, if this code is
/// reached, then `i < 10` at that point.
///
/// Now suppose that, at some point where `i` has the same value as above,
/// the compiler sees the code:
///
/// ```ignore
/// if (i < 10) {
/// a[i] = 1;
/// }
/// ```
///
/// Because the compiler is confident that `i < 10`, it will make the
/// assignment to `a[i]` unconditional, rewriting this code as, simply:
///
/// ```ignore
/// a[i] = 1;
/// ```
///
/// If that `if` condition was injected by Naga to implement a bounds check,
/// the MSL compiler's optimizations could allow out-of-bounds array
/// accesses to occur.
///
/// Naga cannot feasibly anticipate whether the MSL compiler will determine
/// that a loop is infinite, so an attacker could craft a Naga module
/// containing an infinite loop protected by conditions that cause the Metal
/// compiler to remove bounds checks that Naga injected elsewhere in the
/// function.
///
/// This rewrite could occur even if the conditional assignment appears
/// *before* the `while` loop, as long as `i < 10` by the time the loop is
/// reached. This would allow the attacker to save the results of
/// unauthorized reads somewhere accessible before entering the infinite
/// loop. But even worse, the MSL compiler has been observed to simply
/// delete the infinite loop entirely, so that even code dominated by the
/// loop becomes reachable. This would make the attack even more flexible,
/// since shaders that would appear to never terminate would actually exit
/// nicely, after having stolen data from elsewhere in the GPU address
/// space.
///
/// To avoid UB, Naga must persuade the MSL compiler that no loop Naga
/// generates is infinite. One approach would be to add inline assembly to
/// each loop that is annotated as potentially branching out of the loop,
/// but which in fact generates no instructions. Unfortunately, inline
/// assembly is not handled correctly by some Metal device drivers.
///
/// Instead, we add the following code to the bottom of every loop:
///
/// ```ignore
/// if (volatile bool unpredictable = false; unpredictable)
/// break;
/// ```
///
/// Although the `if` condition will always be false in any real execution,
/// the `volatile` qualifier prevents the compiler from assuming this. Thus,
/// it must assume that the `break` might be reached, and hence that the
/// loop is not unbounded. This prevents the range analysis impact described
/// above.
///
/// Unfortunately, what makes this a kludge, not a hack, is that this
/// solution leaves the GPU executing a pointless conditional branch, at
/// runtime, in every iteration of the loop. There's no part of the system
/// that has a global enough view to be sure that `unpredictable` is true,
/// and remove it from the code. Adding the branch also affects
/// optimization: for example, it's impossible to unroll this loop. This
/// transformation has been observed to significantly hurt performance.
///
/// To make our output a bit more legible, we pull the condition out into a
/// preprocessor macro defined at the top of the module.
///
/// This approach is also used by Chromium WebGPU's Dawn shader compiler:
/// <https://dawn.googlesource.com/dawn/+/a37557db581c2b60fb1cd2c01abdb232927dd961/src/tint/lang/msl/writer/printer/printer.cc#222>
fn emit_force_bounded_loop_macro(&mut self) -> BackendResult {
    // A non-empty name means the macro was already emitted for this module.
    if !self.force_bounded_loop_macro_name.is_empty() {
        return Ok(());
    }
    // Namer call order matters for reproducible generated names.
    let macro_name = self.namer.call("LOOP_IS_BOUNDED");
    let flag_name = self.namer.call("unpredictable_break_from_loop");
    writeln!(
        self.out,
        "#define {macro_name} {{ volatile bool {flag_name} = false; if ({flag_name}) break; }}",
    )?;
    self.force_bounded_loop_macro_name = macro_name;
    Ok(())
}
/// Write a parenthesized, comma-separated list of `parameters`,
/// rendering each with `put_expression`.
fn put_call_parameters(
    &mut self,
    parameters: impl Iterator<Item = Handle<crate::Expression>>,
    context: &ExpressionContext,
) -> BackendResult {
    self.put_call_parameters_impl(parameters, context, |writer, context, expr| {
        writer.put_expression(expr, context, true)
    })
}
/// Write a parenthesized, comma-separated argument list, rendering each
/// handle with the supplied `put_expression` callback.
fn put_call_parameters_impl<C, E>(
    &mut self,
    parameters: impl Iterator<Item = Handle<crate::Expression>>,
    ctx: &C,
    put_expression: E,
) -> BackendResult
where
    E: Fn(&mut Self, &C, Handle<crate::Expression>) -> BackendResult,
{
    write!(self.out, "(")?;
    let mut first = true;
    for handle in parameters {
        // Separate every argument after the first with a comma.
        if !first {
            write!(self.out, ", ")?;
        }
        first = false;
        put_expression(self, ctx, handle)?;
    }
    write!(self.out, ")")?;
    Ok(())
}
/// Write a level-of-detail value: either the expression itself, or the
/// name of its cached clamped variable (see [`ClampedLod`]).
fn put_level_of_detail(
    &mut self,
    level: LevelOfDetail,
    context: &ExpressionContext,
) -> BackendResult {
    match level {
        LevelOfDetail::Direct(expr) => self.put_expression(expr, context, true)?,
        LevelOfDetail::Restricted(load) => write!(self.out, "{}", ClampedLod(load))?,
    }
    Ok(())
}
/// Write `IMAGE.get_QUERY(LEVEL?)`, e.g. `tex.get_width(lod)`.
fn put_image_query(
    &mut self,
    image: Handle<crate::Expression>,
    query: &str,
    level: Option<LevelOfDetail>,
    context: &ExpressionContext,
) -> BackendResult {
    self.put_expression(image, context, false)?;
    write!(self.out, ".get_{query}(")?;
    if let Some(level) = level {
        self.put_level_of_detail(level, context)?;
    }
    write!(self.out, ")")?;
    Ok(())
}
/// Write a `kind`-typed scalar or vector holding `image`'s dimensions,
/// optionally at mip `level`.
fn put_image_size_query(
    &mut self,
    image: Handle<crate::Expression>,
    level: Option<LevelOfDetail>,
    kind: crate::ScalarKind,
    context: &ExpressionContext,
) -> BackendResult {
    //Note: MSL only has separate width/height/depth queries,
    // so compose the result of them.
    let dim = match *context.resolve_type(image) {
        crate::TypeInner::Image { dim, .. } => dim,
        ref other => unreachable!("Unexpected type {:?}", other),
    };
    let scalar = crate::Scalar { kind, width: 4 };
    let coordinate_type = scalar.to_msl_name();
    match dim {
        crate::ImageDimension::D1 => {
            // Since 1D textures never have mipmaps, MSL requires that the
            // `level` argument be a constexpr 0. It's simplest for us just
            // to pass `None` and omit the level entirely.
            if kind == crate::ScalarKind::Uint {
                // No need to construct a vector. No cast needed.
                self.put_image_query(image, "width", None, context)?;
            } else {
                // There's no definition for `int` in the `metal` namespace.
                write!(self.out, "int(")?;
                self.put_image_query(image, "width", None, context)?;
                write!(self.out, ")")?;
            }
        }
        crate::ImageDimension::D2 => {
            write!(self.out, "{NAMESPACE}::{coordinate_type}2(")?;
            self.put_image_query(image, "width", level, context)?;
            write!(self.out, ", ")?;
            self.put_image_query(image, "height", level, context)?;
            write!(self.out, ")")?;
        }
        crate::ImageDimension::D3 => {
            write!(self.out, "{NAMESPACE}::{coordinate_type}3(")?;
            self.put_image_query(image, "width", level, context)?;
            write!(self.out, ", ")?;
            self.put_image_query(image, "height", level, context)?;
            write!(self.out, ", ")?;
            self.put_image_query(image, "depth", level, context)?;
            write!(self.out, ")")?;
        }
        crate::ImageDimension::Cube => {
            // Cube faces are square: the width is used for both components.
            write!(self.out, "{NAMESPACE}::{coordinate_type}2(")?;
            self.put_image_query(image, "width", level, context)?;
            write!(self.out, ")")?;
        }
    }
    Ok(())
}
/// Write `uint(EXPR)` or `metal::uintN(EXPR)`, matching `expr`'s
/// scalar/vector shape.
fn put_cast_to_uint_scalar_or_vector(
    &mut self,
    expr: Handle<crate::Expression>,
    context: &ExpressionContext,
) -> BackendResult {
    // coordinates in IR are int, but Metal expects uint
    match *context.resolve_type(expr) {
        crate::TypeInner::Scalar(_) => {
            put_numeric_type(&mut self.out, crate::Scalar::U32, &[])?
        }
        crate::TypeInner::Vector { size, .. } => {
            put_numeric_type(&mut self.out, crate::Scalar::U32, &[size])?
        }
        _ => {
            return Err(Error::GenericValidation(
                "Invalid type for image coordinate".into(),
            ))
        }
    };
    write!(self.out, "(")?;
    self.put_expression(expr, context, true)?;
    write!(self.out, ")")?;
    Ok(())
}
/// Write the level-of-detail argument of a `sample` call, if any,
/// including the leading `", "` separator.
fn put_image_sample_level(
    &mut self,
    image: Handle<crate::Expression>,
    level: crate::SampleLevel,
    context: &ExpressionContext,
) -> BackendResult {
    let has_levels = context.image_needs_lod(image);
    match level {
        // `Auto` is MSL's default; no argument needed.
        crate::SampleLevel::Auto => {}
        crate::SampleLevel::Zero => {
            //TODO: do we support Zero on `Sampled` image classes?
        }
        // Non-mipmapped (e.g. 1D) images cannot take a level argument.
        _ if !has_levels => {
            log::warn!("1D image can't be sampled with level {:?}", level);
        }
        crate::SampleLevel::Exact(h) => {
            write!(self.out, ", {NAMESPACE}::level(")?;
            self.put_expression(h, context, true)?;
            write!(self.out, ")")?;
        }
        crate::SampleLevel::Bias(h) => {
            write!(self.out, ", {NAMESPACE}::bias(")?;
            self.put_expression(h, context, true)?;
            write!(self.out, ")")?;
        }
        crate::SampleLevel::Gradient { x, y } => {
            write!(self.out, ", {NAMESPACE}::gradient2d(")?;
            self.put_expression(x, context, true)?;
            write!(self.out, ", ")?;
            self.put_expression(y, context, true)?;
            write!(self.out, ")")?;
        }
    }
    Ok(())
}
/// Write the maximum valid coordinate for `image`: its size minus one,
/// component-wise.
fn put_image_coordinate_limits(
    &mut self,
    image: Handle<crate::Expression>,
    level: Option<LevelOfDetail>,
    context: &ExpressionContext,
) -> BackendResult {
    self.put_image_size_query(image, level, crate::ScalarKind::Uint, context)?;
    write!(self.out, " - 1")?;
    Ok(())
}
/// General function for writing restricted image indexes.
///
/// This is used to produce restricted mip levels, array indices, and sample
/// indices for [`ImageLoad`] and [`ImageStore`] accesses under the
/// [`Restrict`] bounds check policy.
///
/// This function writes an expression of the form:
///
/// ```ignore
///
/// metal::min(uint(INDEX), IMAGE.LIMIT_METHOD() - 1)
///
/// ```
///
/// [`ImageLoad`]: crate::Expression::ImageLoad
/// [`ImageStore`]: crate::Statement::ImageStore
/// [`Restrict`]: index::BoundsCheckPolicy::Restrict
fn put_restricted_scalar_image_index(
    &mut self,
    image: Handle<crate::Expression>,
    index: Handle<crate::Expression>,
    limit_method: &str,
    context: &ExpressionContext,
) -> BackendResult {
    // metal::min(uint(INDEX), IMAGE.LIMIT_METHOD() - 1)
    write!(self.out, "{NAMESPACE}::min(uint(")?;
    self.put_expression(index, context, true)?;
    write!(self.out, "), ")?;
    self.put_expression(image, context, false)?;
    write!(self.out, ".{limit_method}() - 1)")?;
    Ok(())
}
/// Write the full argument list for a texel access under the `Restrict`
/// policy: clamped coordinate, then array index, sample index, and level
/// of detail as applicable.
fn put_restricted_texel_address(
    &mut self,
    image: Handle<crate::Expression>,
    address: &TexelAddress,
    context: &ExpressionContext,
) -> BackendResult {
    // Write the coordinate.
    write!(self.out, "{NAMESPACE}::min(")?;
    self.put_cast_to_uint_scalar_or_vector(address.coordinate, context)?;
    write!(self.out, ", ")?;
    self.put_image_coordinate_limits(image, address.level, context)?;
    write!(self.out, ")")?;
    // Write the array index, if present.
    if let Some(array_index) = address.array_index {
        write!(self.out, ", ")?;
        self.put_restricted_scalar_image_index(image, array_index, "get_array_size", context)?;
    }
    // Write the sample index, if present.
    if let Some(sample) = address.sample {
        write!(self.out, ", ")?;
        self.put_restricted_scalar_image_index(image, sample, "get_num_samples", context)?;
    }
    // The level of detail should be clamped and cached by
    // `put_cache_restricted_level`, so we don't need to clamp it here.
    if let Some(level) = address.level {
        write!(self.out, ", ")?;
        self.put_level_of_detail(level, context)?;
    }
    Ok(())
}
/// Write an expression that is true if the given image access is in bounds.
///
/// Each component present in `address` (level of detail, sample index,
/// array index, coordinates) contributes one comparison, joined with `&&`.
fn put_image_access_bounds_check(
    &mut self,
    image: Handle<crate::Expression>,
    address: &TexelAddress,
    context: &ExpressionContext,
) -> BackendResult {
    let mut conjunction = "";
    // First, check the level of detail. Only if that is in bounds can we
    // use it to find the appropriate bounds for the coordinates.
    let level = if let Some(level) = address.level {
        write!(self.out, "uint(")?;
        self.put_level_of_detail(level, context)?;
        write!(self.out, ") < ")?;
        self.put_expression(image, context, true)?;
        write!(self.out, ".get_num_mip_levels()")?;
        conjunction = " && ";
        Some(level)
    } else {
        None
    };
    // Check sample index, if present. In valid modules an image is never
    // both mipmapped and multisampled, so `conjunction` is empty here; it
    // is written anyway for consistency with the branches around it.
    if let Some(sample) = address.sample {
        write!(self.out, "{conjunction}uint(")?;
        self.put_expression(sample, context, true)?;
        write!(self.out, ") < ")?;
        self.put_expression(image, context, true)?;
        write!(self.out, ".get_num_samples()")?;
        conjunction = " && ";
    }
    // Check array index, if present.
    if let Some(array_index) = address.array_index {
        write!(self.out, "{conjunction}uint(")?;
        self.put_expression(array_index, context, true)?;
        write!(self.out, ") < ")?;
        self.put_expression(image, context, true)?;
        write!(self.out, ".get_array_size()")?;
        conjunction = " && ";
    }
    // Finally, check if the coordinates are within bounds.
    let coord_is_vector = matches!(
        *context.resolve_type(address.coordinate),
        crate::TypeInner::Vector { .. }
    );
    write!(self.out, "{conjunction}")?;
    if coord_is_vector {
        // Vector comparison yields a boolean vector; reduce it with `all`.
        write!(self.out, "{NAMESPACE}::all(")?;
    }
    self.put_cast_to_uint_scalar_or_vector(address.coordinate, context)?;
    write!(self.out, " < ")?;
    self.put_image_size_query(image, level, crate::ScalarKind::Uint, context)?;
    if coord_is_vector {
        write!(self.out, ")")?;
    }
    Ok(())
}
/// Write an `ImageLoad` expression, honoring the configured bounds-check
/// policy for image loads.
///
/// `load` is the handle of the `ImageLoad` expression itself; under the
/// `Restrict` policy it names the cached, pre-clamped level of detail.
fn put_image_load(
    &mut self,
    load: Handle<crate::Expression>,
    image: Handle<crate::Expression>,
    mut address: TexelAddress,
    context: &ExpressionContext,
) -> BackendResult {
    match context.policies.image_load {
        proc::BoundsCheckPolicy::Restrict => {
            // Use the cached restricted level of detail, if any. Omit the
            // level altogether for 1D textures.
            if address.level.is_some() {
                address.level = context
                    .image_needs_lod(image)
                    .then_some(LevelOfDetail::Restricted(load));
            }
            self.put_expression(image, context, false)?;
            write!(self.out, ".read(")?;
            self.put_restricted_texel_address(image, &address, context)?;
            write!(self.out, ")")?;
        }
        proc::BoundsCheckPolicy::ReadZeroSkipWrite => {
            // Emit `(bounds_check ? load : DefaultConstructible())`.
            write!(self.out, "(")?;
            self.put_image_access_bounds_check(image, &address, context)?;
            write!(self.out, " ? ")?;
            self.put_unchecked_image_load(image, &address, context)?;
            write!(self.out, ": DefaultConstructible())")?;
        }
        proc::BoundsCheckPolicy::Unchecked => {
            self.put_unchecked_image_load(image, &address, context)?;
        }
    }
    Ok(())
}
/// Write an image `read` call with no bounds checking.
///
/// Emits `image.read(coord[, array_index][, sample][, level])`, skipping
/// the level argument for images that take none.
fn put_unchecked_image_load(
    &mut self,
    image: Handle<crate::Expression>,
    address: &TexelAddress,
    context: &ExpressionContext,
) -> BackendResult {
    self.put_expression(image, context, false)?;
    write!(self.out, ".read(")?;
    // Coordinates in the IR are int, but Metal expects uint.
    self.put_cast_to_uint_scalar_or_vector(address.coordinate, context)?;
    // The array index and sample index, in the order Metal expects them.
    for extra in [address.array_index, address.sample].into_iter().flatten() {
        write!(self.out, ", ")?;
        self.put_expression(extra, context, true)?;
    }
    // The mip level, but only for image types that accept one.
    if let Some(level) = address.level {
        if context.image_needs_lod(image) {
            write!(self.out, ", ")?;
            self.put_level_of_detail(level, context)?;
        }
    }
    write!(self.out, ")")?;
    Ok(())
}
/// Write an image atomic statement: `image.atomic_<op>(coord, value);`
/// indented at `level`.
fn put_image_atomic(
    &mut self,
    level: back::Level,
    image: Handle<crate::Expression>,
    address: &TexelAddress,
    fun: crate::AtomicFunction,
    value: Handle<crate::Expression>,
    context: &StatementContext,
) -> BackendResult {
    let ctx = &context.expression;
    write!(self.out, "{level}")?;
    self.put_expression(image, ctx, false)?;
    write!(self.out, ".atomic_{}(", fun.to_msl())?;
    // Coordinates in the IR are int, but Metal expects uint.
    self.put_cast_to_uint_scalar_or_vector(address.coordinate, ctx)?;
    write!(self.out, ", ")?;
    self.put_expression(value, ctx, true)?;
    writeln!(self.out, ");")?;
    Ok(())
}
/// Write an image store statement:
/// `image.write(value, coord[, array_index]);` indented at `level`.
fn put_image_store(
    &mut self,
    level: back::Level,
    image: Handle<crate::Expression>,
    address: &TexelAddress,
    value: Handle<crate::Expression>,
    context: &StatementContext,
) -> BackendResult {
    let ctx = &context.expression;
    write!(self.out, "{level}")?;
    self.put_expression(image, ctx, false)?;
    write!(self.out, ".write(")?;
    self.put_expression(value, ctx, true)?;
    write!(self.out, ", ")?;
    // Coordinates in the IR are int, but Metal expects uint.
    self.put_cast_to_uint_scalar_or_vector(address.coordinate, ctx)?;
    if let Some(array_index) = address.array_index {
        write!(self.out, ", ")?;
        self.put_expression(array_index, ctx, true)?;
    }
    writeln!(self.out, ");")?;
    Ok(())
}
/// Write the maximum valid index of the dynamically sized array at the end of `handle`.
///
/// The 'maximum valid index' is simply one less than the array's length.
///
/// This emits an expression of the form `a / b`, so the caller must
/// parenthesize its output if it will be applying operators of higher
/// precedence.
///
/// `handle` must be the handle of a global variable whose final member is a
/// dynamically sized array.
fn put_dynamic_array_max_index(
&mut self,
handle: Handle<crate::GlobalVariable>,
context: &ExpressionContext,
) -> BackendResult {
let global = &context.module.global_variables[handle];
let (offset, array_ty) = match context.module.types[global.ty].inner {
crate::TypeInner::Struct { ref members, .. } => match members.last() {
Some(&crate::StructMember { offset, ty, .. }) => (offset, ty),
None => return Err(Error::GenericValidation("Struct has no members".into())),
},
crate::TypeInner::Array {
size: crate::ArraySize::Dynamic,
..
} => (0, global.ty),
ref ty => {
return Err(Error::GenericValidation(format!(
"Expected type with dynamic array, got {ty:?}"
)))
}
};
let (size, stride) = match context.module.types[array_ty].inner {
crate::TypeInner::Array { base, stride, .. } => (
context.module.types[base]
.inner
.size(context.module.to_ctx()),
stride,
),
ref ty => {
return Err(Error::GenericValidation(format!(
"Expected array type, got {ty:?}"
)))
}
};
// When the stride length is larger than the size, the final element's stride of
// bytes would have padding following the value. But the buffer size in
// `buffer_sizes.sizeN` may not include this padding - it only needs to be large
// enough to hold the actual values' bytes.
//
// So subtract off the size to get a byte size that falls at the start or within
// the final element. Then divide by the stride size, to get one less than the
// length, and then add one. This works even if the buffer size does include the
// stride padding, since division rounds towards zero (MSL 2.4 §6.1). It will fail
// if there are zero elements in the array, but the WebGPU `validating shader binding`
// rules, together with draw-time validation when `minBindingSize` is zero,
// prevent that.
write!(
self.out,
"(_buffer_sizes.{member} - {offset} - {size}) / {stride}",
member = ArraySizeMember(handle),
offset = offset,
size = size,
stride = stride,
)?;
Ok(())
}
/// Emit code for the arithmetic expression of the dot product.
///
/// Expands the dot product into an explicit, parenthesized sum of
/// per-component products.
fn put_dot_product(
    &mut self,
    arg: Handle<crate::Expression>,
    arg1: Handle<crate::Expression>,
    size: usize,
    context: &ExpressionContext,
) -> BackendResult {
    // Parenthesize the sum so surrounding operators with higher precedence
    // cannot rebind our terms.
    write!(self.out, "(")?;
    // One term per vector component.
    for &component in back::COMPONENTS[..size].iter() {
        // Write the separator before every term; the extra leading '+' on
        // the first term is harmless in MSL.
        write!(self.out, " + ")?;
        // Both operands are requested as cached, so unless caching is
        // impossible (for example, for a Constant) they stay short.
        self.put_expression(arg, context, true)?;
        write!(self.out, ".{component} * ")?;
        self.put_expression(arg1, context, true)?;
        write!(self.out, ".{component}")?;
    }
    write!(self.out, ")")?;
    Ok(())
}
/// Emit code for the `sign(i32)` expression.
///
/// Builds the integer sign out of two nested selects:
/// `select(select(-1, 1, arg > 0), 0, arg == 0)`.
fn put_isign(
    &mut self,
    arg: Handle<crate::Expression>,
    context: &ExpressionContext,
) -> BackendResult {
    write!(self.out, "{NAMESPACE}::select({NAMESPACE}::select(")?;
    // For vector operands the -1/1 literals must be widened to match.
    if let crate::TypeInner::Vector { size, .. } = *context.resolve_type(arg) {
        let size = back::vector_size_str(size);
        write!(self.out, "int{size}(-1), int{size}(1)")?;
    } else {
        write!(self.out, "-1, 1")?;
    }
    write!(self.out, ", (")?;
    self.put_expression(arg, context, true)?;
    write!(self.out, " > 0)), 0, (")?;
    self.put_expression(arg, context, true)?;
    write!(self.out, " == 0))")?;
    Ok(())
}
/// Write the constant expression `expr_handle`, drawn from the module's
/// global expression arena.
///
/// Thin wrapper around `put_possibly_const_expression`, specialized to
/// module-level expressions: sub-expression types are resolved through
/// `mod_info`, and nested constant expressions recurse back into this
/// function.
fn put_const_expression(
    &mut self,
    expr_handle: Handle<crate::Expression>,
    module: &crate::Module,
    mod_info: &valid::ModuleInfo,
) -> BackendResult {
    self.put_possibly_const_expression(
        expr_handle,
        &module.global_expressions,
        module,
        mod_info,
        &(module, mod_info),
        // Resolve a sub-expression's type via the module info.
        |&(_, mod_info), expr| &mod_info[expr],
        // Write a sub-expression by recursing as a const expression.
        |writer, &(module, _), expr| writer.put_const_expression(expr, module, mod_info),
    )
}
/// Write an expression that may occur in a constant-expression context.
///
/// Shared implementation for the expression variants allowed both in
/// function bodies and in module-level constant expressions: `Literal`,
/// `Constant`, `ZeroValue`, `Compose`, and `Splat`. Calling this with any
/// other variant is a bug (`unreachable!`).
///
/// The caller supplies:
/// - `expressions`: the arena that `expr_handle` belongs to;
/// - `ctx`: an opaque context value threaded through to the callbacks;
/// - `get_expr_ty`: resolves a sub-expression's type within `ctx`;
/// - `put_expression`: writes a sub-expression within `ctx`.
#[allow(clippy::too_many_arguments)]
fn put_possibly_const_expression<C, I, E>(
    &mut self,
    expr_handle: Handle<crate::Expression>,
    expressions: &crate::Arena<crate::Expression>,
    module: &crate::Module,
    mod_info: &valid::ModuleInfo,
    ctx: &C,
    get_expr_ty: I,
    put_expression: E,
) -> BackendResult
where
    I: Fn(&C, Handle<crate::Expression>) -> &TypeResolution,
    E: Fn(&mut Self, &C, Handle<crate::Expression>) -> BackendResult,
{
    match expressions[expr_handle] {
        crate::Expression::Literal(literal) => match literal {
            // 64-bit floats are rejected outright as an unsupported capability.
            crate::Literal::F64(_) => {
                return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64))
            }
            crate::Literal::F32(value) => {
                // Non-finite values get explicit INFINITY/NAN spellings
                // rather than Rust's default float formatting.
                if value.is_infinite() {
                    let sign = if value.is_sign_negative() { "-" } else { "" };
                    write!(self.out, "{sign}INFINITY")?;
                } else if value.is_nan() {
                    write!(self.out, "NAN")?;
                } else {
                    // Append `.0` to whole numbers so the emitted literal
                    // keeps a decimal point.
                    let suffix = if value.fract() == 0.0 { ".0" } else { "" };
                    write!(self.out, "{value}{suffix}")?;
                }
            }
            crate::Literal::U32(value) => {
                write!(self.out, "{value}u")?;
            }
            crate::Literal::I32(value) => {
                write!(self.out, "{value}")?;
            }
            crate::Literal::U64(value) => {
                write!(self.out, "{value}uL")?;
            }
            crate::Literal::I64(value) => {
                write!(self.out, "{value}L")?;
            }
            crate::Literal::Bool(value) => {
                write!(self.out, "{value}")?;
            }
            // Abstract literals should have been concretized before
            // reaching the backend.
            crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
                return Err(Error::GenericValidation(
                    "Unsupported abstract literal".into(),
                ));
            }
        },
        crate::Expression::Constant(handle) => {
            let constant = &module.constants[handle];
            // Named constants are referenced by their generated name;
            // anonymous ones are inlined by writing their initializer.
            if constant.name.is_some() {
                write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
            } else {
                self.put_const_expression(constant.init, module, mod_info)?;
            }
        }
        crate::Expression::ZeroValue(ty) => {
            let ty_name = TypeContext {
                handle: ty,
                gctx: module.to_ctx(),
                names: &self.names,
                access: crate::StorageAccess::empty(),
                binding: None,
                first_time: false,
            };
            // Emit `T {}` as the zero value of the type.
            write!(self.out, "{ty_name} {{}}")?;
        }
        crate::Expression::Compose { ty, ref components } => {
            let ty_name = TypeContext {
                handle: ty,
                gctx: module.to_ctx(),
                names: &self.names,
                access: crate::StorageAccess::empty(),
                binding: None,
                first_time: false,
            };
            write!(self.out, "{ty_name}")?;
            match module.types[ty].inner {
                // Scalars, vectors, and matrices: constructor-call syntax,
                // `T(a, b, ...)`.
                crate::TypeInner::Scalar(_)
                | crate::TypeInner::Vector { .. }
                | crate::TypeInner::Matrix { .. } => {
                    self.put_call_parameters_impl(
                        components.iter().copied(),
                        ctx,
                        put_expression,
                    )?;
                }
                // Arrays and structs: brace-initializer syntax,
                // `T {a, b, ...}`.
                crate::TypeInner::Array { .. } | crate::TypeInner::Struct { .. } => {
                    write!(self.out, " {{")?;
                    for (index, &component) in components.iter().enumerate() {
                        if index != 0 {
                            write!(self.out, ", ")?;
                        }
                        // insert padding initialization, if needed
                        if self.struct_member_pads.contains(&(ty, index as u32)) {
                            write!(self.out, "{{}}, ")?;
                        }
                        put_expression(self, ctx, component)?;
                    }
                    write!(self.out, "}}")?;
                }
                _ => return Err(Error::UnsupportedCompose(ty)),
            }
        }
        crate::Expression::Splat { size, value } => {
            // A splat is the vector constructor applied to one scalar.
            let scalar = match *get_expr_ty(ctx, value).inner_with(&module.types) {
                crate::TypeInner::Scalar(scalar) => scalar,
                ref ty => {
                    return Err(Error::GenericValidation(format!(
                        "Expected splat value type must be a scalar, got {ty:?}",
                    )))
                }
            };
            put_numeric_type(&mut self.out, scalar, &[size])?;
            write!(self.out, "(")?;
            put_expression(self, ctx, value)?;
            write!(self.out, ")")?;
        }
        // Every other variant must be handled by the caller before
        // delegating here.
        _ => unreachable!(),
    }
    Ok(())
}
/// Emit code for the expression `expr_handle`.
///
/// The `is_scoped` argument is true if the surrounding operators have the
/// precedence of the comma operator, or lower. So, for example:
///
/// - Pass `true` for `is_scoped` when writing function arguments, an
/// expression statement, an initializer expression, or anything already
/// wrapped in parenthesis.
///
/// - Pass `false` if it is an operand of a `?:` operator, a `[]`, or really
/// almost anything else.
fn put_expression(
&mut self,
expr_handle: Handle<crate::Expression>,
context: &ExpressionContext,
is_scoped: bool,
) -> BackendResult {
// Add to the set in order to track the stack size.
#[cfg(test)]
self.put_expression_stack_pointers
.insert(ptr::from_ref(&expr_handle).cast());
if let Some(name) = self.named_expressions.get(&expr_handle) {
write!(self.out, "{name}")?;
return Ok(());
}
let expression = &context.function.expressions[expr_handle];
log::trace!("expression {:?} = {:?}", expr_handle, expression);
match *expression {
crate::Expression::Literal(_)
| crate::Expression::Constant(_)
| crate::Expression::ZeroValue(_)
| crate::Expression::Compose { .. }
| crate::Expression::Splat { .. } => {
self.put_possibly_const_expression(
expr_handle,
&context.function.expressions,
context.module,
context.mod_info,
context,
|context, expr: Handle<crate::Expression>| &context.info[expr].ty,
|writer, context, expr| writer.put_expression(expr, context, true),
)?;
}
crate::Expression::Override(_) => return Err(Error::Override),
crate::Expression::Access { base, .. }
| crate::Expression::AccessIndex { base, .. } => {
// This is an acceptable place to generate a `ReadZeroSkipWrite` check.
// Since `put_bounds_checks` and `put_access_chain` handle an entire
// access chain at a time, recursing back through `put_expression` only
// for index expressions and the base object, we will never see intermediate
// `Access` or `AccessIndex` expressions here.
let policy = context.choose_bounds_check_policy(base);
if policy == index::BoundsCheckPolicy::ReadZeroSkipWrite
&& self.put_bounds_checks(
expr_handle,
context,
back::Level(0),
if is_scoped { "" } else { "(" },
)?
{
write!(self.out, " ? ")?;
self.put_access_chain(expr_handle, policy, context)?;
write!(self.out, " : DefaultConstructible()")?;
if !is_scoped {
write!(self.out, ")")?;
}
} else {
self.put_access_chain(expr_handle, policy, context)?;
}
}
crate::Expression::Swizzle {
size,
vector,
pattern,
} => {
self.put_wrapped_expression_for_packed_vec3_access(vector, context, false)?;
write!(self.out, ".")?;
for &sc in pattern[..size as usize].iter() {
write!(self.out, "{}", back::COMPONENTS[sc as usize])?;
}
}
crate::Expression::FunctionArgument(index) => {
let name_key = match context.origin {
FunctionOrigin::Handle(handle) => NameKey::FunctionArgument(handle, index),
FunctionOrigin::EntryPoint(ep_index) => {
NameKey::EntryPointArgument(ep_index, index)
}
};
let name = &self.names[&name_key];
write!(self.out, "{name}")?;
}
crate::Expression::GlobalVariable(handle) => {
let name = &self.names[&NameKey::GlobalVariable(handle)];
write!(self.out, "{name}")?;
}
crate::Expression::LocalVariable(handle) => {
let name_key = match context.origin {
FunctionOrigin::Handle(fun_handle) => {
NameKey::FunctionLocal(fun_handle, handle)
}
FunctionOrigin::EntryPoint(ep_index) => {
NameKey::EntryPointLocal(ep_index, handle)
}
};
let name = &self.names[&name_key];
write!(self.out, "{name}")?;
}
crate::Expression::Load { pointer } => self.put_load(pointer, context, is_scoped)?,
crate::Expression::ImageSample {
image,
sampler,
gather,
coordinate,
array_index,
offset,
level,
depth_ref,
} => {
let main_op = match gather {
Some(_) => "gather",
None => "sample",
};
let comparison_op = match depth_ref {
Some(_) => "_compare",
None => "",
};
self.put_expression(image, context, false)?;
write!(self.out, ".{main_op}{comparison_op}(")?;
self.put_expression(sampler, context, true)?;
write!(self.out, ", ")?;
self.put_expression(coordinate, context, true)?;
if let Some(expr) = array_index {
write!(self.out, ", ")?;
self.put_expression(expr, context, true)?;
}
if let Some(dref) = depth_ref {
write!(self.out, ", ")?;
self.put_expression(dref, context, true)?;
}
self.put_image_sample_level(image, level, context)?;
if let Some(offset) = offset {
write!(self.out, ", ")?;
self.put_const_expression(offset, context.module, context.mod_info)?;
}
match gather {
None | Some(crate::SwizzleComponent::X) => {}
Some(component) => {
let is_cube_map = match *context.resolve_type(image) {
crate::TypeInner::Image {
dim: crate::ImageDimension::Cube,
..
} => true,
_ => false,
};
// Offset always comes before the gather, except
// in cube maps where it's not applicable
if offset.is_none() && !is_cube_map {
write!(self.out, ", {NAMESPACE}::int2(0)")?;
}
let letter = back::COMPONENTS[component as usize];
write!(self.out, ", {NAMESPACE}::component::{letter}")?;
}
}
write!(self.out, ")")?;
}
crate::Expression::ImageLoad {
image,
coordinate,
array_index,
--> --------------------
--> maximum size reached
--> --------------------
[ 0.90Quellennavigators
]
|
2026-04-04
|