//! HLSL backend writer (`writer.rs`). (Header reconstructed from extraction
//! metadata; original language-detection annotations removed.)
use super::{
    help::{
        WrappedArrayLength, WrappedConstructor, WrappedImageQuery, WrappedStructMatrixAccess,
        WrappedZeroValue,
    },
    storage::StoreValue,
    BackendResult, Error, FragmentEntryPoint, Options,
};
use crate::{
    back::{self, Baked},
    proc::{self, index, ExpressionKindTracker, NameKey},
    valid, Handle, Module, Scalar, ScalarKind, ShaderStage, TypeInner,
};
use std::{fmt, mem};
/// Semantic prefix for user-defined varyings: `LOC0`, `LOC1`, …
const LOCATION_SEMANTIC: &str = "LOC";
/// Type name of the special constant buffer carrying draw parameters
/// (`first_vertex`, `first_instance`, `other`).
const SPECIAL_CBUF_TYPE: &str = "NagaConstants";
/// Variable name of the special constant buffer.
const SPECIAL_CBUF_VAR: &str = "_NagaConstants";
// Field names inside the special constant buffer.
const SPECIAL_FIRST_VERTEX: &str = "first_vertex";
const SPECIAL_FIRST_INSTANCE: &str = "first_instance";
const SPECIAL_OTHER: &str = "other";
// Names of the wrapped helper functions this backend emits.
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
pub(crate) const EXTRACT_BITS_FUNCTION: &str = "naga_extractBits";
pub(crate) const INSERT_BITS_FUNCTION: &str = "naga_insertBits";
/// One flattened member of a synthesized entry-point IO struct.
struct EpStructMember {
    /// Collision-free HLSL field name generated by the namer.
    name: String,
    ty: Handle<crate::Type>,
    // technically, this should always be `Some`
    // (we `debug_assert!` this in `write_interface_struct`)
    binding: Option<crate::Binding>,
    /// Position in the original argument/result order; used to restore
    /// that order after members are sorted by binding.
    index: u32,
}
/// Structure contains information required for generating
/// wrapped structure of all entry points arguments
struct EntryPointBinding {
    /// Name of the fake EP argument that contains the struct
    /// with all the flattened input data.
    arg_name: String,
    /// Generated structure name
    ty_name: String,
    /// Members of generated structure
    /// (see ordering notes on [`EntryPointInterface`]).
    members: Vec<EpStructMember>,
}
/// The synthesized HLSL IO structs written for one entry point, if any.
pub(super) struct EntryPointInterface {
    /// If `Some`, the input of an entry point is gathered in a special
    /// struct with members sorted by binding.
    /// The `EntryPointBinding::members` array is sorted by index,
    /// so that we can walk it in `write_ep_arguments_initialization`.
    input: Option<EntryPointBinding>,
    /// If `Some`, the output of an entry point is flattened.
    /// The `EntryPointBinding::members` array is sorted by binding,
    /// So that we can walk it in `Statement::Return` handler.
    output: Option<EntryPointBinding>,
}
/// Sort key for entry-point IO members. The derived `Ord` with this variant
/// order places user locations first (ascending), then built-ins, then
/// members with no binding.
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord)]
enum InterfaceKey {
    Location(u32),
    BuiltIn(crate::BuiltIn),
    Other,
}
impl InterfaceKey {
    /// Build the sort key for an optional IO binding: user locations map to
    /// [`Self::Location`], built-ins to [`Self::BuiltIn`], and a missing
    /// binding to [`Self::Other`].
    const fn new(binding: Option<&crate::Binding>) -> Self {
        match binding {
            None => Self::Other,
            Some(&crate::Binding::BuiltIn(built_in)) => Self::BuiltIn(built_in),
            Some(&crate::Binding::Location { location, .. }) => Self::Location(location),
        }
    }
}
/// Direction of an entry-point interface struct: argument side or result side.
#[derive(Copy, Clone, PartialEq)]
enum Io {
    Input,
    Output,
}
/// Returns `true` when `binding` is one of the subgroup built-ins, which are
/// not declared as regular HLSL semantics but emulated by the writer
/// (see `write_ep_argument_initialization`).
const fn is_subgroup_builtin_binding(binding: &Option<crate::Binding>) -> bool {
    matches!(
        *binding,
        Some(crate::Binding::BuiltIn(
            crate::BuiltIn::SubgroupSize
                | crate::BuiltIn::SubgroupInvocationId
                | crate::BuiltIn::NumSubgroups
                | crate::BuiltIn::SubgroupId,
        ))
    )
}
impl<'a, W: fmt::Write> super::Writer<'a, W> {
/// Create a writer that emits HLSL into `out`, configured by `options`.
pub fn new(out: W, options: &'a Options) -> Self {
    Self {
        out,
        options,
        names: crate::FastHashMap::default(),
        namer: proc::Namer::default(),
        named_expressions: crate::NamedExpressions::default(),
        entry_point_io: Vec::new(),
        wrapped: super::Wrapped::default(),
        continue_ctx: back::continue_forward::ContinueCtx::default(),
        temp_access_chain: Vec::new(),
        need_bake_expressions: Default::default(),
    }
}
/// Clear all per-module state so the writer can be reused for another module.
fn reset(&mut self, module: &Module) {
    // `namer.reset` fills `names` below, so empty the map first.
    self.names.clear();
    self.namer.reset(
        module,
        super::keywords::RESERVED,
        super::keywords::TYPES,
        super::keywords::RESERVED_CASE_INSENSITIVE,
        &[],
        &mut self.names,
    );
    self.entry_point_io.clear();
    self.named_expressions.clear();
    self.wrapped.clear();
    self.continue_ctx.clear();
    self.need_bake_expressions.clear();
}
/// Helper method used to find which expressions of a given function require baking
///
/// # Notes
/// Clears `need_bake_expressions` set before adding to it
fn update_expressions_to_bake(
    &mut self,
    module: &Module,
    func: &crate::Function,
    info: &valid::FunctionInfo,
) {
    use crate::Expression;
    self.need_bake_expressions.clear();
    for (fun_handle, expr) in func.expressions.iter() {
        let expr_info = &info[fun_handle];
        // Bake any expression referenced at least as many times as its
        // bake threshold. (`expr` is the expression at `fun_handle`, so
        // there is no need to re-index the arena here.)
        let min_ref_count = expr.bake_ref_count();
        if min_ref_count <= expr_info.ref_count {
            self.need_bake_expressions.insert(fun_handle);
        }
        if let Expression::Math { fun, arg, .. } = *expr {
            match fun {
                // The generated HLSL for these math functions refers to the
                // argument more than once, so it must be a named expression.
                crate::MathFunction::Asinh
                | crate::MathFunction::Acosh
                | crate::MathFunction::Atanh
                | crate::MathFunction::Unpack2x16float
                | crate::MathFunction::Unpack2x16snorm
                | crate::MathFunction::Unpack2x16unorm
                | crate::MathFunction::Unpack4x8snorm
                | crate::MathFunction::Unpack4x8unorm
                | crate::MathFunction::Unpack4xI8
                | crate::MathFunction::Unpack4xU8
                | crate::MathFunction::Pack2x16float
                | crate::MathFunction::Pack2x16snorm
                | crate::MathFunction::Pack2x16unorm
                | crate::MathFunction::Pack4x8snorm
                | crate::MathFunction::Pack4x8unorm
                | crate::MathFunction::Pack4xI8
                | crate::MathFunction::Pack4xU8 => {
                    self.need_bake_expressions.insert(arg);
                }
                crate::MathFunction::CountLeadingZeros => {
                    // Only the signed case needs baking; reuse `expr_info`
                    // instead of re-indexing `info`.
                    let inner = expr_info.ty.inner_with(&module.types);
                    if let Some(ScalarKind::Sint) = inner.scalar_kind() {
                        self.need_bake_expressions.insert(arg);
                    }
                }
                _ => {}
            }
        }
        if let Expression::Derivative { axis, ctrl, expr } = *expr {
            use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
            // Width derivatives with explicit coarse/fine control are
            // emitted in a form that reuses the operand, so bake it.
            if axis == Axis::Width && (ctrl == Ctrl::Coarse || ctrl == Ctrl::Fine) {
                self.need_bake_expressions.insert(expr);
            }
        }
    }
    // Inclusive-scan subgroup operations read their argument again when
    // lowered, so the argument must be baked as well.
    for statement in func.body.iter() {
        if let crate::Statement::SubgroupCollectiveOperation {
            op: _,
            collective_op: crate::CollectiveOperation::InclusiveScan,
            argument,
            result: _,
        } = *statement
        {
            self.need_bake_expressions.insert(argument);
        }
    }
}
/// Write the full HLSL module: special constants, struct types, wrapped
/// helper functions, named constants, globals, regular functions, and entry
/// points. Returns reflection info holding each entry point's generated name
/// (or the binding-resolution error that made it unwritable).
pub fn write(
    &mut self,
    module: &Module,
    module_info: &valid::ModuleInfo,
    fragment_entry_point: Option<&FragmentEntryPoint<'_>>,
) -> Result<super::ReflectionInfo, Error> {
    // Pipeline overrides must have been processed out before this backend.
    if !module.overrides.is_empty() {
        return Err(Error::Override);
    }
    self.reset(module);
    // Write special constants, if needed
    if let Some(ref bt) = self.options.special_constants_binding {
        writeln!(self.out, "struct {SPECIAL_CBUF_TYPE} {{")?;
        writeln!(self.out, "{}int {};", back::INDENT, SPECIAL_FIRST_VERTEX)?;
        writeln!(self.out, "{}int {};", back::INDENT, SPECIAL_FIRST_INSTANCE)?;
        writeln!(self.out, "{}uint {};", back::INDENT, SPECIAL_OTHER)?;
        writeln!(self.out, "}};")?;
        write!(
            self.out,
            "ConstantBuffer<{}> {}: register(b{}",
            SPECIAL_CBUF_TYPE, SPECIAL_CBUF_VAR, bt.register
        )?;
        if bt.space != 0 {
            write!(self.out, ", space{}", bt.space)?;
        }
        writeln!(self.out, ");")?;
        // Extra newline for readability
        writeln!(self.out)?;
    }
    // Save all entry point output types
    let ep_results = module
        .entry_points
        .iter()
        .map(|ep| (ep.stage, ep.function.result.clone()))
        .collect::<Vec<(ShaderStage, Option<crate::FunctionResult>)>>();
    self.write_all_mat_cx2_typedefs_and_functions(module)?;
    // Write all structs
    for (handle, ty) in module.types.iter() {
        if let TypeInner::Struct { ref members, span } = ty.inner {
            if module.types[members.last().unwrap().ty]
                .inner
                .is_dynamically_sized(&module.types)
            {
                // unsized arrays can only be in storage buffers,
                // for which we use `ByteAddressBuffer` anyway.
                continue;
            }
            // If this struct is some entry point's return type, its members
            // need stage-output semantics, so pass the stage along.
            let ep_result = ep_results.iter().find(|e| {
                if let Some(ref result) = e.1 {
                    result.ty == handle
                } else {
                    false
                }
            });
            self.write_struct(
                module,
                handle,
                members,
                span,
                ep_result.map(|r| (r.0, Io::Output)),
            )?;
            writeln!(self.out)?;
        }
    }
    self.write_special_functions(module)?;
    self.write_wrapped_compose_functions(module, &module.global_expressions)?;
    self.write_wrapped_zero_value_functions(module, &module.global_expressions)?;
    // Write all named constants
    let mut constants = module
        .constants
        .iter()
        .filter(|&(_, c)| c.name.is_some())
        .peekable();
    while let Some((handle, _)) = constants.next() {
        self.write_global_constant(module, handle)?;
        // Add extra newline for readability on last iteration
        if constants.peek().is_none() {
            writeln!(self.out)?;
        }
    }
    // Write all globals
    for (ty, _) in module.global_variables.iter() {
        self.write_global(module, ty)?;
    }
    if !module.global_variables.is_empty() {
        // Add extra newline for readability
        writeln!(self.out)?;
    }
    // Write all entry points wrapped structs
    for (index, ep) in module.entry_points.iter().enumerate() {
        let ep_name = self.names[&NameKey::EntryPoint(index as u16)].clone();
        let ep_io = self.write_ep_interface(
            module,
            &ep.function,
            ep.stage,
            &ep_name,
            fragment_entry_point,
        )?;
        self.entry_point_io.push(ep_io);
    }
    // Write all regular functions
    for (handle, function) in module.functions.iter() {
        let info = &module_info[handle];
        // Check if all of the globals are accessible
        if !self.options.fake_missing_bindings {
            // A function that uses a global with an unresolvable binding is
            // skipped entirely (with a log note) rather than failing.
            if let Some((var_handle, _)) =
                module
                    .global_variables
                    .iter()
                    .find(|&(var_handle, var)| match var.binding {
                        Some(ref binding) if !info[var_handle].is_empty() => {
                            self.options.resolve_resource_binding(binding).is_err()
                        }
                        _ => false,
                    })
            {
                log::info!(
                    "Skipping function {:?} (name {:?}) because global {:?} is inaccessible",
                    handle,
                    function.name,
                    var_handle
                );
                continue;
            }
        }
        let ctx = back::FunctionCtx {
            ty: back::FunctionType::Function(handle),
            info,
            expressions: &function.expressions,
            named_expressions: &function.named_expressions,
            expr_kind_tracker: ExpressionKindTracker::from_arena(&function.expressions),
        };
        let name = self.names[&NameKey::Function(handle)].clone();
        self.write_wrapped_functions(module, &ctx)?;
        self.write_function(module, name.as_str(), function, &ctx, info)?;
        writeln!(self.out)?;
    }
    let mut entry_point_names = Vec::with_capacity(module.entry_points.len());
    // Write all entry points
    for (index, ep) in module.entry_points.iter().enumerate() {
        let info = module_info.get_entry_point(index);
        if !self.options.fake_missing_bindings {
            // Unlike regular functions, an entry point with an unresolvable
            // binding is reported via the reflection info, not just skipped.
            let mut ep_error = None;
            for (var_handle, var) in module.global_variables.iter() {
                match var.binding {
                    Some(ref binding) if !info[var_handle].is_empty() => {
                        if let Err(err) = self.options.resolve_resource_binding(binding) {
                            ep_error = Some(err);
                            break;
                        }
                    }
                    _ => {}
                }
            }
            if let Some(err) = ep_error {
                entry_point_names.push(Err(err));
                continue;
            }
        }
        let ctx = back::FunctionCtx {
            ty: back::FunctionType::EntryPoint(index as u16),
            info,
            expressions: &ep.function.expressions,
            named_expressions: &ep.function.named_expressions,
            expr_kind_tracker: ExpressionKindTracker::from_arena(&ep.function.expressions),
        };
        self.write_wrapped_functions(module, &ctx)?;
        if ep.stage == ShaderStage::Compute {
            // HLSL is calling workgroup size "num threads"
            let num_threads = ep.workgroup_size;
            writeln!(
                self.out,
                "[numthreads({}, {}, {})]",
                num_threads[0], num_threads[1], num_threads[2]
            )?;
        }
        let name = self.names[&NameKey::EntryPoint(index as u16)].clone();
        self.write_function(module, &name, &ep.function, &ctx, info)?;
        if index < module.entry_points.len() - 1 {
            writeln!(self.out)?;
        }
        entry_point_names.push(Ok(name));
    }
    Ok(super::ReflectionInfo { entry_point_names })
}
fn write_modifier(&mut self, binding: &crate::Binding) -> BackendResult {
match *binding {
crate::Binding::BuiltIn(crate::BuiltIn::Position { invariant: true }) => {
write!(self.out, "precise ")?;
}
crate::Binding::Location {
interpolation,
sampling,
..
} => {
if let Some(interpolation) = interpolation {
if let Some(string) = interpolation.to_hlsl_str() {
write!(self.out, "{string} ")?
}
}
if let Some(sampling) = sampling {
if let Some(string) = sampling.to_hlsl_str() {
write!(self.out, "{string} ")?
}
}
}
crate::Binding::BuiltIn(_) => {}
}
Ok(())
}
//TODO: we could force fragment outputs to always go through `entry_point_io.output` path
// if they are struct, so that the `stage` argument here could be omitted.
/// Write the ` : SEMANTIC` suffix for a binding, if it needs one.
///
/// Subgroup built-ins get no semantic here — they are emulated in
/// `write_ep_argument_initialization` instead.
fn write_semantic(
    &mut self,
    binding: &Option<crate::Binding>,
    stage: Option<(ShaderStage, Io)>,
) -> BackendResult {
    match *binding {
        Some(crate::Binding::BuiltIn(builtin)) if !is_subgroup_builtin_binding(binding) => {
            let builtin_str = builtin.to_hlsl_str()?;
            write!(self.out, " : {builtin_str}")?;
        }
        Some(crate::Binding::Location {
            second_blend_source: true,
            ..
        }) => {
            // The second dual-source-blending output always targets SV_Target1.
            write!(self.out, " : SV_Target1")?;
        }
        Some(crate::Binding::Location {
            location,
            second_blend_source: false,
            ..
        }) => {
            // Fragment outputs map to render targets; all other stage IO
            // uses the user-varying semantic `LOC<n>`.
            if stage == Some((ShaderStage::Fragment, Io::Output)) {
                write!(self.out, " : SV_Target{location}")?;
            } else {
                write!(self.out, " : {LOCATION_SEMANTIC}{location}")?;
            }
        }
        _ => {}
    }
    Ok(())
}
/// Write one synthesized entry-point IO struct and return the binding info
/// needed to reference it later. Members are declared sorted by binding; for
/// inputs, the returned member list is re-sorted back to argument order.
fn write_interface_struct(
    &mut self,
    module: &Module,
    shader_stage: (ShaderStage, Io),
    struct_name: String,
    mut members: Vec<EpStructMember>,
) -> Result<EntryPointBinding, Error> {
    // Sort the members so that first come the user-defined varyings
    // in ascending locations, and then built-ins. This allows VS and FS
    // interfaces to match with regards to order.
    members.sort_by_key(|m| InterfaceKey::new(m.binding.as_ref()));
    write!(self.out, "struct {struct_name}")?;
    writeln!(self.out, " {{")?;
    for m in members.iter() {
        // Sanity check that each IO member is a built-in or is assigned a
        // location. Also see note about nesting in `write_ep_input_struct`.
        debug_assert!(m.binding.is_some());
        // Subgroup built-ins have no HLSL semantic; they are emulated in
        // `write_ep_argument_initialization` rather than declared here.
        if is_subgroup_builtin_binding(&m.binding) {
            continue;
        }
        write!(self.out, "{}", back::INDENT)?;
        if let Some(ref binding) = m.binding {
            self.write_modifier(binding)?;
        }
        self.write_type(module, m.ty)?;
        write!(self.out, " {}", &m.name)?;
        self.write_semantic(&m.binding, Some(shader_stage))?;
        writeln!(self.out, ";")?;
    }
    // Emulating `SubgroupId` requires the flat local invocation index.
    if members.iter().any(|arg| {
        matches!(
            arg.binding,
            Some(crate::Binding::BuiltIn(crate::BuiltIn::SubgroupId))
        )
    }) {
        writeln!(
            self.out,
            "{}uint __local_invocation_index : SV_GroupIndex;",
            back::INDENT
        )?;
    }
    writeln!(self.out, "}};")?;
    writeln!(self.out)?;
    // See ordering notes on EntryPointInterface fields
    match shader_stage.1 {
        Io::Input => {
            // bring back the original order
            members.sort_by_key(|m| m.index);
        }
        Io::Output => {
            // keep it sorted by binding
        }
    }
    Ok(EntryPointBinding {
        arg_name: self.namer.call(struct_name.to_lowercase().as_str()),
        ty_name: struct_name,
        members,
    })
}
/// Flatten all entry point arguments into a single struct.
/// This is needed since we need to re-order them: first placing user locations,
/// then built-ins.
fn write_ep_input_struct(
    &mut self,
    module: &Module,
    func: &crate::Function,
    stage: ShaderStage,
    entry_point_name: &str,
) -> Result<EntryPointBinding, Error> {
    let struct_name = format!("{stage:?}Input_{entry_point_name}");
    // NOTE: We don't need to handle nested structs. All members must be
    // either built-ins or assigned a location, i.e. `binding` is `Some`.
    // This is checked in `VaryingContext::validate`. See:
    // https://gpuweb.github.io/gpuweb/wgsl/#input-output-locations
    let mut fake_members = Vec::new();
    for arg in func.arguments.iter() {
        match module.types[arg.ty].inner {
            // A struct argument contributes one fake member per field.
            TypeInner::Struct { ref members, .. } => {
                for member in members.iter() {
                    let index = fake_members.len() as u32;
                    let name = self.namer.call_or(&member.name, "member");
                    fake_members.push(EpStructMember {
                        index,
                        name,
                        ty: member.ty,
                        binding: member.binding.clone(),
                    });
                }
            }
            // Any other argument becomes a single fake member.
            _ => {
                let index = fake_members.len() as u32;
                let name = self.namer.call_or(&arg.name, "member");
                fake_members.push(EpStructMember {
                    index,
                    name,
                    ty: arg.ty,
                    binding: arg.binding.clone(),
                });
            }
        }
    }
    self.write_interface_struct(module, (stage, Io::Input), struct_name, fake_members)
}
/// Flatten all entry point results into a single struct.
/// This is needed since we need to re-order them: first placing user locations,
/// then built-ins.
fn write_ep_output_struct(
    &mut self,
    module: &Module,
    result: &crate::FunctionResult,
    stage: ShaderStage,
    entry_point_name: &str,
    frag_ep: Option<&FragmentEntryPoint<'_>>,
) -> Result<EntryPointBinding, Error> {
    let struct_name = format!("{stage:?}Output_{entry_point_name}");
    let empty = [];
    let members = match module.types[result.ty].inner {
        TypeInner::Struct { ref members, .. } => members,
        ref other => {
            // This function is only called for binding-less results, which
            // are expected to be structs; recover with an empty member list.
            log::error!("Unexpected {:?} output type without a binding", other);
            &empty[..]
        }
    };
    // Gather list of fragment input locations. We use this below to remove user-defined
    // varyings from VS outputs that aren't in the FS inputs. This makes the VS interface match
    // as long as the FS inputs are a subset of the VS outputs. This is only applied if the
    // writer is supplied with information about the fragment entry point.
    let fs_input_locs = if let (Some(frag_ep), ShaderStage::Vertex) = (frag_ep, stage) {
        let mut fs_input_locs = Vec::new();
        for arg in frag_ep.func.arguments.iter() {
            let mut push_if_location = |binding: &Option<crate::Binding>| match *binding {
                Some(crate::Binding::Location { location, .. }) => fs_input_locs.push(location),
                Some(crate::Binding::BuiltIn(_)) | None => {}
            };
            // NOTE: We don't need to handle struct nesting. See note in
            // `write_ep_input_struct`.
            match frag_ep.module.types[arg.ty].inner {
                TypeInner::Struct { ref members, .. } => {
                    for member in members.iter() {
                        push_if_location(&member.binding);
                    }
                }
                _ => push_if_location(&arg.binding),
            }
        }
        // Sorted so the filter below can use `binary_search`.
        fs_input_locs.sort();
        Some(fs_input_locs)
    } else {
        None
    };
    let mut fake_members = Vec::new();
    for (index, member) in members.iter().enumerate() {
        if let Some(ref fs_input_locs) = fs_input_locs {
            match member.binding {
                Some(crate::Binding::Location { location, .. }) => {
                    // Drop VS outputs the fragment stage does not consume.
                    if fs_input_locs.binary_search(&location).is_err() {
                        continue;
                    }
                }
                Some(crate::Binding::BuiltIn(_)) | None => {}
            }
        }
        let member_name = self.namer.call_or(&member.name, "member");
        fake_members.push(EpStructMember {
            name: member_name,
            ty: member.ty,
            binding: member.binding.clone(),
            index: index as u32,
        });
    }
    self.write_interface_struct(module, (stage, Io::Output), struct_name, fake_members)
}
/// Writes special interface structures for an entry point. The special structures have
/// all the fields flattened into them and sorted by binding. They are needed to emulate
/// subgroup built-ins and to make the interfaces between VS outputs and FS inputs match.
fn write_ep_interface(
    &mut self,
    module: &Module,
    func: &crate::Function,
    stage: ShaderStage,
    ep_name: &str,
    frag_ep: Option<&FragmentEntryPoint<'_>>,
) -> Result<EntryPointInterface, Error> {
    // An input struct is only synthesized when arguments exist and either
    // this is a fragment stage or some argument uses a subgroup built-in.
    let wants_input_struct = !func.arguments.is_empty()
        && (stage == ShaderStage::Fragment
            || func
                .arguments
                .iter()
                .any(|arg| is_subgroup_builtin_binding(&arg.binding)));
    let input = if wants_input_struct {
        Some(self.write_ep_input_struct(module, func, stage, ep_name)?)
    } else {
        None
    };
    // Vertex results without a direct binding are flattened so they can be
    // matched against (and filtered by) the fragment stage's inputs.
    let output = match func.result {
        Some(ref fr) if fr.binding.is_none() && stage == ShaderStage::Vertex => {
            Some(self.write_ep_output_struct(module, fr, stage, ep_name, frag_ep)?)
        }
        _ => None,
    };
    Ok(EntryPointInterface { input, output })
}
fn write_ep_argument_initialization(
&mut self,
ep: &crate::EntryPoint,
ep_input: &EntryPointBinding,
fake_member: &EpStructMember,
) -> BackendResult {
match fake_member.binding {
Some(crate::Binding::BuiltIn(crate::BuiltIn::SubgroupSize)) => {
write!(self.out, "WaveGetLaneCount()")?
}
Some(crate::Binding::BuiltIn(crate::BuiltIn::SubgroupInvocationId)) => {
write!(self.out, "WaveGetLaneIndex()")?
}
Some(crate::Binding::BuiltIn(crate::BuiltIn::NumSubgroups)) => write!(
self.out,
"({}u + WaveGetLaneCount() - 1u) / WaveGetLaneCount()",
ep.workgroup_size[0] * ep.workgroup_size[1] * ep.workgroup_size[2]
)?,
Some(crate::Binding::BuiltIn(crate::BuiltIn::SubgroupId)) => {
write!(
self.out,
"{}.__local_invocation_index / WaveGetLaneCount()",
ep_input.arg_name
)?;
}
_ => {
write!(self.out, "{}.{}", ep_input.arg_name, fake_member.name)?;
}
}
Ok(())
}
/// Write an entry point preface that initializes the arguments as specified in IR.
///
/// Consumes (`take`s) the entry point's synthesized input binding. Each IR
/// argument becomes a local initialized from the flattened input struct,
/// with `fake_iter` walking the flattened members in original argument order.
fn write_ep_arguments_initialization(
    &mut self,
    module: &Module,
    func: &crate::Function,
    ep_index: u16,
) -> BackendResult {
    let ep = &module.entry_points[ep_index as usize];
    // No synthesized input struct: the arguments are passed directly.
    let ep_input = match self.entry_point_io[ep_index as usize].input.take() {
        Some(ep_input) => ep_input,
        None => return Ok(()),
    };
    let mut fake_iter = ep_input.members.iter();
    for (arg_index, arg) in func.arguments.iter().enumerate() {
        write!(self.out, "{}", back::INDENT)?;
        self.write_type(module, arg.ty)?;
        let arg_name = &self.names[&NameKey::EntryPointArgument(ep_index, arg_index as u32)];
        write!(self.out, " {arg_name}")?;
        match module.types[arg.ty].inner {
            TypeInner::Array { base, size, .. } => {
                self.write_array_size(module, base, size)?;
                write!(self.out, " = ")?;
                self.write_ep_argument_initialization(
                    ep,
                    &ep_input,
                    fake_iter.next().unwrap(),
                )?;
                writeln!(self.out, ";")?;
            }
            // Struct arguments were flattened into one member per field,
            // so rebuild the struct with an initializer list.
            TypeInner::Struct { ref members, .. } => {
                write!(self.out, " = {{ ")?;
                for index in 0..members.len() {
                    if index != 0 {
                        write!(self.out, ", ")?;
                    }
                    self.write_ep_argument_initialization(
                        ep,
                        &ep_input,
                        fake_iter.next().unwrap(),
                    )?;
                }
                writeln!(self.out, " }};")?;
            }
            _ => {
                write!(self.out, " = ")?;
                self.write_ep_argument_initialization(
                    ep,
                    &ep_input,
                    fake_iter.next().unwrap(),
                )?;
                writeln!(self.out, ";")?;
            }
        }
    }
    // Every flattened member must have been consumed exactly once.
    assert!(fake_iter.next().is_none());
    Ok(())
}
/// Helper method used to write global variables
/// # Notes
/// Always adds a newline
fn write_global(
    &mut self,
    module: &Module,
    handle: Handle<crate::GlobalVariable>,
) -> BackendResult {
    let global = &module.global_variables[handle];
    let inner = &module.types[global.ty].inner;
    // Skip globals whose bindings cannot be resolved; users of such globals
    // are skipped or reported by `write` separately.
    if let Some(ref binding) = global.binding {
        if let Err(err) = self.options.resolve_resource_binding(binding) {
            log::info!(
                "Skipping global {:?} (name {:?}) for being inaccessible: {}",
                handle,
                global.name,
                err,
            );
            return Ok(());
        }
    }
    // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-variable-register
    // `register_ty` is the register class letter ("b"/"t"/"u"/"s") used in
    // the `register(...)` annotation below; empty when no register applies.
    let register_ty = match global.space {
        crate::AddressSpace::Function => unreachable!("Function address space"),
        crate::AddressSpace::Private => {
            write!(self.out, "static ")?;
            self.write_type(module, global.ty)?;
            ""
        }
        crate::AddressSpace::WorkGroup => {
            write!(self.out, "groupshared ")?;
            self.write_type(module, global.ty)?;
            ""
        }
        crate::AddressSpace::Uniform => {
            // constant buffer declarations are expected to be inlined, e.g.
            // `cbuffer foo: register(b0) { field1: type1; }`
            write!(self.out, "cbuffer")?;
            "b"
        }
        crate::AddressSpace::Storage { access } => {
            let (prefix, register) = if access.contains(crate::StorageAccess::STORE) {
                ("RW", "u")
            } else {
                ("", "t")
            };
            write!(self.out, "{prefix}ByteAddressBuffer")?;
            register
        }
        crate::AddressSpace::Handle => {
            // For binding arrays the register class comes from the element type.
            let handle_ty = match *inner {
                TypeInner::BindingArray { ref base, .. } => &module.types[*base].inner,
                _ => inner,
            };
            let register = match *handle_ty {
                TypeInner::Sampler { .. } => "s",
                // all storage textures are UAV, unconditionally
                TypeInner::Image {
                    class: crate::ImageClass::Storage { .. },
                    ..
                } => "u",
                _ => "t",
            };
            self.write_type(module, global.ty)?;
            register
        }
        crate::AddressSpace::PushConstant => {
            // The type of the push constants will be wrapped in `ConstantBuffer`
            write!(self.out, "ConstantBuffer<")?;
            "b"
        }
    };
    // If the global is a push constant write the type now because it will be a
    // generic argument to `ConstantBuffer`
    if global.space == crate::AddressSpace::PushConstant {
        self.write_global_type(module, global.ty)?;
        // need to write the array size if the type was emitted with `write_type`
        if let TypeInner::Array { base, size, .. } = module.types[global.ty].inner {
            self.write_array_size(module, base, size)?;
        }
        // Close the angled brackets for the generic argument
        write!(self.out, ">")?;
    }
    let name = &self.names[&NameKey::GlobalVariable(handle)];
    write!(self.out, " {name}")?;
    // Push constants need to be assigned a binding explicitly by the consumer
    // since naga has no way to know the binding from the shader alone
    if global.space == crate::AddressSpace::PushConstant {
        let target = self
            .options
            .push_constants_target
            .as_ref()
            .expect("No bind target was defined for the push constants block");
        write!(self.out, ": register(b{}", target.register)?;
        if target.space != 0 {
            write!(self.out, ", space{}", target.space)?;
        }
        write!(self.out, ")")?;
    }
    if let Some(ref binding) = global.binding {
        // this was already resolved earlier when we started evaluating an entry point.
        let bt = self.options.resolve_resource_binding(binding).unwrap();
        // need to write the binding array size if the type was emitted with `write_type`
        if let TypeInner::BindingArray { base, size, .. } = module.types[global.ty].inner {
            if let Some(overridden_size) = bt.binding_array_size {
                write!(self.out, "[{overridden_size}]")?;
            } else {
                self.write_array_size(module, base, size)?;
            }
        }
        write!(self.out, " : register({}{}", register_ty, bt.register)?;
        if bt.space != 0 {
            write!(self.out, ", space{}", bt.space)?;
        }
        write!(self.out, ")")?;
    } else {
        // need to write the array size if the type was emitted with `write_type`
        if let TypeInner::Array { base, size, .. } = module.types[global.ty].inner {
            self.write_array_size(module, base, size)?;
        }
        // Private globals get an initializer: the IR one, or a zero default.
        if global.space == crate::AddressSpace::Private {
            write!(self.out, " = ")?;
            if let Some(init) = global.init {
                self.write_const_expression(module, init)?;
            } else {
                self.write_default_init(module, global.ty)?;
            }
        }
    }
    // `cbuffer` declarations inline their single member: `cbuffer X { T X; }`.
    if global.space == crate::AddressSpace::Uniform {
        write!(self.out, " {{ ")?;
        self.write_global_type(module, global.ty)?;
        write!(
            self.out,
            " {}",
            &self.names[&NameKey::GlobalVariable(handle)]
        )?;
        // need to write the array size if the type was emitted with `write_type`
        if let TypeInner::Array { base, size, .. } = module.types[global.ty].inner {
            self.write_array_size(module, base, size)?;
        }
        writeln!(self.out, "; }}")?;
    } else {
        writeln!(self.out, ";")?;
    }
    Ok(())
}
/// Helper method used to write global constants
///
/// Emits `static const <ty> <name>[dims] = <init>;`.
///
/// # Notes
/// Ends in a newline
fn write_global_constant(
    &mut self,
    module: &Module,
    handle: Handle<crate::Constant>,
) -> BackendResult {
    let constant = &module.constants[handle];
    write!(self.out, "static const ")?;
    self.write_type(module, constant.ty)?;
    write!(self.out, " {}", &self.names[&NameKey::Constant(handle)])?;
    // HLSL puts array dimensions after the name, not the type.
    if let TypeInner::Array { base, size, .. } = module.types[constant.ty].inner {
        self.write_array_size(module, base, size)?;
    }
    write!(self.out, " = ")?;
    self.write_const_expression(module, constant.init)?;
    writeln!(self.out, ";")?;
    Ok(())
}
/// Write the `[len]` suffix(es) for an array type, recursing into nested
/// arrays so `array<array<T, a>, b>` becomes `[b][a]` after the name.
pub(super) fn write_array_size(
    &mut self,
    module: &Module,
    base: Handle<crate::Type>,
    size: crate::ArraySize,
) -> BackendResult {
    match size {
        crate::ArraySize::Constant(len) => write!(self.out, "[{len}]")?,
        // Overrides are rejected up front in `write`, and runtime-sized
        // arrays only occur in storage buffers (ByteAddressBuffer), so
        // neither can reach this point.
        crate::ArraySize::Pending(_) | crate::ArraySize::Dynamic => unreachable!(),
    }
    if let TypeInner::Array {
        base: inner_base,
        size: inner_size,
        ..
    } = module.types[base].inner
    {
        self.write_array_size(module, inner_base, inner_size)?;
    }
    Ok(())
}
/// Helper method used to write structs
///
/// Inserts explicit `int` padding members so the HLSL layout matches the
/// IR member offsets and struct span.
///
/// # Notes
/// Ends in a newline
fn write_struct(
    &mut self,
    module: &Module,
    handle: Handle<crate::Type>,
    members: &[crate::StructMember],
    span: u32,
    shader_stage: Option<(ShaderStage, Io)>,
) -> BackendResult {
    // Write struct name
    let struct_name = &self.names[&NameKey::Type(handle)];
    writeln!(self.out, "struct {struct_name} {{")?;
    // Byte offset just past the previously written member.
    let mut last_offset = 0;
    for (index, member) in members.iter().enumerate() {
        if member.binding.is_none() && member.offset > last_offset {
            // using int as padding should work as long as the backend
            // doesn't support a type that's less than 4 bytes in size
            // (Error::UnsupportedScalar catches this)
            let padding = (member.offset - last_offset) / 4;
            for i in 0..padding {
                writeln!(self.out, "{}int _pad{}_{};", back::INDENT, index, i)?;
            }
        }
        let ty_inner = &module.types[member.ty].inner;
        last_offset = member.offset + ty_inner.size_hlsl(module.to_ctx());
        // The indentation is only for readability
        write!(self.out, "{}", back::INDENT)?;
        match module.types[member.ty].inner {
            TypeInner::Array { base, size, .. } => {
                // HLSL arrays are written as `type name[size]`
                self.write_global_type(module, member.ty)?;
                // Write `name`
                write!(
                    self.out,
                    " {}",
                    &self.names[&NameKey::StructMember(handle, index as u32)]
                )?;
                // Write [size]
                self.write_array_size(module, base, size)?;
            }
            // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
            // See the module-level block comment in mod.rs for details.
            TypeInner::Matrix {
                rows,
                columns,
                scalar,
            } if member.binding.is_none() && rows == crate::VectorSize::Bi => {
                let vec_ty = TypeInner::Vector { size: rows, scalar };
                let field_name_key = NameKey::StructMember(handle, index as u32);
                // One `vec2` field per column: `name_0; name_1; …`
                for i in 0..columns as u8 {
                    if i != 0 {
                        write!(self.out, "; ")?;
                    }
                    self.write_value_type(module, &vec_ty)?;
                    write!(self.out, " {}_{}", &self.names[&field_name_key], i)?;
                }
            }
            _ => {
                // Write modifier before type
                if let Some(ref binding) = member.binding {
                    self.write_modifier(binding)?;
                }
                // Even though Naga IR matrices are column-major, we must describe
                // matrices passed from the CPU as being in row-major order.
                // See the module-level block comment in mod.rs for details.
                if let TypeInner::Matrix { .. } = module.types[member.ty].inner {
                    write!(self.out, "row_major ")?;
                }
                // Write the member type and name
                self.write_type(module, member.ty)?;
                write!(
                    self.out,
                    " {}",
                    &self.names[&NameKey::StructMember(handle, index as u32)]
                )?;
            }
        }
        self.write_semantic(&member.binding, shader_stage)?;
        writeln!(self.out, ";")?;
    }
    // add padding at the end since sizes of types don't get rounded up to their alignment in HLSL
    if members.last().unwrap().binding.is_none() && span > last_offset {
        let padding = (span - last_offset) / 4;
        for i in 0..padding {
            writeln!(self.out, "{}int _end_pad_{};", back::INDENT, i)?;
        }
    }
    writeln!(self.out, "}};")?;
    Ok(())
}
/// Helper method used to write global/structs non image/sampler types
///
/// # Notes
/// Adds no trailing or leading whitespace
pub(super) fn write_global_type(
    &mut self,
    module: &Module,
    ty: Handle<crate::Type>,
) -> BackendResult {
    // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
    // See the module-level block comment in mod.rs for details.
    match get_inner_matrix_data(module, ty) {
        Some(MatrixType {
            columns,
            rows: crate::VectorSize::Bi,
            width: 4,
        }) => write!(self.out, "__mat{}x2", columns as u8)?,
        matrix_data => {
            // Even though Naga IR matrices are column-major, we must describe
            // matrices passed from the CPU as being in row-major order.
            // See the module-level block comment in mod.rs for details.
            if matrix_data.is_some() {
                write!(self.out, "row_major ")?;
            }
            self.write_type(module, ty)?;
        }
    }
    Ok(())
}
/// Helper method used to write non image/sampler types
///
/// # Notes
/// Adds no trailing or leading whitespace
pub(super) fn write_type(&mut self, module: &Module, ty: Handle<crate::Type>) -> BackendResult {
    match module.types[ty].inner {
        // Structs are referred to by their generated name.
        TypeInner::Struct { .. } => {
            write!(self.out, "{}", self.names[&NameKey::Type(ty)])?;
        }
        // hlsl array has the size separated from the base type; the `[size]`
        // suffix is written by `write_array_size` at the use site.
        TypeInner::Array { base, .. } | TypeInner::BindingArray { base, .. } => {
            self.write_type(module, base)?;
        }
        ref other => self.write_value_type(module, other)?,
    }
    Ok(())
}
/// Helper method used to write value types
///
/// # Notes
/// Adds no trailing or leading whitespace
pub(super) fn write_value_type(&mut self, module: &Module, inner: &TypeInner) -> BackendResult {
    match *inner {
        TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
            write!(self.out, "{}", scalar.to_hlsl_str()?)?;
        }
        TypeInner::Vector { size, scalar } => {
            let base = scalar.to_hlsl_str()?;
            write!(self.out, "{base}{}", back::vector_size_str(size))?;
        }
        TypeInner::Matrix {
            columns,
            rows,
            scalar,
        } => {
            // The IR supports only float matrix
            // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-matrix
            // Because of the implicit transpose all matrices have in HLSL, we
            // need to transpose the size as well.
            let base = scalar.to_hlsl_str()?;
            write!(
                self.out,
                "{base}{}x{}",
                back::vector_size_str(columns),
                back::vector_size_str(rows),
            )?;
        }
        TypeInner::Image {
            dim,
            arrayed,
            class,
        } => self.write_image_type(dim, arrayed, class)?,
        TypeInner::Sampler { comparison } => {
            write!(
                self.out,
                "{}",
                if comparison {
                    "SamplerComparisonState"
                } else {
                    "SamplerState"
                }
            )?;
        }
        // HLSL arrays are written as `type name[size]`; only the `[size]`
        // part is emitted here — base type and name are written by callers.
        TypeInner::Array { base, size, .. } | TypeInner::BindingArray { base, size } => {
            self.write_array_size(module, base, size)?;
        }
        _ => return Err(Error::Unimplemented(format!("write_value_type {inner:?}"))),
    }
    Ok(())
}
/// Helper method used to write functions
///
/// Emits the full HLSL function: optional result modifier, return type
/// (or the flattened entry-point output struct), name, argument list
/// (including the gathered entry-point input struct and the synthetic
/// `SV_GroupThreadID` argument when workgroup zero-init is needed),
/// result semantic, and the body with locals zero-initialized.
///
/// # Notes
/// Ends in a newline
fn write_function(
    &mut self,
    module: &Module,
    name: &str,
    func: &crate::Function,
    func_ctx: &back::FunctionCtx<'_>,
    info: &valid::FunctionInfo,
) -> BackendResult {
    // Function Declaration Syntax - https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-function-syntax

    // Decide which expressions must be baked into named temporaries before
    // any statements are written.
    self.update_expressions_to_bake(module, func, info);

    // Write modifier
    // Only an invariant `SV_Position` result needs one (see `write_modifier`).
    if let Some(crate::FunctionResult {
        binding:
            Some(
                ref binding @ crate::Binding::BuiltIn(crate::BuiltIn::Position {
                    invariant: true,
                }),
            ),
        ..
    }) = func.result
    {
        self.write_modifier(binding)?;
    }

    // Write return type
    if let Some(ref result) = func.result {
        match func_ctx.ty {
            back::FunctionType::Function(_) => {
                self.write_type(module, result.ty)?;
            }
            back::FunctionType::EntryPoint(index) => {
                // Entry points with flattened output return the generated
                // wrapper struct instead of the IR result type.
                if let Some(ref ep_output) = self.entry_point_io[index as usize].output {
                    write!(self.out, "{}", ep_output.ty_name)?;
                } else {
                    self.write_type(module, result.ty)?;
                }
            }
        }
    } else {
        write!(self.out, "void")?;
    }

    // Write function name
    write!(self.out, " {name}(")?;

    let need_workgroup_variables_initialization =
        self.need_workgroup_variables_initialization(func_ctx, module);

    // Write function arguments for non entry point functions
    match func_ctx.ty {
        back::FunctionType::Function(handle) => {
            for (index, arg) in func.arguments.iter().enumerate() {
                if index != 0 {
                    write!(self.out, ", ")?;
                }
                // Write argument type
                let arg_ty = match module.types[arg.ty].inner {
                    // pointers in function arguments are expected and resolve to `inout`
                    TypeInner::Pointer { base, .. } => {
                        //TODO: can we narrow this down to just `in` when possible?
                        write!(self.out, "inout ")?;
                        base
                    }
                    _ => arg.ty,
                };
                self.write_type(module, arg_ty)?;
                let argument_name =
                    &self.names[&NameKey::FunctionArgument(handle, index as u32)];
                // Write argument name. Space is important.
                write!(self.out, " {argument_name}")?;
                // Array arguments carry their `[size]` suffix after the name.
                if let TypeInner::Array { base, size, .. } = module.types[arg_ty].inner {
                    self.write_array_size(module, base, size)?;
                }
            }
        }
        back::FunctionType::EntryPoint(ep_index) => {
            if let Some(ref ep_input) = self.entry_point_io[ep_index as usize].input {
                // Inputs were gathered into a single wrapper struct argument.
                write!(self.out, "{} {}", ep_input.ty_name, ep_input.arg_name)?;
            } else {
                // No gathered input: write each argument with its semantic.
                let stage = module.entry_points[ep_index as usize].stage;
                for (index, arg) in func.arguments.iter().enumerate() {
                    if index != 0 {
                        write!(self.out, ", ")?;
                    }
                    self.write_type(module, arg.ty)?;
                    let argument_name =
                        &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
                    write!(self.out, " {argument_name}")?;
                    if let TypeInner::Array { base, size, .. } = module.types[arg.ty].inner {
                        self.write_array_size(module, base, size)?;
                    }
                    self.write_semantic(&arg.binding, Some((stage, Io::Input)))?;
                }
            }
            // Workgroup zero-init needs the local invocation id, delivered
            // via a synthetic `SV_GroupThreadID` argument.
            if need_workgroup_variables_initialization {
                if self.entry_point_io[ep_index as usize].input.is_some()
                    || !func.arguments.is_empty()
                {
                    write!(self.out, ", ")?;
                }
                write!(self.out, "uint3 __local_invocation_id : SV_GroupThreadID")?;
            }
        }
    }
    // Ends of arguments
    write!(self.out, ")")?;

    // Write semantic if it present
    if let back::FunctionType::EntryPoint(index) = func_ctx.ty {
        let stage = module.entry_points[index as usize].stage;
        if let Some(crate::FunctionResult { ref binding, .. }) = func.result {
            self.write_semantic(binding, Some((stage, Io::Output)))?;
        }
    }

    // Function body start
    writeln!(self.out)?;
    writeln!(self.out, "{{")?;

    // Zero-initialize workgroup memory before anything else runs.
    if need_workgroup_variables_initialization {
        self.write_workgroup_variables_initialization(func_ctx, module)?;
    }

    // Unpack the gathered entry-point input struct into locals matching
    // the IR argument names.
    if let back::FunctionType::EntryPoint(index) = func_ctx.ty {
        self.write_ep_arguments_initialization(module, func, index)?;
    }

    // Write function local variables
    for (handle, local) in func.local_variables.iter() {
        // Write indentation (only for readability)
        write!(self.out, "{}", back::INDENT)?;

        // Write the local name
        // The leading space is important
        self.write_type(module, local.ty)?;
        write!(self.out, " {}", self.names[&func_ctx.name_key(handle)])?;
        // Write size for array type
        if let TypeInner::Array { base, size, .. } = module.types[local.ty].inner {
            self.write_array_size(module, base, size)?;
        }

        write!(self.out, " = ")?;
        // Write the local initializer if needed
        if let Some(init) = local.init {
            self.write_expr(module, init, func_ctx)?;
        } else {
            // Zero initialize local variables
            self.write_default_init(module, local.ty)?;
        }

        // Finish the local with `;` and add a newline (only for readability)
        writeln!(self.out, ";")?
    }

    if !func.local_variables.is_empty() {
        writeln!(self.out)?;
    }

    // Write the function body (statement list)
    for sta in func.body.iter() {
        // The indentation should always be 1 when writing the function body
        self.write_stmt(module, sta, func_ctx, back::Level(1))?;
    }

    writeln!(self.out, "}}")?;

    // Baked-expression names are per-function; reset for the next one.
    self.named_expressions.clear();

    Ok(())
}
/// Returns whether the function being written must zero-initialize
/// workgroup memory: the backend option is enabled, the function is a
/// compute entry point, and at least one workgroup-space global is
/// actually used by it.
fn need_workgroup_variables_initialization(
    &mut self,
    func_ctx: &back::FunctionCtx,
    module: &Module,
) -> bool {
    if !self.options.zero_initialize_workgroup_memory {
        return false;
    }
    if !func_ctx.ty.is_compute_entry_point(module) {
        return false;
    }
    module.global_variables.iter().any(|(handle, var)| {
        var.space == crate::AddressSpace::WorkGroup && !func_ctx.info[handle].is_empty()
    })
}
/// Emits the workgroup-memory zero-initialization prologue for a compute
/// entry point: every used workgroup-space global is assigned its default
/// (zero) value, guarded so only local invocation (0,0,0) performs the
/// stores, followed by a workgroup barrier.
fn write_workgroup_variables_initialization(
    &mut self,
    func_ctx: &back::FunctionCtx,
    module: &Module,
) -> BackendResult {
    let level = back::Level(1);

    writeln!(
        self.out,
        "{level}if (all(__local_invocation_id == uint3(0u, 0u, 0u))) {{"
    )?;

    for (handle, var) in module.global_variables.iter() {
        // Skip globals this entry point never touches, and anything not
        // living in workgroup memory.
        if func_ctx.info[handle].is_empty() || var.space != crate::AddressSpace::WorkGroup {
            continue;
        }
        let name = &self.names[&NameKey::GlobalVariable(handle)];
        write!(self.out, "{}{} = ", level.next(), name)?;
        self.write_default_init(module, var.ty)?;
        writeln!(self.out, ";")?;
    }

    writeln!(self.out, "{level}}}")?;
    self.write_barrier(crate::Barrier::WORK_GROUP, level)
}
/// Helper method used to write switches
///
/// Contains two FXC workarounds:
/// * a switch whose cases are all empty fall-throughs except the last is
///   rewritten as a `do {} while(false);` loop;
/// * fallthrough out of a non-empty case body is emulated by duplicating
///   the bodies of the cases that would be fallen into.
///
/// Also cooperates with `back::continue_forward` to route `continue`
/// statements inside the switch out to the enclosing loop.
fn write_switch(
    &mut self,
    module: &Module,
    func_ctx: &back::FunctionCtx<'_>,
    level: back::Level,
    selector: Handle<crate::Expression>,
    cases: &[crate::SwitchCase],
) -> BackendResult {
    // Write all cases
    let indent_level_1 = level.next();
    let indent_level_2 = indent_level_1.next();

    // See docs of `back::continue_forward` module.
    // If the context says so, declare the flag variable that records
    // whether a forwarded continue/break must run after the switch.
    if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
        writeln!(self.out, "{level}bool {variable} = false;",)?;
    };

    // Check if there is only one body, by seeing if all except the last case are fall through
    // with empty bodies. FXC doesn't handle these switches correctly, so
    // we generate a `do {} while(false);` loop instead. There must be a default case, so there
    // is no need to check if one of the cases would have matched.
    let one_body = cases
        .iter()
        .rev()
        .skip(1)
        .all(|case| case.fall_through && case.body.is_empty());
    if one_body {
        // Start the do-while
        writeln!(self.out, "{level}do {{")?;
        // Note: Expressions have no side-effects so we don't need to emit selector expression.

        // Body
        if let Some(case) = cases.last() {
            for sta in case.body.iter() {
                self.write_stmt(module, sta, func_ctx, indent_level_1)?;
            }
        }
        // End do-while
        writeln!(self.out, "{level}}} while(false);")?;
    } else {
        // Start the switch
        write!(self.out, "{level}")?;
        write!(self.out, "switch(")?;
        self.write_expr(module, selector, func_ctx)?;
        writeln!(self.out, ") {{")?;

        for (i, case) in cases.iter().enumerate() {
            match case.value {
                crate::SwitchValue::I32(value) => {
                    write!(self.out, "{indent_level_1}case {value}:")?
                }
                crate::SwitchValue::U32(value) => {
                    write!(self.out, "{indent_level_1}case {value}u:")?
                }
                crate::SwitchValue::Default => write!(self.out, "{indent_level_1}default:")?,
            }

            // The new block is not only stylistic, it plays a role here:
            // We might end up having to write the same case body
            // multiple times due to FXC not supporting fallthrough.
            // Therefore, some `Expression`s written by `Statement::Emit`
            // will end up having the same name (`_expr<handle_index>`).
            // So we need to put each case in its own scope.
            let write_block_braces = !(case.fall_through && case.body.is_empty());
            if write_block_braces {
                writeln!(self.out, " {{")?;
            } else {
                writeln!(self.out)?;
            }

            // Although FXC does support a series of case clauses before
            // a block[^yes], it does not support fallthrough from a
            // non-empty case block to the next[^no]. If this case has a
            // non-empty body with a fallthrough, emulate that by
            // duplicating the bodies of all the cases it would fall
            // into as extensions of this case's own body. This makes
            // the HLSL output potentially quadratic in the size of the
            // Naga IR.
            //
            // [^yes]: ```hlsl
            // case 1:
            // case 2: do_stuff()
            // ```
            // [^no]: ```hlsl
            // case 1: do_this();
            // case 2: do_that();
            // ```
            if case.fall_through && !case.body.is_empty() {
                let curr_len = i + 1;
                // Index of the first following case that does not fall
                // through; the duplicated run ends there (inclusive).
                let end_case_idx = curr_len
                    + cases
                        .iter()
                        .skip(curr_len)
                        .position(|case| !case.fall_through)
                        .unwrap();
                let indent_level_3 = indent_level_2.next();
                for case in &cases[i..=end_case_idx] {
                    writeln!(self.out, "{indent_level_2}{{")?;
                    let prev_len = self.named_expressions.len();
                    for sta in case.body.iter() {
                        self.write_stmt(module, sta, func_ctx, indent_level_3)?;
                    }
                    // Clear all named expressions that were previously inserted by the statements in the block
                    self.named_expressions.truncate(prev_len);
                    writeln!(self.out, "{indent_level_2}}}")?;
                }

                // Only add an explicit `break` if the final duplicated
                // body does not already end in a terminator.
                let last_case = &cases[end_case_idx];
                if last_case.body.last().map_or(true, |s| !s.is_terminator()) {
                    writeln!(self.out, "{indent_level_2}break;")?;
                }
            } else {
                for sta in case.body.iter() {
                    self.write_stmt(module, sta, func_ctx, indent_level_2)?;
                }
                // Non-fallthrough cases get a `break` unless the body
                // already ends in a terminator.
                if !case.fall_through && case.body.last().map_or(true, |s| !s.is_terminator()) {
                    writeln!(self.out, "{indent_level_2}break;")?;
                }
            }

            if write_block_braces {
                writeln!(self.out, "{indent_level_1}}}")?;
            }
        }

        writeln!(self.out, "{level}}}")?;
    }

    // Handle any forwarded continue statements.
    use back::continue_forward::ExitControlFlow;
    let op = match self.continue_ctx.exit_switch() {
        ExitControlFlow::None => None,
        ExitControlFlow::Continue { variable } => Some(("continue", variable)),
        ExitControlFlow::Break { variable } => Some(("break", variable)),
    };
    if let Some((control_flow, variable)) = op {
        writeln!(self.out, "{level}if ({variable}) {{")?;
        writeln!(self.out, "{indent_level_1}{control_flow};")?;
        writeln!(self.out, "{level}}}")?;
    }

    Ok(())
}
/// Helper method used to write statements
///
/// # Notes
/// Always adds a newline
fn write_stmt(
&mut self,
module: &Module,
stmt: &crate::Statement,
func_ctx: &back::FunctionCtx<'_>,
level: back::Level,
) -> BackendResult {
use crate::Statement;
match *stmt {
Statement::Emit(ref range) => {
for handle in range.clone() {
let ptr_class = func_ctx.resolve_type(handle, &module.types).pointer_space();
let expr_name = if ptr_class.is_some() {
// HLSL can't save a pointer-valued expression in a variable,
// but we shouldn't ever need to: they should never be named expressions,
// and none of the expression types flagged by bake_ref_count can be pointer-valued.
None
} else if let Some(name) = func_ctx.named_expressions.get(&handle) {
// Front end provides names for all variables at the start of writing.
// But we write them to step by step. We need to recache them
// Otherwise, we could accidentally write variable name instead of full expression.
// Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
Some(self.namer.call(name))
} else if self.need_bake_expressions.contains(&handle) {
Some(Baked(handle).to_string())
} else {
None
};
if let Some(name) = expr_name {
write!(self.out, "{level}")?;
self.write_named_expr(module, handle, name, handle, func_ctx)?;
}
}
}
// TODO: copy-paste from glsl-out
Statement::Block(ref block) => {
write!(self.out, "{level}")?;
writeln!(self.out, "{{")?;
for sta in block.iter() {
// Increase the indentation to help with readability
self.write_stmt(module, sta, func_ctx, level.next())?
}
writeln!(self.out, "{level}}}")?
}
// TODO: copy-paste from glsl-out
Statement::If {
condition,
ref accept,
ref reject,
} => {
write!(self.out, "{level}")?;
write!(self.out, "if (")?;
self.write_expr(module, condition, func_ctx)?;
writeln!(self.out, ") {{")?;
let l2 = level.next();
for sta in accept {
// Increase indentation to help with readability
self.write_stmt(module, sta, func_ctx, l2)?;
}
// If there are no statements in the reject block we skip writing it
// This is only for readability
if !reject.is_empty() {
writeln!(self.out, "{level}}} else {{")?;
for sta in reject {
// Increase indentation to help with readability
self.write_stmt(module, sta, func_ctx, l2)?;
}
}
writeln!(self.out, "{level}}}")?
}
// TODO: copy-paste from glsl-out
Statement::Kill => writeln!(self.out, "{level}discard;")?,
Statement::Return { value: None } => {
writeln!(self.out, "{level}return;")?;
}
Statement::Return { value: Some(expr) } => {
let base_ty_res = &func_ctx.info[expr].ty;
let mut resolved = base_ty_res.inner_with(&module.types);
if let TypeInner::Pointer { base, space: _ } = *resolved {
resolved = &module.types[base].inner;
}
if let TypeInner::Struct { .. } = *resolved {
// We can safely unwrap here, since we now we working with struct
let ty = base_ty_res.handle().unwrap();
let struct_name = &self.names[&NameKey::Type(ty)];
let variable_name = self.namer.call(&struct_name.to_lowercase());
write!(self.out, "{level}const {struct_name} {variable_name} = ",)?;
self.write_expr(module, expr, func_ctx)?;
writeln!(self.out, ";")?;
--> --------------------
--> maximum size reached
--> --------------------
[ zur Elbe Produktseite wechseln0.63Quellennavigators
]
|
2026-04-04
|