//! Logical device (`Device`): definition, construction, maintenance, and
//! buffer/texture resource creation for `wgpu-core`.
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
command, conv,
device::{
bgl, create_validator, life::WaitIdleError, map_buffer, AttachmentData,
DeviceLostInvocation, HostMap, MissingDownlevelFlags, MissingFeatures, RenderPassCon text,
CLEANUP_WAIT_MS,
},
hal_label,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTrackerAction,
},
instance::Adapter,
lock::{rank, Mutex, RwLock},
pipeline,
pool::ResourcePool,
resource::{
self, Buffer, Fallible, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture,
TextureView, TextureViewNotRenderableReason, TrackingData,
},
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
track::{
BindGroupStates, DeviceTracker, TextureSelector, TrackerIndexAllocators, UsageScope,
UsageScopePool,
},
validation::{self, validate_color_attachment_bytes_per_sample},
weak_vec::WeakVec,
FastHashMap, LabelHelpers,
};
use arrayvec::ArrayVec;
use smallvec::SmallVec;
use wgt::{
math::align_to, DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension,
};
use crate::resource::{AccelerationStructure, Tlas};
use std::{
borrow::Cow,
mem::{self, ManuallyDrop},
num::NonZeroU32,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, OnceLock, Weak,
},
};
use super::{
queue::Queue, DeviceDescriptor, DeviceError, DeviceLostClosure, UserClosures,
ENTRYPOINT_FAILURE_ERROR, ZERO_BUFFER_SIZE,
};
/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.
pub struct Device {
/// The underlying hal device, type-erased for dynamic dispatch.
raw: Box<dyn hal::DynDevice>,
/// The adapter this device was created from.
pub(crate) adapter: Arc<Adapter>,
/// Weak reference to this device's queue; set once via [`Device::set_queue`].
pub(crate) queue: OnceLock<Weak<Queue>>,
/// Zero-filled buffer used internally for texture clears; destroyed manually in `Drop`.
pub(crate) zero_buffer: ManuallyDrop<Box<dyn hal::DynBuffer>>,
/// The `label` from the descriptor used to create the resource.
label: String,
/// Allocator for command encoders used by this device.
pub(crate) command_allocator: command::CommandAllocator,
/// The index of the last command submission that was attempted.
///
/// Note that `fence` may never be signalled with this value, if the command
/// submission failed. If you need to wait for everything running on a
/// `Queue` to complete, wait for [`last_successful_submission_index`].
///
/// [`last_successful_submission_index`]: Device::last_successful_submission_index
pub(crate) active_submission_index: hal::AtomicFenceValue,
/// The index of the last successful submission to this device's
/// [`hal::Queue`].
///
/// Unlike [`active_submission_index`], which is incremented each time
/// submission is attempted, this is updated only when submission succeeds,
/// so waiting for this value won't hang waiting for work that was never
/// submitted.
///
/// [`active_submission_index`]: Device::active_submission_index
pub(crate) last_successful_submission_index: hal::AtomicFenceValue,
// NOTE: if both are needed, the `snatchable_lock` must be consistently acquired before the
// `fence` lock to avoid deadlocks.
pub(crate) fence: RwLock<ManuallyDrop<Box<dyn hal::DynFence>>>,
/// Lock guarding "snatching" of raw hal resources out of live wgpu resources.
pub(crate) snatchable_lock: SnatchLock,
/// Is this device valid? Valid is closely associated with "lose the device",
/// which can be triggered by various methods, including at the end of device
/// destroy, and by any GPU errors that cause us to no longer trust the state
/// of the device. Ideally we would like to fold valid into the storage of
/// the device itself (for example as an Error enum), but unfortunately we
/// need to continue to be able to retrieve the device in poll_devices to
/// determine if it can be dropped. If our internal accesses of devices were
/// done through ref-counted references and external accesses checked for
/// Error enums, we wouldn't need this. For now, we need it. All the call
/// sites where we check it are areas that should be revisited if we start
/// using ref-counted references for internal access.
pub(crate) valid: AtomicBool,
/// Closure to be called on "lose the device". This is invoked directly by
/// device.lose or by the UserCallbacks returned from maintain when the device
/// has been destroyed and its queues are empty.
pub(crate) device_lost_closure: Mutex<Option<DeviceLostClosure>>,
/// Stores the state of buffers and textures.
pub(crate) trackers: Mutex<DeviceTracker>,
/// Allocators for the tracker indices handed out to resources.
pub(crate) tracker_indices: TrackerIndexAllocators,
/// Pool of bind group layouts, allowing deduplication.
pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>,
/// Alignment requirements reported by the adapter at device creation.
pub(crate) alignments: hal::Alignments,
/// Limits requested at device creation (`desc.required_limits`).
pub(crate) limits: wgt::Limits,
/// Features requested at device creation (`desc.required_features`).
pub(crate) features: wgt::Features,
/// Downlevel capabilities inherited from the adapter.
pub(crate) downlevel: wgt::DownlevelCapabilities,
/// Instance flags in effect (affect labeling/debug behavior).
pub(crate) instance_flags: wgt::InstanceFlags,
/// Destruction work postponed until it is safe to take the snatch lock;
/// see [`Device::deferred_resource_destruction`].
pub(crate) deferred_destroy: Mutex<Vec<DeferredDestroy>>,
/// Pool of reusable usage scopes for tracking.
pub(crate) usage_scopes: UsageScopePool,
// Starts at 1 so results fit in a NonZeroU64 (see `new`).
pub(crate) last_acceleration_structure_build_command_index: AtomicU64,
#[cfg(feature = "indirect-validation")]
pub(crate) indirect_validation: Option<crate::indirect_validation::IndirectValidation>,
// needs to be dropped last
#[cfg(feature = "trace")]
pub(crate) trace: Mutex<Option<trace::Trace>>,
}
/// Destruction work that was postponed because taking the snatch lock was not
/// safe at the time (e.g. from a `Drop` implementation); drained by
/// [`Device::deferred_resource_destruction`].
pub(crate) enum DeferredDestroy {
/// Texture views whose raw hal views still need to be destroyed.
TextureViews(WeakVec<TextureView>),
/// Bind groups whose raw hal bind groups still need to be destroyed.
BindGroups(WeakVec<BindGroup>),
}
impl std::fmt::Debug for Device {
    /// Debug-format the device, showing its label plus the capability sets
    /// (limits, features, downlevel) it was created with.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("Device");
        dbg.field("label", &self.label());
        dbg.field("limits", &self.limits);
        dbg.field("features", &self.features);
        dbg.field("downlevel", &self.downlevel);
        dbg.finish()
    }
}
impl Drop for Device {
/// Tears down the device-owned hal objects (zero buffer, fence, and any
/// indirect-validation state) before the raw hal device itself is dropped.
fn drop(&mut self) {
resource_log!("Drop {}", self.error_ident());
// SAFETY: We are in the Drop impl and we don't use self.zero_buffer anymore after this point.
let zero_buffer = unsafe { ManuallyDrop::take(&mut self.zero_buffer) };
// SAFETY: We are in the Drop impl and we don't use self.fence anymore after this point.
let fence = unsafe { ManuallyDrop::take(&mut self.fence.write()) };
// Dispose of indirect-validation resources while the raw device is still alive.
#[cfg(feature = "indirect-validation")]
if let Some(indirect_validation) = self.indirect_validation.take() {
indirect_validation.dispose(self.raw.as_ref());
}
// SAFETY: `zero_buffer` and `fence` were created from `self.raw` and are no
// longer referenced anywhere else at this point.
unsafe {
self.raw.destroy_buffer(zero_buffer);
self.raw.destroy_fence(fence);
}
}
}
impl Device {
    /// Borrow the raw hal device.
    pub(crate) fn raw(&self) -> &dyn hal::DynDevice {
        &*self.raw
    }

    /// Fails with [`MissingFeatures`] unless every bit of `feature` was
    /// enabled when this device was created.
    pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
        match self.features.contains(feature) {
            true => Ok(()),
            false => Err(MissingFeatures(feature)),
        }
    }

    /// Fails with [`MissingDownlevelFlags`] unless every bit of `flags` is
    /// present in this device's downlevel capabilities.
    pub(crate) fn require_downlevel_flags(
        &self,
        flags: wgt::DownlevelFlags,
    ) -> Result<(), MissingDownlevelFlags> {
        if !self.downlevel.flags.contains(flags) {
            return Err(MissingDownlevelFlags(flags));
        }
        Ok(())
    }
}
impl Device {
/// Creates a new `Device` over the given raw hal device.
///
/// Allocates the device-owned fence and the internal zero buffer (used for
/// texture clears), and — when the corresponding features are enabled —
/// sets up tracing and indirect-execution validation.
///
/// Returns a [`DeviceError`] if any hal allocation fails, or
/// `DeviceError::Lost` if indirect-validation setup fails.
pub(crate) fn new(
raw_device: Box<dyn hal::DynDevice>,
adapter: &Arc<Adapter>,
desc: &DeviceDescriptor,
trace_path: Option<&std::path::Path>,
instance_flags: wgt::InstanceFlags,
) -> Result<Self, DeviceError> {
// A trace path without the 'trace' feature can't be honored; warn instead of failing.
#[cfg(not(feature = "trace"))]
if let Some(_) = trace_path {
log::error!("Feature 'trace' is not enabled");
}
let fence = unsafe { raw_device.create_fence() }.map_err(DeviceError::from_hal)?;
let command_allocator = command::CommandAllocator::new();
// Create zeroed buffer used for texture clears.
let zero_buffer = unsafe {
raw_device.create_buffer(&hal::BufferDescriptor {
label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags),
size: ZERO_BUFFER_SIZE,
usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST,
memory_flags: hal::MemoryFlags::empty(),
})
}
.map_err(DeviceError::from_hal)?;
let alignments = adapter.raw.capabilities.alignments.clone();
let downlevel = adapter.raw.capabilities.downlevel.clone();
// Indirect validation only makes sense where indirect execution is supported.
#[cfg(feature = "indirect-validation")]
let indirect_validation = if downlevel
.flags
.contains(wgt::DownlevelFlags::INDIRECT_EXECUTION)
{
match crate::indirect_validation::IndirectValidation::new(
raw_device.as_ref(),
&desc.required_limits,
) {
Ok(indirect_validation) => Some(indirect_validation),
Err(e) => {
log::error!("indirect-validation error: {e:?}");
return Err(DeviceError::Lost);
}
}
} else {
None
};
Ok(Self {
raw: raw_device,
adapter: adapter.clone(),
queue: OnceLock::new(),
zero_buffer: ManuallyDrop::new(zero_buffer),
label: desc.label.to_string(),
command_allocator,
active_submission_index: AtomicU64::new(0),
last_successful_submission_index: AtomicU64::new(0),
fence: RwLock::new(rank::DEVICE_FENCE, ManuallyDrop::new(fence)),
snatchable_lock: unsafe { SnatchLock::new(rank::DEVICE_SNATCHABLE_LOCK) },
valid: AtomicBool::new(true),
device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None),
trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()),
tracker_indices: TrackerIndexAllocators::new(),
bgl_pool: ResourcePool::new(),
// Trace setup failure is non-fatal: log and continue without tracing.
#[cfg(feature = "trace")]
trace: Mutex::new(
rank::DEVICE_TRACE,
trace_path.and_then(|path| match trace::Trace::new(path) {
Ok(mut trace) => {
trace.add(trace::Action::Init {
desc: desc.clone(),
backend: adapter.backend(),
});
Some(trace)
}
Err(e) => {
log::error!("Unable to start a trace in '{path:?}': {e}");
None
}
}),
),
alignments,
limits: desc.required_limits.clone(),
features: desc.required_features,
downlevel,
instance_flags,
deferred_destroy: Mutex::new(rank::DEVICE_DEFERRED_DESTROY, Vec::new()),
usage_scopes: Mutex::new(rank::DEVICE_USAGE_SCOPES, Default::default()),
// By starting at one, we can put the result in a NonZeroU64.
last_acceleration_structure_build_command_index: AtomicU64::new(1),
#[cfg(feature = "indirect-validation")]
indirect_validation,
})
}
/// Returns the backend this device is using, as reported by its adapter.
pub fn backend(&self) -> wgt::Backend {
self.adapter.backend()
}
/// Returns `true` while the device has not been marked lost/destroyed
/// (see the `valid` field for the full story).
pub fn is_valid(&self) -> bool {
self.valid.load(Ordering::Acquire)
}
/// Errors with [`DeviceError::Invalid`] when the device is no longer valid;
/// convenience guard for resource-creation entry points.
pub fn check_is_valid(&self) -> Result<(), DeviceError> {
    if !self.is_valid() {
        return Err(DeviceError::Invalid(self.error_ident()));
    }
    Ok(())
}
/// Converts a hal device error into a [`DeviceError`], first losing the
/// device for any error that means its state can no longer be trusted.
pub fn handle_hal_error(&self, error: hal::DeviceError) -> DeviceError {
match error {
// Out-of-memory is recoverable; the device remains usable.
hal::DeviceError::OutOfMemory => {}
hal::DeviceError::Lost
| hal::DeviceError::ResourceCreationFailed
| hal::DeviceError::Unexpected => {
// Fatal: mark the device lost before reporting the error.
self.lose(&error.to_string());
}
}
DeviceError::from_hal(error)
}
/// Run some destroy operations that were deferred.
///
/// Destroying the resources requires taking a write lock on the device's snatch lock,
/// so a good reason for deferring resource destruction is when we don't know for sure
/// how risky it is to take the lock (typically, it shouldn't be taken from the drop
/// implementation of a reference-counted structure).
/// The snatch lock must not be held while this function is called.
pub(crate) fn deferred_resource_destruction(&self) {
// Take the whole queue at once so the deferred_destroy lock is released
// before any snatching happens.
let deferred_destroy = mem::take(&mut *self.deferred_destroy.lock());
for item in deferred_destroy {
match item {
DeferredDestroy::TextureViews(views) => {
for view in views {
// A dead weak ref means the view is already gone; nothing to do.
let Some(view) = view.upgrade() else {
continue;
};
// An already-snatched raw view was destroyed elsewhere.
let Some(raw_view) = view.raw.snatch(&mut self.snatchable_lock.write())
else {
continue;
};
resource_log!("Destroy raw {}", view.error_ident());
unsafe {
self.raw().destroy_texture_view(raw_view);
}
}
}
DeferredDestroy::BindGroups(bind_groups) => {
for bind_group in bind_groups {
let Some(bind_group) = bind_group.upgrade() else {
continue;
};
let Some(raw_bind_group) =
bind_group.raw.snatch(&mut self.snatchable_lock.write())
else {
continue;
};
resource_log!("Destroy raw {}", bind_group.error_ident());
unsafe {
self.raw().destroy_bind_group(raw_bind_group);
}
}
}
}
}
}
pub fn get_queue(&self) -> Option<Arc<Queue>> {
self.queue.get().as_ref()?.upgrade()
}
/// Records a weak reference to this device's queue. Must be called at most
/// once; panics if a queue was already set.
pub fn set_queue(&self, queue: &Arc<Queue>) {
// OnceLock::set only fails when already initialized — that would be a caller bug.
assert!(self.queue.set(Arc::downgrade(queue)).is_ok());
}
/// Check this device for completed commands.
///
/// The `maintain` argument tells how the maintenance function should behave, either
/// blocking or just polling the current state of the gpu.
///
/// Return a pair `(closures, queue_empty)`, where:
///
/// - `closures` is a list of actions to take: mapping buffers, notifying the user
///
/// - `queue_empty` is a boolean indicating whether there are more queue
/// submissions still in flight. (We have to take the locks needed to
/// produce this information for other reasons, so we might as well just
/// return it to our callers.)
pub(crate) fn maintain<'this>(
&'this self,
fence: crate::lock::RwLockReadGuard<ManuallyDrop<Box<dyn hal::DynFence>>>,
maintain: wgt::Maintain<crate::SubmissionIndex>,
snatch_guard: SnatchGuard,
) -> Result<(UserClosures, bool), WaitIdleError> {
profiling::scope!("Device::maintain");
// Determine which submission index `maintain` represents.
let submission_index = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
// Waiting past the last successful submission would hang forever.
let last_successful_submission_index = self
.last_successful_submission_index
.load(Ordering::Acquire);
if submission_index > last_successful_submission_index {
return Err(WaitIdleError::WrongSubmissionIndex(
submission_index,
last_successful_submission_index,
));
}
submission_index
}
wgt::Maintain::Wait => self
.last_successful_submission_index
.load(Ordering::Acquire),
// Poll: just observe whatever the fence has reached so far.
wgt::Maintain::Poll => unsafe { self.raw().get_fence_value(fence.as_ref()) }
.map_err(|e| self.handle_hal_error(e))?,
};
// If necessary, wait for that submission to complete.
if maintain.is_wait() {
log::trace!("Device::maintain: waiting for submission index {submission_index}");
unsafe {
self.raw()
.wait(fence.as_ref(), submission_index, CLEANUP_WAIT_MS)
}
.map_err(|e| self.handle_hal_error(e))?;
}
let (submission_closures, mapping_closures, queue_empty) =
if let Some(queue) = self.get_queue() {
queue.maintain(submission_index, &snatch_guard)
} else {
// No queue (already dropped): nothing in flight.
(SmallVec::new(), Vec::new(), true)
};
// Detect if we have been destroyed and now need to lose the device.
// If we are invalid (set at start of destroy) and our queue is empty,
// and we have a DeviceLostClosure, return the closure to be called by
// our caller. This will complete the steps for both destroy and for
// "lose the device".
let mut device_lost_invocations = SmallVec::new();
let mut should_release_gpu_resource = false;
if !self.is_valid() && queue_empty {
// We can release gpu resources associated with this device (but not
// while holding the life_tracker lock).
should_release_gpu_resource = true;
// If we have a DeviceLostClosure, build an invocation with the
// reason DeviceLostReason::Destroyed and no message.
if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
device_lost_invocations.push(DeviceLostInvocation {
closure: device_lost_closure,
reason: DeviceLostReason::Destroyed,
message: String::new(),
});
}
}
// Don't hold the locks while calling release_gpu_resources.
drop(fence);
drop(snatch_guard);
if should_release_gpu_resource {
self.release_gpu_resources();
}
let closures = UserClosures {
mappings: mapping_closures,
submissions: submission_closures,
device_lost_invocations,
};
Ok((closures, queue_empty))
}
/// Creates a new [`Buffer`] on this device.
///
/// Validates the descriptor against device limits, features, and downlevel
/// flags; allocates the hal buffer (size padded up to
/// `COPY_BUFFER_ALIGNMENT`, plus one byte for vertex buffers so an empty
/// range can be bound at the end); sets up the initial map state; and
/// registers the buffer with the device trackers.
///
/// # Errors
///
/// Returns a [`resource::CreateBufferError`] if the device is invalid, the
/// size exceeds `max_buffer_size`, the usage flags are empty/unknown or
/// inconsistent with the enabled features, a mapped-at-creation size is
/// unaligned, or the hal allocation / initial mapping fails.
pub(crate) fn create_buffer(
    self: &Arc<Self>,
    desc: &resource::BufferDescriptor,
) -> Result<Arc<Buffer>, resource::CreateBufferError> {
    self.check_is_valid()?;

    if desc.size > self.limits.max_buffer_size {
        return Err(resource::CreateBufferError::MaxBufferSize {
            requested: desc.size,
            maximum: self.limits.max_buffer_size,
        });
    }

    // Combining INDEX with ANY other binding usage requires the
    // UNRESTRICTED_INDEX_BUFFER downlevel flag. This must be `intersects`
    // (any overlapping bit), not `contains` (all bits present).
    if desc.usage.contains(wgt::BufferUsages::INDEX)
        && desc.usage.intersects(
            wgt::BufferUsages::VERTEX
                | wgt::BufferUsages::UNIFORM
                | wgt::BufferUsages::INDIRECT
                | wgt::BufferUsages::STORAGE,
        )
    {
        self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?;
    }

    // Usage must be non-empty and must not carry bits outside the known set.
    if desc.usage.is_empty()
        || desc.usage | wgt::BufferUsages::all() != wgt::BufferUsages::all()
    {
        return Err(resource::CreateBufferError::InvalidUsage(desc.usage));
    }

    // Without MAPPABLE_PRIMARY_BUFFERS, a mappable buffer may only combine
    // its map usage with the matching copy usage.
    if !self
        .features
        .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
    {
        use wgt::BufferUsages as Bu;
        let write_mismatch = desc.usage.contains(Bu::MAP_WRITE)
            && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage);
        let read_mismatch = desc.usage.contains(Bu::MAP_READ)
            && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage);
        if write_mismatch || read_mismatch {
            return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
        }
    }

    let mut usage = conv::map_buffer_usage(desc.usage);

    if desc.usage.contains(wgt::BufferUsages::INDIRECT) {
        self.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)?;
        // We are going to be reading from it, internally;
        // when validating the content of the buffer
        usage |= hal::BufferUses::STORAGE_READ_ONLY | hal::BufferUses::STORAGE_READ_WRITE;
    }

    if desc.mapped_at_creation {
        if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(resource::CreateBufferError::UnalignedSize);
        }
        if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
            // we are going to be copying into it, internally
            usage |= hal::BufferUses::COPY_DST;
        }
    } else {
        // We are required to zero out (initialize) all memory. This is done
        // on demand using clear_buffer which requires write transfer usage!
        usage |= hal::BufferUses::COPY_DST;
    }

    let actual_size = if desc.size == 0 {
        wgt::COPY_BUFFER_ALIGNMENT
    } else if desc.usage.contains(wgt::BufferUsages::VERTEX) {
        // Bumping the size by 1 so that we can bind an empty range at the
        // end of the buffer.
        desc.size + 1
    } else {
        desc.size
    };
    // Round up to COPY_BUFFER_ALIGNMENT so the zero-init clear can cover it.
    let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT;
    let aligned_size = if clear_remainder != 0 {
        actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder
    } else {
        actual_size
    };

    let hal_desc = hal::BufferDescriptor {
        label: desc.label.to_hal(self.instance_flags),
        size: aligned_size,
        usage,
        memory_flags: hal::MemoryFlags::empty(),
    };
    let buffer =
        unsafe { self.raw().create_buffer(&hal_desc) }.map_err(|e| self.handle_hal_error(e))?;

    #[cfg(feature = "indirect-validation")]
    let raw_indirect_validation_bind_group =
        self.create_indirect_validation_bind_group(buffer.as_ref(), desc.size, desc.usage)?;

    let buffer = Buffer {
        raw: Snatchable::new(buffer),
        device: self.clone(),
        usage: desc.usage,
        size: desc.size,
        initialization_status: RwLock::new(
            rank::BUFFER_INITIALIZATION_STATUS,
            BufferInitTracker::new(aligned_size),
        ),
        map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
        label: desc.label.to_string(),
        tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
        bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
        #[cfg(feature = "indirect-validation")]
        raw_indirect_validation_bind_group,
    };
    let buffer = Arc::new(buffer);

    // Determine the initial hal usage the trackers should record, and set up
    // the initial map state if the buffer was requested mapped at creation.
    let buffer_use = if !desc.mapped_at_creation {
        hal::BufferUses::empty()
    } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
        // buffer is mappable, so we are just doing that at start
        let map_size = buffer.size;
        let mapping = if map_size == 0 {
            hal::BufferMapping {
                ptr: std::ptr::NonNull::dangling(),
                is_coherent: true,
            }
        } else {
            let snatch_guard: SnatchGuard = self.snatchable_lock.read();
            map_buffer(&buffer, 0, map_size, HostMap::Write, &snatch_guard)?
        };
        *buffer.map_state.lock() = resource::BufferMapState::Active {
            mapping,
            range: 0..map_size,
            host: HostMap::Write,
        };
        hal::BufferUses::MAP_WRITE
    } else {
        // Not host-mappable: stage the initial contents through a zeroed
        // staging buffer instead.
        let mut staging_buffer =
            StagingBuffer::new(self, wgt::BufferSize::new(aligned_size).unwrap())?;
        // Zero initialize memory and then mark the buffer as initialized
        // (it's guaranteed that this is the case by the time the buffer is usable)
        staging_buffer.write_zeros();
        buffer.initialization_status.write().drain(0..aligned_size);
        *buffer.map_state.lock() = resource::BufferMapState::Init { staging_buffer };
        hal::BufferUses::COPY_DST
    };

    self.trackers
        .lock()
        .buffers
        .insert_single(&buffer, buffer_use);

    Ok(buffer)
}
/// Wraps an externally created hal texture in a wgpu-core [`Texture`].
///
/// Skips the full descriptor validation done by `create_texture`; the caller
/// is responsible for the hal texture matching `desc`. The texture is
/// registered with the device trackers in the UNINITIALIZED state and uses
/// `TextureClearMode::None` (no internal clearing).
pub(crate) fn create_texture_from_hal(
self: &Arc<Self>,
hal_texture: Box<dyn hal::DynTexture>,
desc: &resource::TextureDescriptor,
) -> Result<Arc<Texture>, resource::CreateTextureError> {
let format_features = self
.describe_format_features(desc.format)
.map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?;
// Let the hal backend account for this externally supplied texture.
unsafe { self.raw().add_raw_texture(&*hal_texture) };
let texture = Texture::new(
self,
resource::TextureInner::Native { raw: hal_texture },
conv::map_texture_usage(desc.usage, desc.format.into(), format_features.flags),
desc,
format_features,
resource::TextureClearMode::None,
false,
);
let texture = Arc::new(texture);
self.trackers
.lock()
.textures
.insert_single(&texture, hal::TextureUses::UNINITIALIZED);
Ok(texture)
}
/// Wraps an externally created hal buffer in a wgpu-core [`Buffer`].
///
/// Unlike `create_buffer`, this does not validate the descriptor and the
/// init tracker is created with length 0 (the caller vouches for the
/// buffer's contents). Returns a `Fallible` handle plus an optional error
/// instead of a `Result` so the caller always gets a registrable entry.
pub(crate) fn create_buffer_from_hal(
self: &Arc<Self>,
hal_buffer: Box<dyn hal::DynBuffer>,
desc: &resource::BufferDescriptor,
) -> (Fallible<Buffer>, Option<resource::CreateBufferError>) {
#[cfg(feature = "indirect-validation")]
let raw_indirect_validation_bind_group = match self.create_indirect_validation_bind_group(
hal_buffer.as_ref(),
desc.size,
desc.usage,
) {
Ok(ok) => ok,
// On failure, hand back an invalid (label-only) entry plus the error.
Err(e) => return (Fallible::Invalid(Arc::new(desc.label.to_string())), Some(e)),
};
// Let the hal backend account for this externally supplied buffer.
unsafe { self.raw().add_raw_buffer(&*hal_buffer) };
let buffer = Buffer {
raw: Snatchable::new(hal_buffer),
device: self.clone(),
usage: desc.usage,
size: desc.size,
initialization_status: RwLock::new(
rank::BUFFER_INITIALIZATION_STATUS,
BufferInitTracker::new(0),
),
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
label: desc.label.to_string(),
tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
#[cfg(feature = "indirect-validation")]
raw_indirect_validation_bind_group,
};
let buffer = Arc::new(buffer);
self.trackers
.lock()
.buffers
.insert_single(&buffer, hal::BufferUses::empty());
(Fallible::Valid(buffer), None)
}
/// Builds the source bind group used by indirect-draw validation for a
/// buffer with INDIRECT usage; returns an empty `Snatchable` for buffers
/// that can never be used as indirect arguments.
#[cfg(feature = "indirect-validation")]
fn create_indirect_validation_bind_group(
    &self,
    raw_buffer: &dyn hal::DynBuffer,
    buffer_size: u64,
    usage: wgt::BufferUsages,
) -> Result<Snatchable<Box<dyn hal::DynBindGroup>>, resource::CreateBufferError> {
    if !usage.contains(wgt::BufferUsages::INDIRECT) {
        return Ok(Snatchable::empty());
    }
    // Presumably always present here since INDIRECT usage implies the
    // validation machinery was set up at device creation — TODO confirm.
    let indirect_validation = self.indirect_validation.as_ref().unwrap();
    let bind_group = indirect_validation
        .create_src_bind_group(self.raw(), &self.limits, buffer_size, raw_buffer)
        .map_err(resource::CreateBufferError::IndirectValidationBindGroup)?;
    Ok(bind_group.map_or_else(Snatchable::empty, Snatchable::new))
}
/// Creates a new [`Texture`] on this device.
///
/// Validates the descriptor (usage bits, dimensions, compressed-format block
/// alignment, multisampling rules, mip counts, format/usage compatibility,
/// view formats), allocates the hal texture, pre-creates per-subresource
/// clear views when the texture can be cleared via a render pass, and
/// registers the texture with the device trackers as UNINITIALIZED.
///
/// Returns a [`resource::CreateTextureError`] describing the first
/// validation failure, or a device error if the hal allocation fails.
pub(crate) fn create_texture(
self: &Arc<Self>,
desc: &resource::TextureDescriptor,
) -> Result<Arc<Texture>, resource::CreateTextureError> {
use resource::{CreateTextureError, TextureDimensionError};
self.check_is_valid()?;
// Usage must be non-empty and contain only known bits.
if desc.usage.is_empty()
|| desc.usage | wgt::TextureUsages::all() != wgt::TextureUsages::all()
{
return Err(CreateTextureError::InvalidUsage(desc.usage));
}
conv::check_texture_dimension_size(
desc.dimension,
desc.size,
desc.sample_count,
&self.limits,
)?;
if desc.dimension != wgt::TextureDimension::D2 {
// Depth textures can only be 2D
if desc.format.is_depth_stencil_format() {
return Err(CreateTextureError::InvalidDepthDimension(
desc.dimension,
desc.format,
));
}
// Renderable textures can only be 2D
if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::InvalidDimensionUsages(
wgt::TextureUsages::RENDER_ATTACHMENT,
desc.dimension,
));
}
}
if desc.dimension != wgt::TextureDimension::D2
&& desc.dimension != wgt::TextureDimension::D3
{
// Compressed textures can only be 2D or 3D
if desc.format.is_compressed() {
return Err(CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
// Compressed formats: extents must align to the block grid.
if desc.format.is_compressed() {
let (block_width, block_height) = desc.format.block_dimensions();
if desc.size.width % block_width != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockWidth {
width: desc.size.width,
block_width,
format: desc.format,
},
));
}
if desc.size.height % block_height != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockHeight {
height: desc.size.height,
block_height,
format: desc.format,
},
));
}
if desc.dimension == wgt::TextureDimension::D3 {
// Only BCn formats with Sliced 3D feature can be used for 3D textures
if desc.format.is_bcn() {
self.require_features(wgt::Features::TEXTURE_COMPRESSION_BC_SLICED_3D)
.map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
} else {
return Err(CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
}
// Format-specific size-multiple requirements (independent of compression).
{
let (width_multiple, height_multiple) = desc.format.size_multiple_requirement();
if desc.size.width % width_multiple != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::WidthNotMultipleOf {
width: desc.size.width,
multiple: width_multiple,
format: desc.format,
},
));
}
if desc.size.height % height_multiple != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::HeightNotMultipleOf {
height: desc.size.height,
multiple: height_multiple,
format: desc.format,
},
));
}
}
let format_features = self
.describe_format_features(desc.format)
.map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
// Multisampling rules: single mip, single layer, render-attachment only,
// and the format must support the requested sample count.
if desc.sample_count > 1 {
if desc.mip_level_count != 1 {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: desc.mip_level_count,
maximum: 1,
});
}
if desc.size.depth_or_array_layers != 1 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::MultisampledDepthOrArrayLayer(
desc.size.depth_or_array_layers,
),
));
}
if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
return Err(CreateTextureError::InvalidMultisampledStorageBinding);
}
if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::MultisampledNotRenderAttachment);
}
if !format_features.flags.intersects(
wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16,
) {
return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
}
if !format_features
.flags
.sample_count_supported(desc.sample_count)
{
return Err(CreateTextureError::InvalidSampleCount(
desc.sample_count,
desc.format,
desc.format
.guaranteed_format_features(self.features)
.flags
.supported_sample_counts(),
self.adapter
.get_texture_format_features(desc.format)
.flags
.supported_sample_counts(),
));
};
}
let mips = desc.mip_level_count;
let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
if mips == 0 || mips > max_levels_allowed {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: mips,
maximum: max_levels_allowed,
});
}
let missing_allowed_usages = desc.usage - format_features.allowed_usages;
if !missing_allowed_usages.is_empty() {
// detect downlevel incompatibilities
let wgpu_allowed_usages = desc
.format
.guaranteed_format_features(self.features)
.allowed_usages;
let wgpu_missing_usages = desc.usage - wgpu_allowed_usages;
return Err(CreateTextureError::InvalidFormatUsages(
missing_allowed_usages,
desc.format,
wgpu_missing_usages.is_empty(),
));
}
// View formats must differ only by the sRGB suffix; the base format
// itself is excluded from the hal list.
let mut hal_view_formats = vec![];
for format in desc.view_formats.iter() {
if desc.format == *format {
continue;
}
if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
return Err(CreateTextureError::InvalidViewFormat(*format, desc.format));
}
hal_view_formats.push(*format);
}
if !hal_view_formats.is_empty() {
self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?;
}
let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features);
let hal_desc = hal::TextureDescriptor {
label: desc.label.to_hal(self.instance_flags),
size: desc.size,
mip_level_count: desc.mip_level_count,
sample_count: desc.sample_count,
dimension: desc.dimension,
format: desc.format,
usage: hal_usage,
memory_flags: hal::MemoryFlags::empty(),
view_formats: hal_view_formats,
};
let raw_texture = unsafe { self.raw().create_texture(&hal_desc) }
.map_err(|e| self.handle_hal_error(e))?;
// If the texture can be a render target, pre-create one clear view per
// mip level / array layer (and per plane for planar formats) so clears
// can run through a render pass; otherwise clear via buffer copies.
let clear_mode = if hal_usage
.intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET)
{
let (is_color, usage) = if desc.format.is_depth_stencil_format() {
(false, hal::TextureUses::DEPTH_STENCIL_WRITE)
} else {
(true, hal::TextureUses::COLOR_TARGET)
};
let dimension = match desc.dimension {
wgt::TextureDimension::D1 => TextureViewDimension::D1,
wgt::TextureDimension::D2 => TextureViewDimension::D2,
// D3 with render/depth usage was rejected above, so this arm can't be hit.
wgt::TextureDimension::D3 => unreachable!(),
};
let clear_label = hal_label(
Some("(wgpu internal) clear texture view"),
self.instance_flags,
);
let mut clear_views = SmallVec::new();
for mip_level in 0..desc.mip_level_count {
for array_layer in 0..desc.size.depth_or_array_layers {
macro_rules! push_clear_view {
($format:expr, $aspect:expr) => {
let desc = hal::TextureViewDescriptor {
label: clear_label,
format: $format,
dimension,
usage,
range: wgt::ImageSubresourceRange {
aspect: $aspect,
base_mip_level: mip_level,
mip_level_count: Some(1),
base_array_layer: array_layer,
array_layer_count: Some(1),
},
};
clear_views.push(ManuallyDrop::new(
unsafe {
self.raw().create_texture_view(raw_texture.as_ref(), &desc)
}
.map_err(|e| self.handle_hal_error(e))?,
));
};
}
if let Some(planes) = desc.format.planes() {
for plane in 0..planes {
let aspect = wgt::TextureAspect::from_plane(plane).unwrap();
let format = desc.format.aspect_specific_format(aspect).unwrap();
push_clear_view!(format, aspect);
}
} else {
push_clear_view!(desc.format, wgt::TextureAspect::All);
}
}
}
resource::TextureClearMode::RenderPass {
clear_views,
is_color,
}
} else {
resource::TextureClearMode::BufferCopy
};
let texture = Texture::new(
self,
resource::TextureInner::Native { raw: raw_texture },
hal_usage,
desc,
format_features,
clear_mode,
true,
);
let texture = Arc::new(texture);
self.trackers
.lock()
.textures
.insert_single(&texture, hal::TextureUses::UNINITIALIZED);
Ok(texture)
}
pub(crate) fn create_texture_view(
self: &Arc<Self>,
texture: &Arc<Texture>,
desc: &resource::TextureViewDescriptor,
) -> Result<Arc<TextureView>, resource::CreateTextureViewError> {
self.check_is_valid()?;
let snatch_guard = texture.device.snatchable_lock.read();
let texture_raw = texture.try_raw(&snatch_guard)?;
// resolve TextureViewDescriptor defaults
// https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults
let resolved_format = desc.format.unwrap_or_else(|| {
texture
.desc
.format
.aspect_specific_format(desc.range.aspect)
.unwrap_or(texture.desc.format)
});
let resolved_dimension = desc
.dimension
.unwrap_or_else(|| match texture.desc.dimension {
wgt::TextureDimension::D1 => TextureViewDimension::D1,
wgt::TextureDimension::D2 => {
if texture.desc.array_layer_count() == 1 {
TextureViewDimension::D2
} else {
TextureViewDimension::D2Array
}
}
wgt::TextureDimension::D3 => TextureViewDimension::D3,
});
let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| {
texture
.desc
.mip_level_count
.saturating_sub(desc.range.base_mip_level)
});
let resolved_array_layer_count =
desc.range
.array_layer_count
.unwrap_or_else(|| match resolved_dimension {
TextureViewDimension::D1
| TextureViewDimension::D2
| TextureViewDimension::D3 => 1,
TextureViewDimension::Cube => 6,
TextureViewDimension::D2Array | TextureViewDimension::CubeArray => texture
.desc
.array_layer_count()
.saturating_sub(desc.range.base_array_layer),
});
let resolved_usage = {
let usage = desc.usage.unwrap_or(wgt::TextureUsages::empty());
if usage.is_empty() {
texture.desc.usage
} else if texture.desc.usage.contains(usage) {
usage
} else {
return Err(resource::CreateTextureViewError::InvalidTextureViewUsage {
view: usage,
texture: texture.desc.usage,
});
}
};
let allowed_format_usages = self
.describe_format_features(resolved_format)?
.allowed_usages;
if resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
&& !allowed_format_usages.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
return Err(
resource::CreateTextureViewError::TextureViewFormatNotRenderable(resolved_format),
);
}
if resolved_usage.contains(wgt::TextureUsages::STORAGE_BINDING)
&& !allowed_format_usages.contains(wgt::TextureUsages::STORAGE_BINDING)
{
return Err(
resource::CreateTextureViewError::TextureViewFormatNotStorage(resolved_format),
);
}
// validate TextureViewDescriptor
let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect);
if aspects.is_empty() {
return Err(resource::CreateTextureViewError::InvalidAspect {
texture_format: texture.desc.format,
requested_aspect: desc.range.aspect,
});
}
let format_is_good = if desc.range.aspect == wgt::TextureAspect::All {
resolved_format == texture.desc.format
|| texture.desc.view_formats.contains(&resolved_format)
} else {
Some(resolved_format)
== texture
.desc
.format
.aspect_specific_format(desc.range.aspect)
};
if !format_is_good {
return Err(resource::CreateTextureViewError::FormatReinterpretation {
texture: texture.desc.format,
view: resolved_format,
});
}
// check if multisampled texture is seen as anything but 2D
if texture.desc.sample_count > 1 && resolved_dimension != TextureViewDimension::D2 {
return Err(
resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension(
resolved_dimension,
),
);
}
// check if the dimension is compatible with the texture
if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() {
return Err(
resource::CreateTextureViewError::InvalidTextureViewDimension {
view: resolved_dimension,
texture: texture.desc.dimension,
},
);
}
match resolved_dimension {
TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => {
if resolved_array_layer_count != 1 {
return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
requested: resolved_array_layer_count,
dim: resolved_dimension,
});
}
}
TextureViewDimension::Cube => {
if resolved_array_layer_count != 6 {
return Err(
resource::CreateTextureViewError::InvalidCubemapTextureDepth {
depth: resolved_array_layer_count,
},
);
}
}
TextureViewDimension::CubeArray => {
if resolved_array_layer_count % 6 != 0 {
return Err(
resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
depth: resolved_array_layer_count,
},
);
}
}
_ => {}
}
match resolved_dimension {
TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
if texture.desc.size.width != texture.desc.size.height {
return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize);
}
}
_ => {}
}
if resolved_mip_level_count == 0 {
return Err(resource::CreateTextureViewError::ZeroMipLevelCount);
}
let mip_level_end = desc
.range
.base_mip_level
.saturating_add(resolved_mip_level_count);
let level_end = texture.desc.mip_level_count;
if mip_level_end > level_end {
return Err(resource::CreateTextureViewError::TooManyMipLevels {
requested: mip_level_end,
total: level_end,
});
}
if resolved_array_layer_count == 0 {
return Err(resource::CreateTextureViewError::ZeroArrayLayerCount);
}
let array_layer_end = desc
.range
.base_array_layer
.saturating_add(resolved_array_layer_count);
let layer_end = texture.desc.array_layer_count();
if array_layer_end > layer_end {
return Err(resource::CreateTextureViewError::TooManyArrayLayers {
requested: array_layer_end,
total: layer_end,
});
};
// https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view
let render_extent = 'error: {
if !resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
break 'error Err(TextureViewNotRenderableReason::Usage(resolved_usage));
}
if !(resolved_dimension == TextureViewDimension::D2
|| (self.features.contains(wgt::Features::MULTIVIEW)
&& resolved_dimension == TextureViewDimension::D2Array))
{
break 'error Err(TextureViewNotRenderableReason::Dimension(
resolved_dimension,
));
}
if resolved_mip_level_count != 1 {
break 'error Err(TextureViewNotRenderableReason::MipLevelCount(
resolved_mip_level_count,
));
}
if resolved_array_layer_count != 1
&& !(self.features.contains(wgt::Features::MULTIVIEW))
{
break 'error Err(TextureViewNotRenderableReason::ArrayLayerCount(
resolved_array_layer_count,
));
}
if aspects != hal::FormatAspects::from(texture.desc.format) {
break 'error Err(TextureViewNotRenderableReason::Aspects(aspects));
}
Ok(texture
.desc
.compute_render_extent(desc.range.base_mip_level))
};
// filter the usages based on the other criteria
let usage = {
let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST);
let mask_dimension = match resolved_dimension {
TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
hal::TextureUses::RESOURCE
}
TextureViewDimension::D3 => {
hal::TextureUses::RESOURCE
| hal::TextureUses::STORAGE_READ_ONLY
| hal::TextureUses::STORAGE_WRITE_ONLY
| hal::TextureUses::STORAGE_READ_WRITE
}
_ => hal::TextureUses::all(),
};
let mask_mip_level = if resolved_mip_level_count == 1 {
hal::TextureUses::all()
} else {
hal::TextureUses::RESOURCE
};
texture.hal_usage & mask_copy & mask_dimension & mask_mip_level
};
// use the combined depth-stencil format for the view
let format = if resolved_format.is_depth_stencil_component(texture.desc.format) {
texture.desc.format
} else {
resolved_format
};
let resolved_range = wgt::ImageSubresourceRange {
aspect: desc.range.aspect,
base_mip_level: desc.range.base_mip_level,
mip_level_count: Some(resolved_mip_level_count),
base_array_layer: desc.range.base_array_layer,
array_layer_count: Some(resolved_array_layer_count),
};
let hal_desc = hal::TextureViewDescriptor {
label: desc.label.to_hal(self.instance_flags),
format,
dimension: resolved_dimension,
usage,
range: resolved_range,
};
let raw = unsafe { self.raw().create_texture_view(texture_raw, &hal_desc) }
.map_err(|e| self.handle_hal_error(e))?;
let selector = TextureSelector {
mips: desc.range.base_mip_level..mip_level_end,
layers: desc.range.base_array_layer..array_layer_end,
};
let view = TextureView {
raw: Snatchable::new(raw),
parent: texture.clone(),
device: self.clone(),
desc: resource::HalTextureViewDescriptor {
texture_format: texture.desc.format,
format: resolved_format,
dimension: resolved_dimension,
usage: resolved_usage,
range: resolved_range,
},
format_features: texture.format_features,
render_extent,
samples: texture.desc.sample_count,
selector,
label: desc.label.to_string(),
tracking_data: TrackingData::new(self.tracker_indices.texture_views.clone()),
};
let view = Arc::new(view);
{
let mut views = texture.views.lock();
views.push(Arc::downgrade(&view));
}
Ok(view)
}
pub(crate) fn create_sampler(
self: &Arc<Self>,
desc: &resource::SamplerDescriptor,
) -> Result<Arc<Sampler>, resource::CreateSamplerError> {
self.check_is_valid()?;
if desc
.address_modes
.iter()
.any(|am| am == &wgt::AddressMode::ClampToBorder)
{
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
}
if desc.border_color == Some(wgt::SamplerBorderColor::Zero) {
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?;
}
if desc.lod_min_clamp < 0.0 {
return Err(resource::CreateSamplerError::InvalidLodMinClamp(
desc.lod_min_clamp,
));
}
if desc.lod_max_clamp < desc.lod_min_clamp {
return Err(resource::CreateSamplerError::InvalidLodMaxClamp {
lod_min_clamp: desc.lod_min_clamp,
lod_max_clamp: desc.lod_max_clamp,
});
}
if desc.anisotropy_clamp < 1 {
return Err(resource::CreateSamplerError::InvalidAnisotropy(
desc.anisotropy_clamp,
));
}
if desc.anisotropy_clamp != 1 {
if !matches!(desc.min_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MinFilter,
filter_mode: desc.min_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
if !matches!(desc.mag_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MagFilter,
filter_mode: desc.mag_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MipmapFilter,
filter_mode: desc.mipmap_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
}
let anisotropy_clamp = if self
.downlevel
.flags
.contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING)
{
// Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface
desc.anisotropy_clamp.min(16)
} else {
// If it isn't supported, set this unconditionally to 1
1
};
//TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS
let hal_desc = hal::SamplerDescriptor {
label: desc.label.to_hal(self.instance_flags),
address_modes: desc.address_modes,
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp,
compare: desc.compare,
anisotropy_clamp,
border_color: desc.border_color,
};
let raw = unsafe { self.raw().create_sampler(&hal_desc) }
.map_err(|e| self.handle_hal_error(e))?;
let sampler = Sampler {
raw: ManuallyDrop::new(raw),
device: self.clone(),
label: desc.label.to_string(),
tracking_data: TrackingData::new(self.tracker_indices.samplers.clone()),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear
|| desc.mipmap_filter == wgt::FilterMode::Linear,
};
let sampler = Arc::new(sampler);
Ok(sampler)
}
/// Creates a shader module from any of the supported source kinds
/// (WGSL, SPIR-V, or GLSL, depending on enabled crate features, plus a
/// pre-parsed naga module).
///
/// The source is first parsed into a naga IR module, then checked against
/// this device's bind-group limit and run through the naga validator
/// before being handed to the backend.
///
/// Returns a parsing, validation, or device error on failure; a backend
/// compilation failure is logged and reported as
/// `CreateShaderModuleError::Generation`.
pub(crate) fn create_shader_module<'a>(
    self: &Arc<Self>,
    desc: &pipeline::ShaderModuleDescriptor<'a>,
    source: pipeline::ShaderModuleSource<'a>,
) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
    self.check_is_valid()?;

    // Front-end parse: produce a naga IR module, and keep the original
    // source text (where one exists) for debug info and error reporting.
    let (module, source) = match source {
        #[cfg(feature = "wgsl")]
        pipeline::ShaderModuleSource::Wgsl(code) => {
            profiling::scope!("naga::front::wgsl::parse_str");
            let module = naga::front::wgsl::parse_str(&code).map_err(|inner| {
                pipeline::CreateShaderModuleError::Parsing(naga::error::ShaderError {
                    source: code.to_string(),
                    label: desc.label.as_ref().map(|l| l.to_string()),
                    inner: Box::new(inner),
                })
            })?;
            (Cow::Owned(module), code.into_owned())
        }
        #[cfg(feature = "spirv")]
        pipeline::ShaderModuleSource::SpirV(spv, options) => {
            let parser = naga::front::spv::Frontend::new(spv.iter().cloned(), &options);
            profiling::scope!("naga::front::spv::Frontend");
            // SPIR-V is binary, so there is no source text to carry along.
            let module = parser.parse().map_err(|inner| {
                pipeline::CreateShaderModuleError::ParsingSpirV(naga::error::ShaderError {
                    source: String::new(),
                    label: desc.label.as_ref().map(|l| l.to_string()),
                    inner: Box::new(inner),
                })
            })?;
            (Cow::Owned(module), String::new())
        }
        #[cfg(feature = "glsl")]
        pipeline::ShaderModuleSource::Glsl(code, options) => {
            let mut parser = naga::front::glsl::Frontend::default();
            profiling::scope!("naga::front::glsl::Frontend.parse");
            let module = parser.parse(&options, &code).map_err(|inner| {
                pipeline::CreateShaderModuleError::ParsingGlsl(naga::error::ShaderError {
                    source: code.to_string(),
                    label: desc.label.as_ref().map(|l| l.to_string()),
                    inner: Box::new(inner),
                })
            })?;
            (Cow::Owned(module), code.into_owned())
        }
        pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
        pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"),
    };

    // Reject modules that reference a bind group index beyond this
    // device's `max_bind_groups` limit.
    for (_, var) in module.global_variables.iter() {
        match var.binding {
            Some(ref br) if br.group >= self.limits.max_bind_groups => {
                return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex {
                    bind: br.clone(),
                    group: br.group,
                    limit: self.limits.max_bind_groups,
                });
            }
            _ => continue,
        };
    }

    profiling::scope!("naga::validate");

    // Only embed the source text as debug info when the instance runs in
    // debug mode and we actually have text to embed.
    let debug_source =
        if self.instance_flags.contains(wgt::InstanceFlags::DEBUG) && !source.is_empty() {
            Some(hal::DebugSource {
                file_name: Cow::Owned(
                    desc.label
                        .as_ref()
                        .map_or("shader".to_string(), |l| l.to_string()),
                ),
                source_code: Cow::Owned(source.clone()),
            })
        } else {
            None
        };

    // Run the naga validator configured for this device's features and
    // downlevel flags; its output (`info`) is needed by the backend.
    let info = create_validator(
        self.features,
        self.downlevel.flags,
        naga::valid::ValidationFlags::all(),
    )
    .validate(&module)
    .map_err(|inner| {
        pipeline::CreateShaderModuleError::Validation(naga::error::ShaderError {
            source,
            label: desc.label.as_ref().map(|l| l.to_string()),
            inner: Box::new(inner),
        })
    })?;

    // Extract the shader interface for later pipeline/layout validation.
    let interface = validation::Interface::new(&module, &info, self.limits.clone());
    let hal_shader = hal::ShaderInput::Naga(hal::NagaShader {
        module,
        info,
        debug_source,
    });
    let hal_desc = hal::ShaderModuleDescriptor {
        label: desc.label.to_hal(self.instance_flags),
        runtime_checks: desc.runtime_checks,
    };
    let raw = match unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) } {
        Ok(raw) => raw,
        Err(error) => {
            return Err(match error {
                hal::ShaderError::Device(error) => {
                    pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
                }
                hal::ShaderError::Compilation(ref msg) => {
                    log::error!("Shader error: {}", msg);
                    pipeline::CreateShaderModuleError::Generation
                }
            })
        }
    };

    let module = pipeline::ShaderModule {
        raw: ManuallyDrop::new(raw),
        device: self.clone(),
        interface: Some(interface),
        label: desc.label.to_string(),
    };

    let module = Arc::new(module);
    Ok(module)
}
#[allow(unused_unsafe)]
/// Creates a shader module by passing raw SPIR-V straight through to the
/// backend, bypassing naga parsing and validation.
///
/// Requires the `SPIRV_SHADER_PASSTHROUGH` feature. The resulting module
/// has no `interface`, so later pipeline creation cannot cross-check its
/// bindings.
///
/// # Safety
///
/// The SPIR-V in `source` is not validated here; the caller must supply a
/// module the backend can accept.
pub(crate) unsafe fn create_shader_module_spirv<'a>(
    self: &Arc<Self>,
    desc: &pipeline::ShaderModuleDescriptor<'a>,
    source: &'a [u32],
) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
    self.check_is_valid()?;
    self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;

    let hal_desc = hal::ShaderModuleDescriptor {
        label: desc.label.to_hal(self.instance_flags),
        runtime_checks: desc.runtime_checks,
    };
    let hal_shader = hal::ShaderInput::SpirV(source);

    let raw = unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) }.map_err(
        |error| match error {
            hal::ShaderError::Device(error) => {
                pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
            }
            hal::ShaderError::Compilation(ref msg) => {
                log::error!("Shader error: {}", msg);
                pipeline::CreateShaderModuleError::Generation
            }
        },
    )?;

    let module = pipeline::ShaderModule {
        raw: ManuallyDrop::new(raw),
        device: self.clone(),
        // No naga IR was produced, so no interface is available for
        // pipeline-time validation.
        interface: None,
        label: desc.label.to_string(),
    };

    Ok(Arc::new(module))
}
/// Acquires a new command encoder from the pool and wraps it in a
/// [`command::CommandBuffer`].
///
/// Panics if the device's queue has not been set up (`get_queue` returns
/// `None`), which is treated as an internal invariant violation here.
pub(crate) fn create_command_encoder(
    self: &Arc<Self>,
    label: &crate::Label,
) -> Result<Arc<command::CommandBuffer>, DeviceError> {
    self.check_is_valid()?;

    let queue = self.get_queue().unwrap();

    let encoder = self
        .command_allocator
        .acquire_encoder(self.raw(), queue.raw())
        .map_err(|err| self.handle_hal_error(err))?;

    Ok(Arc::new(command::CommandBuffer::new(encoder, self, label)))
}
/// Generate information about late-validated buffer bindings for pipelines.
//TODO: should this be combined with `get_introspection_bind_group_layouts` in some way?
fn make_late_sized_buffer_groups(
shader_binding_sizes: &FastHashMap<naga::ResourceBinding, wgt::BufferSize>,
layout: &binding_model::PipelineLayout,
) -> ArrayVec<pipeline::LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }> {
// Given the shader-required binding sizes and the pipeline layout,
// return the filtered list of them in the layout order,
// removing those with given `min_binding_size`.
layout
.bind_group_layouts
.iter()
.enumerate()
.map(|(group_index, bgl)| pipeline::LateSizedBufferGroup {
shader_sizes: bgl
.entries
.values()
.filter_map(|entry| match entry.ty {
wgt::BindingType::Buffer {
min_binding_size: None,
..
} => {
let rb = naga::ResourceBinding {
--> --------------------
--> maximum size reached
--> --------------------
[ Verzeichnis aufwärts0.78unsichere Verbindung
Übersetzung europäischer Sprachen durch Browser
]
|
2026-04-02
|