/**
 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 *
 * @lo: struct loop_device
 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 *
 * Returns 0 on success, -EINTR otherwise.
 *
 * Since loop_validate_file() traverses on other "struct loop_device" if
 * is_loop_device() is true, we need a global lock for serializing concurrent
 * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 */
static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
	int err = 0;

	/* The global lock nests outside the per-device lock. */
	if (global)
		err = mutex_lock_killable(&loop_validate_mutex);
	if (err)
		return err;

	err = mutex_lock_killable(&lo->lo_mutex);
	if (err && global)
		mutex_unlock(&loop_validate_mutex);
	return err;
}
/**
 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 *
 * @lo: struct loop_device
 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 */
static void loop_global_unlock(struct loop_device *lo, bool global)
{
	/* Release in reverse acquisition order: device lock, then global. */
	mutex_unlock(&lo->lo_mutex);
	if (global)
		mutex_unlock(&loop_validate_mutex);
}
/* * Get the accurate file size. This provides better results than * cached inode data, particularly for network filesystems where * metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0); if (ret) return 0;
loopsize = stat.size;
}
if (lo->lo_offset > 0)
loopsize -= lo->lo_offset; /* offset is beyond i_size, weird but possible */ if (loopsize < 0) return 0; if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
loopsize = lo->lo_sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t.
*/ return loopsize >> 9;
}
/* * We support direct I/O only if lo_offset is aligned with the logical I/O size * of backing device, and the logical block size of loop is bigger than that of * the backing device.
*/ staticbool lo_can_use_dio(struct loop_device *lo)
{ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT)) returnfalse; if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size) returnfalse; if (lo->lo_offset & (lo->lo_min_dio_size - 1)) returnfalse; returntrue;
}
/*
 * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
 * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
 * disabled when the device block size is too small or the offset is unaligned.
 *
 * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
 * not the originally passed in one.
 */
static inline void loop_update_dio(struct loop_device *lo)
{
	lockdep_assert_held(&lo->lo_mutex);
	/* A bound device must have its queue frozen before flags change. */
	WARN_ON_ONCE(lo->lo_state == Lo_bound &&
		     lo->lo_queue->mq_freeze_depth == 0);

	if (!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
		return;
	if (!lo_can_use_dio(lo))
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}
/**
 * loop_set_size() - sets device size and notifies userspace
 * @lo: struct loop_device to set the size for
 * @size: new size of the loop device
 *
 * Callers must validate that the size passed into this function fits into
 * a sector_t, eg using loop_validate_size()
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
	bool notified = set_capacity_and_notify(lo->lo_disk, size);

	/* If set_capacity_and_notify() did not emit a uevent, do it here. */
	if (!notified)
		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
/* * XXX: this updates the queue limits without freezing the queue, which * is against the locking protocol and dangerous. But we can't just * freeze the queue as we're inside the ->queue_rq method here. So this * should move out into a workqueue unless we get the file operations to * advertise if they support specific fallocate operations.
*/
queue_limits_commit_update(lo->lo_queue, &lim);
}
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
			int mode)
{
	/*
	 * We use fallocate to manipulate the space mappings used by the image
	 * a.k.a. discard/zerorange.
	 */
	struct file *backing = lo->lo_backing_file;
	int err;

	/* Never change the file size, only punch/zero within it. */
	mode |= FALLOC_FL_KEEP_SIZE;

	if (!bdev_max_discard_sectors(lo->lo_device))
		return -EOPNOTSUPP;

	err = backing->f_op->fallocate(backing, mode, pos, blk_rq_bytes(rq));
	if (unlikely(err == -EOPNOTSUPP)) {
		/*
		 * We initially configure the limits in a hope that fallocate
		 * is supported and clear them here if that turns out not to
		 * be true.
		 */
		loop_clear_limits(lo, mode);
		return err;
	}
	if (unlikely(err && err != -EINVAL))
		return -EIO;
	return err;
}
staticint lo_req_flush(struct loop_device *lo, struct request *rq)
{ int ret = vfs_fsync(lo->lo_backing_file, 0); if (unlikely(ret && ret != -EINVAL))
ret = -EIO;
if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) { if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret); goto end_io;
}
/* * Short READ - if we got some data, advance our request and * retry it. If we got no data, end the rest with EIO.
*/ if (cmd->ret) {
blk_update_request(rq, BLK_STS_OK, cmd->ret);
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else { struct bio *bio = rq->bio;
while (bio) {
zero_fill_bio(bio);
bio = bio->bi_next;
}
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
}
}
/* * The bios of the request may be started from the middle of * the 'bvec' because of bio splitting, so we can't directly * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec * API will take care of all details for us.
*/
rq_for_each_bvec(tmp, rq, rq_iter) {
*bvec = tmp;
bvec++;
}
bvec = cmd->bvec;
offset = 0;
} else { /* * Same here, this bio may be started from the middle of the * 'bvec' because of bio splitting, so offset from the bvec * must be passed to iov iterator
*/
offset = bio->bi_iter.bi_bvec_done;
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}
atomic_set(&cmd->ref, 2);
/* * Use the minimal dio alignment of the file system if provided.
*/ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
(st.result_mask & STATX_DIOALIGN)) return st.dio_offset_align;
/* * In a perfect world this wouldn't be needed, but as of Linux 6.13 only * a handful of file systems support the STATX_DIOALIGN flag.
*/ if (sb_bdev) return bdev_logical_block_size(sb_bdev); return SECTOR_SIZE;
}
staticint loop_check_backing_file(struct file *file)
{ if (!file->f_op->read_iter) return -EINVAL;
if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter) return -EINVAL;
return 0;
}
/* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up * the original file and in High Availability environments to switch to * an alternative location for the content in case of server meltdown. * This can only work if the loop device is used read-only, and if the * new backing store is the same size and type as the old backing store.
*/ staticint loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsignedint arg)
{ struct file *file = fget(arg); struct file *old_file; unsignedint memflags; int error; bool partscan; bool is_loop;
if (!file) return -EBADF;
error = loop_check_backing_file(file); if (error) {
fput(file); return error;
}
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
is_loop = is_loop_device(file);
error = loop_global_lock_killable(lo, is_loop); if (error) goto out_putf;
error = -ENXIO; if (lo->lo_state != Lo_bound) goto out_err;
/* the loop device has to be read-only */
error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) goto out_err;
error = loop_validate_file(file, bdev); if (error) goto out_err;
old_file = lo->lo_backing_file;
error = -EINVAL;
/* size of the new backing store needs to be the same */ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file)) goto out_err;
/* * We might switch to direct I/O mode for the loop device, write back * all dirty data the page cache now that so that the individual I/O * operations don't have to do that.
*/
vfs_fsync(file, 0);
/* * Flush loop_validate_file() before fput(), for l->lo_backing_file * might be pointing at old_file which might be the last reference.
*/ if (!is_loop) {
mutex_lock(&loop_validate_mutex);
mutex_unlock(&loop_validate_mutex);
} /* * We must drop file reference outside of lo_mutex as dropping * the file ref can take open_mutex which creates circular locking * dependency.
*/
fput(old_file);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); if (partscan)
loop_reread_partitions(lo);
/* * If the backing device is a block device, mirror its zeroing * capability. Set the discard sectors to the block device's zeroing * capabilities because loop discards result in blkdev_issue_zeroout(), * not blkdev_issue_discard(). This maintains consistent behavior with * file-backed loop devices: discarded regions read back as zero.
*/ if (S_ISBLK(inode->i_mode)) { struct block_device *bdev = I_BDEV(inode);
/* * We use punch hole to reclaim the free space used by the * image a.k.a. discard.
*/
} elseif (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
*max_discard_sectors = UINT_MAX >> 9;
*granularity = sbuf.f_bsize;
}
}
worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN); /* * In the event we cannot allocate a worker, just queue on the * rootcg worker and issue the I/O as the rootcg
*/ if (!worker) {
cmd->blkcg_css = NULL; if (cmd->memcg_css)
css_put(cmd->memcg_css);
cmd->memcg_css = NULL; goto queue_work;
}
worker->blkcg_css = cmd->blkcg_css;
css_get(worker->blkcg_css);
INIT_WORK(&worker->work, loop_workfn);
INIT_LIST_HEAD(&worker->cmd_list);
INIT_LIST_HEAD(&worker->idle_list);
worker->lo = lo;
rb_link_node(&worker->rb_node, parent, node);
rb_insert_color(&worker->rb_node, &lo->worker_tree);
queue_work: if (worker) { /* * We need to remove from the idle list here while * holding the lock so that the idle timer doesn't * free the worker
*/ if (!list_empty(&worker->idle_list))
list_del_init(&worker->idle_list);
work = &worker->work;
cmd_list = &worker->cmd_list;
} else {
work = &lo->rootcg_work;
cmd_list = &lo->rootcg_cmd_list;
}
list_add_tail(&cmd->list_entry, cmd_list);
queue_work(lo->workqueue, work);
spin_unlock_irq(&lo->lo_work_lock);
}
/** * loop_set_status_from_info - configure device from loop_info * @lo: struct loop_device to configure * @info: struct loop_info64 to configure the device with * * Configures the loop device parameters according to the passed * in loop_info64 configuration.
*/ staticint
loop_set_status_from_info(struct loop_device *lo, conststruct loop_info64 *info)
{ if ((unsignedint) info->lo_encrypt_key_size > LO_KEY_SIZE) return -EINVAL;
switch (info->lo_encrypt_type) { case LO_CRYPT_NONE: break; case LO_CRYPT_XOR:
pr_warn("support for the xor transformation has been removed.\n"); return -EINVAL; case LO_CRYPT_CRYPTOAPI:
pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n"); return -EINVAL; default: return -EINVAL;
}
static unsigned int loop_default_blocksize(struct loop_device *lo)
{
	/* In case of direct I/O, match underlying minimum I/O size */
	return (lo->lo_flags & LO_FLAGS_DIRECT_IO) ? lo->lo_min_dio_size :
						     SECTOR_SIZE;
}
error = loop_check_backing_file(file); if (error) {
fput(file); return error;
}
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
/* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner.
*/ if (!(mode & BLK_OPEN_EXCL)) {
error = bd_prepare_to_claim(bdev, loop_configure, NULL); if (error) goto out_putf;
}
error = loop_global_lock_killable(lo, is_loop); if (error) goto out_bdev;
error = -EBUSY; if (lo->lo_state != Lo_unbound) goto out_unlock;
error = loop_validate_file(file, bdev); if (error) goto out_unlock;
lim = queue_limits_start_update(lo->lo_queue);
loop_update_limits(lo, &lim, config->block_size); /* No need to freeze the queue as the device isn't bound yet. */
error = queue_limits_commit_update(lo->lo_queue, &lim); if (error) goto out_unlock;
/* * We might switch to direct I/O mode for the loop device, write back * all dirty data the page cache now that so that the individual I/O * operations don't have to do that.
*/
vfs_fsync(file, 0);
loop_global_unlock(lo, is_loop); if (partscan)
loop_reread_partitions(lo);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
out_bdev: if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file); /* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE); return error;
}
/* * Reset the block size to the default. * * No queue freezing needed because this is called from the final * ->release call only, so there can't be any outstanding I/O.
*/
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
lim.io_min = SECTOR_SIZE;
queue_limits_commit_update(lo->lo_queue, &lim);
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo); /* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp); /* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
disk_force_media_change(lo->lo_disk);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) { int err;
/* * open_mutex has been held already in release path, so don't * acquire it if this function is called in such case. * * If the reread partition isn't from release path, lo_refcnt * must be at least one and it can only become zero when the * current holder is released.
*/
err = bdev_disk_changed(lo->lo_disk, false); if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err); /* Device is gone, no point in returning error */
}
/* * lo->lo_state is set to Lo_unbound here after above partscan has * finished. There cannot be anybody else entering __loop_clr_fd() as * Lo_rundown state protects us from all the other places trying to * change the 'lo' device.
*/
lo->lo_flags = 0; if (!part_shift)
set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
/* * Need not hold lo_mutex to fput backing file. Calling fput holding * lo_mutex triggers a circular lock dependency possibility warning as * fput can take open_mutex which is usually taken before lo_mutex.
*/
fput(filp);
}
staticint loop_clr_fd(struct loop_device *lo)
{ int err;
/* * Since lo_ioctl() is called without locks held, it is possible that * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel. * * Therefore, use global lock when setting Lo_rundown state in order to * make sure that loop_validate_file() will fail if the "struct file" * which loop_configure()/loop_change_fd() found via fget() was this * loop device.
*/
err = loop_global_lock_killable(lo, true); if (err) return err; if (lo->lo_state != Lo_bound) {
loop_global_unlock(lo, true); return -ENXIO;
} /* * Mark the device for removing the backing device on last close. * If we are the only opener, also switch the state to roundown here to * prevent new openers from coming in.
*/
/* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner.
*/ if (!(mode & BLK_OPEN_EXCL)) {
err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); if (err) return err;
}
err = mutex_lock_killable(&lo->lo_mutex); if (err) goto abort_claim;
switch (cmd) { case LOOP_SET_FD: { /* * Legacy case - pass in a zeroed out struct loop_config with * only the file descriptor set , which corresponds with the * default parameters we'd have used otherwise.
*/ struct loop_config config;
/* * Transfer 32-bit compatibility structure in userspace to 64-bit loop info * - noinlined to reduce stack space usage in main part of driver
*/ static noinline int
loop_info64_from_compat(conststruct compat_loop_info __user *arg, struct loop_info64 *info64)
{ struct compat_loop_info info;
if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT;
/* * Transfer 64-bit loop info to 32-bit compatibility structure in userspace * - noinlined to reduce stack space usage in main part of driver
*/ static noinline int
loop_info64_to_compat(conststruct loop_info64 *info64, struct compat_loop_info __user *arg)
{ struct compat_loop_info info;
if (disk_openers(disk) > 0) return; /* * Clear the backing device information if this is the last close of * a device that's been marked for auto clear, or on which LOOP_CLR_FD * has been called.
*/
/* * And now the modules code and kernel interface.
*/
/* * If max_loop is specified, create that many devices upfront. * This also becomes a hard limit. If max_loop is not specified, * the default isn't a hard limit (as before commit 85c50197716c * changed the default value from 0 for max_loop=0 reasons), just * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module * init time. Loop devices can be requested on-demand with the * /dev/loop-control interface, or be instantiated by accessing * a 'dead' device node.
*/ staticint max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO; goto failed;
}
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css); if (cmd_memcg_css)
old_memcg = set_active_memcg(
mem_cgroup_from_css(cmd_memcg_css));
/* * do_req_filebacked() may call blk_mq_complete_request() synchronously * or asynchronously if using aio. Hence, do not touch 'cmd' after * do_req_filebacked() has returned unless we are sure that 'cmd' has * not yet been completed.
*/
ret = do_req_filebacked(lo, rq);
if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
if (cmd_memcg_css) {
set_active_memcg(old_memcg);
css_put(cmd_memcg_css);
}
failed: /* complete non-aio request */ if (ret != -EIOCBQUEUED) { if (ret == -EOPNOTSUPP)
cmd->ret = ret; else
cmd->ret = ret ? -EIO : 0; if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
}
/* * We only add to the idle list if there are no pending cmds * *and* the worker will not run again which ensures that it * is safe to free any worker on the idle list
*/ if (worker && !work_pending(&worker->work)) {
worker->last_ran_at = jiffies;
list_add_tail(&worker->idle_list, &lo->idle_worker_list);
loop_set_timer(lo);
}
spin_unlock_irq(&lo->lo_work_lock);
current->flags = orig_flags;
}
err = mutex_lock_killable(&loop_ctl_mutex); if (err) goto out_free_dev;
/* allocate id, if @id >= 0, we're requesting that specific id */ if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
mutex_unlock(&loop_ctl_mutex); if (err < 0) goto out_free_dev;
i = err;
err = blk_mq_alloc_tag_set(&lo->tag_set); if (err) goto out_free_idr;
disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo); if (IS_ERR(disk)) {
err = PTR_ERR(disk); goto out_cleanup_tags;
}
lo->lo_queue = lo->lo_disk->queue;
/* * Disable partition scanning by default. The in-kernel partition * scanning can be requested individually per-device during its * setup. Userspace can always add and remove partitions from all * devices. The needed partition minors are allocated from the * extended minor space, the main loop device numbers will continue * to match the loop minors, regardless of the number of partitions * used. * * If max_part is given, partition scanning is globally enabled for * all loop devices. The minors for the main loop devices will be * multiples of max_part. * * Note: Global-for-all-devices, set-only-at-init, read-only module * parameteters like 'max_loop' and 'max_part' make things needlessly * complicated, are too static, inflexible and may surprise * userspace tools. Parameters like this in general should be avoided.
*/ if (!part_shift)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
spin_lock_init(&lo->lo_work_lock);
INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
INIT_LIST_HEAD(&lo->rootcg_cmd_list);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
disk->minors = 1 << part_shift;
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i); /* Make this loop device reachable from pathname. */
err = add_disk(disk); if (err) goto out_cleanup_disk;
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
static int loop_control_remove(int idx)
{
	struct loop_device *lo;
	int err;

	if (idx < 0) {
		pr_warn_once("deleting an unspecified loop device is not supported.\n");
		return -EINVAL;
	}

	/* Hide this loop device for serialization. */
	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;
	lo = idr_find(&loop_index_idr, idx);
	if (lo && lo->idr_visible) {
		lo->idr_visible = false;
		err = 0;
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&loop_ctl_mutex);
	if (err)
		return err;

	/* Check whether this loop device can be removed. */
	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
		goto mark_visible;
	if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
		mutex_unlock(&lo->lo_mutex);
		err = -EBUSY;
		goto mark_visible;
	}
	/* Mark this loop device as no more bound, but not quite unbound yet */
	lo->lo_state = Lo_deleting;
	mutex_unlock(&lo->lo_mutex);

	loop_remove(lo);
	return 0;

mark_visible:
	/* Removal failed: show this loop device again. */
	mutex_lock(&loop_ctl_mutex);
	lo->idr_visible = true;
	mutex_unlock(&loop_ctl_mutex);
	return err;
}
static int loop_control_get_free(int idx)
{
	struct loop_device *lo;
	int id, err;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;

	idr_for_each_entry(&loop_index_idr, lo, id) {
		/*
		 * Hitting a race results in creating a new loop device which
		 * is harmless.
		 */
		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound) {
			mutex_unlock(&loop_ctl_mutex);
			return id;
		}
	}
	mutex_unlock(&loop_ctl_mutex);

	/* No free device available: allocate a fresh one. */
	return loop_add(-1);
}
staticlong loop_control_ioctl(struct file *file, unsignedint cmd, unsignedlong parm)
{ switch (cmd) { case LOOP_CTL_ADD: return loop_add(parm); case LOOP_CTL_REMOVE: return loop_control_remove(parm); case LOOP_CTL_GET_FREE: return loop_control_get_free(parm); default: return -ENOSYS;
}
}
/* * Adjust max_part according to part_shift as it is exported * to user space so that user can decide correct minor number * if [s]he want to create more devices. * * Note that -1 is required because partition 0 is reserved * for the whole disk.
*/
max_part = (1UL << part_shift) - 1;
}
/* * There is no need to use loop_ctl_mutex here, for nobody else can * access loop_index_idr when this module is unloading (unless forced * module unloading is requested). If this is not a clean unloading, * we have no means to avoid kernel crash.
*/
idr_for_each_entry(&loop_index_idr, lo, id)
loop_remove(lo);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.