/* * Protects mutations of configfs_dirent linkage together with proper i_mutex * Also protects mutations of symlinks linkage to target configfs_dirent * Mutators of configfs_dirent linkage must *both* have the proper inode locked * and configfs_dirent_lock locked, in that order. * This allows one to safely traverse configfs_dirent trees and symlinks without * having to lock inodes. * * Protects setting of CONFIGFS_USET_DROPPING: checking the flag * unlocked is not reliable unless in detach_groups() called from * rmdir()/unregister() and from configfs_attach_group()
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
/* * All of link_obj/unlink_obj/link_group/unlink_group require that * subsys->su_mutex is held. * But parent configfs_subsystem is NULL when config_item is root. * Use this mutex when config_item is root.
*/ static DEFINE_MUTEX(configfs_subsystem_mutex);
if (sd) { /* Coordinate with configfs_readdir */
spin_lock(&configfs_dirent_lock); /* * Set sd->s_dentry to null only when this dentry is the one * that is going to be killed. Otherwise configfs_d_iput may * run just after configfs_lookup and set sd->s_dentry to * NULL even it's still in use.
*/ if (sd->s_dentry == dentry)
sd->s_dentry = NULL;
/* * Helpers to make lockdep happy with our recursive locking of default groups' * inodes (see configfs_attach_group() and configfs_detach_group()). * We put default groups i_mutexes in separate classes according to their depth * from the youngest non-default group ancestor. * * For a non-default group A having default groups A/B, A/C, and A/C/D, default * groups A/B and A/C will have their inode's mutex in class * default_group_class[0], and default group A/C/D will be in * default_group_class[1]. * * The lock classes are declared and assigned in inode.c, according to the * s_depth value. * The s_depth value is initialized to -1, adjusted to >= 0 when attaching * default groups, and reset to -1 when all default groups are attached. During * attachment, if configfs_create() sees s_depth > 0, the lock class of the new * inode's mutex is set to default_group_class[s_depth - 1].
*/
if (parent_depth >= 0)
sd->s_depth = parent_depth + 1;
}
static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
        /*
         * The item's i_mutex class is already set up, so s_depth is now
         * only used to assign s_depth to newly created sub-directories,
         * which always happens with the item's i_mutex held.
         *
         * sd->s_depth == -1 iff this is a non-default group; otherwise
         * (a default group) sd->s_depth > 0 (see create_dir()).
         */
        if (sd->s_depth != -1)
                return;

        /* A non-default group about to create its default groups. */
        sd->s_depth = 0;
}
static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
        /* No default groups will be created under this directory anymore. */
        sd->s_depth = -1;
}
/*
 * Allocates a new configfs_dirent and links it to the parent
 * configfs_dirent.
 */ staticstruct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd, void *element, int type, struct configfs_fragment *frag)
{ struct configfs_dirent * sd;
sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL); if (!sd) return ERR_PTR(-ENOMEM);
/*
 * NOTE(review): text appears to be missing here.  sd's fields
 * (s_count, s_children, s_element, s_type, s_frag) are never
 * initialized before being tested below, and configfs_dirent_lock is
 * unlocked at the end without a visible matching spin_lock() — likely
 * lost during extraction; recover the missing lines from the upstream
 * file before building.
 */
/*
 * configfs_lookup scans only for unpinned items.  s_children is
 * partitioned so that configfs_lookup can bail out early.
 * CONFIGFS_PINNED and CONFIGFS_NOT_PINNED are not symmetrical: readdir
 * cursors still need to be inserted at the front of the list.
 */ if (sd->s_type & CONFIGFS_PINNED)
list_add_tail(&sd->s_sibling, &parent_sd->s_children); else
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
return sd;
}
/*
 * Return -EEXIST if there is already a configfs element with the same
 * name for the same parent.
 *
 * Called with the parent inode's i_mutex held.
 */ staticint configfs_dirent_exists(struct dentry *dentry)
{ struct configfs_dirent *parent_sd = dentry->d_parent->d_fsdata; constunsignedchar *new = dentry->d_name.name; struct configfs_dirent *sd;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { if (sd->s_element) { constunsignedchar *existing = configfs_get_name(sd); if (strcmp(existing, new)) continue; else return -EEXIST;
}
}
/*
 * NOTE(review): text appears to be lost here.  configfs_dirent_exists()
 * should end with "return 0; }", and a second function — the owner of
 * the "if (!sd) return;" below, which unlinks a dirent from its parent
 * under configfs_dirent_lock and drops its reference — should begin.
 * A bare "return;" is not valid in an int-returning function; recover
 * the missing lines from the upstream file.
 */
if (!sd) return;
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_put(sd);
}
/**
 * configfs_create_dir - create a directory for a config_item.
 * @item:	config_item we're creating a directory for.
 * @dentry:	config_item's dentry.
 * @frag:	config_item's fragment.
 *
 * Note: user-created entries won't be allowed under this new directory
 * until it is validated by configfs_dir_set_ready()
*/
/*
 * Allow userspace to create new entries under a new directory created with
 * configfs_create_dir(), and under all of its children directories recursively.
 * @sd		configfs_dirent of the new directory to validate
 *
 * Caller must hold configfs_dirent_lock.
*/ staticvoid configfs_dir_set_ready(struct configfs_dirent *sd)
{ struct configfs_dirent *child_sd;
/* * Check that a directory does not belong to a directory hierarchy being * attached and not validated yet. * @sd configfs_dirent of the directory to check * * @return non-zero iff the directory was validated * * Note: takes configfs_dirent_lock, so the result may change from false to true * in two consecutive calls, but never from true to false.
*/ int configfs_dirent_is_ready(struct configfs_dirent *sd)
{ int ret;
spin_lock(&configfs_dirent_lock);
ret = !(sd->s_type & CONFIGFS_USET_CREATING);
spin_unlock(&configfs_dirent_lock);
if (d_really_is_positive(d))
simple_rmdir(d_inode(parent),d);
pr_debug(" o %pd removing done (%d)\n", d, d_count(d));
dput(parent);
}
/** * configfs_remove_dir - remove an config_item's directory. * @item: config_item we're removing. * * The only thing special about this is that we remove any files in * the directory before we remove the directory, and we've inlined * what used to be configfs_rmdir() below, instead of calling separately. * * Caller holds the mutex of the item's inode
*/
if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG);
/* * Fake invisibility if dir belongs to a group/default groups hierarchy * being attached * * This forbids userspace to read/write attributes of items which may * not complete their initialization, since the dentries of the * attributes won't be instantiated.
*/ if (!configfs_dirent_is_ready(parent_sd)) return ERR_PTR(-ENOENT);
/* * s_children is partitioned, see configfs_new_dirent. The first * pinned item indicates we can stop scanning.
*/ if (sd->s_type & CONFIGFS_PINNED) break;
/* * Note: CONFIGFS_PINNED and CONFIGFS_NOT_PINNED are asymmetric. * there may be a readdir cursor in this list
*/ if ((sd->s_type & CONFIGFS_NOT_PINNED) &&
!strcmp(configfs_get_name(sd), dentry->d_name.name)) { struct configfs_attribute *attr = sd->s_element;
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
/*
 * Only subdirectories count here.  Files (CONFIGFS_NOT_PINNED) are
 * attributes and are removed by rmdir().  We recurse, setting
 * CONFIGFS_USET_DROPPING on all children that are candidates for
 * default detach.
 * If there is an error, the caller will reset the flags via
 * configfs_detach_rollback().
 */
static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
{
        struct configfs_dirent *parent_sd = dentry->d_fsdata;
        struct configfs_dirent *sd;
        int ret;

        /* Mark that we're trying to drop the group */
        parent_sd->s_type |= CONFIGFS_USET_DROPPING;

        if (parent_sd->s_links)
                return -EBUSY;

        list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
                if (!sd->s_element ||
                    (sd->s_type & CONFIGFS_NOT_PINNED))
                        continue;

                /* Anything pinned that is not a default group is busy. */
                if (!(sd->s_type & CONFIGFS_USET_DEFAULT))
                        return -ENOTEMPTY;

                /* Abort if racing with mkdir() */
                if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
                        if (wait)
                                *wait = dget(sd->s_dentry);
                        return -EAGAIN;
                }

                /*
                 * Yup, recursive.  If there's a problem, blame deep
                 * nesting of default_groups.
                 */
                ret = configfs_detach_prep(sd->s_dentry, wait);
                if (ret)
                        return ret;
        }

        return 0;
}
/* * Walk the tree, resetting CONFIGFS_USET_DROPPING wherever it was * set.
*/ staticvoid configfs_detach_rollback(struct dentry *dentry)
{ struct configfs_dirent *parent_sd = dentry->d_fsdata; struct configfs_dirent *sd;
parent_sd->s_type &= ~CONFIGFS_USET_DROPPING;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) if (sd->s_type & CONFIGFS_USET_DEFAULT)
configfs_detach_rollback(sd->s_dentry);
}
/** * Drop reference from dget() on entrance.
*/
dput(dentry);
}
/*
 * This fakes mkdir(2) on a default_groups[] entry.  It creates a dentry,
 * attaches it, and then does fixup on the sd->s_type.
 *
 * We could, perhaps, tweak our parent's ->mkdir for a minute and try
 * using vfs_mkdir.  Just a thought.
*/ staticint create_default_group(struct config_group *parent_group, struct config_group *group, struct configfs_fragment *frag)
{ int ret; struct configfs_dirent *sd; /* We trust the caller holds a reference to parent */ struct dentry *child, *parent = parent_group->cg_item.ci_dentry;
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
ret = -ENOMEM;
child = d_alloc_name(parent, group->cg_item.ci_name); if (child) {
d_add(child, NULL);
group = item->ci_group; if (group) {
list_del_init(&item->ci_entry);
item->ci_group = NULL;
item->ci_parent = NULL;
/* Drop the reference for ci_entry */
config_item_put(item);
/* Drop the reference for ci_parent */
config_group_put(group);
}
}
static void link_obj(struct config_item *parent_item, struct config_item *item)
{
        /*
         * Parent seems redundant with group, but it makes certain
         * traversals much nicer.
         */
        item->ci_parent = parent_item;

        /*
         * The child's ci_parent link holds a reference on the parent
         * (taken via config_group_get()).
         */
        item->ci_group = config_group_get(to_config_group(parent_item));
        list_add_tail(&item->ci_entry, &item->ci_group->cg_children);

        /*
         * The parent's cg_children entry holds a reference on the
         * child.
         */
        config_item_get(item);
}
/*
 * The goal is that configfs_attach_item() (and configfs_attach_group())
 * can be called from either the VFS or this module.  That is, they
 * assume the items have been created, the dentry allocated, and the
 * dcache all ready to go.
 *
 * If they fail, they must clean up after themselves as if they had
 * never been called.  The caller (VFS or local function) will handle
 * cleaning up the dcache bits.
 *
 * configfs_detach_group() and configfs_detach_item() behave similarly
 * on the way out.  They assume the proper semaphores are held, they
 * clean up the configfs items, and they expect their callers to handle
 * the dcache bits.
 */
static int configfs_attach_item(struct config_item *parent_item,
                                struct config_item *item,
                                struct dentry *dentry,
                                struct configfs_fragment *frag)
{
        int ret;

        ret = configfs_create_dir(item, dentry, frag);
        if (ret)
                return ret;

        ret = populate_attrs(item);
        if (ret) {
                /*
                 * We are going to remove an inode and its dentry, but
                 * the VFS may already have hit and used them.  Thus, we
                 * must lock them as rmdir() would.
                 */
                inode_lock(d_inode(dentry));
                configfs_remove_dir(item);
                d_inode(dentry)->i_flags |= S_DEAD;
                dont_mount(dentry);
                inode_unlock(d_inode(dentry));
                d_delete(dentry);
        }

        return ret;
}
/* Caller holds the mutex of the item's inode */
static void configfs_detach_item(struct config_item *item)
{
        /* Remove the attribute files first, then the directory itself. */
        detach_attrs(item);
        configfs_remove_dir(item);
}
ret = configfs_attach_item(parent_item, item, dentry, frag); if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
/* * FYI, we're faking mkdir in populate_groups() * We must lock the group's inode to avoid races with the VFS * which can already hit the inode and try to add/remove entries * under it. * * We must also lock the inode to remove it safely in case of * error, as rmdir() would.
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
ret = populate_groups(to_config_group(item), frag); if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
inode_unlock(d_inode(dentry)); if (ret)
d_delete(dentry);
}
return ret;
}
/* Caller holds the mutex of the group's inode */
static void configfs_detach_group(struct config_item *item)
{
        /* Tear down the default groups before the item itself. */
        detach_groups(to_config_group(item));
        configfs_detach_item(item);
}
/* * After the item has been detached from the filesystem view, we are * ready to tear it out of the hierarchy. Notify the client before * we do that so they can perform any cleanup that requires * navigating the hierarchy. A client does not need to provide this * callback. The subsystem semaphore MUST be held by the caller, and * references must be valid for both items. It also assumes the * caller has validated ci_type.
*/ staticvoid client_disconnect_notify(struct config_item *parent_item, struct config_item *item)
{ conststruct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
if (type->ct_group_ops && type->ct_group_ops->disconnect_notify)
type->ct_group_ops->disconnect_notify(to_config_group(parent_item),
item);
}
/* * Drop the initial reference from make_item()/make_group() * This function assumes that reference is held on item * and that item holds a valid reference to the parent. Also, it * assumes the caller has validated ci_type.
*/ staticvoid client_drop_item(struct config_item *parent_item, struct config_item *item)
{ conststruct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
/* * If ->drop_item() exists, it is responsible for the * config_item_put().
*/ if (type->ct_group_ops && type->ct_group_ops->drop_item)
type->ct_group_ops->drop_item(to_config_group(parent_item),
item); else
config_item_put(item);
}
staticint configfs_dump(struct configfs_dirent *sd, int level)
{ struct configfs_dirent *child_sd; int ret = 0;
configfs_dump_one(sd, level);
if (!(sd->s_type & (CONFIGFS_DIR|CONFIGFS_ROOT))) return 0;
list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
ret = configfs_dump(child_sd, level + 2); if (ret) break;
}
return ret;
} #endif
/* * configfs_depend_item() and configfs_undepend_item() * * WARNING: Do not call these from a configfs callback! * * This describes these functions and their helpers. * * Allow another kernel system to depend on a config_item. If this * happens, the item cannot go away until the dependent can live without * it. The idea is to give client modules as simple an interface as * possible. When a system asks them to depend on an item, they just * call configfs_depend_item(). If the item is live and the client * driver is in good shape, we'll happily do the work for them. * * Why is the locking complex? Because configfs uses the VFS to handle * all locking, but this function is called outside the normal * VFS->configfs path. So it must take VFS locks to prevent the * VFS->configfs stuff (configfs_mkdir(), configfs_rmdir(), etc). This is * why you can't call these functions underneath configfs callbacks. * * Note, btw, that this can be called at *any* time, even when a configfs * subsystem isn't registered, or when configfs is loading or unloading. * Just like configfs_register_subsystem(). So we take the same * precautions. We pin the filesystem. We lock configfs_dirent_lock. * If we can find the target item in the * configfs tree, it must be part of the subsystem tree as well, so we * do not need the subsystem semaphore. Holding configfs_dirent_lock helps * locking out mkdir() and rmdir(), who might be racing us.
*/
/* * configfs_depend_prep() * * Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are * attributes. This is similar but not the same to configfs_detach_prep(). * Note that configfs_detach_prep() expects the parent to be locked when it * is called, but we lock the parent *inside* configfs_depend_prep(). We * do that so we can unlock it if we find nothing. * * Here we do a depth-first search of the dentry hierarchy looking for * our object. * We deliberately ignore items tagged as dropping since they are virtually * dead, as well as items in the middle of attachment since they virtually * do not exist yet. This completes the locking out of racing mkdir() and * rmdir(). * Note: subdirectories in the middle of attachment start with s_type = * CONFIGFS_DIR|CONFIGFS_USET_CREATING set by create_dir(). When * CONFIGFS_USET_CREATING is set, we ignore the item. The actual set of * s_type is in configfs_new_dirent(), which has configfs_dirent_lock. * * If the target is not found, -ENOENT is bubbled up. * * This adds a requirement that all config_items be unique! * * This is recursive. There isn't * much on the stack, though, so folks that need this function - be careful * about your stack! Patches will be accepted to make it iterative.
*/ staticint configfs_depend_prep(struct dentry *origin, struct config_item *target)
{ struct configfs_dirent *child_sd, *sd; int ret = 0;
spin_lock(&configfs_dirent_lock); /* Scan the tree, return 0 if found */
ret = configfs_depend_prep(subsys_dentry, target); if (ret) goto out_unlock_dirent_lock;
/* * We are sure that the item is not about to be removed by rmdir(), and * not in the middle of attachment by mkdir().
*/
p = target->ci_dentry->d_fsdata;
p->s_dependent_count += 1;
/* * Pin the configfs filesystem. This means we can safely access * the root of the configfs filesystem.
*/
root = configfs_pin_fs(); if (IS_ERR(root)) return PTR_ERR(root);
/* * Next, lock the root directory. We're going to check that the * subsystem is really registered, and so we need to lock out * configfs_[un]register_subsystem().
*/
inode_lock(d_inode(root));
subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item); if (!subsys_sd) {
ret = -ENOENT; goto out_unlock_fs;
}
/* Ok, now we can trust subsys/s_item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
out_unlock_fs:
inode_unlock(d_inode(root));
/* * If we succeeded, the fs is pinned via other methods. If not, * we're done with it anyway. So release_fs() is always right.
*/
configfs_release_fs();
/*
 * Release the dependent linkage.  This is much simpler than
 * configfs_depend_item() because we know that the client driver is
 * pinned, thus the subsystem is pinned, and therefore configfs is pinned.
 */ void configfs_undepend_item(struct config_item *target)
{ struct configfs_dirent *sd;
/*
 * Since we can trust everything is pinned, we just need
 * configfs_dirent_lock.
 */
spin_lock(&configfs_dirent_lock);
/*
 * NOTE(review): the body that looks up target's dirent and decrements
 * sd->s_dependent_count appears to be missing between these two lock
 * operations — sd is declared but never assigned.  Likely lost text;
 * recover the missing lines from the upstream file before building.
 */
/*
 * After this unlock, we cannot trust the item to stay alive!
 * DO NOT REFERENCE item after this unlock.
 */
spin_unlock(&configfs_dirent_lock);
}
EXPORT_SYMBOL(configfs_undepend_item);
/*
 * caller_subsys is the caller's subsystem, not the target's.  This is
 * used to determine whether we should lock root and check subsys or not.
 * When we are in the same subsystem as our target there is no need for
 * locking, as we know the subsystem is valid and is not unregistered
 * during this function: we are called from a callback of one of its
 * children and the VFS holds a lock on some inode.  Otherwise we have
 * to lock our root to ensure that the target's subsystem is not
 * unregistered during this function.
*/ int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys, struct config_item *target)
{ struct configfs_subsystem *target_subsys; struct config_group *root, *parent; struct configfs_dirent *subsys_sd; int ret = -ENOENT;
/* Disallow this function for configfs root */ if (configfs_is_root(target)) return -EINVAL;
parent = target->ci_group; /* * This may happen when someone is trying to depend root * directory of some subsystem
*/ if (configfs_is_root(&parent->cg_item)) {
target_subsys = to_configfs_subsystem(to_config_group(target));
root = parent;
} else {
target_subsys = parent->cg_subsys; /* Find a cofnigfs root as we may need it for locking */ for (root = parent; !configfs_is_root(&root->cg_item);
root = root->cg_item.ci_group)
;
}
if (target_subsys != caller_subsys) { /* * We are in other configfs subsystem, so we have to do * additional locking to prevent other subsystem from being * unregistered
*/
inode_lock(d_inode(root->cg_item.ci_dentry));
/* * As we are trying to depend item from other subsystem * we have to check if this subsystem is still registered
*/
subsys_sd = configfs_find_subsys_dentry(
root->cg_item.ci_dentry->d_fsdata,
&target_subsys->su_group.cg_item); if (!subsys_sd) goto out_root_unlock;
} else {
subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
}
/* Now we can execute core of depend item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
if (target_subsys != caller_subsys)
out_root_unlock: /* * We were called from subsystem other than our target so we * took some locks so now it's time to release them
*/
inode_unlock(d_inode(root->cg_item.ci_dentry));
/* * Fake invisibility if dir belongs to a group/default groups hierarchy * being attached
*/ if (!configfs_dirent_is_ready(sd)) {
ret = -ENOENT; goto out;
}
if (!(sd->s_type & CONFIGFS_USET_DIR)) {
ret = -EPERM; goto out;
}
frag = new_fragment(); if (!frag) {
ret = -ENOMEM; goto out;
}
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
if (!type || !type->ct_group_ops ||
(!type->ct_group_ops->make_group &&
!type->ct_group_ops->make_item)) {
ret = -EPERM; /* Lack-of-mkdir returns -EPERM */ goto out_put;
}
/* * The subsystem may belong to a different module than the item * being created. We don't want to safely pin the new item but * fail to pin the subsystem it sits under.
*/ if (!subsys->su_group.cg_item.ci_type) {
ret = -EINVAL; goto out_put;
}
subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner; if (!try_module_get(subsys_owner)) {
ret = -EINVAL; goto out_put;
}
name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL); if (!name) {
ret = -ENOMEM; goto out_subsys_put;
}
mutex_lock(&subsys->su_mutex); if (type->ct_group_ops->make_group) {
group = type->ct_group_ops->make_group(to_config_group(parent_item), name); if (!group)
group = ERR_PTR(-ENOMEM); if (!IS_ERR(group)) {
link_group(to_config_group(parent_item), group);
item = &group->cg_item;
} else
ret = PTR_ERR(group);
} else {
item = type->ct_group_ops->make_item(to_config_group(parent_item), name); if (!item)
item = ERR_PTR(-ENOMEM); if (!IS_ERR(item))
link_obj(parent_item, item); else
ret = PTR_ERR(item);
}
mutex_unlock(&subsys->su_mutex);
kfree(name); if (ret) { /* * If ret != 0, then link_obj() was never called. * There are no extra references to clean up.
*/ goto out_subsys_put;
}
/* * link_obj() has been called (via link_group() for groups). * From here on out, errors must clean that up.
*/
type = item->ci_type; if (!type) {
ret = -EINVAL; goto out_unlink;
}
new_item_owner = type->ct_owner; if (!try_module_get(new_item_owner)) {
ret = -EINVAL; goto out_unlink;
}
/* * I hate doing it this way, but if there is * an error, module_put() probably should * happen after any cleanup.
*/
module_got = 1;
/* * Make racing rmdir() fail if it did not tag parent with * CONFIGFS_USET_DROPPING * Note: if CONFIGFS_USET_DROPPING is already set, attach_group() will * fail and let rmdir() terminate correctly
*/
spin_lock(&configfs_dirent_lock); /* This will make configfs_detach_prep() fail */
sd->s_type |= CONFIGFS_USET_IN_MKDIR;
spin_unlock(&configfs_dirent_lock);
if (group)
ret = configfs_attach_group(parent_item, item, dentry, frag); else
ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR; if (!ret)
configfs_dir_set_ready(dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
out_unlink: if (ret) { /* Tear down everything we built up */
mutex_lock(&subsys->su_mutex);
client_disconnect_notify(parent_item, item); if (group)
unlink_group(group); else
unlink_obj(item);
client_drop_item(parent_item, item);
mutex_unlock(&subsys->su_mutex);
if (module_got)
module_put(new_item_owner);
}
out_subsys_put: if (ret)
module_put(subsys_owner);
out_put: /* * link_obj()/link_group() took a reference from child->parent, * so the parent is safely pinned. We can drop our working * reference.
*/
config_item_put(parent_item);
put_fragment(frag);
sd = dentry->d_fsdata; if (sd->s_type & CONFIGFS_USET_DEFAULT) return -EPERM;
/* Get a working ref until we have the child */
parent_item = configfs_get_config_item(dentry->d_parent);
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
if (!parent_item->ci_type) {
config_item_put(parent_item); return -EINVAL;
}
/* configfs_mkdir() shouldn't have allowed this */
BUG_ON(!subsys->su_group.cg_item.ci_type);
subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;
/* * Ensure that no racing symlink() will make detach_prep() fail while * the new link is temporarily attached
*/ do { struct dentry *wait;
mutex_lock(&configfs_symlink_mutex);
spin_lock(&configfs_dirent_lock); /* * Here's where we check for dependents. We're protected by * configfs_dirent_lock. * If no dependent, atomically tag the item as dropping.
*/
ret = sd->s_dependent_count ? -EBUSY : 0; if (!ret) {
ret = configfs_detach_prep(dentry, &wait); if (ret)
configfs_detach_rollback(dentry);
}
spin_unlock(&configfs_dirent_lock);
mutex_unlock(&configfs_symlink_mutex);
if (ret) { if (ret != -EAGAIN) {
config_item_put(parent_item); return ret;
}
/* Wait until the racing operation terminates */
inode_lock(d_inode(wait));
inode_unlock(d_inode(wait));
dput(wait);
}
} while (ret == -EAGAIN);
inode_lock(d_inode(dentry)); /* * Fake invisibility if dir belongs to a group/default groups hierarchy * being attached
*/
err = -ENOENT; if (configfs_dirent_is_ready(parent_sd)) {
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL); if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data); else
err = 0;
}
inode_unlock(d_inode(dentry));
if (!dir_emit_dots(file, ctx)) return 0;
spin_lock(&configfs_dirent_lock); if (ctx->pos == 2)
list_move(q, &parent_sd->s_children); for (p = q->next; p != &parent_sd->s_children; p = p->next) { struct configfs_dirent *next; constchar *name; int len; struct inode *inode = NULL;
next = list_entry(p, struct configfs_dirent, s_sibling); if (!next->s_element) continue;
/* * We'll have a dentry and an inode for * PINNED items and for open attribute * files. We lock here to prevent a race * with configfs_d_iput() clearing * s_dentry before calling iput(). * * Why do we go to the trouble? If * someone has an attribute file open, * the inode number should match until * they close it. Beyond that, we don't * care.
*/
dentry = next->s_dentry; if (dentry)
inode = d_inode(dentry); if (inode)
ino = inode->i_ino;
spin_unlock(&configfs_dirent_lock); if (!inode)
ino = iunique(sb, 2);
name = configfs_get_name(next);
len = strlen(name);
if (!dir_emit(ctx, name, len, ino,
fs_umode_to_dtype(next->s_mode))) return 0;
/** * configfs_register_group - creates a parent-child relation between two groups * @parent_group: parent group * @group: child group * * link groups, creates dentry for the child and attaches it to the * parent dentry. * * Return: 0 on success, negative errno code on error
*/ int configfs_register_group(struct config_group *parent_group, struct config_group *group)
{ struct configfs_subsystem *subsys = parent_group->cg_subsys; struct dentry *parent; struct configfs_fragment *frag; int ret;
/**
 * configfs_register_default_group() - allocates and registers a child group
 * @parent_group:	parent group
 * @name:		child group name
 * @item_type:		child item type description
 *
 * Boilerplate to allocate and register a child group with its parent.  We
 * need kzalloc'ed memory because the child's default_groups list must start
 * out empty.
 *
 * Return: allocated config group or ERR_PTR() on error
 */
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
                                constchar *name,
                                conststruct config_item_type *item_type)
{
        struct config_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        config_group_init_type_name(group, name, item_type);

        ret = configfs_register_group(parent_group, group);
        if (ret) {
                kfree(group);
                return ERR_PTR(ret);
        }

        return group;
}
EXPORT_SYMBOL(configfs_register_default_group);
/**
 * configfs_unregister_default_group() - unregisters and frees a child group
 * @group:	the group to act on
 */
void configfs_unregister_default_group(struct config_group *group)
{
        /* Undo configfs_register_default_group(): detach, then free. */
        configfs_unregister_group(group);
        kfree(group);
}
EXPORT_SYMBOL(configfs_unregister_default_group);
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.18Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.