// SPDX-License-Identifier: GPL-2.0-only /* * xattr.c * * Copyright (C) 2004, 2008 Oracle. All rights reserved. * * CREDITS: * Lots of code in this file is copy from linux/fs/ext3/xattr.c. * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
/*
 * Parameters describing an xattr to be set: the name's namespace
 * index, the name itself, and the value with its length.  A NULL
 * xi_value requests removal of the attribute.
 */
struct ocfs2_xattr_info {
	int		 xi_name_index;
	const char	*xi_name;
	int		 xi_name_len;
	const void	*xi_value;	/* NULL means remove the xattr */
	size_t		 xi_value_len;
};
/*
 * State of a lookup for an extended attribute within an inode's
 * xattr storage (inline in the inode or in an external block).
 */
struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the block buffer head which has the extended
	 * attribute; when the extended attribute lives in the inode,
	 * xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;
	struct ocfs2_xattr_bucket *bucket;
	/* [base, end) bounds the storage region being searched */
	void *base;
	void *end;
	/* NOTE(review): `here` appears to track the matching entry; only
	 * meaningful when not_found is clear -- confirm against callers. */
	struct ocfs2_xattr_entry *here;
	int not_found;
};
/* Operations on struct ocfs2_xa_entry */
struct ocfs2_xa_loc;
/*
 * Vtable abstracting the xattr storage backends (block-stored entries
 * vs. hash buckets -- see the ocfs2_xa_block_* and ocfs2_xa_bucket_*
 * helpers below) behind common operations on an ocfs2_xa_loc.
 */
struct ocfs2_xa_loc_operations {
	/*
	 * Journal functions
	 */
	int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
				  int type);
	void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);

	/*
	 * Return a pointer to the appropriate buffer in loc->xl_storage
	 * at the given offset from loc->xl_header.
	 */
	void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);

	/* Can we reuse the existing entry for the new value? */
	int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
			     struct ocfs2_xattr_info *xi);

	/* How much space is needed for the new value? */
	int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
			       struct ocfs2_xattr_info *xi);

	/*
	 * Return the offset of the first name+value pair.  This is
	 * the start of our downward-filling free space.
	 */
	int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);

	/*
	 * Remove the name+value at this location.  Do whatever is
	 * appropriate with the remaining name+value pairs.
	 */
	void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);

	/* Fill xl_entry with a new entry */
	void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);

	/* Add name+value storage to an entry */
	void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);

	/*
	 * Initialize the value buf's access and bh fields for this entry.
	 * ocfs2_xa_fill_value_buf() will handle the xv pointer.
	 */
	void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_value_buf *vb);
};
/*
 * Describes an xattr entry location.  This is a memory structure
 * tracking the on-disk structure.
 */
struct ocfs2_xa_loc {
	/* This xattr belongs to this inode */
	struct inode *xl_inode;

	/* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
	struct ocfs2_xattr_header *xl_header;

	/* Bytes from xl_header to the end of the storage */
	int xl_size;

	/*
	 * The ocfs2_xattr_entry this location describes.  If this is
	 * NULL, this location describes the on-disk structure where it
	 * would have been.
	 */
	struct ocfs2_xattr_entry *xl_entry;

	/*
	 * Internal housekeeping
	 */

	/* Buffer(s) containing this entry */
	void *xl_storage;

	/* Operations on the storage backing this location */
	const struct ocfs2_xa_loc_operations *xl_ops;
};
/*
 * Convenience function to calculate how much space is needed for a
 * given name+value pair.  An inline value is stored in full; a value
 * larger than OCFS2_XATTR_INLINE_SIZE lives outside and only needs
 * room for its tree root here.
 */
static int namevalue_size(int name_len, uint64_t value_len)
{
	if (value_len > OCFS2_XATTR_INLINE_SIZE)
		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	else
		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
}
/*
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket()
 * initializes them fully.
 *
 * Returns 0 on success or -ENOMEM if a buffer head could not be
 * obtained; on failure the bucket's buffers are released.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno, int new)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
					      xb_blkno + i);
		if (!bucket->bu_bhs[i]) {
			rc = -ENOMEM;
			mlog_errno(rc);
			break;
		}

		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
					   bucket->bu_bhs[i])) {
			if (new)
				ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							      bucket->bu_bhs[i]);
			else {
				set_buffer_uptodate(bucket->bu_bhs[i]);
				ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							  bucket->bu_bhs[i]);
			}
		}
	}

	/* On any failure, drop every buffer we managed to grab */
	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}
/* Read the xattr bucket at xb_blkno */
/*
 * NOTE(review): this region appears garbled/truncated by extraction:
 * "staticint" is a smashed keyword pair, the body references sb/bh/xb,
 * which this signature does not declare (it looks like the body of a
 * block-validation helper was merged in here), and the function is
 * never closed.  Verify against the original source.
 */
staticint ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				  u64 xb_blkno)
{
	int rc;

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
	if (rc)
		return rc;

	/*
	 * Errors after here are fatal
	 */

	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has bad signature %.*s\n",
				   (unsignedlonglong)bh->b_blocknr, 7,
				   xb->xb_signature);
	}

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an invalid xb_blkno of %llu\n",
				   (unsignedlonglong)bh->b_blocknr,
				   (unsignedlonglong)le64_to_cpu(xb->xb_blkno));
	}

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n",
				   (unsignedlonglong)bh->b_blocknr,
				   le32_to_cpu(xb->xb_fs_generation));
	}
static u32 ocfs2_xattr_name_hash(struct inode *inode, constchar *name, int name_len)
{ /* Get hash value of uuid from super block */
u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash; int i;
/* hash extended attribute name */ for (i = 0; i < name_len; i++) {
hash = (hash << OCFS2_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
*name++;
}
/*
 * NOTE(review): this region appears to be a garbled merge of at least
 * two functions.  The signature is ocfs2_calc_security_init(), but the
 * body references mode/a_size/want_meta/acl_len/why/clusters_to_add/
 * credits/status/handle/vb, none of which this signature declares, and
 * the function's own closing brace is missing.  The tail (the
 * RESTART_NONE block) looks like it came from an extend-allocation
 * loop in a different function.  Verify against the original source;
 * reproduced as-is below.
 */
int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     int *want_clusters,
			     int *xattr_credits,
			     struct ocfs2_alloc_context **xattr_ac)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						 si->value_len);

	/*
	 * The max space of security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * So reserve one metadata block for it is ok.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	/* reserve clusters for xattr value which will be set in B tree*/
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							    si->value_len);

		/*
		 * The max space of security xattr taken inline is
		 * 256(name) + 80(value) + 16(entry) = 352 bytes,
		 * The max space of acl xattr taken inline is
		 * 80(value) + 16(entry) * 2(if directory) = 192 bytes,
		 * when blocksize = 512, may reserve one more cluster for
		 * xattr bucket, otherwise reserve one metadata block
		 * for them is ok.
		 * If this is a new directory with inline data,
		 * we choose to reserve the entire inline area for
		 * directory contents and force an external xattr block.
		 */
		if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
		    (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
		    (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
			*want_meta = *want_meta + 1;
			*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
		}

		/*
		 * reserve credits and clusters for xattrs which has large value
		 * and have to be set outside
		 */
		if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
			new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
								si->value_len);
			*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
								   new_clusters);
			*want_clusters += new_clusters;
		}
		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
		    acl_len > OCFS2_XATTR_INLINE_SIZE) {
			/* for directory, it has DEFAULT and ACCESS two types of acls */
			new_clusters = (S_ISDIR(mode) ? 2 : 1) *
				ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
			*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
								   new_clusters);
			*want_clusters += new_clusters;
		}

		if (why != RESTART_NONE && clusters_to_add) {
			/*
			 * We can only fail in case the alloc file doesn't give
			 * up enough clusters.
			 */
			BUG_ON(why == RESTART_META);

			credits = ocfs2_calc_extend_credits(inode->i_sb,
							    &vb->vb_xv->xr_list);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
	}
/*
 * List the names of all entries in @header into @buffer via
 * ocfs2_xattr_list_entry(), accumulating the bytes written in
 * @result.
 *
 * Returns the total number of bytes placed in @buffer, or a negative
 * error code from ocfs2_xattr_list_entry().
 */
static int ocfs2_xattr_list_entries(struct inode *inode,
				    struct ocfs2_xattr_header *header,
				    char *buffer, size_t buffer_size)
{
	size_t result = 0;
	int i, type, ret;
	const char *name;

	for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &header->xh_entries[i];

		type = ocfs2_xattr_get_type(entry);
		/* Names are stored at xe_name_offset from the header */
		name = (const char *)header +
			le16_to_cpu(entry->xe_name_offset);

		ret = ocfs2_xattr_list_entry(inode->i_sb,
					     buffer, buffer_size,
					     &result, type, name,
					     entry->xe_name_len);
		if (ret)
			return ret;
	}

	return result;
}
/*
 * NOTE(review): garbled fragment.  The signature below belongs to
 * ocfs2_has_inline_xattr_value_outside(), but the loop body references
 * num_clusters/bpc/blkno/bh/handle/ret/out, none of which this
 * signature declares, and the function is never closed.  It looks like
 * the body of a different, cluster-walking helper was merged in here.
 * Verify against the original source.
 */
int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
					 struct ocfs2_dinode *di)
{
	struct ocfs2_xattr_header *xh;
	int i;

	for (i = 0; i < num_clusters * bpc; i++, blkno++) {
		ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
				       &bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_journal_access(handle,
					   INODE_CACHE(inode),
					   bh,
					   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
/* Give a pointer into the storage for the given offset */
static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
{
	/* Offsets are relative to xl_header and must stay inside storage */
	BUG_ON(offset >= loc->xl_size);
	return loc->xl_ops->xlo_offset_pointer(loc, offset);
}
/*
 * Wipe the name+value pair and allow the storage to reclaim it.  This
 * must be followed by either removal of the entry or a call to
 * ocfs2_xa_add_namevalue().
 */
static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
	loc->xl_ops->xlo_wipe_namevalue(loc);
}
/*
 * Find lowest offset to a name+value pair.  This is the start of our
 * downward-growing free space.
 */
static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
	return loc->xl_ops->xlo_get_free_start(loc);
}
/* Can we reuse loc->xl_entry for xi? */
static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_can_reuse(loc, xi);
}
/* How much free space is needed to set the new value */
static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
				struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_check_space(loc, xi);
}
/*
 * Add a new entry for the given name hash.  The name+value storage is
 * attached afterwards by add_namevalue(); until then xe_name_offset is
 * parked at xl_size (see comment below).
 */
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
	loc->xl_ops->xlo_add_entry(loc, name_hash);
	loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
	/*
	 * We can't leave the new entry's xe_name_offset at zero or
	 * add_namevalue() will go nuts.  We set it to the size of our
	 * storage so that it can never be less than any other entry.
	 */
	loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}
/*
 * NOTE(review): two truncated fragments follow.  ocfs2_xa_add_namevalue()
 * is cut off right after its local declarations, and
 * ocfs2_xa_fill_value_buf() ends after its sanity BUG_ONs without being
 * closed.  Verify against the original source.
 */
staticvoid ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
				  struct ocfs2_xattr_info *xi)
{
	int size = namevalue_size_xi(xi);
	int nameval_offset;
	char *nameval_buf;

staticvoid ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_value_buf *vb)
{
	int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
	int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);

	/* Value bufs are for value trees */
	BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
	BUG_ON(namevalue_size_xe(loc->xl_entry) !=
	       (name_size + OCFS2_XATTR_ROOT_SIZE));
/* Can the existing block-stored entry's name+value region hold xi? */
static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_info *xi)
{
	/*
	 * Block storage is strict.  If the sizes aren't exact, we will
	 * remove the old one and reinsert the new.
	 */
	return namevalue_size_xe(loc->xl_entry) ==
		namevalue_size_xi(xi);
}
/*
 * Find the lowest xe_name_offset among all entries; everything above
 * xl_header and below that offset is free space.  With no entries,
 * free space runs all the way to xl_size.
 */
static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
{
	struct ocfs2_xattr_header *xh = loc->xl_header;
	int i, count = le16_to_cpu(xh->xh_count);
	int offset, free_start = loc->xl_size;

	for (i = 0; i < count; i++) {
		offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
		if (offset < free_start)
			free_start = offset;
	}

	return free_start;
}
/*
 * Check whether the block storage has room for xi.  Because block
 * storage reclaims the entry being replaced, only the difference in
 * usage needs to be available.
 */
static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
				      struct ocfs2_xattr_info *xi)
{
	int count = le16_to_cpu(loc->xl_header->xh_count);
	int free_start = ocfs2_xa_get_free_start(loc);
	int needed_space = ocfs2_xi_entry_usage(xi);

	/*
	 * Block storage will reclaim the original entry before inserting
	 * the new value, so we only need the difference.  If the new
	 * entry is smaller than the old one, we don't need anything.
	 */
	if (loc->xl_entry) {
		/* Don't need space if we're reusing! */
		if (ocfs2_xa_can_reuse_entry(loc, xi))
			needed_space = 0;
		else
			needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
	}
	if (needed_space < 0)
		needed_space = 0;
	return ocfs2_xa_check_space_helper(needed_space, free_start, count);
}
/*
 * Block storage for xattrs keeps the name+value pairs compacted.  When
 * we remove one, we have to shift any that preceded it towards the end.
 */
/*
 * NOTE(review): this fragment is garbled.  namevalue_offset,
 * first_namevalue_offset and namevalue_size are declared but never
 * assigned before being used in the loop below, and `entry` is unused;
 * the code that computed them (and the memmove that compacts the
 * pairs) appears to have been lost in extraction.  Verify against the
 * original source.
 */
staticvoid ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
	int i, offset;
	int namevalue_offset, first_namevalue_offset, namevalue_size;
	struct ocfs2_xattr_entry *entry = loc->xl_entry;
	struct ocfs2_xattr_header *xh = loc->xl_header;
	int count = le16_to_cpu(xh->xh_count);

	/* Now tell xh->xh_entries about it */
	for (i = 0; i < count; i++) {
		offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
		if (offset <= namevalue_offset)
			le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
				     namevalue_size);
	}

	/*
	 * Note that we don't update xh_free_start or xh_name_value_len
	 * because they're not used in block-stored xattrs.
	 */
}
/*
 * NOTE(review): truncated fragment.  The computed block/block_offset
 * are never used and there is no return statement or closing brace;
 * the tail of this function appears to have been lost in extraction.
 * Verify against the original source.
 */
staticvoid *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
					   int offset)
{
	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
	int block, block_offset;

	/* The header is at the front of the bucket */
	block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
	block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
/*
 * Adjust free_start so that a name+value pair of @size bytes carved
 * downward from it does not straddle a block boundary within the
 * bucket; if it would, pull free_start back to the block start.
 */
static int ocfs2_bucket_align_free_start(struct super_block *sb,
					 int free_start, int size)
{
	/*
	 * We need to make sure that the name+value pair fits within
	 * one block.
	 */
	if (((free_start - size) >> sb->s_blocksize_bits) !=
	    ((free_start - 1) >> sb->s_blocksize_bits))
		free_start -= free_start % sb->s_blocksize;

	return free_start;
}
/*
 * Check whether a bucket has room for xi.  Unlike block storage,
 * buckets do not reclaim replaced name+value pairs (only the
 * ocfs2_xattr_entry itself), and the pair must not straddle a block
 * boundary, so the free start may need block alignment.
 */
static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
				       struct ocfs2_xattr_info *xi)
{
	int rc;
	int count = le16_to_cpu(loc->xl_header->xh_count);
	int free_start = ocfs2_xa_get_free_start(loc);
	int needed_space = ocfs2_xi_entry_usage(xi);
	int size = namevalue_size_xi(xi);
	struct super_block *sb = loc->xl_inode->i_sb;

	/*
	 * Bucket storage does not reclaim name+value pairs it cannot
	 * reuse.  They live as holes until the bucket fills, and then
	 * the bucket is defragmented.  However, the bucket can reclaim
	 * the ocfs2_xattr_entry.
	 */
	if (loc->xl_entry) {
		/* Don't need space if we're reusing! */
		if (ocfs2_xa_can_reuse_entry(loc, xi))
			needed_space = 0;
		else
			needed_space -= sizeof(struct ocfs2_xattr_entry);
	}
	BUG_ON(needed_space < 0);

	if (free_start < size) {
		if (needed_space)
			return -ENOSPC;
	} else {
		/*
		 * First we check if it would fit in the first place.
		 * Below, we align the free start to a block.  This may
		 * slide us below the minimum gap.  By checking unaligned
		 * first, we avoid that error.
		 */
		rc = ocfs2_xa_check_space_helper(needed_space, free_start,
						 count);
		if (rc)
			return rc;
		free_start = ocfs2_bucket_align_free_start(sb, free_start,
							   size);
	}
	return ocfs2_xa_check_space_helper(needed_space, free_start, count);
}
/*
 * NOTE(review): garbled region.  ocfs2_xa_bucket_add_entry() below is
 * cut off inside its binary-search loop, and what follows (the
 * block-boundary BUG_ONs, the journal re-access with
 * trunc_rc/access_rc, and the entry-removal tail using
 * index/entry/memmove) references names this function never declares
 * and clearly belongs to other functions whose headers were lost in
 * extraction.  Verify against the original source before relying on
 * any of it.
 */
staticvoid ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
	struct ocfs2_xattr_header *xh = loc->xl_header;
	int count = le16_to_cpu(xh->xh_count);
	int low = 0, high = count - 1, tmp;
	struct ocfs2_xattr_entry *tmp_xe;

	/*
	 * We keep buckets sorted by name_hash, so we need to find
	 * our insert place.
	 */
	while (low <= high && count) {
		tmp = (low + high) / 2;
		tmp_xe = &xh->xh_entries[tmp];

		/* Values are not allowed to straddle block boundaries */
		BUG_ON(block_offset !=
		       ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
		/* We expect the bucket to be filled in */
		BUG_ON(!bucket->bu_bhs[block_offset]);

		/*
		 * The caller of ocfs2_xa_value_truncate() has already called
		 * ocfs2_xa_journal_access on the loc. However, The truncate code
		 * calls ocfs2_extend_trans().  This may commit the previous
		 * transaction and open a new one.  If this is a bucket, truncate
		 * could leave only vb->vb_bh set up for journaling.  Meanwhile,
		 * the caller is expecting to dirty the entire bucket.  So we must
		 * reset the journal work.  We do this even if truncate has failed,
		 * as it could have failed after committing the extend.
		 */
		access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
						    OCFS2_JOURNAL_ACCESS_WRITE);

		/* Errors in truncate take precedence */
		return trunc_rc ? trunc_rc : access_rc;
	}

	/*
	 * Only zero out the entry if there are more remaining.  This is
	 * important for an empty bucket, as it keeps track of the
	 * bucket's hash value.  It doesn't hurt empty block storage.
	 */
	if (count) {
		index = ((char *)entry - (char *)&xh->xh_entries) /
			sizeof(struct ocfs2_xattr_entry);
		memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
			(count - index) * sizeof(struct ocfs2_xattr_entry));
		memset(&xh->xh_entries[count], 0,
		       sizeof(struct ocfs2_xattr_entry));
	}
}
/* * If we have a problem adjusting the size of an external value during * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr * in an intermediate state. For example, the value may be partially * truncated. * * If the value tree hasn't changed, the extend/truncate went nowhere. * We have nothing to do. The caller can treat it as a straight error. * * If the value tree got partially truncated, we now have a corrupted * extended attribute. We're going to wipe its entry and leak the * clusters. Better to leak some storage than leave a corrupt entry. * * If the value tree grew, it obviously didn't grow enough for the * new entry. We're not going to try and reclaim those clusters either. * If there was already an external value there (orig_clusters != 0), * the new clusters are attached safely and we can just leave the old * value in place. If there was no external value there, we remove * the entry. * * This way, the xattr block we store in the journal will be consistent. * If the size change broke because of the journal, no changes will hit * disk anyway.
*/ staticvoid ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc, constchar *what, unsignedint orig_clusters)
{ unsignedint new_clusters = ocfs2_xa_value_clusters(loc); char *nameval_buf = ocfs2_xa_offset_pointer(loc,
le16_to_cpu(loc->xl_entry->xe_name_offset));
if (new_clusters < orig_clusters) {
mlog(ML_ERROR, "Partial truncate while %s xattr %.*s. Leaking " "%u clusters and removing the entry\n",
what, loc->xl_entry->xe_name_len, nameval_buf,
orig_clusters - new_clusters);
ocfs2_xa_remove_entry(loc);
} elseif (!orig_clusters) {
mlog(ML_ERROR, "Unable to allocate an external value for xattr " "%.*s safely. Leaking %u clusters and removing the " "entry\n",
loc->xl_entry->xe_name_len, nameval_buf,
new_clusters - orig_clusters);
ocfs2_xa_remove_entry(loc);
} elseif (new_clusters > orig_clusters)
mlog(ML_ERROR, "Unable to grow xattr %.*s safely. %u new clusters " "have been added, but the value will not be " "modified\n",
loc->xl_entry->xe_name_len, nameval_buf,
new_clusters - orig_clusters);
}
/*
 * NOTE(review): orphaned fragment.  The external-value truncation logic
 * below (part of a remove path, judging by the "removing" argument) has
 * lost its enclosing function header, and ocfs2_xa_reuse_entry() that
 * follows is cut off right after its local declarations.  Verify
 * against the original source.
 */
	if (!ocfs2_xattr_is_local(loc->xl_entry)) {
		orig_clusters = ocfs2_xa_value_clusters(loc);
		rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
		if (rc) {
			mlog_errno(rc);
			/*
			 * Since this is remove, we can return 0 if
			 * ocfs2_xa_cleanup_value_truncate() is going to
			 * wipe the entry anyway.  So we check the
			 * cluster count as well.
			 */
			if (orig_clusters != ocfs2_xa_value_clusters(loc))
				rc = 0;
			ocfs2_xa_cleanup_value_truncate(loc, "removing",
							orig_clusters);
			goto out;
		}
	}

/*
 * Take an existing entry and make it ready for the new value.  This
 * won't allocate space, but it may free space.  It should be ready for
 * ocfs2_xa_prepare_entry() to finish the work.
 */
staticint ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
			       struct ocfs2_xattr_info *xi,
			       struct ocfs2_xattr_set_ctxt *ctxt)
{
	int rc = 0;
	int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
	unsignedint orig_clusters;
	char *nameval_buf;
	int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
	int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
/*
 * Prepares loc->xl_entry to receive the new xattr.  This includes
 * properly setting up the name+value pair region.  If loc->xl_entry
 * already exists, it will take care of modifying it appropriately.
 *
 * Note that this modifies the data.  You did journal_access already,
 * right?
 */
/*
 * NOTE(review): truncated.  The "if (loc->xl_entry) {" branch below is
 * never closed -- the non-reusable-entry path that balanced it appears
 * to have been lost in extraction -- so the braces in this fragment do
 * not match.  Verify against the original source.
 */
staticint ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
				 struct ocfs2_xattr_info *xi,
				 u32 name_hash,
				 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int rc = 0;
	unsignedint orig_clusters;
	__le64 orig_value_size = 0;

	rc = ocfs2_xa_check_space(loc, xi);
	if (rc)
		goto out;

	if (loc->xl_entry) {
		if (ocfs2_xa_can_reuse_entry(loc, xi)) {
			orig_value_size = loc->xl_entry->xe_value_size;
			rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
			if (rc)
				goto out;
			goto alloc_value;
		}

	/*
	 * If we get here, we have a blank entry.  Fill it.  We grow our
	 * name+value pair back from the end.
	 */
	ocfs2_xa_add_namevalue(loc, xi);
	if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
		ocfs2_xa_install_value_root(loc);

alloc_value:
	if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
		orig_clusters = ocfs2_xa_value_clusters(loc);
		rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
		if (rc < 0) {
			ctxt->set_abort = 1;
			ocfs2_xa_cleanup_value_truncate(loc, "growing",
							orig_clusters);
			/*
			 * If we were growing an existing value,
			 * ocfs2_xa_cleanup_value_truncate() won't remove
			 * the entry. We need to restore the original value
			 * size.
			 */
			if (loc->xl_entry) {
				BUG_ON(!orig_value_size);
				loc->xl_entry->xe_value_size = orig_value_size;
			}
			mlog_errno(rc);
		}
	}

out:
	return rc;
}
/*
 * Store the value portion of the name+value pair.  This will skip
 * values that are stored externally.  Their tree roots were set up
 * by ocfs2_xa_prepare_entry().
 */
/*
 * NOTE(review): ocfs2_xa_store_value() below is cut off after its local
 * declarations, and the code that follows (journal access, the
 * remove/prepare/store sequencing on `ret`, and references to
 * name_hash/out/out_dirty) belongs to a caller -- likely an
 * ocfs2_xa_set()-style function -- whose header was lost in
 * extraction.  Verify against the original source.
 */
staticint ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
			       struct ocfs2_xattr_info *xi,
			       struct ocfs2_xattr_set_ctxt *ctxt)
{
	int rc = 0;
	int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
	int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
	char *nameval_buf;
	struct ocfs2_xattr_value_buf vb;

	ret = ocfs2_xa_journal_access(ctxt->handle, loc,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * From here on out, everything is going to modify the buffer a
	 * little.  Errors are going to leave the xattr header in a
	 * sane state.  Thus, even with errors we dirty the sucker.
	 */

	/* Don't worry, we are never called with !xi_value and !xl_entry */
	if (!xi->xi_value) {
		ret = ocfs2_xa_remove(loc, ctxt);
		goto out_dirty;
	}

	ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
	if (ret) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out_dirty;
	}

	ret = ocfs2_xa_store_value(loc, xi, ctxt);
	if (ret)
		mlog_errno(ret);
/*
 * In xattr remove, if it is stored outside and refcounted, we may have
 * the chance to split the refcount tree.  So need the allocators.
 */
/*
 * NOTE(review): truncated.  The gotos below target an "out" label that
 * never appears and the function is not closed; its tail was lost in
 * extraction.  Verify against the original source.
 */
staticint ocfs2_lock_xattr_remove_allocators(struct inode *inode,
					     struct ocfs2_xattr_value_root *xv,
					     struct ocfs2_caching_info *ref_ci,
					     struct buffer_head *ref_root_bh,
					     struct ocfs2_alloc_context **meta_ac,
					     int *ref_credits)
{
	int ret, meta_add = 0;
	u32 p_cluster, num_clusters;
	unsignedint ext_flags;

	*ref_credits = 0;
	ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
				       &num_clusters,
				       &xv->xr_list,
				       &ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Nothing extra to reserve unless the extent is refcounted */
	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
						 ref_root_bh, xv,
						 &meta_add, ref_credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
						meta_add, meta_ac);
	if (ret)
		mlog_errno(ret);
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die
Richtigkeit noch die Qualität der bereitgestellten Informationen
zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.