/* Is the entry in the block free (i.e. every byte of it zero)? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	char *p = disk;
	char *end = disk + info->dqi_entry_size;

	while (p < end) {
		if (*p)
			return 0;
		p++;
	}
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);
/* Find space for dquot */ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, int *err)
{
uint blk, i; struct qt_disk_dqdbheader *dh; char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); char *ddquot;
*err = 0; if (!buf) {
*err = -ENOMEM; return 0;
}
dh = (struct qt_disk_dqdbheader *)buf; if (info->dqi_free_entry) {
blk = info->dqi_free_entry;
*err = read_blk(info, blk, buf); if (*err < 0) goto out_buf;
*err = check_dquot_block_header(info, dh); if (*err) goto out_buf;
} else {
blk = get_free_dqblk(info); if ((int)blk < 0) {
*err = blk;
kfree(buf); return 0;
}
memset(buf, 0, info->dqi_usable_bs); /* This is enough as the block is already zeroed and the entry
* list is empty... */
info->dqi_free_entry = blk;
mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
} /* Block will be full? */ if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
*err = remove_free_dqentry(info, buf, blk); if (*err < 0) {
quota_error(dquot->dq_sb, "Can't remove block (%u) " "from entry free list", blk); goto out_buf;
}
}
le16_add_cpu(&dh->dqdh_entries, 1); /* Find free structure in block */
ddquot = buf + sizeof(struct qt_disk_dqdbheader); for (i = 0; i < qtree_dqstr_in_blk(info); i++) { if (qtree_entry_unused(info, ddquot)) break;
ddquot += info->dqi_entry_size;
} #ifdef __QUOTA_QT_PARANOIA if (i == qtree_dqstr_in_blk(info)) {
quota_error(dquot->dq_sb, "Data block full but it shouldn't");
*err = -EIO; goto out_buf;
} #endif
*err = write_blk(info, blk, buf); if (*err < 0) {
quota_error(dquot->dq_sb, "Can't write quota data block %u",
blk); goto out_buf;
}
dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
kfree(buf); return blk;
out_buf:
kfree(buf); return 0;
}
/*
 * Insert reference to structure into the trie.
 *
 * Walks (and if needed allocates) one tree level per recursive call;
 * blks[] records the chain of visited block numbers so that cycles and
 * free-block reuse can be detected. Returns the data block number (>= 0)
 * on success or a negative errno.
 *
 * NOTE: original text had "staticint"/"elseif" fused and #ifdef embedded
 * mid-line, which is not valid C — fixed here without behavior change.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;
	int i;

	if (!buf)
		return -ENOMEM;
	if (!blks[depth]) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		/* A freshly allocated block must not already be on our path */
		for (i = 0; i < depth; i++)
			if (ret == blks[i]) {
				quota_error(dquot->dq_sb,
					"Free block already used in tree: block %u",
					ret);
				ret = -EIO;
				goto out_buf;
			}
		blks[depth] = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, blks[depth], buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", blks[depth]);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	if (!newblk) {
		newson = 1;
	} else {
		/* Guard against on-disk corruption forming a reference cycle */
		for (i = 0; i <= depth; i++)
			if (newblk == blks[i]) {
				quota_error(dquot->dq_sb,
					"Cycle in quota tree detected: block %u index %u",
					blks[depth],
					get_index(info, dquot->dq_id, depth));
				ret = -EIO;
				goto out_buf;
			}
	}
	blks[depth + 1] = newblk;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, blks, depth + 1);
	}
	if (newson && ret >= 0) {
		/* Hook the new child into this node and write it back */
		ref[get_index(info, dquot->dq_id, depth)] =
						cpu_to_le32(blks[depth + 1]);
		ret = write_blk(info, blks[depth], buf);
	} else if (newact && ret < 0) {
		/* Insertion below us failed — release the block we allocated */
		put_free_dqblk(info, buf, blks[depth]);
	}
out_buf:
	kfree(buf);
	return ret;
}
/* Wrapper for inserting quota structure into tree */ staticinlineint dq_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
#ifdef __QUOTA_QT_PARANOIA if (info->dqi_blocks <= QT_TREEOFF) {
quota_error(dquot->dq_sb, "Quota tree root isn't allocated!"); return -EIO;
} #endif if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
quota_error(dquot->dq_sb, "Quota tree depth too big!"); return -EIO;
} return do_insert_tree(info, dquot, blks, 0);
}
/* * We don't have to be afraid of deadlocks as we never have quotas on quota * files...
*/ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{ int type = dquot->dq_id.type; struct super_block *sb = dquot->dq_sb;
ssize_t ret; char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
if (!ddquot) return -ENOMEM;
/* dq_off is guarded by dqio_sem */ if (!dquot->dq_off) {
ret = dq_insert_tree(info, dquot); if (ret < 0) {
quota_error(sb, "Error %zd occurred while creating " "quota", ret);
kfree(ddquot); return ret;
}
}
spin_lock(&dquot->dq_dqb_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dquot->dq_dqb_lock);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off); if (ret != info->dqi_entry_size) {
quota_error(sb, "dquota write failed"); if (ret >= 0)
ret = -ENOSPC;
} else {
ret = 0;
}
dqstats_inc(DQST_WRITES);
kfree(ddquot);
return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
/*
 * Free dquot entry in data block.
 *
 * Zeroes the dquot's slot in block @blk; if the block becomes empty it is
 * moved from the free-entry list to the free-block list, and if it just
 * gained its first free slot it is inserted into the free-entry list.
 * Clears dquot->dq_off on success. Returns 0 or a negative errno.
 * Fixed: "staticint" was fused in the original text — not valid C.
 */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	/* Sanity: the stored offset must point into the block being freed */
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			"other block (%u) than it should (%u)", blk,
			(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	ret = check_dquot_block_header(info, dh);
	if (ret)
		goto out_buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		/* Wipe just this entry within the block */
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}
/*
 * Remove reference to dquot from tree.
 *
 * Recurses one level per call; at the leaf level the data-block entry is
 * freed via free_dqentry(). When a child block becomes unreferenced
 * (blks[depth + 1] == 0) the reference is cleared here, and a tree block
 * that becomes entirely empty is returned to the free list (except the
 * root). Returns 0 or a negative errno.
 * Fixed: "staticint" was fused in the original text — not valid C.
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;
	int i;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blks[depth]);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	/* Guard against on-disk corruption forming a reference cycle */
	for (i = 0; i <= depth; i++)
		if (newblk == blks[i]) {
			quota_error(dquot->dq_sb,
				"Cycle in quota tree detected: block %u index %u",
				blks[depth],
				get_index(info, dquot->dq_id, depth));
			ret = -EIO;
			goto out_buf;
		}
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		blks[depth + 1] = 0;
	} else {
		blks[depth + 1] = newblk;
		ret = remove_tree(info, dquot, blks, depth + 1);
	}
	if (ret >= 0 && !blks[depth + 1]) {
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && blks[depth] != QT_TREEOFF) {
			put_free_dqblk(info, buf, blks[depth]);
			blks[depth] = 0;
		} else {
			ret = write_blk(info, blks[depth], buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    blks[depth]);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };

	/* Even not allocated? Then there is nothing to remove. */
	if (!dquot->dq_off)
		return 0;
	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
		quota_error(dquot->dq_sb, "Quota tree depth too big!");
		return -EIO;
	}
	return remove_tree(info, dquot, blks, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);
/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	loff_t ret = 0;
	int i;
	char *entry;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	/* Scan the entries after the block header for a matching id */
	entry = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(entry, dquot))
			break;
		entry += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	}
	/* Offset of the matching entry within the quota file */
	ret = ((loff_t)blk << info->dqi_blocksize_bits) +
	      sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size;
out_buf:
	kfree(buf);
	return ret;
}
/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	__le32 *ref = (__le32 *)buf;
	loff_t ret = 0;
	uint child;
	int i;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blks[depth]);
		goto out_buf;
	}
	ret = 0;
	child = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!child)	/* No reference? */
		goto out_buf;
	ret = do_check_range(dquot->dq_sb, "block", child, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	/* Check for cycles in the tree */
	for (i = 0; i <= depth; i++)
		if (child == blks[i]) {
			quota_error(dquot->dq_sb,
				"Cycle in quota tree detected: block %u index %u",
				blks[depth],
				get_index(info, dquot->dq_id, depth));
			ret = -EIO;
			goto out_buf;
		}
	blks[depth + 1] = child;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blks, depth + 1);
	else
		ret = find_block_dqentry(info, dquot, child);
out_buf:
	kfree(buf);
	return ret;
}
/* Find entry for given id in the tree - wrapper function */ staticinline loff_t find_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
quota_error(dquot->dq_sb, "Quota tree depth too big!"); return -EIO;
} return find_tree_dqentry(info, dquot, blks, 0);
}
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{ int type = dquot->dq_id.type; struct super_block *sb = dquot->dq_sb;
loff_t offset; char *ddquot; int ret = 0;
#ifdef __QUOTA_QT_PARANOIA /* Invalidated quota? */ if (!sb_dqopt(dquot->dq_sb)->files[type]) {
quota_error(sb, "Quota invalidated while reading!"); return -EIO;
} #endif /* Do we know offset of the dquot entry in the quota file? */ if (!dquot->dq_off) {
offset = find_dqentry(info, dquot); if (offset <= 0) { /* Entry not present? */ if (offset < 0)
quota_error(sb,"Can't read quota structure " "for id %u",
from_kqid(&init_user_ns,
dquot->dq_id));
dquot->dq_off = 0;
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
ret = offset; goto out;
}
dquot->dq_off = offset;
}
ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL); if (!ddquot) return -ENOMEM;
ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off); if (ret != info->dqi_entry_size) { if (ret >= 0)
ret = -EIO;
quota_error(sb, "Error while reading quota structure for id %u",
from_kqid(&init_user_ns, dquot->dq_id));
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
kfree(ddquot); goto out;
}
spin_lock(&dquot->dq_dqb_lock);
info->dqi_ops->disk2mem_dqblk(dquot, ddquot); if (!dquot->dq_dqb.dqb_bhardlimit &&
!dquot->dq_dqb.dqb_bsoftlimit &&
!dquot->dq_dqb.dqb_ihardlimit &&
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_dqb_lock);
kfree(ddquot);
out:
dqstats_inc(DQST_READS); return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int unused = test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
		     !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace);

	if (unused)
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.