/*
 * NOTE(review): tail of __hfsplus_ext_write_extent() — the function's
 * opening lines (signature, declarations of fd/hip) are outside this
 * chunk, so the overall contract is inferred; confirm against the
 * complete file. Returns 0 on success or a negative errno.
 */
/* Locate the record for the cached extents in the extents btree. */
res = hfs_brec_find(fd, hfs_find_rec_by_key); if (hip->extent_state & HFSPLUS_EXT_NEW) { if (res != -ENOENT) return res; /* Fail early and avoid ENOSPC during the btree operation */
/* New record: reserve btree nodes up front, then insert it. */
res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); if (res) return res;
hfs_brec_insert(fd, hip->cached_extents, sizeof(hfsplus_extent_rec));
/* The record is now in the btree: clear both DIRTY and NEW. */
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
} else { if (res) return res;
/* Existing record: overwrite it in place in the found bnode. */
hfs_bnode_write(fd->bnode, hip->cached_extents,
fd->entryoffset, fd->entrylength);
hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
}
/*
 * We can't just use hfsplus_mark_inode_dirty here, because we
 * also get called from hfsplus_write_inode, which should not
 * redirty the inode. Instead the callers have to be careful
 * to explicitly mark the inode dirty, too.
 */
set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
return 0;
}
/*
 * hfsplus_ext_write_extent_locked - flush the inode's cached extent record
 * @inode: inode whose cached extent record may need writing back
 *
 * If the cached extent record is dirty, set up a find_data for the
 * extents btree and write the record back via __hfsplus_ext_write_extent().
 * The caller must hold HFSPLUS_I(inode)->extents_lock (see
 * hfsplus_ext_write_extent(), which takes it around this call).
 *
 * Returns 0 on success (including the nothing-to-do case) or a
 * negative errno.
 *
 * Fix: the original read "staticint" — a missing space that is a
 * hard compile error.
 */
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}
/*
 * hfsplus_ext_write_extent - flush the cached extent record, taking the lock
 * @inode: inode whose cached extent record may need writing back
 *
 * NOTE(review): this chunk appears to be missing text after the second
 * mutex_unlock() below never appears for this function — starting at the
 * HFSPLUS_EXT_CNID check, the code uses locals (hip, was_dirty, ablock,
 * dblock) that are not declared here and clearly belongs to a different
 * function (likely a block-mapping routine) whose opening lines are not
 * visible. Do not edit this span without the complete file.
 */
int hfsplus_ext_write_extent(struct inode *inode)
{ int res;
mutex_lock(&HFSPLUS_I(inode)->extents_lock);
res = hfsplus_ext_write_extent_locked(inode);
mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
/* --- apparent chunk boundary: the lines below use undeclared locals --- */
if (inode->i_ino == HFSPLUS_EXT_CNID) return -EIO;
mutex_lock(&hip->extents_lock);
/*
 * hfsplus_ext_read_extent will write out a cached extent into
 * the extents btree. In that case we may have to mark the inode
 * dirty even for a pure read of an extent here.
 */
was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
res = hfsplus_ext_read_extent(inode, ablock); if (res) {
mutex_unlock(&hip->extents_lock); return -EIO;
}
/* Translate the allocation block to a disk block via the cached record. */
dblock = hfsplus_ext_find_block(hip->cached_extents,
ablock - hip->cached_start);
mutex_unlock(&hip->extents_lock);
/*
 * NOTE(review): interior of an extent-freeing routine (presumably
 * hfsplus_free_extents) — the enclosing signature and the declarations
 * of extent/offset/count/block_nr/start/err/i/sb are outside this chunk.
 *
 * Fix: the original contained "elseif", which is not a C keyword and is
 * a hard compile error; it must be "else if".
 */
	hfsplus_dump_extent(extent);
	/* Walk the 8-slot extent record until 'offset' lands in a slot. */
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;

found:
	/* Free 'block_nr' blocks walking the record backwards from slot i. */
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			/* Whole slot is freed: release it and clear it. */
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			/* Only the tail of this slot is freed; shrink it. */
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
/*
 * hfsplus_free_fork - release all allocation blocks owned by a fork
 * @sb:   superblock of the filesystem
 * @cnid: catalog node id owning the fork
 * @fork: on-disk fork data (first 8 extents plus total block count)
 * @type: fork type passed through to __hfsplus_ext_read_extent()
 *
 * Frees the blocks covered by the fork's inline extent record first;
 * if the fork has more blocks than the inline record covers, walks the
 * extents btree freeing the overflow records.
 *
 * NOTE(review): this function is TRUNCATED in this chunk — the do-loop
 * body ends after hfs_brec_remove() and the function's tail (loop
 * condition, hfs_find_exit, return) is missing. Consult the full file.
 */
int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{ struct hfs_find_data fd;
hfsplus_extent_rec ext_entry;
u32 total_blocks, blocks, start; int res, i;
/* Nothing allocated: nothing to free. */
total_blocks = be32_to_cpu(fork->total_blocks); if (!total_blocks) return 0;
/* Sum the blocks covered by the inline (first 8) extents. */
blocks = 0; for (i = 0; i < 8; i++)
blocks += be32_to_cpu(fork->extents[i].block_count);
res = hfsplus_free_extents(sb, fork->extents, blocks, blocks); if (res) return res; if (total_blocks == blocks) return 0;
/* Overflow extents live in the extents btree: free them record by record. */
res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); if (res) return res; do {
res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
total_blocks, type); if (res) break;
start = be32_to_cpu(fd.key->ext.start_block);
hfs_brec_remove(&fd);
/*
 * NOTE(review): extraction residue — the text below is a German website
 * disclaimer that is not part of this source file and should be removed.
 * Translated: "The information on this web page was compiled carefully
 * and to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */