/*
 *	SERIALIZATION of the Block Allocation Map.
 *
 *	the working state of the block allocation map is accessed in
 *	two directions:
 *
 *	1) allocation and free requests that start at the dmap
 *	   level and move up through the dmap control pages (i.e.
 *	   the vast majority of requests).
 *
 *	2) allocation requests that start at dmap control page
 *	   level and work down towards the dmaps.
 *
 *	the serialization scheme used here is as follows.
 *
 *	requests which start at the bottom are serialized against each
 *	other through buffers and each request holds onto its buffers
 *	as it works its way up from a single dmap to the required level
 *	of dmap control page.
 *	requests that start at the top are serialized against each other
 *	and requests that start from the bottom by the multiple read/single
 *	write inode lock of the bmap inode.  requests starting at the top
 *	take this lock in write mode while requests starting at the bottom
 *	take the lock in read mode.  a single top-down request may proceed
 *	exclusively while multiple bottoms-up requests may proceed
 *	simultaneously (under the protection of busy buffers).
 *
 *	in addition to information found in dmaps and dmap control pages,
 *	the working state of the block allocation map also includes read/
 *	write information maintained in the bmap descriptor (i.e. total
 *	free block count, allocation group level free block counts).
 *	a single exclusive lock (BMAP_LOCK) is used to guard this
 *	information in the face of multiple bottoms-up requests.
 *	(lock ordering: IREAD_LOCK, BMAP_LOCK);
 *
 *	accesses to the persistent state of the block allocation map
 *	(limited to the persistent bitmaps in dmaps) are guarded by
 *	(busy) buffers.
 */
/*
 * NAME:	dbUnmount()
 *
 * FUNCTION:	terminate the block allocation map in preparation for
 *		file system unmount.
 *
 *		the in-core bmap descriptor is written to disk (via
 *		dbSync()) and the memory for this descriptor is freed.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *	mounterror - non-zero if the mount failed; suppresses the final
 *		     sync of the (possibly inconsistent) in-core state.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbUnmount(struct inode *ipbmap, int mounterror)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	/* Write the in-core bmap descriptor and dirty dmap pages to disk
	 * unless the mount failed or the fs is read-only.  dbSync() takes
	 * care of copying the descriptor into the on-disk control page,
	 * so it must run BEFORE the in-core descriptor is freed below.
	 * (The original fused a duplicate of dbSync()'s body after the
	 * kfree(), dereferencing bmp after it was freed and using
	 * undeclared locals; that dead duplicate has been removed.)
	 */
	if (!(mounterror || isReadOnly(ipbmap)))
		dbSync(ipbmap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipbmap->i_mapping, 0);

	/* free the memory for the in-memory bmap. */
	kfree(bmp);
	JFS_SBI(ipbmap->i_sb)->bmap = NULL;

	return (0);
}
/*
 * NAME:	dbFree()
 *
 * FUNCTION:	free the specified block range from the working block
 *		allocation map.
 *
 *		the blocks will be freed from the working map one dmap
 *		at a time.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- starting block number to be freed.
 *	nblocks	- number of blocks to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
	struct metapage *mp;
	struct dmap *dp;
	int nb, rc;
	s64 lblkno, rem;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct super_block *sb = ipbmap->i_sb;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* block to be freed better be within the mapsize. */
	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
		IREAD_UNLOCK(ipbmap);
		/* fixed broken casts: "(unsignedlonglong)" would not compile */
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ip->i_sb, "block to be freed is outside the map\n");
		return -EIO;
	}

	/**
	 * TRIM the blocks, when mounted with discard option
	 */
	if (JFS_SBI(sb)->flag & JFS_DISCARD)
		if (JFS_SBI(sb)->minblks_trim <= nblocks)
			jfs_issue_discard(ipbmap, blkno, nblocks);

	/*
	 * free the blocks a dmap at a time.
	 */
	mp = NULL;
	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* release previous dmap if any */
		if (mp) {
			write_metapage(mp);
		}

		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			IREAD_UNLOCK(ipbmap);
			return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the number of blocks to be freed from
		 * this dmap.
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		/* free the blocks. */
		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
			jfs_error(ip->i_sb, "error in block map\n");
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}
	}

	/* write the last buffer. */
	if (mp)
		write_metapage(mp);

	IREAD_UNLOCK(ipbmap);

	return (0);
}
/*
 * NAME:	dbUpdatePMap()
 *
 * FUNCTION:	update the allocation state (free or allocate) of the
 *		specified block range in the persistent block allocation map.
 *
 *		the blocks will be updated in the persistent map one
 *		dmap at a time.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *	free	- 'true' if block range is to be freed from the persistent
 *		  map; 'false' if it is to be allocated.
 *	blkno	- starting block number of the range.
 *	nblocks	- number of contiguous blocks in the range.
 *	tblk	- transaction block;
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 *
 * NOTE(review): the original text of this function was truncated by the
 * extraction (unbalanced braces; no metapage read when crossing into a new
 * dmap; no per-word advance; the locals log/lsn/difft/diffp/flags declared
 * but never used).  The missing pieces below were reconstructed from those
 * declarations and the surrounding code's conventions — verify against the
 * project's logsync handling.
 */
int
dbUpdatePMap(struct inode *ipbmap,
	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
	int nblks, dbitno, wbitno, rbits;
	int word, nbits, nwords;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	s64 lblkno, rem, lastlblkno;
	u32 mask;
	struct dmap *dp;
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;	/* fixed broken token "unsignedlong" */

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
		return -EIO;
	}

	/* compute delta of transaction lsn from log syncpt */
	lsn = tblk->lsn;
	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
	logdiff(difft, lsn, log);

	/*
	 * update the block state a dmap at a time.
	 */
	mp = NULL;
	lastlblkno = 0;
	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		if (lblkno != lastlblkno) {
			/* release previous dmap, then read the new one */
			if (mp) {
				write_metapage(mp);
			}

			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
					   0);
			if (mp == NULL)
				return -EIO;
			metapage_wait_for_io(mp);
		}
		dp = (struct dmap *) mp->data;

		/* determine the bit number and word within the dmap of
		 * the starting block.  also determine how many blocks
		 * are to be updated within this dmap.
		 */
		dbitno = blkno & (BPERDMAP - 1);
		word = dbitno >> L2DBWORD;
		nblks = min(rem, (s64)BPERDMAP - dbitno);

		/* update the bits of the dmap words. the first and last
		 * words may only have a subset of their bits updated. if
		 * this is the case, we'll work against that word (i.e.
		 * partial first and/or last) only in a single pass.  a
		 * single pass will also be used to update all words that
		 * are to have all their bits updated.
		 */
		for (rbits = nblks; rbits > 0;
		     rbits -= nbits, dbitno += nbits) {
			/* determine the bit number within the word and
			 * the number of bits within the word.
			 */
			wbitno = dbitno & (DBWORD - 1);
			nbits = min(rbits, DBWORD - wbitno);

			/* check if only part of the word is to be updated. */
			if (nbits < DBWORD) {
				/* update (free or allocate) the bits
				 * in this word.
				 */
				mask =
				    (ONES << (DBWORD - nbits) >> wbitno);
				if (free)
					dp->pmap[word] &=
					    cpu_to_le32(~mask);
				else
					dp->pmap[word] |=
					    cpu_to_le32(mask);

				word += 1;
			} else {
				/* one or more words are to have all
				 * their bits updated.  determine how
				 * many words and how many bits.
				 */
				nwords = rbits >> L2DBWORD;
				nbits = nwords << L2DBWORD;

				/* update (free or allocate) the bits
				 * in these words.
				 */
				if (free)
					memset(&dp->pmap[word], 0,
					       nwords * 4);
				else
					memset(&dp->pmap[word], (int) ONES,
					       nwords * 4);

				word += nwords;
			}
		}

		/*
		 * update dmap lsn: tie the page into the logsync list of
		 * the transaction so recovery ordering is preserved.
		 */
		if (lblkno == lastlblkno)
			continue;

		lastlblkno = lblkno;

		LOGSYNC_LOCK(log, flags);
		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move mp after tblock in logsync list */
				list_move(&mp->synclist, &tblk->synclist);
			}

			/* inherit younger/larger clsn */
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
		} else {
			mp->log = log;
			mp->lsn = lsn;

			/* insert mp after tblock in logsync list */
			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
		}
		LOGSYNC_UNLOCK(log, flags);
	}

	/* write the last buffer. */
	if (mp) {
		write_metapage(mp);
	}

	return (0);
}
/*
 * NAME:	dbNextAG()
 *
 * FUNCTION:	find the preferred allocation group for new allocations.
 *
 *		Within the allocation groups, we maintain a preferred
 *		allocation group which consists of a group with at least
 *		average free space.  It is the preferred group that we target
 *		new inode allocation towards.  The tie-in between inode
 *		allocation and block allocation occurs as we allocate the
 *		first (data) block of an inode and specify the inode (block)
 *		as the allocation hint for this block.
 *
 *		We try to avoid having more than one open file growing in
 *		an allocation group, as this will lead to fragmentation.
 *		This differs from the old OS/2 method of trying to keep
 *		empty ags around for large allocations.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	the preferred allocation group number.
 */
int dbNextAG(struct inode *ipbmap)
{
	s64 avgfree;
	int agpref;
	s64 hwm = 0;
	int i;
	int next_best = -1;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	BMAP_LOCK(bmp);

	/* determine the average number of free blocks within the ags. */
	avgfree = (u32)bmp->db_nfree / bmp->db_numag;

	/*
	 * if the current preferred ag does not have an active allocator
	 * and has at least average freespace, return it
	 */
	agpref = bmp->db_agpref;
	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
	    (bmp->db_agfree[agpref] >= avgfree))
		goto unlock;

	/* From the last preferred ag, find the next one with at least
	 * average free space.
	 */
	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
		if (agpref >= bmp->db_numag)
			agpref = 0;	/* wrap around */

		if (atomic_read(&bmp->db_active[agpref]))
			/* open file is currently growing in this ag */
			continue;
		if (bmp->db_agfree[agpref] >= avgfree) {
			/* Return this one */
			bmp->db_agpref = agpref;
			goto unlock;
		} else if (bmp->db_agfree[agpref] > hwm) {
			/* fixed broken token "elseif" */
			/* Less than avg. freespace, but best so far */
			hwm = bmp->db_agfree[agpref];
			next_best = agpref;
		}
	}

	/*
	 * If no inactive ag was found with average freespace, use the
	 * next best
	 */
	if (next_best != -1)
		bmp->db_agpref = next_best;
	/* else leave db_agpref unchanged */
unlock:
	BMAP_UNLOCK(bmp);

	/* return the preferred group.
	 */
	return (bmp->db_agpref);
}
/*
 * NAME:	dbAlloc()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous free
 *		blocks from the working allocation block map.
 *
 *		the block allocation policy uses hints and a multi-step
 *		approach.
 *
 *		for allocation requests smaller than the number of blocks
 *		per dmap, we first try to allocate the new blocks
 *		immediately following the hint.  if these blocks are not
 *		available, we try to allocate blocks near the hint.  if
 *		no blocks near the hint are available, we next try to
 *		allocate within the same dmap as contains the hint.
 *
 *		if no blocks are available in the dmap or the allocation
 *		request is larger than the dmap size, we try to allocate
 *		within the same allocation group as contains the hint.  if
 *		this does not succeed, we finally try to allocate anywhere
 *		within the aggregate.
 *
 *		we also try to allocate anywhere within the aggregate
 *		for allocation requests larger than the allocation group
 *		size or requests that specify no hint value.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	hint	- allocation hint.
 *	nblocks	- number of contiguous blocks in the range.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated contiguous range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
	int rc, agno;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp;
	struct metapage *mp;
	s64 lblkno, blkno;
	struct dmap *dp;
	int l2nb;
	s64 mapSize;
	int writers;

	/* assert that nblocks is valid */
	assert(nblocks > 0);

	/* get the log2 number of blocks to be allocated.
	 * if the number of blocks is not a log2 multiple,
	 * it will be rounded up to the next log2 multiple.
	 */
	l2nb = BLKSTOL2(nblocks);

	bmp = JFS_SBI(ip->i_sb)->bmap;

	mapSize = bmp->db_mapsize;

	/* the hint should be within the map */
	if (hint >= mapSize) {
		jfs_error(ip->i_sb, "the hint is outside the map\n");
		return -EIO;
	}

	/* if the number of blocks to be allocated is greater than the
	 * allocation group size, try to allocate anywhere.
	 */
	if (l2nb > bmp->db_agl2size) {
		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

		rc = dbAllocAny(bmp, nblocks, l2nb, results);

		goto write_unlock;
	}

	/*
	 * If no hint, let dbNextAG recommend an allocation group
	 */
	if (hint == 0)
		goto pref_ag;

	/* we would like to allocate close to the hint.  adjust the
	 * hint to the block following the hint since the allocators
	 * will start looking for free space starting at this point.
	 */
	blkno = hint + 1;

	if (blkno >= bmp->db_mapsize)
		goto pref_ag;

	agno = blkno >> bmp->db_agl2size;

	/* check if blkno crosses over into a new allocation group.
	 * if so, check if we should allow allocations within this
	 * allocation group.
	 */
	if ((blkno & (bmp->db_agsize - 1)) == 0)
		/* check if the AG is currently being written to.
		 * if so, call dbNextAG() to find a non-busy
		 * AG with sufficient free space.
		 */
		if (atomic_read(&bmp->db_active[agno]))
			goto pref_ag;

	/* check if the allocation request size can be satisfied from a
	 * single dmap.  if so, try to allocate from the dmap containing
	 * the hint using a tiered strategy.
	 */
	if (nblocks <= BPERDMAP) {
		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

		/* get the buffer for the dmap containing the hint.
		 */
		rc = -EIO;
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			goto read_unlock;

		dp = (struct dmap *) mp->data;

		/* first, try to satisfy the allocation request with the
		 * blocks beginning at the hint.
		 */
		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
		    != -ENOSPC) {
			if (rc == 0) {
				*results = blkno;
				mark_metapage_dirty(mp);
			}

			release_metapage(mp);
			goto read_unlock;
		}

		writers = atomic_read(&bmp->db_active[agno]);
		if ((writers > 1) ||
		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
			/*
			 * Someone else is writing in this allocation
			 * group.  To avoid fragmenting, try another ag
			 */
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			goto pref_ag;
		}

		/* next, try to satisfy the allocation request with blocks
		 * near the hint.
		 */
		if ((rc =
		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		/* try to satisfy the allocation request with blocks within
		 * the same dmap as the hint.
		 */
		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		release_metapage(mp);
		IREAD_UNLOCK(ipbmap);
	}

	/* try to satisfy the allocation request with blocks within
	 * the same allocation group as the hint.
	 */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
		goto write_unlock;

	IWRITE_UNLOCK(ipbmap);

      pref_ag:
	/*
	 * Let dbNextAG recommend a preferred allocation group
	 */
	agno = dbNextAG(ipbmap);
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* Try to allocate within this allocation group.  if that fails, try to
	 * allocate anywhere in the map.
	 */
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
		rc = dbAllocAny(bmp, nblocks, l2nb, results);

      write_unlock:
	IWRITE_UNLOCK(ipbmap);

	return (rc);

      read_unlock:
	IREAD_UNLOCK(ipbmap);

	return (rc);
}
/*
 * NAME:	dbReAlloc()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.  if these
 *		blocks are not available, this routine will attempt to
 *		allocate a new set of contiguous blocks large enough
 *		to cover the existing allocation plus the additional
 *		number of blocks required.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *	results	- on successful return, set to the starting block number
 *		  of the existing allocation if the existing allocation
 *		  was extended in place or to a newly allocated contiguous
 *		  range if the existing allocation could not be extended
 *		  in place.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int
dbReAlloc(struct inode *ip,
	  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
{
	int rc;

	/* First attempt: grow the existing extent in place. */
	rc = dbExtend(ip, blkno, nblocks, addnblocks);
	if (rc == 0) {
		*results = blkno;
		return (0);
	}
	if (rc != -ENOSPC)
		return (rc);

	/* In-place extension failed for lack of space: allocate a fresh
	 * contiguous range covering the old extent plus the additional
	 * blocks, hinting at the last block of the current allocation.
	 */
	return (dbAlloc
		(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
}
/*
 * NAME:	dbExtend()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
	/* fixed broken token "staticint" in the definition above */
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	s64 lblkno, lastblkno, extblkno;
	uint rel_block;
	struct metapage *mp;
	struct dmap *dp;
	int rc;
	struct inode *ipbmap = sbi->ipbmap;
	struct bmap *bmp;

	/*
	 * We don't want a non-aligned extent to cross a page boundary
	 */
	if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
	    (rel_block + nblocks + addnblocks > sbi->nbperpage))
		return -ENOSPC;

	/* get the last block of the current allocation */
	lastblkno = blkno + nblocks - 1;

	/* determine the block number of the block following
	 * the existing allocation.
	 */
	extblkno = lastblkno + 1;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* better be within the file system */
	bmp = sbi->bmap;
	if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
		IREAD_UNLOCK(ipbmap);
		jfs_error(ip->i_sb, "the block is outside the filesystem\n");
		return -EIO;
	}

	/* we'll attempt to extend the current allocation in place by
	 * allocating the additional blocks as the blocks immediately
	 * following the current allocation.  we only try to extend the
	 * current allocation in place if the number of additional blocks
	 * can fit into a dmap, the last block of the current allocation
	 * is not the last block of the file system, and the start of the
	 * inplace extension is not on an allocation group boundary.
	 */
	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
	    (extblkno & (bmp->db_agsize - 1)) == 0) {
		IREAD_UNLOCK(ipbmap);
		return -ENOSPC;
	}

	/* get the buffer for the dmap containing the first block
	 * of the extension.
	 */
	lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL) {
		IREAD_UNLOCK(ipbmap);
		return -EIO;
	}

	dp = (struct dmap *) mp->data;

	/* try to allocate the blocks immediately following the
	 * current allocation.
	 */
	rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);

	IREAD_UNLOCK(ipbmap);

	/* were we successful ? */
	if (rc == 0)
		write_metapage(mp);
	else
		/* we were not successful */
		release_metapage(mp);

	return (rc);
}
/*
 * NAME:	dbAllocNext()
 *
 * FUNCTION:	attempt to allocate the blocks of the specified block
 *		range within a dmap.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- starting block number of the range.
 *	nblocks	- number of contiguous free blocks of the range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
 *
 * NOTE(review): the original text of this function was truncated by the
 * extraction after "nw = BUDSIZE(...)" — the word/nwords advance, closing
 * braces, and the final dbAllocDmap() call were missing.  They have been
 * restored below from the function's own structure; verify against the
 * project source.
 */
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks)
{
	int dbitno, word, rembits, nb, nwords, wbitno, nw;
	int l2size;
	s8 *leaf;
	u32 mask;

	/* pick up a pointer to the leaves of the dmap tree.
	 */
	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* check if the specified block range is contained within
	 * this dmap.
	 */
	if (dbitno + nblocks > BPERDMAP)
		return -ENOSPC;

	/* check if the starting leaf indicates that anything
	 * is free.
	 */
	if (leaf[word] == NOFREE)
		return -ENOSPC;

	/* check the dmaps words corresponding to block range to see
	 * if the block range is free.  not all bits of the first and
	 * last words may be contained within the block range.  if this
	 * is the case, we'll work against those words (i.e. partial first
	 * and/or last) on an individual basis (a single pass) and examine
	 * the actual bits to determine if they are free.  a single pass
	 * will be used for all dmap words fully contained within the
	 * specified range.  within this pass, the leaves of the dmap
	 * tree will be examined to determine if the blocks are free.  a
	 * single leaf may describe the free space of multiple dmap
	 * words, so we may visit only a subset of the actual leaves
	 * corresponding to the dmap words of the block range.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of the word is to be examined.
		 */
		if (nb < DBWORD) {
			/* check if the bits are free.
			 */
			mask = (ONES << (DBWORD - nb) >> wbitno);
			if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
				return -ENOSPC;

			word += 1;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and how many bits.
			 */
			nwords = rembits >> L2DBWORD;
			nb = nwords << L2DBWORD;

			/* now examine the appropriate leaves to determine
			 * if the blocks are free.
			 */
			while (nwords > 0) {
				/* does the leaf describe any free space ?
				 */
				if (leaf[word] < BUDMIN)
					return -ENOSPC;

				/* determine the l2 number of bits provided
				 * by this leaf.
				 */
				l2size =
				    min_t(int, leaf[word], NLSTOL2BSZ(nwords));

				/* determine how many words were handled.
				 */
				nw = BUDSIZE(l2size, BUDMIN);

				nwords -= nw;
				word += nw;
			}
		}
	}

	/* allocate the blocks.
	 */
	return (dbAllocDmap(bmp, dp, blkno, nblocks));
}
/*
 * NAME:	dbAllocNear()
 *
 * FUNCTION:	attempt to allocate a number of contiguous free blocks near
 *		a specified block (hint) within a dmap.
 *
 *		starting with the dmap leaf that covers the hint, we'll
 *		check the next four contiguous leaves for sufficient free
 *		space.  if sufficient free space is found, we'll allocate
 *		the desired free space.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- block number to allocate near.
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocNear(struct bmap * bmp,
	    struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
{
	int word, lword, rc;
	s8 *leaf;

	/* pick up a pointer to the leaves of the dmap tree.
	 * (the original read leaf[word] without ever initializing
	 * leaf — an uninitialized-pointer dereference; this is the
	 * same leafidx idiom used by dbAllocNext.)
	 */
	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);

	/* determine the word within the dmap that holds the hint
	 * (i.e. blkno).  also, determine the last word in the dmap
	 * that we'll include in our examination.
	 */
	word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
	lword = min(word + 4, LPERDMAP);

	/* examine the leaves for sufficient free space.
	 */
	for (; word < lword; word++) {
		/* does the leaf describe sufficient free space ?
		 */
		if (leaf[word] < l2nb)
			continue;

		/* determine the block number within the file system
		 * of the first block described by this dmap word.
		 */
		blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);

		/* if not all bits of the dmap word are free, get the
		 * starting bit number within the dmap word of the required
		 * string of free bits and adjust the block number with the
		 * value.
		 */
		if (leaf[word] < BUDMIN)
			blkno +=
			    dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);

		/* allocate the blocks.
		 */
		if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
			*results = blkno;

		return (rc);
	}

	return -ENOSPC;
}
/*
 * NAME:	dbAllocAG()
 *
 * FUNCTION:	attempt to allocate the specified number of contiguous
 *		free blocks within the specified allocation group.
 *
 *		unless the allocation group size is equal to the number
 *		of blocks per dmap, the dmap control pages will be used to
 *		find the required free space, if available.  we start the
 *		search at the highest dmap control page level which
 *		distinctly describes the allocation group's free space
 *		(i.e. the highest level at which the allocation group's
 *		free space is not mixed in with that of any other group).
 *		in addition, we start the search within this level at a
 *		height of the dmapctl dmtree at which the nodes distinctly
 *		describe the allocation group's free space.  at this height,
 *		the allocation group's free space may be represented by 1
 *		or two sub-trees, depending on the allocation group size.
 *		we search the top nodes of these subtrees left to right for
 *		sufficient free space.  if sufficient free space is found,
 *		the subtree is searched to find the leftmost leaf that
 *		has free space.  once we have made it to the leaf, we
 *		move the search to the next lower level dmap control page
 *		corresponding to this leaf.  we continue down the dmap
 *		control pages until we find the dmap that contains or starts
 *		the sufficient free space and we allocate at this dmap.
 *
 *		if the allocation group size is equal to the dmap size,
 *		we'll start at the dmap corresponding to the allocation
 *		group and attempt the allocation at this level.
 *
 *		the dmap control page search is also not performed if the
 *		allocation group is completely free and we go to the first
 *		dmap of the allocation group to do the allocation.  this is
 *		done because the allocation group may be part (not the first
 *		part) of a larger binary buddy system, causing the dmap
 *		control pages to indicate no free space (NOFREE) within
 *		the allocation group.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	agno	- allocation group number.
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * note: IWRITE_LOCK(ipmap) held on entry/exit;
 */
static int
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
	struct metapage *mp;
	struct dmapctl *dcp;
	int rc, ti, i, k, m, n, agperlev;
	s64 blkno, lblkno;
	int budmin;

	/* allocation request should not be for more than the
	 * allocation group size.
	 */
	if (l2nb > bmp->db_agl2size) {
		jfs_error(bmp->db_ipbmap->i_sb,
			  "allocation request is larger than the allocation group size\n");
		return -EIO;
	}

	/* determine the starting block number of the allocation
	 * group.
	 */
	blkno = (s64) agno << bmp->db_agl2size;

	/* check if the allocation group size is the minimum allocation
	 * group size or if the allocation group is completely free. if
	 * the allocation group size is the minimum size of BPERDMAP (i.e.
	 * 1 dmap), there is no need to search the dmap control page (below)
	 * that fully describes the allocation group since the allocation
	 * group is already fully described by a dmap.  in this case, we
	 * just call dbAllocCtl() to search the dmap tree and allocate the
	 * required space if available.
	 *
	 * if the allocation group is completely free, dbAllocCtl() is
	 * also called to allocate the required space.  this is done for
	 * two reasons.  first, it makes no sense searching the dmap control
	 * pages for free space when we know that free space exists.  second,
	 * the dmap control pages may indicate that the allocation group
	 * has no free space if the allocation group is part (not the first
	 * part) of a larger binary buddy system.
	 */
	if (bmp->db_agsize == BPERDMAP
	    || bmp->db_agfree[agno] == bmp->db_agsize) {
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		if ((rc == -ENOSPC) &&
		    (bmp->db_agfree[agno] == bmp->db_agsize)) {
			printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
			       (unsigned long long) blkno,
			       (unsigned long long) nblocks);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocCtl failed in free AG\n");
		}
		return (rc);
	}

	/* the buffer for the dmap control page that fully describes the
	 * allocation group.
	 */
	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL)
		return -EIO;
	dcp = (struct dmapctl *) mp->data;
	budmin = dcp->budmin;

	/* search the subtree(s) of the dmap control page that describes
	 * the allocation group, looking for sufficient free space.  to begin,
	 * determine how many allocation groups are represented in a dmap
	 * control page at the control page level (i.e. L0, L1, L2) that
	 * fully describes an allocation group.  next, determine the starting
	 * tree index of this allocation group within the control page.
	 */
	agperlev =
	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));

	if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) {
		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
		release_metapage(mp);
		return -EIO;
	}

	/* dmap control page trees fan-out by 4 and a single allocation
	 * group may be described by 1 or 2 subtrees within the ag level
	 * dmap control page, depending upon the ag size.  examine the ag's
	 * subtrees for sufficient free space, starting with the leftmost
	 * subtree.
	 */
	for (i = 0; i < bmp->db_agwidth; i++, ti++) {
		/* is there sufficient free space ?
		 */
		if (l2nb > dcp->stree[ti])
			continue;

		/* sufficient free space found in a subtree.  now search down
		 * the subtree to find the leftmost leaf that describes this
		 * free space.
		 */
		for (k = bmp->db_agheight; k > 0; k--) {
			for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
				if (l2nb <= dcp->stree[m + n]) {
					ti = m + n;
					break;
				}
			}
			if (n == 4) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "failed descending stree\n");
				release_metapage(mp);
				return -EIO;
			}
		}

		/* determine the block number within the file system
		 * that corresponds to this leaf.
		 * (fixed broken tokens "elseif" / "else/*".)
		 */
		if (bmp->db_aglevel == 2)
			blkno = 0;
		else if (bmp->db_aglevel == 1)
			blkno &= ~(MAXL1SIZE - 1);
		else		/* bmp->db_aglevel == 0 */
			blkno &= ~(MAXL0SIZE - 1);

		/* NOTE(review): the leaf-offset adjustment below was
		 * missing from the garbled original; without it the
		 * aligned base above ignores which leaf was found.
		 * Restored — verify against the project source.
		 */
		blkno +=
		    ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;

		/* release the buffer in preparation for going down
		 * the next level of dmap control pages.
		 */
		release_metapage(mp);

		/* check if we need to continue to search down the lower
		 * level dmap control pages.  we need to if the number of
		 * blocks required is less than maximum number of blocks
		 * described at the next lower level.
		 */
		if (l2nb < budmin) {

			/* search the lower level dmap control pages to get
			 * the starting block number of the dmap that
			 * contains or starts off the free space.
			 */
			if ((rc =
			     dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
				       &blkno))) {
				if (rc == -ENOSPC) {
					jfs_error(bmp->db_ipbmap->i_sb,
						  "control page inconsistent\n");
					return -EIO;
				}
				return (rc);
			}
		}

		/* allocate the blocks.
		 */
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		if (rc == -ENOSPC) {
			jfs_error(bmp->db_ipbmap->i_sb,
				  "unable to allocate blocks\n");
			rc = -EIO;
		}
		return (rc);
	}

	/* no space in the allocation group.  release the buffer and
	 * return -ENOSPC.
	 */
	release_metapage(mp);

	return -ENOSPC;
}
/*
 * NAME:	dbAllocAny()
 *
 * FUNCTION:	attempt to allocate the specified number of contiguous
 *		free blocks anywhere in the file system.
 *
 *		dbAllocAny() attempts to find the sufficient free space by
 *		searching down the dmap control pages, starting with the
 *		highest level (i.e. L0, L1, L2) control page.  if free space
 *		large enough to satisfy the desired free space is found, the
 *		desired free space is allocated.
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	nblocks	-  actual number of contiguous free blocks desired.
 *	l2nb	-  log2 number of contiguous free blocks desired.
 *	results	-  on successful return, set to the starting block number
 *		   of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
{
	int rc;
	s64 blkno = 0;

	/* starting with the top level dmap control page, search
	 * down the dmap control levels for sufficient free space.
	 * if free space is found, dbFindCtl() returns the starting
	 * block number of the dmap that contains or starts off the
	 * range of free space.
	 */
	if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
		return (rc);

	/* allocate the blocks.
	 */
	rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
	if (rc == -ENOSPC) {
		/* the control pages promised space that dbAllocCtl could
		 * not deliver: the map is inconsistent, report as -EIO.
		 */
		jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");
		return -EIO;
	}
	return (rc);
}
/*
 * NAME:	dbDiscardAG()
 *
 * FUNCTION:	attempt to discard (TRIM) all free blocks of specific AG
 *
 * algorithm:
 * 1) allocate blocks, as large as possible and save them
 *    while holding IWRITE_LOCK on ipbmap
 * 2) trim all these saved block/length values
 * 3) mark the blocks free again
 *
 * benefit:
 * - we work only on one ag at some time, minimizing how long we
 *   need to lock ipbmap
 * - reading / writing the fs is possible most time, even on
 *   trimming
 *
 * downside:
 * - we write two times to the dmapctl and dmap pages
 * - but for me, this seems the best way, better ideas?
 * /TR 2012
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode
 *	agno	- ag to trim
 *	minlen	- minimum value of contiguous blocks
 *
 * RETURN VALUES:
 *	s64	- actual number of blocks trimmed
 */
s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	s64 nblocks, blkno;
	u64 trimmed = 0;
	int rc, l2nb;
	struct super_block *sb = ipbmap->i_sb;

	/* blkno/nblocks pairs of the ranges we temporarily allocated and
	 * will trim; terminated by an entry with nblocks == 0.
	 */
	struct range2trim {
		u64 blkno;
		u64 nblocks;
	} *totrim, *tt;

	/* max blkno / nblocks pairs to trim */
	int count = 0, range_cnt;
	u64 max_ranges;

	/* prevent others from writing new stuff here, while trimming */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

	nblocks = bmp->db_agfree[agno];
	max_ranges = nblocks;
	do_div(max_ranges, minlen);
	range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
	totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
	if (totrim == NULL) {
		jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
		IWRITE_UNLOCK(ipbmap);
		return 0;
	}

	tt = totrim;
	while (nblocks >= minlen) {
		l2nb = BLKSTOL2(nblocks);

		/* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
		rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
		if (rc == 0) {
			tt->blkno = blkno;
			tt->nblocks = nblocks;
			tt++; count++;

			/* the whole ag is free, trim now */
			if (bmp->db_agfree[agno] == 0)
				break;

			/* give a hint for the next while */
			nblocks = bmp->db_agfree[agno];
			continue;
		} else if (rc == -ENOSPC) {
			/* search for next smaller log2 block */
			l2nb = BLKSTOL2(nblocks) - 1;
			if (unlikely(l2nb < 0))
				break;
			nblocks = 1LL << l2nb;
		} else {
			/* Trim any already allocated blocks */
			jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
			break;
		}

		/* check, if our trim array is full */
		if (unlikely(count >= range_cnt - 1))
			break;
	}
	IWRITE_UNLOCK(ipbmap);

	tt->nblocks = 0; /* mark the current end */
	for (tt = totrim; tt->nblocks != 0; tt++) {
		/* when mounted with online discard, dbFree() will
		 * call jfs_issue_discard() itself */
		if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
			jfs_issue_discard(ip, tt->blkno, tt->nblocks);
		dbFree(ip, tt->blkno, tt->nblocks);
		trimmed += tt->nblocks;
	}
	kfree(totrim);

	return trimmed;
}
/*
 * NAME:	dbFindCtl()
 *
 * FUNCTION:	starting at a specified dmap control page level and block
 *		number, search down the dmap control levels for a range of
 *		contiguous free blocks large enough to satisfy an allocation
 *		request for the specified number of free blocks.
 *
 *		if sufficient contiguous free blocks are found, this routine
 *		returns the starting block number within a dmap page that
 *		contains or starts a range of contiguous free blocks that
 *		is sufficient in size.
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	level	-  starting dmap control page level.
 *	l2nb	-  log2 number of contiguous free blocks desired.
 *	*blkno	-  on entry, starting block number for conducting the search.
 *		   on successful return, the first block within a dmap page
 *		   that contains or starts a range of contiguous free blocks.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
	int rc, leafidx, lev;
	s64 b, lblkno;
	struct dmapctl *dcp;
	int budmin;
	struct metapage *mp;

	/* starting at the specified dmap control page level and block
	 * number, search down the dmap control levels for the starting
	 * block number of a dmap page that contains or starts off
	 * sufficient free blocks.
	 */
	for (lev = level, b = *blkno; lev >= 0; lev--) {
		/* get the buffer of the dmap control page for the block
		 * number and level (i.e. L0, L1, L2).
		 */
		lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dcp = (struct dmapctl *) mp->data;
		budmin = dcp->budmin;

		/* search the tree within the dmap control page for
		 * sufficient free space.  if sufficient free space is found,
		 * dbFindLeaf() returns the index of the leaf at which
		 * free space was found.
		 */
		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);

		/* release the buffer.
		 */
		release_metapage(mp);

		/* space found ?
		 */
		if (rc) {
			/* only the starting level may legitimately lack
			 * space; a miss further down means the control
			 * pages disagree with each other.
			 */
			if (lev != level) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "dmap inconsistent\n");
				return -EIO;
			}
			return -ENOSPC;
		}

		/* adjust the block number to reflect the location within
		 * the dmap control page (i.e. the leaf) at which free
		 * space was found.
		 */
		b += (((s64) leafidx) << budmin);

		/* we stop the search at this dmap control page level if
		 * the number of blocks required is greater than or equal
		 * to the maximum number of blocks described at the next
		 * (lower) level.
		 */
		if (l2nb >= budmin)
			break;
	}

	*blkno = b;
	return (0);
}
/*
 * NAME:	dbAllocCtl()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous
 *		blocks starting within a specific dmap.
 *
 *		this routine is called by higher level routines that search
 *		the dmap control pages above the actual dmaps for contiguous
 *		free space.  the result of successful searches by these
 *		routines are the starting block numbers within dmaps, with
 *		the dmaps themselves containing the desired contiguous free
 *		space or starting a contiguous free space of desired size
 *		that is made up of the blocks of one or more dmaps. these
 *		calls should not fail due to insufficient resources.
 *
 *		this routine is called in some cases where it is not known
 *		whether it will fail due to insufficient resources.  more
 *		specifically, this occurs when allocating from an allocation
 *		group whose size is equal to the number of blocks per dmap.
 *		in this case, the dmap control pages are not examined prior
 *		to calling this routine (to save pathlength) and the call
 *		might fail.
 *
 *		for a request size that fits within a dmap, this routine relies
 *		upon the dmap's dmtree to find the requested contiguous free
 *		space.  for request sizes that are larger than a dmap, the
 *		requested free space will start at the first block of the
 *		first dmap (i.e. blkno).
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	nblocks	-  actual number of contiguous free blocks to allocate.
 *	l2nb	-  log2 number of contiguous free blocks to allocate.
 *	blkno	-  starting block number of the dmap to start the allocation
 *		   from.
 *	results	-  on successful return, set to the starting block number
 *		   of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
	int rc, nb;
	s64 b, lblkno, n;
	struct metapage *mp;
	struct dmap *dp;

	/* check if the allocation request is confined to a single dmap.
	 */
	if (l2nb <= L2BPERDMAP) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dp = (struct dmap *) mp->data;

		/* a negative budmin indicates an on-disk corruption of the
		 * dmap tree; bail out before using it for the search.
		 */
		if (dp->tree.budmin < 0) {
			release_metapage(mp);
			return -EIO;
		}

		/* try to allocate the blocks.
		 */
		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
		if (rc == 0)
			mark_metapage_dirty(mp);

		release_metapage(mp);

		return (rc);
	}

	/* allocation request involving multiple dmaps. it must start on
	 * a dmap boundary.
	 */
	assert((blkno & (BPERDMAP - 1)) == 0);

	/* allocate the blocks dmap by dmap.
	 */
	for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			rc = -EIO;
			goto backout;
		}
		dp = (struct dmap *) mp->data;

		/* the dmap better be all free.
		 */
		if (dp->tree.stree[ROOT] != L2BPERDMAP) {
			release_metapage(mp);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "the dmap is not all free\n");
			rc = -EIO;
			goto backout;
		}

		/* determine how many blocks to allocate from this dmap.
		 */
		nb = min_t(s64, n, BPERDMAP);

		/* allocate the blocks from the dmap.
		 */
		if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
			release_metapage(mp);
			goto backout;
		}

		/* write the buffer.
		 */
		write_metapage(mp);
	}

	/* set the results (starting block number) and return.
	 */
	*results = blkno;
	return (0);

	/* something failed in handling an allocation request involving
	 * multiple dmaps.  we'll try to clean up by backing out any
	 * allocation that has already happened for this request.  if
	 * we fail in backing out the allocation, we'll mark the file
	 * system to indicate that blocks have been leaked.
	 */
      backout:

	/* try to backout the allocations dmap by dmap.
	 */
	for (n = nblocks - n, b = blkno; n > 0;
	     n -= BPERDMAP, b += BPERDMAP) {
		/* get the buffer for this dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			/* could not back out.  mark the file system
			 * to indicate that we have leaked blocks.
			 */
			jfs_error(bmp->db_ipbmap->i_sb,
				  "I/O Error: Block Leakage\n");
			continue;
		}
		dp = (struct dmap *) mp->data;

		/* free the blocks is this dmap.
		 */
		if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
			/* could not back out.  mark the file system
			 * to indicate that we have leaked blocks.
			 */
			release_metapage(mp);
			jfs_error(bmp->db_ipbmap->i_sb, "Block Leakage\n");
			continue;
		}

		/* write the buffer.
		 */
		write_metapage(mp);
	}

	return (rc);
}
/*
 * NAME:	dbAllocDmapLev()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous blocks
 *		from a specified dmap.
 *
 *		this routine checks if the contiguous blocks are available.
 *		if so, nblocks of blocks are allocated; otherwise, ENOSPC is
 *		returned.
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	dp	-  pointer to dmap to attempt to allocate blocks from.
 *	l2nb	-  log2 number of contiguous block desired.
 *	nblocks	-  actual number of contiguous block desired.
 *	results	-  on successful return, set to the starting block number
 *		   of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
 *	IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
 */
static int
dbAllocDmapLev(struct bmap * bmp,
	       struct dmap * dp, int nblocks, int l2nb, s64 * results)
{
	s64 blkno;
	int leafidx, rc;

	/* can't be more than a dmaps worth of blocks */
	assert(l2nb <= L2BPERDMAP);

	/* search the tree within the dmap page for sufficient
	 * free space.  if sufficient free space is found, dbFindLeaf()
	 * returns the index of the leaf at which free space was found.
	 */
	if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
		return -ENOSPC;

	/* a negative leaf index from dbFindLeaf() indicates a corrupt
	 * dmap tree; treat it as an i/o error rather than indexing with it.
	 */
	if (leafidx < 0)
		return -EIO;

	/* determine the block number within the file system corresponding
	 * to the leaf at which free space was found.
	 */
	blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);

	/* if not all bits of the dmap word are free, get the starting
	 * bit number within the dmap word of the required string of free
	 * bits and adjust the block number with this value.
	 */
	if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
		blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);

	/* allocate the blocks */
	if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
		*results = blkno;

	return (rc);
}
/*
 * NAME:	dbAllocDmap()
 *
 * FUNCTION:	adjust the disk allocation map to reflect the allocation
 *		of a specified block range within a dmap.
 *
 *		this routine allocates the specified blocks from the dmap
 *		through a call to dbAllocBits(). if the allocation of the
 *		block range causes the maximum string of free blocks within
 *		the dmap to change (i.e. the value of the root of the dmap's
 *		dmtree), this routine will cause this change to be reflected
 *		up through the appropriate levels of the dmap control pages
 *		by a call to dbAdjCtl() for the L0 dmap control page that
 *		covers this dmap.
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	dp	-  pointer to dmap to allocate the block range from.
 *	blkno	-  starting block number of the block to be allocated.
 *	nblocks	-  number of blocks to be allocated.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks)
{
	s8 oldroot;
	int rc;

	/* save the current value of the root (i.e. maximum free string)
	 * of the dmap tree.
	 */
	oldroot = dp->tree.stree[ROOT];

	/* allocate the specified (blocks) bits.  without this call the
	 * root comparison below would trivially succeed and nothing
	 * would ever be allocated.
	 */
	dbAllocBits(bmp, dp, blkno, nblocks);

	/* if the root has not changed, done. */
	if (dp->tree.stree[ROOT] == oldroot)
		return (0);

	/* root changed. bubble the change up to the dmap control pages.
	 * if the adjustment of the upper level control pages fails,
	 * backout the bit allocation (thus making everything consistent).
	 */
	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
		dbFreeBits(bmp, dp, blkno, nblocks);

	return (rc);
}
/*
 * NAME:	dbFreeDmap()
 *
 * FUNCTION:	adjust the disk allocation map to reflect the deallocation
 *		of a specified block range within a dmap.
 *
 *		this routine frees the specified blocks from the dmap through
 *		a call to dbFreeBits(). if the deallocation of the block range
 *		causes the maximum string of free blocks within the dmap to
 *		change (i.e. the value of the root of the dmap's dmtree), this
 *		routine will cause this change to be reflected up through the
 *		appropriate levels of the dmap control pages by a call to
 *		dbAdjCtl() for the L0 dmap control page that covers this dmap.
 *
 * PARAMETERS:
 *	bmp	-  pointer to bmap descriptor
 *	dp	-  pointer to dmap to free the block range from.
 *	blkno	-  starting block number of the block to be freed.
 *	nblocks	-  number of blocks to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks)
{
	s8 oldroot;
	int rc = 0, word;

	/* save the current value of the root (i.e. maximum free string)
	 * of the dmap tree.
	 */
	oldroot = dp->tree.stree[ROOT];

	/* free the specified (blocks) bits.  without this call rc stays 0
	 * and the root never changes, so the blocks would never actually
	 * be returned to the free map.
	 */
	rc = dbFreeBits(bmp, dp, blkno, nblocks);

	/* if error or the root has not changed, done. */
	if (rc || (dp->tree.stree[ROOT] == oldroot))
		return (rc);

	/* root changed. bubble the change up to the dmap control pages.
	 * if the adjustment of the upper level control pages fails,
	 * backout the deallocation.
	 */
	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
		word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;

		/* as part of backing out the deallocation, we will have
		 * to back split the dmap tree if the deallocation caused
		 * the freed blocks to become part of a larger binary buddy
		 * system.
		 */
		if (dp->tree.stree[word] == NOFREE)
			dbBackSplit((dmtree_t *)&dp->tree, word, false);

		dbAllocBits(bmp, dp, blkno, nblocks);
	}

	return (rc);
}
/* * NAME: dbAllocBits() * * FUNCTION: allocate a specified block range from a dmap. * * this routine updates the dmap to reflect the working * state allocation of the specified block range. it directly * updates the bits of the working map and causes the adjustment * of the binary buddy system described by the dmap's dmtree * leaves to reflect the bits allocated. it also causes the * dmap's dmtree, as a whole, to reflect the allocated range. * * PARAMETERS: * bmp - pointer to bmap descriptor * dp - pointer to dmap to allocate bits from. * blkno - starting block number of the bits to be allocated. * nblocks - number of bits to be allocated. * * RETURN VALUES: none * * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/ staticvoid dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks)
{ int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree; int size;
s8 *leaf;
/* pick up a pointer to the leaves of the dmap tree */
leaf = dp->tree.stree + LEAFIND;
/* determine the bit number and word within the dmap of the * starting block.
*/
dbitno = blkno & (BPERDMAP - 1);
word = dbitno >> L2DBWORD;
/* block range better be within the dmap */
assert(dbitno + nblocks <= BPERDMAP);
/* allocate the bits of the dmap's words corresponding to the block * range. not all bits of the first and last words may be contained * within the block range. if this is the case, we'll work against * those words (i.e. partial first and/or last) on an individual basis * (a single pass), allocating the bits of interest by hand and * updating the leaf corresponding to the dmap word. a single pass * will be used for all dmap words fully contained within the * specified range. within this pass, the bits of all fully contained * dmap words will be marked as free in a single shot and the leaves * will be updated. a single leaf may describe the free space of * multiple dmap words, so we may update only a subset of the actual * leaves corresponding to the dmap words of the block range.
*/ for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { /* determine the bit number within the word and * the number of bits within the word.
*/
wbitno = dbitno & (DBWORD - 1);
nb = min(rembits, DBWORD - wbitno);
/* check if only part of a word is to be allocated.
*/ if (nb < DBWORD) { /* allocate (set to 1) the appropriate bits within * this dmap word.
*/
dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
>> wbitno);
/* update the leaf for this dmap word. in addition * to setting the leaf value to the binary buddy max * of the updated dmap word, dbSplit() will split * the binary system of the leaves if need be.
*/
dbSplit(tp, word, BUDMIN,
dbMaxBud((u8 *)&dp->wmap[word]), false);
word += 1;
} else { /* one or more dmap words are fully contained * within the block range. determine how many * words and allocate (set to 1) the bits of these * words.
*/
nwords = rembits >> L2DBWORD;
memset(&dp->wmap[word], (int) ONES, nwords * 4);
/* determine how many bits.
*/
nb = nwords << L2DBWORD;
/* now update the appropriate leaves to reflect * the allocated words.
*/ for (; nwords > 0; nwords -= nw) { if (leaf[word] < BUDMIN) {
jfs_error(bmp->db_ipbmap->i_sb, "leaf page corrupt\n"); break;
}
/* determine what the leaf value should be * updated to as the minimum of the l2 number * of bits being allocated and the l2 number * of bits currently described by this leaf.
*/
size = min_t(int, leaf[word],
NLSTOL2BSZ(nwords));
/* update the leaf to reflect the allocation. * in addition to setting the leaf value to * NOFREE, dbSplit() will split the binary * system of the leaves to reflect the current * allocation (size).
*/
dbSplit(tp, word, size, NOFREE, false);
/* get the number of dmap words handled */
nw = BUDSIZE(size, BUDMIN);
/*
 * NOTE(review): the remainder of this file -- the tail of dbAllocBits()
 * and all subsequent functions -- was lost to extraction truncation
 * ("maximum size reached" marker plus unrelated website boilerplate
 * followed here).  Restore the missing text from the original source
 * before attempting to build.
 */