// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * file.c
 */
/*
 * This file contains code for handling regular files.  A regular file
 * consists of a sequence of contiguous compressed blocks, and/or a
 * compressed fragment block (tail-end packed block).  The compressed size
 * of each datablock is stored in a block list contained within the
 * file inode (itself stored in one or more compressed metadata blocks).
 *
 * To speed up access to datablocks when reading 'large' files (256 Mbytes or
 * larger), the code implements an index cache that caches the mapping from
 * block index to datablock location on disk.
 *
 * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
 * retaining a simple and space-efficient block list on disk.  The cache
 * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
 * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
 * The index cache is designed to be memory efficient, and by default uses
 * 16 KiB.
 */
/* * Locate cache slot in range [offset, index] for specified inode. If * there's more than one return the slot closest to index.
*/ staticstruct meta_index *locate_meta_index(struct inode *inode, int offset, int index)
{ struct meta_index *meta = NULL; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int i;
mutex_lock(&msblk->meta_index_mutex);
TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
if (msblk->meta_index == NULL) goto not_allocated;
for (i = 0; i < SQUASHFS_META_SLOTS; i++) { if (msblk->meta_index[i].inode_number == inode->i_ino &&
msblk->meta_index[i].offset >= offset &&
msblk->meta_index[i].offset <= index &&
msblk->meta_index[i].locked == 0) {
TRACE("locate_meta_index: entry %d, offset %d\n", i,
msblk->meta_index[i].offset);
meta = &msblk->meta_index[i];
offset = meta->offset;
}
}
/* * Find and initialise an empty cache slot for index offset.
*/ staticstruct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
{ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct meta_index *meta = NULL; int i;
if (msblk->meta_index == NULL) { /* * First time cache index has been used, allocate and * initialise. The cache index could be allocated at * mount time but doing it here means it is allocated only * if a 'large' file is read.
*/
msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS, sizeof(*(msblk->meta_index)), GFP_KERNEL); if (msblk->meta_index == NULL) {
ERROR("Failed to allocate meta_index\n"); goto failed;
} for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
msblk->meta_index[i].inode_number = 0;
msblk->meta_index[i].locked = 0;
}
msblk->next_meta_index = 0;
}
for (i = SQUASHFS_META_SLOTS; i &&
msblk->meta_index[msblk->next_meta_index].locked; i--)
msblk->next_meta_index = (msblk->next_meta_index + 1) %
SQUASHFS_META_SLOTS;
if (i == 0) {
TRACE("empty_meta_index: failed!\n"); goto failed;
}
TRACE("empty_meta_index: returned meta entry %d, %p\n",
msblk->next_meta_index,
&msblk->meta_index[msblk->next_meta_index]);
meta = &msblk->meta_index[msblk->next_meta_index];
msblk->next_meta_index = (msblk->next_meta_index + 1) %
SQUASHFS_META_SLOTS;
/* * Read the next n blocks from the block list, starting from * metadata block <start_block, offset>.
*/ staticlonglong read_indexes(struct super_block *sb, int n,
u64 *start_block, int *offset)
{ int err, i; longlong block = 0;
__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (blist == NULL) {
ERROR("read_indexes: Failed to allocate block_list\n"); return -ENOMEM;
}
while (n) { int blocks = min_t(int, n, PAGE_SIZE >> 2);
for (i = 0; i < blocks; i++) { int size = squashfs_block_size(blist[i]); if (size < 0) {
err = size; goto failure;
}
block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
}
n -= blocks;
}
kfree(blist); return block;
failure:
kfree(blist); return err;
}
/* * Each cache index slot has SQUASHFS_META_ENTRIES, each of which * can cache one index -> datablock/blocklist-block mapping. We wish * to distribute these over the length of the file, entry[0] maps index x, * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on. * The larger the file, the greater the skip factor. The skip factor is * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure * the number of metadata blocks that need to be read fits into the cache. * If the skip factor is limited in this way then the file will use multiple * slots.
*/ staticinlineint calculate_skip(u64 blocks)
{
u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
* SQUASHFS_META_INDEXES); return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}
/* * Search and grow the index cache for the specified inode, returning the * on-disk locations of the datablock and block list metadata block * <index_block, index_offset> for index (scaled to nearest cache index).
*/ staticint fill_meta_index(struct inode *inode, int index,
u64 *index_block, int *index_offset, u64 *data_block)
{ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int skip = calculate_skip(i_size_read(inode) >> msblk->block_log); int offset = 0; struct meta_index *meta; struct meta_entry *meta_entry;
u64 cur_index_block = squashfs_i(inode)->block_list_start; int cur_offset = squashfs_i(inode)->offset;
u64 cur_data_block = squashfs_i(inode)->start; int err, i;
/* * Scale index to cache index (cache slot entry)
*/
index /= SQUASHFS_META_INDEXES * skip;
while (offset < index) {
meta = locate_meta_index(inode, offset + 1, index);
/* * If necessary grow cache slot by reading block list. Cache * slot is extended up to index or to the end of the slot, in * which case further slots will be used.
*/ for (i = meta->offset + meta->entries; i <= index &&
i < meta->offset + SQUASHFS_META_ENTRIES; i++) { int blocks = skip * SQUASHFS_META_INDEXES; longlong res = read_indexes(inode->i_sb, blocks,
&cur_index_block, &cur_offset);
if (res < 0) { if (meta->entries == 0) /* * Don't leave an empty slot on read * error allocated to this inode...
*/
meta->inode_number = 0;
err = res; goto failed;
}
/* * Get the on-disk location and compressed size of the datablock * specified by index. Fill_meta_index() does most of the work.
*/ staticint read_blocklist(struct inode *inode, int index, u64 *block)
{
u64 start; longlong blks; int offset;
__le32 size; int res = fill_meta_index(inode, index, &start, &offset, block);
TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset" " 0x%x, block 0x%llx\n", res, index, start, offset,
*block);
if (res < 0) return res;
/* * res contains the index of the mapping returned by fill_meta_index(), * this will likely be less than the desired index (because the * meta_index cache works at a higher granularity). Read any * extra block indexes needed.
*/ if (res < index) {
blks = read_indexes(inode->i_sb, index - res, &start, &offset); if (blks < 0) return (int) blks;
*block += blks;
}
/* * Read length of block specified by index.
*/
res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset, sizeof(size)); if (res < 0) return res; return squashfs_block_size(size);
}
/* * Loop copying datablock into pages. As the datablock likely covers * many PAGE_SIZE pages (default block size is 128 KiB) explicitly * grab the pages from the page cache, except for the page that we've * been called to fill.
*/ for (i = start_index; i <= end_index && bytes > 0; i++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) { struct folio *push_folio;
size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0; bool updated = false;
TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);
push_folio = (i == folio->index) ? folio :
__filemap_get_folio(mapping, i,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
mapping_gfp_mask(mapping));
if (IS_ERR(push_folio)) continue;
if (folio_test_uptodate(push_folio)) goto skip_folio;
updated = squashfs_fill_page(push_folio, buffer, offset, avail);
skip_folio:
folio_end_read(push_folio, updated); if (i != folio->index)
folio_put(push_folio);
}
}
/* Read datablock stored packed inside a fragment (tail-end packed block) */ staticint squashfs_readpage_fragment(struct folio *folio, int expected)
{ struct inode *inode = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size); int res = buffer->error;
if (res)
ERROR("Unable to read page, block %llx, size %x\n",
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size); else
squashfs_copy_cache(folio, buffer, expected,
squashfs_i(inode)->fragment_offset);
/*
 * NOTE(review): the following text is web-scrape residue (a German website
 * disclaimer), not part of this source file; it should be removed.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the coloured syntax display and the measurement are still
 * experimental."
 */