/* These macros may change in future, to provide better st_ino semantics. */ #define OFFSET(x) ((x)->i_ino)
staticunsignedlong cramino(conststruct cramfs_inode *cino, unsignedint offset)
{ if (!cino->offset) return offset + 1; if (!cino->size) return offset + 1;
/* * The file mode test fixes buggy mkcramfs implementations where * cramfs_inode->offset is set to a non zero value for entries * which did not contain data, like devices node and fifos.
*/ switch (cino->mode & S_IFMT) { case S_IFREG: case S_IFDIR: case S_IFLNK: return cino->offset << 2; default: break;
} return offset + 1;
}
/* if the lower 2 bits are zero, the inode contains data */ if (!(inode->i_ino & 3)) {
inode->i_size = cramfs_inode->size;
inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
}
/* Struct copy intentional */
inode_set_mtime_to_ts(inode,
inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime))); /* inode->i_nlink is left 1 - arguably wrong for directories, but it's the best we can do without reading the directory contents. 1 yields the right result in GNU find, even
without -noleaf option. */
unlock_new_inode(inode);
return inode;
}
/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 *
 * Note: This is all optimized away at compile time when
 * CONFIG_CRAMFS_BLOCKDEV=n.
 */
#define READ_BUFFERS		(2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix)	((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)
/* * Returns a pointer to a buffer containing at least LEN bytes of * filesystem starting at byte offset OFFSET into the filesystem.
*/ staticvoid *cramfs_read(struct super_block *sb, unsignedint offset, unsignedint len)
{ struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
/* * For a mapping to be possible, we need a range of uncompressed and * contiguous blocks. Return the offset for the first block and number of * valid blocks for which that is true, or zero otherwise.
*/ static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{ struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb); int i;
u32 *blockptrs, first_block_addr;
/* * We can dereference memory directly here as this code may be * reached only when there is a direct filesystem image mapping * available in memory.
*/
blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
i = 0; do {
u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
u32 expect = (first_block_addr + block_off) |
CRAMFS_BLK_FLAG_DIRECT_PTR |
CRAMFS_BLK_FLAG_UNCOMPRESSED; if (blockptrs[i] != expect) {
pr_debug("range: block %d/%d got %#x expects %#x\n",
pgoff+i, pgoff + *pages - 1,
blockptrs[i], expect); if (i == 0) return 0; break;
}
} while (++i < *pages);
/* * Return true if the last page of a file in the filesystem image contains * some other data that doesn't belong to that file. It is assumed that the * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED * (verified by cramfs_get_block_range()) and directly accessible in memory.
*/ staticbool cramfs_last_page_is_shared(struct inode *inode)
{ struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
u32 partial, last_page, blockaddr, *blockptrs; char *tail_data;
offset = cramfs_get_block_range(inode, pgoff, &pages);
bailout_reason = "unsuitable block layout"; if (!offset) goto bailout;
address = sbi->linear_phys_addr + offset;
bailout_reason = "data is not page aligned"; if (!PAGE_ALIGNED(address)) goto bailout;
/* Don't map the last page if it contains some other data */ if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
pr_debug("mmap: %pD: last page is shared\n", file);
pages--;
}
if (!pages) {
bailout_reason = "no suitable block remaining"; goto bailout;
}
if (pages == vma_pages(vma)) { /* * The entire vma is mappable. remap_pfn_range() will * make it distinguishable from a non-direct mapping * in /proc/<pid>/maps by substituting the file offset * with the actual physical address.
*/
ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
pages * PAGE_SIZE, vma->vm_page_prot);
} else { /* * Let's create a mixed map if we can't map it all. * The normal paging machinery will take care of the * unpopulated ptes via cramfs_read_folio().
*/ int i;
vm_flags_set(vma, VM_MIXEDMAP); for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf; unsignedlong off = i * PAGE_SIZE;
vmf = vmf_insert_mixed(vma, vma->vm_start + off,
PHYS_PFN(address + off)); if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
}
}
bailout:
pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
file, pgoff, bailout_reason); /* Didn't manage any direct map, but normal paging is still possible */ return 0;
}
/* We don't know the real size yet */
sbi->size = PAGE_SIZE;
/* Read the first block and get the superblock from it */
mutex_lock(&read_mutex);
memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
mutex_unlock(&read_mutex);
/* Do sanity checks on the superblock */ if (super->magic != CRAMFS_MAGIC) { /* check for wrong endianness */ if (super->magic == CRAMFS_MAGIC_WEND) { if (!silent)
errorfc(fc, "wrong endianness"); return -EINVAL;
}
/* get feature flags first */ if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
errorfc(fc, "unsupported filesystem features"); return -EINVAL;
}
/* Check that the root inode is in a sane state */ if (!S_ISDIR(super->root.mode)) {
errorfc(fc, "root is not a directory"); return -EINVAL;
} /* correct strange, hard-coded permissions of mkcramfs */
super->root.mode |= 0555;
/* Map only one page for now. Will remap it when fs size is known. */
err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
&sbi->linear_virt_addr, &sbi->linear_phys_addr); if (err || sbi->mtd_point_size != PAGE_SIZE) {
pr_err("unable to get direct memory access to mtd:%s\n",
sb->s_mtd->name); return err ? : -ENODATA;
}
pr_info("checking physical address %pap for linear cramfs image\n",
&sbi->linear_phys_addr);
err = cramfs_read_super(sb, fc, &super); if (err) return err;
/* Remap the whole filesystem now */
pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
sb->s_mtd->name, sbi->size/1024);
mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
&sbi->linear_virt_addr, &sbi->linear_phys_addr); if (err || sbi->mtd_point_size != sbi->size) {
pr_err("unable to get direct memory access to mtd:%s\n",
sb->s_mtd->name); return err ? : -ENODATA;
}
/* Offset within the thing. */ if (ctx->pos >= inode->i_size) return 0;
offset = ctx->pos; /* Directory entries are always 4-byte aligned */ if (offset & 3) return -EINVAL;
buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL); if (!buf) return -ENOMEM;
while (offset < inode->i_size) { struct cramfs_inode *de; unsignedlong nextoffset; char *name;
ino_t ino;
umode_t mode; int namelen;
mutex_lock(&read_mutex);
de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
name = (char *)(de+1);
/* * Namelengths on disk are shifted by two * and the name padded out to 4-byte boundaries * with zeroes.
*/
namelen = de->namelen << 2;
memcpy(buf, name, namelen);
ino = cramino(de, OFFSET(inode) + offset);
mode = de->mode;
mutex_unlock(&read_mutex);
nextoffset = offset + sizeof(*de) + namelen; for (;;) { if (!namelen) {
kfree(buf); return -EIO;
} if (buf[namelen-1]) break;
namelen--;
} if (!dir_emit(ctx, buf, namelen, ino, mode >> 12)) break;
if (direct) { /* * The block pointer is an absolute start pointer, * shifted by 2 bits. The size is included in the * first 2 bytes of the data block when compressed, * or PAGE_SIZE otherwise.
*/
block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT; if (uncompressed) {
block_len = PAGE_SIZE; /* if last block: cap to file length */ if (folio->index == maxblock - 1)
block_len =
offset_in_page(inode->i_size);
} else {
block_len = *(u16 *)
cramfs_read(sb, block_start, 2);
block_start += 2;
}
} else { /* * The block pointer indicates one past the end of * the current block (start of next block). If this * is the first block then it starts where the block * pointer table ends, otherwise its start comes * from the previous block's pointer.
*/
block_start = OFFSET(inode) + maxblock * 4; if (folio->index)
block_start = *(u32 *)
cramfs_read(sb, blkptr_offset - 4, 4); /* Beware... previous ptr might be a direct ptr */ if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) { /* See comments on earlier code. */
u32 prev_start = block_start;
block_start = prev_start & ~CRAMFS_BLK_FLAGS;
block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT; if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
block_start += PAGE_SIZE;
} else {
block_len = *(u16 *)
cramfs_read(sb, block_start, 2);
block_start += 2 + block_len;
}
}
block_start &= ~CRAMFS_BLK_FLAGS;
block_len = block_ptr - block_start;
}
/*
 * Instantiate the filesystem tree for a mount request: try the MTD
 * backend first when it is built in, then fall back to a regular block
 * device.  Returns 0 on success, -ENOPROTOOPT when neither backend is
 * configured, or the last backend's error code.
 */
static int cramfs_get_tree(struct fs_context *fc)
{
	int err = -ENOPROTOOPT;

	if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
		err = get_tree_mtd(fc, cramfs_mtd_fill_super);
		if (err == 0)
			return 0;
	}

	if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
		err = get_tree_bdev(fc, cramfs_blkdev_fill_super);

	return err;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.