/*
 * Walk every page cached in [start, end] for @inode and apply the PROCESS_*
 * actions in @flags:
 *
 *   PROCESS_TEST_LOCKED - count pages that are NOT locked (the return value
 *                         is this count, so 0 means "all pages were locked")
 *   PROCESS_UNLOCK      - unlock each page that is currently locked
 *   PROCESS_RELEASE     - drop one reference on each page
 *
 * Returns the number of unlocked pages seen when PROCESS_TEST_LOCKED is set,
 * otherwise 0.  Bails out with an error message if the batch walk fails to
 * make forward progress for an unreasonable number of iterations.
 */
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		/* Grabs up to a batch of contiguous folios and advances index. */
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
				end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		/*
		 * Safety valve: if index stops advancing (e.g. a hole in the
		 * page cache confuses the walk) don't spin forever in a test.
		 */
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, ret %d\n",
				start, end, ret);
			break;
		}
	}

	return count;
}
root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) {
test_std_err(TEST_ALLOC_ROOT);
ret = PTR_ERR(root); goto out;
}
inode = btrfs_new_test_inode(); if (!inode) {
test_std_err(TEST_ALLOC_INODE);
ret = -ENOMEM; goto out;
}
tmp = &BTRFS_I(inode)->io_tree;
BTRFS_I(inode)->root = root;
/* * Passing NULL as we don't have fs_info but tracepoints are not used * at this point
*/
btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
/* * First go through and create and mark all of our pages dirty, we pin * everything to make sure our pages don't get evicted and screw up our * test.
*/ for (pgoff_t index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) {
test_err("failed to allocate test page");
ret = -ENOMEM; goto out;
}
SetPageDirty(page); if (index) {
unlock_page(page);
} else {
get_page(page);
locked_page = page;
}
}
/* Test this scenario * |--- delalloc ---| * |--- search ---|
*/
btrfs_set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
start = 0;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end); if (!found) {
test_err("should have found at least one delalloc"); goto out_bits;
} if (start != 0 || end != (sectorsize - 1)) {
test_err("expected start 0 end %u, got start %llu end %llu",
sectorsize - 1, start, end); goto out_bits;
}
btrfs_unlock_extent(tmp, start, end, NULL);
unlock_page(locked_page);
put_page(locked_page);
/* * Test this scenario * * |--- delalloc ---| * |--- search ---|
*/
test_start = SZ_64M;
locked_page = find_lock_page(inode->i_mapping,
test_start >> PAGE_SHIFT); if (!locked_page) {
test_err("couldn't find the locked page"); goto out_bits;
}
btrfs_set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end); if (!found) {
test_err("couldn't find delalloc in our range"); goto out_bits;
} if (start != test_start || end != max_bytes - 1) {
test_err("expected start %llu end %llu, got start %llu, end %llu",
test_start, max_bytes - 1, start, end); goto out_bits;
} if (process_page_range(inode, start, end,
PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
test_err("there were unlocked pages in the range"); goto out_bits;
}
btrfs_unlock_extent(tmp, start, end, NULL); /* locked_page was unlocked above */
put_page(locked_page);
/* * Test this scenario * |--- delalloc ---| * |--- search ---|
*/
test_start = max_bytes + sectorsize;
locked_page = find_lock_page(inode->i_mapping, test_start >>
PAGE_SHIFT); if (!locked_page) {
test_err("couldn't find the locked page"); goto out_bits;
}
start = test_start;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end); if (found) {
test_err("found range when we shouldn't have"); goto out_bits;
} if (end != test_start + PAGE_SIZE - 1) {
test_err("did not return the proper end offset"); goto out_bits;
}
/* * Test this scenario * [------- delalloc -------| * [max_bytes]|-- search--| * * We are re-using our test_start from above since it works out well.
*/
btrfs_set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end); if (!found) {
test_err("didn't find our range"); goto out_bits;
} if (start != test_start || end != total_dirty - 1) {
test_err("expected start %llu end %llu, got start %llu end %llu",
test_start, total_dirty - 1, start, end); goto out_bits;
} if (process_page_range(inode, start, end,
PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
test_err("pages in range were not all locked"); goto out_bits;
}
btrfs_unlock_extent(tmp, start, end, NULL);
/* * Now to test where we run into a page that is no longer dirty in the * range we want to find.
*/
page = find_get_page(inode->i_mapping,
(max_bytes + SZ_1M) >> PAGE_SHIFT); if (!page) {
test_err("couldn't find our page"); goto out_bits;
}
ClearPageDirty(page);
put_page(page);
/* We unlocked it in the previous test */
lock_page(locked_page);
start = test_start;
end = start + PAGE_SIZE - 1; /* * Currently if we fail to find dirty pages in the delalloc range we * will adjust max_bytes down to PAGE_SIZE and then re-search. If * this changes at any point in the future we will need to fix this * tests expected behavior.
*/
found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end); if (!found) {
test_err("didn't find our range"); goto out_bits;
} if (start != test_start && end != test_start + PAGE_SIZE - 1) {
test_err("expected start %llu end %llu, got start %llu end %llu",
test_start, test_start + PAGE_SIZE - 1, start, end); goto out_bits;
} if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
PROCESS_UNLOCK)) {
test_err("pages in range were not all locked"); goto out_bits;
}
ret = 0;
out_bits: if (ret)
dump_extent_io_tree(tmp);
btrfs_clear_extent_bit(tmp, 0, total_dirty - 1, (unsigned)-1, NULL);
out: if (locked_page)
put_page(locked_page);
process_page_range(inode, 0, total_dirty - 1,
PROCESS_UNLOCK | PROCESS_RELEASE);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info); return ret;
}
test_err( "bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
i / BITS_PER_BYTE, i % BITS_PER_BYTE,
i / BITS_PER_BYTE, has, expect); return -EINVAL;
}
} return 0;
}
/* * Generate a wonky pseudo-random bit pattern for the sake of not using * something repetitive that could miss some hypothetical off-by-n bug.
*/
x = 0;
ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
byte_len * BITS_PER_BYTE); if (ret < 0) return ret;
for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU; for (j = 0; j < 32; j++) { if (x & (1U << j)) {
bitmap_set(bitmap, i * 32 + j, 1);
extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
}
}
}
ret = check_eb_bitmap(bitmap, eb); if (ret) {
test_err("random bit pattern failed"); return ret;
}
bitmap = kmalloc(nodesize, GFP_KERNEL); if (!bitmap) {
test_err("couldn't allocate test bitmap");
ret = -ENOMEM; goto out;
}
eb = alloc_dummy_extent_buffer(fs_info, 0); if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM; goto out;
}
ret = __test_eb_bitmaps(bitmap, eb); if (ret) goto out;
free_extent_buffer(eb);
/* * Test again for case where the tree block is sectorsize aligned but * not nodesize aligned.
*/
eb = alloc_dummy_extent_buffer(fs_info, sectorsize); if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM; goto out;
}
/* Test correct handling of empty tree */
btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); if (start != 0 || end != -1) {
test_err( "error getting a range from completely empty tree: start %llu end %llu",
start, end); goto out;
} /* * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between * 4M-32M
*/
btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
if (start != 0 || end != SZ_1M - 1) {
test_err("error finding beginning range: start %llu end %llu",
start, end); goto out;
}
/* Now add 32M-64M so that we have a hole between 4M-32M */
btrfs_set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
/* * Request first hole starting at 12M, we should get 4M-32M
*/
btrfs_find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding trimmed range: start %llu end %llu",
start, end); goto out;
}
/* * Search in the middle of allocated range, should get the next one * available, which happens to be unallocated -> 4M-32M
*/
btrfs_find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
CHUNK_TRIMMED | CHUNK_ALLOCATED);
if (start != SZ_4M || end != SZ_32M - 1) {
test_err("error finding next unalloc range: start %llu end %llu",
start, end); goto out;
}
/* * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag * being unset in this range, we should get the entry in range 64M-72M
*/
btrfs_set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
btrfs_find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
CHUNK_TRIMMED);
if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding exact range: start %llu end %llu",
start, end); goto out;
}
/* * Search in the middle of set range whose immediate neighbour doesn't * have the bits set so it must be returned
*/ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
test_err("error finding next alloc range: start %llu end %llu",
start, end); goto out;
}
/* * Search beyond any known range, shall return after last known range * and end should be -1
*/
btrfs_find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); if (start != SZ_64M + SZ_8M || end != -1) {
test_err( "error handling beyond end of range search: start %llu end %llu",
start, end); goto out;
}
ret = 0;
out: if (ret)
dump_extent_io_tree(&tree);
btrfs_clear_extent_bit(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.