// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */
/** * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not. * @folio: Folio to be checked. * * Return: false if the folio has dirty buffers, true otherwise.
*/ bool nilfs_folio_buffers_clean(struct folio *folio)
{ struct buffer_head *bh, *head;
bh = head = folio_buffers(folio); do { if (buffer_dirty(bh)) returnfalse;
bh = bh->b_this_page;
} while (bh != head); returntrue;
}
/**
 * nilfs_copy_folio -- copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads.
 *
 * This function is for both data folios and btnode folios.  The dirty flag
 * should be treated by caller.  The folio must not be under i/o.
 * Both src and dst folio must be locked.
 *
 * NOTE(review): the original text of this function was truncated (sbh/dbh
 * were used uninitialized); the buffer setup/copy section below was restored
 * from the upstream nilfs2 implementation — verify against project history.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
			     bool copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(folio_test_writeback(dst));

	sbh = folio_buffers(src);
	dbh = folio_buffers(dst);
	if (!dbh)
		dbh = create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	/*
	 * Copy buffer state and block mapping while holding each pair of
	 * buffers locked; the buffers stay locked until the second loop
	 * below so the states cannot change under us.
	 */
	dbufs = dbh;
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
		folio_mark_uptodate(dst);
	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
		folio_clear_uptodate(dst);
	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
		folio_set_mappedtodisk(dst);
	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
		folio_clear_mappedtodisk(dst);

	/* Release the buffer locks taken in the first loop. */
	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
int nilfs_copy_dirty_pages(struct address_space *dmap, struct address_space *smap)
{ struct folio_batch fbatch; unsignedint i;
pgoff_t index = 0; int err = 0;
folio_batch_init(&fbatch);
repeat: if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
PAGECACHE_TAG_DIRTY, &fbatch)) return 0;
for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i], *dfolio;
folio_lock(folio); if (unlikely(!folio_test_dirty(folio)))
NILFS_FOLIO_BUG(folio, "inconsistent dirty state");
dfolio = filemap_grab_folio(dmap, folio->index); if (IS_ERR(dfolio)) { /* No empty page is added to the page cache */
folio_unlock(folio);
err = PTR_ERR(dfolio); break;
} if (unlikely(!folio_buffers(folio)))
NILFS_FOLIO_BUG(folio, "found empty page in dat page cache");
/** * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache * @dmap: destination page cache * @smap: source page cache * * No pages must be added to the cache during this process. * This must be ensured by the caller.
*/ void nilfs_copy_back_pages(struct address_space *dmap, struct address_space *smap)
{ struct folio_batch fbatch; unsignedint i, n;
pgoff_t start = 0;
folio_batch_init(&fbatch);
repeat:
n = filemap_get_folios(smap, &start, ~0UL, &fbatch); if (!n) return;
for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i], *dfolio;
pgoff_t index = folio->index;
folio_lock(folio);
dfolio = filemap_lock_folio(dmap, index); if (!IS_ERR(dfolio)) { /* overwrite existing folio in the destination cache */
WARN_ON(folio_test_dirty(dfolio));
nilfs_copy_folio(dfolio, folio, false);
folio_unlock(dfolio);
folio_put(dfolio); /* Do we not need to remove folio from smap here? */
} else { struct folio *f;
/* move the folio to the destination cache */
xa_lock_irq(&smap->i_pages);
f = __xa_erase(&smap->i_pages, index);
WARN_ON(folio != f);
smap->nrpages--;
xa_unlock_irq(&smap->i_pages);
/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 *
 * NOTE(review): the original text of this function was truncated after the
 * nilfs_clear_folio_dirty() call; the unlock / batch-release tail was
 * restored from the upstream nilfs2 implementation — verify against project
 * history.
 */
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_folio_dirty(folio);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
/** * nilfs_clear_folio_dirty - discard dirty folio * @folio: dirty folio that will be discarded * * nilfs_clear_folio_dirty() clears working states including dirty state for * the folio and its buffers. If the folio has buffers, clear only if it is * confirmed that none of the buffer heads are busy (none have valid * references and none are locked).
*/ void nilfs_clear_folio_dirty(struct folio *folio)
{ struct buffer_head *bh, *head;
for (bh = head = folio_buffers(folio), block_start = 0;
bh != head || !block_start;
block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + bh->b_size; if (block_end > from && block_start < to && !buffer_dirty(bh))
nc++;
} return nc;
}
/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
 *    flag of pages when it copies back pages from shadow cache to the
 *    original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
void __nilfs_clear_folio_dirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	/* Anonymous folio: only the folio flag needs clearing. */
	if (!mapping) {
		folio_clear_dirty(folio);
		return;
	}

	xa_lock_irq(&mapping->i_pages);
	if (!folio_test_dirty(folio)) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}

	/* Drop the xarray dirty tag under the lock, then the folio flag. */
	__xa_clear_mark(&mapping->i_pages, folio->index,
			PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&mapping->i_pages);
	folio_clear_dirty_for_io(folio);
}
/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.
 *
 * NOTE(review): the original text of this function was truncated after the
 * index computation; the scan loop below was restored from the upstream
 * nilfs2 implementation — verify against project history.
 *
 * Return: Length in blocks of found extent, 0 otherwise.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index,
					      ULONG_MAX, &fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					/* extent ended at a non-delayed buffer */
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}
/*
 * NOTE(review): the following disclaimer text was accidentally appended to
 * this file as bare (non-C) text and has been fenced off as a comment.
 * Translated from German: "The information on this website has been
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, nor correctness, nor quality of the provided information
 * is guaranteed.  Remark: the colored syntax highlighting and the
 * measurement are still experimental."  It should probably be removed.
 */