/* Pagecache helpers shared by the bcachefs buffered-IO paths. */
int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
				     u64, fgf_t, gfp_t, folios *);
int bch2_write_invalidate_inode_pages_range(struct address_space *,
					    loff_t, loff_t);
/* * Use u64 for the end pos and sector helpers because if the folio covers the * max supported range of the mapping, the start offset of the next folio * overflows loff_t. This breaks much of the range based processing in the * buffered write path.
*/ staticinline u64 folio_end_pos(struct folio *folio)
{ return folio_pos(folio) + folio_size(folio);
}
enum bch_folio_sector_state { #define x(n) SECTOR_##n,
BCH_FOLIO_SECTOR_STATE() #undef x
};
/* Per-sector allocation/reservation bookkeeping for a folio. */
struct bch_folio_sector {
	/* Uncompressed, fully allocated replicas (or on disk reservation): */
	u8			nr_replicas:4,
	/* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
				replicas_reserved:4;
	u8			state;
};
/* Private per-folio state, with a flexible array of per-sector entries. */
struct bch_folio {
	spinlock_t		lock;
	atomic_t		write_count;
	/*
	 * Is the sector state up to date with the btree?
	 * (Not the data itself)
	 */
	bool			uptodate;
	struct bch_folio_sector	s[];
};
/* Helper for when we need to add debug instrumentation: */
static inline void bch2_folio_sector_set(struct folio *folio,
					 struct bch_folio *s,
					 unsigned i, unsigned n)
{
	s->s[i].state = n;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.