/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */
staticinlinebool bdi_has_dirty_io(struct backing_dev_info *bdi)
{ /* * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are * any dirty wbs. See wb_update_write_bandwidth().
*/ return atomic_long_read(&bdi->tot_write_bandwidth);
}
/** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest * * Determine whether there is writeback waiting to be handled against a * bdi_writeback.
*/ staticinlinebool writeback_in_progress(struct bdi_writeback *wb)
{ return test_bit(WB_writeback_running, &wb->state);
}
/** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * * Cgroup writeback requires support from the filesystem. Also, both memcg and * iocg have to be on the default hierarchy. Test whether all conditions are * met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured.
*/ staticinlinebool inode_cgwb_enabled(struct inode *inode)
{ struct backing_dev_info *bdi = inode_to_bdi(inode);
/** * wb_find_current - find wb for %current on a bdi * @bdi: bdi of interest * * Find the wb of @bdi which matches both the memcg and blkcg of %current. * Must be called under rcu_read_lock() which protects the returend wb. * NULL if not found.
*/ staticinlinestruct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{ struct cgroup_subsys_state *memcg_css; struct bdi_writeback *wb;
memcg_css = task_css(current, memory_cgrp_id); if (!memcg_css->parent) return &bdi->wb;
/* * %current's blkcg equals the effective blkcg of its memcg. No * need to use the relatively expensive cgroup_get_e_css().
*/ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) return wb; return NULL;
}
/** * wb_get_create_current - get or create wb for %current on a bdi * @bdi: bdi of interest * @gfp: allocation mask * * Equivalent to wb_get_create() on %current's memcg. This function is * called from a relatively hot path and optimizes the common cases using * wb_find_current().
*/ staticinlinestruct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{ struct bdi_writeback *wb;
/** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock.
*/ staticinlinestruct bdi_writeback *inode_to_wb(conststruct inode *inode)
{ #ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
(!lockdep_is_held(&inode->i_lock) &&
!lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb;
}
staticinlinestruct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc)
{ /* * If wbc does not have inode attached, it means cgroup writeback was * disabled when wbc started. Just use the default wb in that case.
*/ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}
/** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and * can't sleep during the transaction. IRQs may or may not be disabled on * return.
*/ staticinlinestruct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
/* * Paired with store_release in inode_switch_wbs_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
if (unlikely(cookie->locked))
xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
/* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages * lock. inode_to_wb() will bark. Deref directly.
*/ return inode->i_wb;
}
/*
 * NOTE(review): the text below is website-scrape residue (a German
 * disclaimer), not part of the original kernel header.  It is kept here,
 * translated and wrapped in a comment so the file stays compilable;
 * it can likely be removed entirely.
 *
 * "The information on this web page has been compiled carefully and to
 * the best of our knowledge.  However, neither completeness nor
 * correctness nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting is still experimental."
 */