// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */
/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)
/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when it falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
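
/*
 * Illustrative arithmetic (not in the original source): with 4KB pages,
 * PAGE_SHIFT - 10 == 2, so DIRTY_POLL_THRESH == 128 >> 2 == 32 pages,
 * i.e. 128KB worth of dirtied data between polls.
 */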
/*
 * Estimate write bandwidth or update dirty limit at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
#define RATELIMIT_CALC_SHIFT 10
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;
/* The following parameters are exported via /proc/sys/vm */
/*
 * Start background writeback (via writeback threads) at this percentage
 */
static int dirty_background_ratio = 10;
/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
static unsigned long dirty_background_bytes;
/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
static int vm_highmem_is_dirtyable;
/*
 * The generator of dirty data starts writeback at this percentage
 */
static int vm_dirty_ratio = 20;
/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
static unsigned long vm_dirty_bytes;
/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
EXPORT_SYMBOL_GPL(dirty_writeback_interval);
/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;
EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
struct wb_domain global_wb_domain;
/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min = div64_ul(min, tot_bw);
		}
		if (max < 100 * BDI_RATIO_SCALE) {
			max *= this_bw;
			max = div64_ul(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}
/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value of which the
 * user-configurable dirty ratio is the effective number of pages that
 * are allowed to be actually dirtied. Per individual zone, or
 * globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */
/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Return: the node's number of pages potentially available for dirty
 * page cache. This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}
/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Return: the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
x -= min(x, totalreserve_pages);
x += global_node_page_state(NR_INACTIVE_FILE);
x += global_node_page_state(NR_ACTIVE_FILE);
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
return x + 1; /* Ensure that we never return 0 */
}
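
/*
 * Illustrative example (not in the original source): on a 16GB machine with
 * ~2GB free pages, ~0.5GB of totalreserve_pages and ~6GB of file LRU pages,
 * this returns roughly (2 - 0.5 + 6)GB worth of pages. The "+ 1" merely
 * guards callers that divide by this value against a zero result.
 */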
/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
 * must ensure that @dtc->avail is set before calling this function. The
 * dirty limits will be lifted by 1/4 for real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;
	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;
		/*
		 * The byte settings can't be applied directly to memcg
		 * domains. Convert them to ratios by scaling against
		 * globally available memory. As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
PAGE_SIZE);
bytes = bg_bytes = 0;
}
	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	tsk = current;
	if (rt_or_dl_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}

	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	if (thresh > UINT_MAX)
		thresh = UINT_MAX;
	/* This makes sure bg_thresh is within 32-bits as well */
	if (bg_thresh >= thresh)
bg_thresh = thresh / 2;
dtc->thresh = thresh;
dtc->bg_thresh = bg_thresh;
	/* we should eventually report the domain in the TP */
	if (!gdtc)
trace_global_dirty_state(bg_thresh, thresh);
}
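
/*
 * Worked example (illustrative, not in the original source): with 16GB of
 * dirtyable memory (4194304 pages at 4KB), vm_dirty_ratio = 20,
 * dirty_background_ratio = 10 and no byte-based overrides:
 *
 *	ratio     = 20 * 4096 / 100 = 819 (per-PAGE_SIZE units)
 *	thresh    = 819 * 4194304 / 4096 ~= 838656 pages ~= 3.2GB
 *	bg_thresh ~= 419328 pages ~= 1.6GB
 *
 * A real-time task would see both limits lifted by roughly 1/4.
 */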
/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain. See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}
/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Return: the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (rt_or_dl_task(tsk))
		dirty += dirty / 4;

	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	return min_t(unsigned long, dirty, UINT_MAX);
}
/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Return: %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}
static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;

	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}
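
/*
 * Example (illustrative): with HZ = 1000, VM_COMPLETIONS_PERIOD_LEN is 3000
 * jiffies, so wp_next_time() schedules the next aging step 3s after
 * @cur_time. It steps over the value 0 because dom->period_time == 0 is
 * reserved to mean "period timer off" (see writeout_period() below).
 */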
static void wb_domain_writeout_add(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac, long nr)
{
__fprop_add_percpu_max(&dom->completions, completions,
				max_prop_frac, nr);

	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other wb_domain_writeout_add calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
dom->period_time = wp_next_time(jiffies);
mod_timer(&dom->period_timer, dom->period_time);
}
}
/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from __folio_end_writeback().
 */
static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
{
	struct wb_domain *cgdom;

	wb_stat_mod(wb, WB_WRITTEN, nr);
	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
			       wb->bdi->max_ratio, nr);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_ratio, nr);
}
/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = timer_container_of(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;
if (fprop_new_period(&dom->completions, miss_periods + 1)) {
dom->period_time = wp_next_time(dom->period_time +
miss_periods * VM_COMPLETIONS_PERIOD_LEN);
mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
dom->period_time = 0;
}
}
int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}
/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;
/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}
/**
 * __wb_calc_thresh - @wb's share of dirty threshold
 * @dtc: dirty_throttle_context of interest
 * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
 *
 * Note that balance_dirty_pages() will only seriously take dirty throttling
 * threshold as a hard limit when sleeping max_pause per page is not enough
 * to keep the dirty pages under control. For example, when the device is
 * completely stalled due to some error conditions, or when there are 1000
 * dd tasks writing to a slow 10MB/s USB key.
 * In the other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
 * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
 * "dirty" in the context of dirty balancing includes all PG_dirty and
 * PG_writeback pages.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
				      unsigned long thresh)
{
	struct wb_domain *dom = dtc_dom(dtc);
	struct bdi_writeback *wb = dtc->wb;
	u64 wb_thresh;
	u64 wb_max_thresh;
	unsigned long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;
	/*
	 * Calculate this wb's share of the thresh ratio.
	 */
fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
&numerator, &denominator);
	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) /
			(100 * BDI_RATIO_SCALE);
	wb_thresh *= numerator;
	wb_thresh = div64_ul(wb_thresh, denominator);

	wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);

	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for long time.
	 * Honour such devices a reasonable good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can rampup the threshold quickly.
	 */
	if (thresh > dtc->dirty) {
		if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
		else
			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
	}

	wb_max_thresh = div_u64((u64)thresh * wb_max_ratio, 100 * BDI_RATIO_SCALE);
	if (wb_thresh > wb_max_thresh)
		wb_thresh = wb_max_thresh;

	return wb_thresh;
}
/*
 *                        setpoint - dirty 3
 *     f(dirty) := 1.0 + (----------------)
 *                        limit - setpoint
 *
 * it's a 3rd order polynomial that subjects to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
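
/*
 * Sanity-check example (illustrative, not in the original source), using
 * RATELIMIT_CALC_SHIFT = 10 so that 1.0 == 1 << 10. Take freerun = 200,
 * limit = 1000 and thus setpoint = (200 + 1000) / 2 = 600:
 *
 *	dirty = 200  (freerun):  x =  1.0, pos_ratio = 1 + 1^3    = 2.0
 *	dirty = 600  (setpoint): x =  0.0, pos_ratio = 1 + 0^3    = 1.0
 *	dirty = 1000 (limit):    x = -1.0, pos_ratio = 1 + (-1)^3 = 0.0
 *
 * which matches properties (1)-(3) in the comment above.
 */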
/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0  * * * * * * *
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                       .
 *     |                      .                         .
 *     |                      .                           .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                     x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;
dtc->pos_ratio = 0;
	if (unlikely(dtc->dirty >= limit))
		return;
	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
setpoint = (freerun + limit) / 2;
pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default.
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);
		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint,
						 dtc->wb_dirty, wb_thresh);
		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In the other words global
		 * state ("dirty") is not limiting factor and we have to
		 * make decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
}
	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */
	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that subjects to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *          thresh                           thresh
	 */
span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
x_intercept = wb_setpoint + span;
	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}
static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}
	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
dom->dirty_limit = limit;
}

static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
				      unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for the most time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}
/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;
	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
task_ratelimit = (u64)dirty_ratelimit *
dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate. Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
balanced_dirty_ratelimit = write_bw;
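
	/*
	 * Worked numbers (illustrative, not in the original source): suppose
	 * the device writes back at write_bw = 25600 pages/s (~100MB/s with
	 * 4KB pages) and N = 8 dd tasks are each throttled at
	 * task_ratelimit = 6400 pages/s. The measured dirty_rate approaches
	 * 8 * 6400 = 51200 pages/s, so
	 *
	 *	balanced_dirty_ratelimit = 6400 * 25600 / 51200 = 3200 pages/s
	 *
	 * which is exactly write_bw / N.
	 */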
	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */
	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
step = 0;
	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
dirty = dtc->wb_dirty;
setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
}
	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}
	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}
static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed;
	unsigned long dirtied;
	unsigned long written;

	spin_lock(&wb->list_lock);

	/*
	 * Lockless checks for elapsed time are racy and delayed update after
	 * IO completion doesn't do it at all (to make sure written pages are
	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
	 * division errors.
	 */
	elapsed = max(now - wb->bw_time_stamp, 1UL);
dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
if (update_ratelimit) {
domain_update_dirty_limit(gdtc, now);
wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out. Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
domain_update_dirty_limit(mdtc, now);
wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
}
}
	wb_update_write_bandwidth(wb, elapsed, written);

	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	WRITE_ONCE(wb->bw_time_stamp, now);
	spin_unlock(&wb->list_lock);
}
/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_zone_page_state() too often. So scale it near-sqrt to the safety
 * margin (the number of pages we may dirty without exceeding the dirty
 * limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
static long wb_max_pause(struct bdi_writeback *wb,
			 unsigned long wb_dirty)
{
	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
t++;
	return min_t(unsigned long, t, MAX_PAUSE);
}
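
/*
 * Example (illustrative): with HZ = 1000 and bw = 25600 pages/s (~100MB/s),
 * roundup_pow_of_two(1 + HZ / 8) = 128 and t ~= wb_dirty / 201 + 1 jiffies,
 * roughly the time the device needs to clean an eighth of the current
 * wb_dirty pool, capped at MAX_PAUSE = 200ms.
 */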
static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */
/* target for 10ms pause on 1-dd case */
t = max(1, HZ / 100);
	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
t += (hi - lo) * (10 * HZ) / 1024;
	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
t = min(t, 1 + max_pause / 2);
pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
pages = DIRTY_POLL_THRESH;
t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
}
}
	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
t = max_pause;
pages = task_ratelimit * t / roundup_pow_of_two(HZ);
}
	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
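
/*
 * Example (illustrative): with HZ = 1000, a single dirtier and
 * dirty_ratelimit ~= task_ratelimit ~= 25600 pages/s, the target pause is
 * t = 10ms and pages = 25600 * 10 / 1024 = 250 dirtied pages per pause.
 * That is above DIRTY_POLL_THRESH, so the returned minimum pause is
 * 1 + t / 2 = 6 jiffies.
 */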
static void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as some limiting factor as
	 * dirty_thresh, due to reasons
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
dtc->wb_bg_thresh = dtc->thresh ?
div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
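
	/*
	 * Illustrative proportion (not in the original source): with
	 * thresh = 1000, bg_thresh = 500 and wb_thresh = 200, the wb's
	 * background threshold becomes 200 * 500 / 1000 = 100 pages.
	 */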
	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error()) {
wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
} else {
wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
}
}
/*
 * Throttle it only when the background writeback cannot catch-up. This avoids
 * (excessively) small writeouts when the wb limits are ramping up in case of
 * !strictlimit.
 *
 * In strictlimit case make decision based on the wb counters and limits. Small
 * writeouts when the wb limits are ramping up are the price we consciously pay
 * for strictlimit-ing.
 */
static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
				 bool strictlimit)
{
	unsigned long dirty, thresh, bg_thresh;

	if (likely(!strictlimit)) {
		/* throttle according to the global domain */
		dirty = dtc->dirty;
		thresh = dtc->thresh;
		bg_thresh = dtc->bg_thresh;
	} else {
		/* throttle according to the wb counters and limits */
		dirty = dtc->wb_dirty;
		thresh = dtc->wb_thresh;
		bg_thresh = dtc->wb_bg_thresh;
	}
	dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
}
static void wb_dirty_freerun(struct dirty_throttle_control *dtc,
			     bool strictlimit)
{
	dtc->freerun = false;

	/* was already handled in domain_dirty_freerun */
	if (strictlimit)
		return;

	wb_dirty_limits(dtc);
	/*
	 * LOCAL_THROTTLE tasks must not be throttled when below the per-wb
	 * freerun ceiling.
	 */
	if (!(current->flags & PF_LOCAL_THROTTLE))
		return;

	dtc->freerun = dtc->wb_dirty <=
		       dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh);
}
/*
 * The limits fields dirty_exceeded and pos_ratio won't be updated if wb is
 * in freerun state. Please don't use these invalid fields in freerun case.
 */
static void balance_wb_limits(struct dirty_throttle_control *dtc,
			      bool strictlimit)
{
	wb_dirty_freerun(dtc, strictlimit);
	if (dtc->freerun)
		return;

	dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) &&
		((dtc->dirty > dtc->thresh) || strictlimit);

	wb_position_ratio(dtc);
}
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data. It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static int balance_dirty_pages(struct bdi_writeback *wb,
			       unsigned long pages_dirtied, unsigned int flags)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;
	struct dirty_throttle_control *sdtc;
	unsigned long nr_dirty;
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;
	int ret = 0;
	for (;;) {
		unsigned long now = jiffies;

nr_dirty = global_node_page_state(NR_FILE_DIRTY);
		balance_domain_limits(gdtc, strictlimit);
		if (mdtc) {
			unsigned long filepages, headroom, writeback;

			/*
			 * If @wb belongs to !root memcg, repeat the same
			 * basic calculations for the memcg domain.
			 */
			mem_cgroup_wb_stats(wb, &filepages, &headroom,
					    &mdtc->dirty, &writeback);
			mdtc->dirty += writeback;
			mdtc_calc_avail(mdtc, filepages, headroom);

			balance_domain_limits(mdtc, strictlimit);
		}
		/*
		 * In laptop mode, we wait until hitting the higher threshold
		 * before starting background writeout, and then write out all
		 * the way down to the lower threshold. So slow writers cause
		 * minimal disk activity.
		 *
		 * In normal mode, we start background writeout at the lower
		 * background_thresh, to keep the amount of dirty memory low.
		 */
		if (!laptop_mode && nr_dirty > gdtc->bg_thresh &&
		    !writeback_in_progress(wb))
wb_start_background_writeback(wb);
		/*
		 * If memcg domain is in effect, @dirty should be under
		 * both global and memcg freerun ceilings.
		 */
		if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
			unsigned long intv;
			unsigned long m_intv;

free_running:
			intv = dirty_poll_interval(gdtc->dirty, gdtc->thresh);
			m_intv = ULONG_MAX;

			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			if (mdtc)
				m_intv = dirty_poll_interval(mdtc->dirty,
							     mdtc->thresh);
			current->nr_dirtied_pause = min(intv, m_intv);
			break;
		}
		/* Start writeback even when in laptop mode */
		if (unlikely(!writeback_in_progress(wb)))
wb_start_background_writeback(wb);
mem_cgroup_flush_foreign(wb);
		/*
		 * Calculate global domain's pos_ratio and select the
		 * global dtc by default.
		 */
		balance_wb_limits(gdtc, strictlimit);
		if (gdtc->freerun)
			goto free_running;
sdtc = gdtc;
		if (mdtc) {
			/*
			 * If memcg domain is in effect, calculate its
			 * pos_ratio. @wb should satisfy constraints from
			 * both global and memcg domains. Choose the one
			 * w/ lower pos_ratio.
			 */
			balance_wb_limits(mdtc, strictlimit);
			if (mdtc->freerun)
				goto free_running;
			if (mdtc->pos_ratio < gdtc->pos_ratio)
sdtc = mdtc;
}
/* throttle according to the chosen dtc */
dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
RATELIMIT_CALC_SHIFT;
max_pause = wb_max_pause(wb, sdtc->wb_dirty);
min_pause = wb_min_pause(wb, max_pause,
task_ratelimit, dirty_ratelimit,
&nr_dirtied_pause);
if (unlikely(task_ratelimit == 0)) {
period = max_pause;
			pause = max_pause;
			goto pause;
}
period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
trace_balance_dirty_pages(wb,
sdtc,
dirty_ratelimit,
task_ratelimit,
pages_dirtied,
period,
min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
current->dirty_paused_when = now;
current->nr_dirtied = 0;
			} else if (period) {
current->dirty_paused_when += period;
current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(wb, sdtc, dirty_ratelimit,
					  task_ratelimit, pages_dirtied,
					  period, pause, start_time);
		if (flags & BDP_ASYNC) {
			ret = -EAGAIN;
			break;
		}
		__set_current_state(TASK_KILLABLE);
		bdi->last_bdp_sleep = jiffies;
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;
		/*
		 * In the case of an unresponsive NFS server and the NFS dirty
		 * pages exceed dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors. So use
		 * the larger and more IO friendly wb_stat_error.
		 */
		if (sdtc->wb_dirty <= wb_stat_error())
			break;
		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}
static DEFINE_PER_CPU(int, bdp_ratelimits);
/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately when it has
 * dirtied (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never
 * be called to throttle the page dirties. The solution is to save the not yet
 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
 * randomly into the running tasks. This works well for the above worst case,
 * as the new task will pick up and accumulate the old task's leaked dirty
 * count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
/**
 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
 * @mapping: address_space which was dirtied.
 * @flags: BDP flags.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * See balance_dirty_pages_ratelimited() for details.
 *
 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
 * indicate that memory is out of balance and the caller must wait
 * for I/O to complete. Otherwise, it will return 0 to indicate
 * that either memory was already in balance, or it was able to sleep
 * until the amount of dirty memory returned to balance.
 */
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					  unsigned int flags)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int ret = 0;
	int *p;
	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
		return ret;
if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
wb = &bdi->wb;
	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU to accumulate too many dirtied pages without
	 * calling into balance_dirty_pages(), which can happen when there are
	 * 1000+ tasks, all of them start dirtying pages at exactly the same
	 * time, hence all honoured too large initial task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelock other long-run dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
*p -= nr_pages_dirtied;
current->nr_dirtied += nr_pages_dirtied;
}
preempt_enable();
if (unlikely(current->nr_dirtied >= ratelimit))
		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);

	wb_put(wb);

	return ret;
}
EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
/**
 * balance_dirty_pages_ratelimited - balance dirty memory state.
 * @mapping: address_space which was dirtied.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * Once we're over the dirty memory limit we decrease the ratelimiting
 * by a lot, to prevent individual processes from overshooting the limit
 * by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
balance_dirty_pages_ratelimited_flags(mapping, 0);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
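
/*
 * Usage sketch (illustrative, not part of the original file): a filesystem's
 * buffered-write path typically calls balance_dirty_pages_ratelimited() once
 * per dirtied folio, roughly:
 *
 *	while (bytes_left) {
 *		... allocate/lock folio, copy user data into it ...
 *		... mark the folio dirty ...
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 *
 * The steps above are placeholders; generic_perform_write() in mm/filemap.c
 * is the canonical caller.
 */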
/*
 * Similar to wb_dirty_limits, wb_bg_dirty_limits also calculates dirty
 * and thresh, but it's for background writeback.
 */
static void wb_bg_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;

	dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh);
	if (dtc->wb_bg_thresh < 2 * wb_stat_error())
		dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
	else
		dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE);
}