/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ staticbool __damon_is_registered_ops(enum damon_ops_id id)
{ struct damon_operations empty_ops = {};
if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) returnfalse; returntrue;
}
/** * damon_is_registered_ops() - Check if a given damon_operations is registered. * @id: Id of the damon_operations to check if registered. * * Return: true if the ops is set, false otherwise.
*/ bool damon_is_registered_ops(enum damon_ops_id id)
{ bool registered;
if (id >= NR_DAMON_OPS) returnfalse;
mutex_lock(&damon_ops_lock);
registered = __damon_is_registered_ops(id);
mutex_unlock(&damon_ops_lock); return registered;
}
/** * damon_register_ops() - Register a monitoring operations set to DAMON. * @ops: monitoring operations set to register. * * This function registers a monitoring operations set of valid &struct * damon_operations->id so that others can find and use them later. * * Return: 0 on success, negative error code otherwise.
*/ int damon_register_ops(struct damon_operations *ops)
{ int err = 0;
/** * damon_select_ops() - Select a monitoring operations to use with the context. * @ctx: monitoring context to use the operations. * @id: id of the registered monitoring operations to select. * * This function finds registered monitoring operations set of @id and make * @ctx to use it. * * Return: 0 on success, negative error code otherwise.
*/ int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{ int err = 0;
/* * Construct a damon_region struct * * Returns the pointer to the new struct if success, or NULL otherwise
*/ struct damon_region *damon_new_region(unsignedlong start, unsignedlong end)
{ struct damon_region *region;
region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); if (!region) return NULL;
/* * Check whether a region is intersecting an address range * * Returns true if it is.
*/ staticbool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
{ return !(r->ar.end <= re->start || re->end <= r->ar.start);
}
/* * Fill holes in regions with new regions.
*/ staticint damon_fill_regions_holes(struct damon_region *first, struct damon_region *last, struct damon_target *t)
{ struct damon_region *r = first;
if (r == last) break;
next = damon_next_region(r); if (r->ar.end != next->ar.start) {
newr = damon_new_region(r->ar.end, next->ar.start); if (!newr) return -ENOMEM;
damon_insert_region(newr, r, next, t);
}
} return 0;
}
/* * damon_set_regions() - Set regions of a target for given address ranges. * @t: the given target. * @ranges: array of new monitoring target ranges. * @nr_ranges: length of @ranges. * * This function adds new regions to, or modify existing regions of a * monitoring target to fit in specific ranges. * * Return: 0 if success, or negative error code otherwise.
*/ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, unsignedint nr_ranges)
{ struct damon_region *r, *next; unsignedint i; int err;
/* Remove regions which are not in the new ranges */
damon_for_each_region_safe(r, next, t) { for (i = 0; i < nr_ranges; i++) { if (damon_intersect(r, &ranges[i])) break;
} if (i == nr_ranges)
damon_destroy_region(r, t);
}
r = damon_first_region(t); /* Add new regions or resize existing regions to fit in the ranges */ for (i = 0; i < nr_ranges; i++) { struct damon_region *first = NULL, *last, *newr; struct damon_addr_range *range;
range = &ranges[i]; /* Get the first/last regions intersecting with the range */
damon_for_each_region_from(r, t) { if (damon_intersect(r, range)) { if (!first)
first = r;
last = r;
} if (r->ar.start >= range->end) break;
} if (!first) { /* no region intersects with this range */
newr = damon_new_region(
ALIGN_DOWN(range->start,
DAMON_MIN_REGION),
ALIGN(range->end, DAMON_MIN_REGION)); if (!newr) return -ENOMEM;
damon_insert_region(newr, damon_prev_region(r), r, t);
} else { /* resize intersecting regions to fit in this range */
first->ar.start = ALIGN_DOWN(range->start,
DAMON_MIN_REGION);
last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
/* fill possible holes in the range */
err = damon_fill_regions_holes(first, last, t); if (err) return err;
}
} return 0;
}
/** * damos_filter_for_ops() - Return if the filter is ops-hndled one. * @type: type of the filter. * * Return: true if the filter of @type needs to be handled by ops layer, false * otherwise.
*/ bool damos_filter_for_ops(enum damos_filter_type type)
{ switch (type) { case DAMOS_FILTER_TYPE_ADDR: case DAMOS_FILTER_TYPE_TARGET: returnfalse; default: break;
} returntrue;
}
scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); if (!scheme) return NULL;
scheme->pattern = *pattern;
scheme->action = action;
scheme->apply_interval_us = apply_interval_us; /* * next_apply_sis will be set when kdamond starts. While kdamond is * running, it will also updated when it is added to the DAMON context, * or damon_attrs are updated.
*/
scheme->next_apply_sis = 0;
scheme->walk_completed = false;
INIT_LIST_HEAD(&scheme->filters);
INIT_LIST_HEAD(&scheme->ops_filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
scheme->quota = *(damos_quota_init(quota)); /* quota.goals should be separately set by caller */
INIT_LIST_HEAD(&scheme->quota.goals);
/* * Construct a damon_target struct * * Returns the pointer to the new struct if success, or NULL otherwise
*/ struct damon_target *damon_new_target(void)
{ struct damon_target *t;
t = kmalloc(sizeof(*t), GFP_KERNEL); if (!t) return NULL;
/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}
/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}
/*
 * Re-scale the access monitoring results of @r for new attributes.
 *
 * @aggregating tells whether this is called in the middle of an aggregation
 * interval; in that case the in-progress counters are reset instead.
 */
static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}
/* * region->nr_accesses is the number of sampling intervals in the last * aggregation interval that access to the region has found, and region->age is * the number of aggregation intervals that its access pattern has maintained. * For the reason, the real meaning of the two fields depend on current * sampling interval and aggregation interval. This function updates * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
*/ staticvoid damon_update_monitoring_results(struct damon_ctx *ctx, struct damon_attrs *new_attrs, bool aggregating)
{ struct damon_attrs *old_attrs = &ctx->attrs; struct damon_target *t; struct damon_region *r;
/* if any interval is zero, simply forgive conversion */ if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
!new_attrs->sample_interval ||
!new_attrs->aggr_interval) return;
/* * damon_valid_intervals_goal() - return if the intervals goal of @attrs is * valid.
*/ staticbool damon_valid_intervals_goal(struct damon_attrs *attrs)
{ struct damon_intervals_goal *goal = &attrs->intervals_goal;
/* tuning is disabled */ if (!goal->aggrs) returntrue; if (goal->min_sample_us > goal->max_sample_us) returnfalse; if (attrs->sample_interval < goal->min_sample_us ||
goal->max_sample_us < attrs->sample_interval) returnfalse; returntrue;
}
/** * damon_set_attrs() - Set attributes for the monitoring. * @ctx: monitoring context * @attrs: monitoring attributes * * This function should be called while the kdamond is not running, an access * check results aggregation is not ongoing (e.g., from damon_call(). * * Every time interval is in micro-seconds. * * Return: 0 on success, negative error code otherwise.
*/ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{ unsignedlong sample_interval = attrs->sample_interval ?
attrs->sample_interval : 1; struct damos *s; bool aggregating = ctx->passed_sample_intervals <
ctx->next_aggregation_sis;
if (!damon_valid_intervals_goal(attrs)) return -EINVAL;
if (attrs->min_nr_regions < 3) return -EINVAL; if (attrs->min_nr_regions > attrs->max_nr_regions) return -EINVAL; if (attrs->sample_interval > attrs->aggr_interval) return -EINVAL;
/* calls from core-external doesn't set this. */ if (!attrs->aggr_samples)
attrs->aggr_samples = attrs->aggr_interval / sample_interval;
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	/* Drop every currently installed scheme before adding the new ones. */
	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}
staticstruct damos_quota_goal *damos_nth_quota_goal( int n, struct damos_quota *q)
{ struct damos_quota_goal *goal; int i = 0;
/* Copy user-specified quota goal parameters from @src to @dst. */
static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}
/** * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota. * @dst: The commit destination DAMOS quota. * @src: The commit source DAMOS quota. * * Copies user-specified parameters for quota goals from @src to @dst. Users * should use this function for quota goals-level parameters update of running * DAMON contexts, instead of manual in-place updates. * * This function should be called from parameters-update safe context, like * damon_call().
*/ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{ struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; int i = 0, j = 0;
/** * damos_filters_default_reject() - decide whether to reject memory that didn't * match with any given filter. * @filters: Given DAMOS filters of a group.
*/ staticbool damos_filters_default_reject(struct list_head *filters)
{ struct damos_filter *last_filter;
/* * The caller should ensure the regions of @src are * 1. valid (end >= src) and * 2. sorted by starting address. * * If @src has no region, @dst keeps current regions.
*/ staticint damon_commit_target_regions( struct damon_target *dst, struct damon_target *src)
{ struct damon_region *src_region; struct damon_addr_range *ranges; int i = 0, err;
damon_for_each_region(src_region, src)
i++; if (!i) return 0;
/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst:	The commit destination DAMON context.
 * @src:	The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and update
 * the internal status and results accordingly.  Users should use this function
 * for context-level parameters update of running context, instead of manual
 * in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing require putting pids).
	 */
	err = damon_set_attrs(dst, &src->attrs);
	if (err)
		return err;
	dst->ops = src->ops;

	return 0;
}
/** * damon_nr_running_ctxs() - Return number of currently running contexts.
*/ int damon_nr_running_ctxs(void)
{ int nr_ctxs;
if (ctx->attrs.min_nr_regions)
sz /= ctx->attrs.min_nr_regions; if (sz < DAMON_MIN_REGION)
sz = DAMON_MIN_REGION;
return sz;
}
staticint kdamond_fn(void *data);
/* * __damon_start() - Starts monitoring with given context. * @ctx: monitoring context * * This function should be called while damon_lock is hold. * * Return: 0 on success, negative error code otherwise.
*/ staticint __damon_start(struct damon_ctx *ctx)
{ int err = -EBUSY;
/**
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that created by other
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	/* Exclusive and non-exclusive context groups cannot run together. */
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
/* * __damon_stop() - Stops monitoring of a given context. * @ctx: monitoring context * * Return: 0 on success, negative error code otherwise.
*/ staticint __damon_stop(struct damon_ctx *ctx)
{ struct task_struct *tsk;
/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int idx;
	int ret = 0;

	/* Stop each context until one fails; report the first error. */
	for (idx = 0; idx < nr_ctxs && !ret; idx++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		ret = __damon_stop(ctxs[idx]);
	}
	return ret;
}
/** * damon_is_running() - Returns if a given DAMON context is running. * @ctx: The DAMON context to see if running. * * Return: true if @ctx is running, false otherwise.
*/ bool damon_is_running(struct damon_ctx *ctx)
{ bool running;
/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  If &damon_call_control->repeat of
 * @control is set, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	/* Repeating requests are never waited on, so need no completion. */
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
/**
 * damos_walk() - Invoke a given function while DAMOS walk regions.
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the walk request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
 * that the kdamond will apply DAMOS action to, and wait until the kdamond
 * finishes handling of the request.
 *
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of &struct damon_ctx and &struct damon_region that
 * each of the scheme will apply the action for next interval, without
 * additional synchronizations against the kdamond.  If every scheme of @ctx
 * passed at least one &damos->apply_interval_us, kdamond marks the request as
 * completed so that damos_walk() can wakeup and return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
{
	init_completion(&control->completion);
	control->canceled = false;

	/* Only a single walk request may be in flight per context. */
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
		return -EBUSY;
	}
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
/*
 * Warn and fix corrupted ->nr_accesses[_bp] for investigations and preventing
 * the problem being propagated.
 */
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
{
	/* At reset time, nr_accesses_bp must equal nr_accesses * 10000. */
	if (r->nr_accesses_bp == r->nr_accesses * 10000)
		return;
	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
			r->nr_accesses_bp, r->nr_accesses);
	r->nr_accesses_bp = r->nr_accesses * 10000;
}
/* * Reset the aggregated monitoring results ('nr_accesses' of each region).
*/ staticvoid kdamond_reset_aggregated(struct damon_ctx *c)
{ struct damon_target *t; unsignedint ti = 0; /* target's index */
damon_for_each_target(t, c) { struct damon_region *r;
/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If a quota of a scheme has exceeded in a quota charge window, the scheme's
 * action would be applied to only a part of the target access pattern
 * fulfilling regions.  To avoid applying the scheme action to only already
 * applied regions, DAMON skips applying the scheme action to the regions that
 * were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region was previously charged,
 * this function splits the region into two so that the second one covers the
 * area that was not charged in the previous charge window and saves the second
 * region in *rp and returns false, so that the caller can apply DAMON action
 * to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
*/ staticbool damos_skip_charged_region(struct damon_target *t, struct damon_region **rp, struct damos *s)
{ struct damon_region *r = *rp; struct damos_quota *quota = &s->quota; unsignedlong sz_to_skip;
/* Skip previously charged regions */ if (quota->charge_target_from) { if (t != quota->charge_target_from) returntrue; if (r == damon_last_region(t)) {
quota->charge_target_from = NULL;
quota->charge_addr_from = 0; returntrue;
} if (quota->charge_addr_from &&
r->ar.end <= quota->charge_addr_from) returntrue;
switch (filter->type) { case DAMOS_FILTER_TYPE_TARGET:
damon_for_each_target(ti, ctx) { if (ti == t) break;
target_idx++;
}
matched = target_idx == filter->target_idx; break; case DAMOS_FILTER_TYPE_ADDR:
start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
/* inside the range */ if (start <= r->ar.start && r->ar.end <= end) {
matched = true; break;
} /* outside of the range */ if (r->ar.end <= start || end <= r->ar.start) {
matched = false; break;
} /* start before the range and overlap */ if (r->ar.start < start) {
damon_split_region_at(t, r, start - r->ar.start);
matched = false; break;
} /* start inside the range */
damon_split_region_at(t, r, end - r->ar.start);
matched = true; break; default: returnfalse;
}
/* * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. * @ctx: The context of &damon_ctx->walk_control. * @t: The monitoring target of @r that @s will be applied. * @r: The region of @t that @s will be applied. * @s: The scheme of @ctx that will be applied to @r. * * This function is called from kdamond whenever it asked the operation set to * apply a DAMOS scheme action to a region. If a DAMOS walk request is * installed by damos_walk() and not yet uninstalled, invoke it.
*/ staticvoid damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *s, unsignedlong sz_filter_passed)
{ struct damos_walk_control *control;
if (s->walk_completed) return;
control = ctx->walk_control; if (!control) return;
/*
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx: The context of &damon_ctx->walk_control.
 * @s: A scheme of @ctx that all walks are now done.
 *
 * This function is called when kdamond finished applying the action of a DAMOS
 * scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
*/ staticvoid damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
{ struct damos *siter; struct damos_walk_control *control;
control = ctx->walk_control; if (!control) return;
s->walk_completed = true; /* if all schemes completed, signal completion to walker */
damon_for_each_scheme(siter, ctx) { if (!siter->walk_completed) return;
}
damon_for_each_scheme(siter, ctx)
siter->walk_completed = false;
/* * damos_walk_cancel() - Cancel the current DAMOS walk request. * @ctx: The context of &damon_ctx->walk_control. * * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS * walk is requested but there is no DAMOS scheme to walk for, or the kdamond * is already out of the main loop and therefore gonna be terminated, and hence * cannot continue the walks. This function therefore marks the walk request * as canceled, so that damos_walk() can wake up and return.
*/ staticvoid damos_walk_cancel(struct damon_ctx *ctx)
{ struct damos_walk_control *control;
mutex_lock(&ctx->walk_control_lock);
control = ctx->walk_control;
mutex_unlock(&ctx->walk_control_lock);
staticvoid damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, struct damon_region *r, struct damos *s)
{ struct damos_quota *quota = &s->quota; unsignedlong sz = damon_sz_region(r); struct timespec64 begin, end; unsignedlong sz_applied = 0; unsignedlong sz_ops_filter_passed = 0; /* * We plan to support multiple context per kdamond, as DAMON sysfs * implies with 'nr_contexts' file. Nevertheless, only single context * per kdamond is supported for now. So, we can simply use '0' context * index here.
*/ unsignedint cidx = 0; struct damos *siter; /* schemes iterator */ unsignedint sidx = 0; struct damon_target *titer; /* targets iterator */ unsignedint tidx = 0; bool do_trace = false;
/* get indices for trace_damos_before_apply() */ if (trace_damos_before_apply_enabled()) {
damon_for_each_scheme(siter, c) { if (siter == s) break;
sidx++;
}
damon_for_each_target(titer, c) { if (titer == t) break;
tidx++;
}
do_trace = true;
}
damon_for_each_scheme(s, c) { struct damos_quota *quota = &s->quota;
if (c->passed_sample_intervals < s->next_apply_sis) continue;
if (!s->wmarks.activated) continue;
/* Check the quota */ if (quota->esz && quota->charged_sz >= quota->esz) continue;
if (damos_skip_charged_region(t, &r, s)) continue;
if (!damos_valid_target(c, t, r, s)) continue;
damos_apply_scheme(c, t, r, s);
}
}
/* * damon_feed_loop_next_input() - get next input to achieve a target score. * @last_input The last input. * @score Current score that made with @last_input. * * Calculate next input to achieve the target score, based on the last input * and current score. Assuming the input and the score are positively * proportional, calculate how much compensation should be added to or * subtracted from the last input as a proportion of the last input. Avoid * next input always being zero by setting it non-zero always. In short form * (assuming support of float and signed calculations), the algorithm is as * below. * * next_input = max(last_input * ((goal - current) / goal + 1), 1) * * For simple implementation, we assume the target score is always 10,000. The * caller should adjust @score for this. * * Returns next input that assumed to achieve the target score.
*/ staticunsignedlong damon_feed_loop_next_input(unsignedlong last_input, unsignedlong score)
{ constunsignedlong goal = 10000; /* Set minimum input as 10000 to avoid compensation be zero */ constunsignedlong min_input = 10000; unsignedlong score_goal_diff, compensation; bool over_achieving = score > goal;
if (score == goal) return last_input; if (score >= goal * 2) return min_input;
switch (goal->metric) { case DAMOS_QUOTA_USER_INPUT: /* User should already set goal->current_value */ break; case DAMOS_QUOTA_SOME_MEM_PSI_US:
now_psi_total = damos_get_some_mem_psi_total();
goal->current_value = now_psi_total - goal->last_psi_total;
goal->last_psi_total = now_psi_total; break; case DAMOS_QUOTA_NODE_MEM_USED_BP: case DAMOS_QUOTA_NODE_MEM_FREE_BP:
goal->current_value = damos_get_node_mem_bp(goal); break; default: break;
}
}
/* Return the highest score since it makes schemes least aggressive */ staticunsignedlong damos_quota_score(struct damos_quota *quota)
{ struct damos_quota_goal *goal; unsignedlong highest_score = 0;
/* * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
*/ staticvoid damos_set_effective_quota(struct damos_quota *quota)
{ unsignedlong throughput; unsignedlong esz = ULONG_MAX;
if (!quota->ms && list_empty("a->goals)) {
quota->esz = quota->sz; return;
}
if (!list_empty("a->goals)) { unsignedlong score = damos_quota_score(quota);
/* * Merge adjacent regions having similar access frequencies * * t target affected by this merge operation * thres '->nr_accesses' diff threshold for the merge * sz_limit size upper limit of each region
*/ staticvoid damon_merge_regions_of(struct damon_target *t, unsignedint thres, unsignedlong sz_limit)
{ struct damon_region *r, *prev = NULL, *next;
/* * Merge adjacent regions having similar access frequencies * * threshold '->nr_accesses' diff threshold for the merge * sz_limit size upper limit of each region * * This function merges monitoring target regions which are adjacent and their * access frequencies are similar. This is for minimizing the monitoring * overhead under the dynamically changeable access pattern. If a merge was * unnecessarily made, later 'kdamond_split_regions()' will revert it. * * The total number of regions could be higher than the user-defined limit, * max_nr_regions for some cases. For example, the user can update * max_nr_regions to a number that lower than the current number of regions * while DAMON is running. For such a case, repeat merging until the limit is * met while increasing @threshold up to possible maximum level.
*/ staticvoid kdamond_merge_regions(struct damon_ctx *c, unsignedint threshold, unsignedlong sz_limit)
{ struct damon_target *t; unsignedint nr_regions; unsignedint max_thres;
/* * Split a region in two * * r the region to be split * sz_r size of the first sub-region that will be made
*/ staticvoid damon_split_region_at(struct damon_target *t, struct damon_region *r, unsignedlong sz_r)
{ struct damon_region *new;
new = damon_new_region(r->ar.start + sz_r, r->ar.end); if (!new) return;
/* Split every region in the given target into 'nr_subs' regions */ staticvoid damon_split_regions_of(struct damon_target *t, int nr_subs)
{ struct damon_region *r, *next; unsignedlong sz_region, sz_sub = 0; int i;
for (i = 0; i < nr_subs - 1 &&
sz_region > 2 * DAMON_MIN_REGION; i++) { /* * Randomly select size of left sub-region to be at * least 10 percent and at most 90% of original region
*/
sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
sz_region / 10, DAMON_MIN_REGION); /* Do not allow blank region */ if (sz_sub == 0 || sz_sub >= sz_region) continue;
/* * Split every target region into randomly-sized small regions * * This function splits every target region into random-sized small regions if * current total number of the regions is equal or smaller than half of the * user-specified maximum number of regions. This is for maximizing the * monitoring accuracy under the dynamically changeable access patterns. If a * split was unnecessarily made, later 'kdamond_merge_regions()' will revert * it.
*/ staticvoid kdamond_split_regions(struct damon_ctx *ctx)
{ struct damon_target *t; unsignedint nr_regions = 0; staticunsignedint last_nr_regions; int nr_subregions = 2;
if (nr_regions > ctx->attrs.max_nr_regions / 2) return;
/* Maybe the middle of the region has different access frequency */ if (last_nr_regions == nr_regions &&
nr_regions < ctx->attrs.max_nr_regions / 3)
nr_subregions = 3;
/* * Check whether current monitoring should be stopped * * The monitoring is stopped when either the user requested to stop, or all * monitoring targets are invalid. * * Returns true if need to stop current monitoring.
*/ staticbool kdamond_need_stop(struct damon_ctx *ctx)
{ struct damon_target *t;
if (kthread_should_stop()) returntrue;
if (!ctx->ops.target_valid) returnfalse;
damon_for_each_target(t, ctx) { if (ctx->ops.target_valid(t)) returnfalse;
}
/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	/* Metric could not be read; treat the scheme as active. */
	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
/* * kdamond_call() - handle damon_call_control objects. * @ctx: The &struct damon_ctx of the kdamond. * @cancel: Whether to cancel the invocation of the function. * * If there are &struct damon_call_control requests that registered via * &damon_call() on @ctx, do or cancel the invocation of the function depending * on @cancel. @cancel is set when the kdamond is already out of the main loop * and therefore will be terminated.
*/ staticvoid kdamond_call(struct damon_ctx *ctx, bool cancel)
{ struct damon_call_control *control;
LIST_HEAD(repeat_controls); int ret = 0;
while (true) {
mutex_lock(&ctx->call_controls_lock);
control = list_first_entry_or_null(&ctx->call_controls, struct damon_call_control, list);
mutex_unlock(&ctx->call_controls_lock); if (!control) break; if (cancel) {
control->canceled = true;
} else {
ret = control->fn(control->data);
control->return_code = ret;
}
mutex_lock(&ctx->call_controls_lock);
list_del(&control->list);
mutex_unlock(&ctx->call_controls_lock); if (!control->repeat) {
complete(&control->completion);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.57 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.