/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count, including the aux one, to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
                                    struct bfq_stat *from)
{
        atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}
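/*
 * A minimal usage sketch (the helper below is hypothetical, not part of
 * this file): the "recursive" value of a counter is its own percpu sum
 * plus whatever departed children have folded into aux_cnt via
 * bfq_stat_add_aux().
 */
static inline uint64_t bfq_stat_read_recursive(struct bfq_stat *stat)
{
        return bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
}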
/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
                            int off)
{
        return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}
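/*
 * For context, a typical consumer of this prfill callback, modeled on the
 * kernel's bfqg_print_stat() (built under CONFIG_BFQ_CGROUP_DEBUG):
 * blkcg_print_blkgs() walks every blkg of the cgroup and invokes the
 * callback on each one, passing the field offset stashed in the cftype's
 * private data.
 */
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}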
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = blk_time_get_ns();
        if (now > stats->start_group_wait_time)
                bfq_stat_add(&stats->group_wait_time,
                             now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}
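/*
 * Sketch of the flag helpers used by the stats functions in this file.
 * In the kernel they are generated per flag by the BFQG_FLAG_FNS()
 * macro; the hand-expanded "waiting" variants below illustrate the
 * assumed expansion.
 */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

static inline void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
{
        stats->flags |= (1 << BFQG_stats_waiting);
}

static inline void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
{
        stats->flags &= ~(1 << BFQG_stats_waiting);
}

static inline int bfqg_stats_waiting(struct bfqg_stats *stats)
{
        return stats->flags & (1 << BFQG_stats_waiting);
}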
/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;

        stats->start_group_wait_time = blk_time_get_ns();
        bfqg_stats_mark_waiting(stats);
}
/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_empty(stats))
                return;

        now = blk_time_get_ns();
        if (now > stats->start_empty_time)
                bfq_stat_add(&stats->empty_time,
                             now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}
static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * The group is already marked empty. This can happen if bfqq got a
         * new request in the parent group and moved to this group while
         * being added to the service tree. Just ignore the event and move
         * on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = blk_time_get_ns();
        bfqg_stats_mark_empty(stats);
}
/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */
/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */
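/*
 * For illustration, the parent lookup this section refers to, modeled on
 * the kernel's bfqg_parent(): the parent bfq_group is reached through the
 * parent of the blkg associated with @bfqg.
 */
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}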
/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}
static void bfq_init_entity(struct bfq_entity *entity,
                            struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}
static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;

        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->num_queues_with_pending_reqs = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}
static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
        struct bfq_group *parent;
        struct bfq_entity *entity;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                struct bfq_group *curr_bfqg = container_of(entity,
                                                struct bfq_group, entity);
                if (curr_bfqg != bfqd->root_group) {
                        parent = bfqg_parent(curr_bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(curr_bfqg, parent);
                }
        }
}
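/*
 * For reference, a sketch of the traversal helper used above, modeled on
 * the for_each_entity() macro in bfq-wf2q.c: it visits an entity and then
 * each of its ancestors up to the root group.
 */
#define for_each_entity(entity) \
        for (; entity ; entity = entity->parent)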
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;
        struct bfq_group *old_parent = bfqq_group(bfqq);
        bool has_pending_reqs = false;

        /*
         * There is no point in moving bfqq to the same group, which can
         * happen when the root group is offlined.
         */
        if (old_parent == bfqg)
                return;

        /*
         * oom_bfqq is not allowed to move; oom_bfqq holds a ref to the
         * root_group until elevator exit.
         */
        if (bfqq == &bfqd->oom_bfqq)
                return;

        /*
         * Get an extra reference to prevent bfqq from being freed on the
         * next possible expire or deactivate.
         */
        bfqq->ref++;

        if (entity->in_groups_with_pending_reqs) {
                has_pending_reqs = true;
                bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
        }

        /*
         * If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to the current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st_or_in_serv)
                bfq_put_idle_entity(bfq_entity_service_tree(entity),
                                    entity);
        bfqg_and_blkg_put(old_parent);

        bfq_reassign_last_bfqq(bfqq, NULL);
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (has_pending_reqs)
                bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

        if (bfq_bfqq_busy(bfqq)) {
                if (unlikely(!bfqd->nonrot_with_queueing))
                        bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
                bfq_schedule_dispatch(bfqd);
        /* release the extra ref taken above; bfqq may be freed now */
        bfq_put_queue(bfqq);
}
static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
                               struct bfq_queue *sync_bfqq,
                               struct bfq_io_cq *bic,
                               struct bfq_group *bfqg,
                               unsigned int act_idx)
{
        struct bfq_queue *bfqq;

        if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
                /* We are the only user of this bfqq, just move it */
                if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
                return;
        }

        /*
         * The queue was merged to a different queue. Check
         * that the merge chain still belongs to the same
         * cgroup.
         */
        for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
                if (bfqq->entity.sched_data != &bfqg->sched_data)
                        break;
        if (bfqq) {
                /*
                 * Some queue changed cgroup so the merge is not valid
                 * anymore. We cannot easily just cancel the merge (by
                 * clearing new_bfqq) as there may be other processes
                 * using this queue and holding refs to all queues
                 * below sync_bfqq->new_bfqq. Similarly if the merge
                 * already happened, we need to detach from bfqq now
                 * so that we cannot merge bio to a request from the
                 * old cgroup.
                 */
                bfq_put_cooperator(sync_bfqq);
                bic_set_bfqq(bic, NULL, true, act_idx);
                bfq_release_process_ref(bfqd, sync_bfqq);
        }
}
/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; this makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                    struct bfq_io_cq *bic,
                                    struct bfq_group *bfqg)
{
        unsigned int act_idx;

        for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
                struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
                struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

                if (async_bfqq &&
                    async_bfqq->entity.sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, false, act_idx);
                        bfq_release_process_ref(bfqd, async_bfqq);
                }

                if (sync_bfqq)
                        bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
        }
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
        uint64_t serial_nr;

        serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                return;

        /*
         * New cgroup for this process. Make sure it is linked to bfq internal
         * cgroup hierarchy.
         */
        bfq_link_bfqg(bfqd, bfqg);
        __bfq_bic_change_cgroup(bfqd, bic, bfqg);
        bic->blkcg_serial_nr = serial_nr;
}
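/*
 * Note on the serial_nr check above: each cgroup subsystem state gets a
 * monotonically increasing css.serial_nr at creation, so caching the
 * last-seen value in bic->blkcg_serial_nr is a cheap way to detect that
 * the process has since been moved to a different blkcg.
 */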
/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}
/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *          of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity,
                                     int ioprio_class)
{
        struct bfq_queue *bfqq;
        struct bfq_entity *child_entity = entity;

        while (child_entity->my_sched_data) { /* leaf not reached yet */
                struct bfq_sched_data *child_sd = child_entity->my_sched_data;
                struct bfq_service_tree *child_st = child_sd->service_tree +
                                                    ioprio_class;
                struct rb_root *child_active = &child_st->active;

                child_entity = bfq_entity_of(rb_first(child_active));

                if (!child_entity)
                        child_entity = child_sd->in_service_entity;
        }

        bfqq = bfq_entity_to_bfqq(child_entity);
        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}
/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
                                       struct bfq_group *bfqg,
                                       struct bfq_service_tree *st,
                                       int ioprio_class)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity;

        while ((entity = bfq_entity_of(rb_first(active))))
                bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity,
                                         ioprio_class);
}
/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_queues(bfqd, bfqg, st, i);

                /*
                 * The idle tree may still contain bfq_queues
                 * belonging to exited tasks because they never
                 * migrated to a different cgroup from the one being
                 * destroyed now. In addition, even
                 * bfq_reparent_active_queues() may happen to add some
                 * entities to the idle tree. It happens if, in some
                 * of the calls to bfq_bfqq_move() performed by
                 * bfq_reparent_active_queues(), the queue to move is
                 * empty and gets expired.
                 */
                bfq_flush_idle_tree(st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}
static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight,
                                 u64 dev_weight)
{
        weight = dev_weight ?: weight;

        bfqg->entity.dev_weight = dev_weight;
        /*
         * Setting the prio_changed flag of the entity
         * to 1 with new_weight == weight would re-set
         * the value of the weight to its ioprio mapping.
         * Set the flag only if necessary.
         */
        if ((unsigned short)weight != bfqg->entity.new_weight) {
                bfqg->entity.new_weight = (unsigned short)weight;
                /*
                 * Make sure that the above new value has been
                 * stored in bfqg->entity.new_weight before
                 * setting the prio_changed flag. In fact,
                 * this flag may be read asynchronously (in
                 * critical sections protected by a different
                 * lock than that held here), and finding this
                 * flag set may cause the execution of the code
                 * for updating parameters whose value may
                 * depend also on bfqg->entity.new_weight (in
                 * __bfq_entity_update_weight_prio).
                 * This barrier makes sure that the new value
                 * of bfqg->entity.new_weight is correctly
                 * seen in that code.
                 */
                smp_wmb();
                bfqg->entity.prio_changed = 1;
        }
}
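/*
 * A minimal sketch of the matching reader (the function name below is
 * hypothetical; in the kernel the reader is
 * __bfq_entity_update_weight_prio() in bfq-wf2q.c): the smp_rmb() pairs
 * with the smp_wmb() above, so that once prio_changed is observed set,
 * the value stored in new_weight before the barrier is visible too.
 */
static void example_apply_new_weight(struct bfq_entity *entity)
{
        if (entity->prio_changed) {
                /* pairs with the smp_wmb() in bfq_group_set_weight() */
                smp_rmb();
                entity->weight = entity->new_weight;
                entity->prio_changed = 0;
        }
}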
static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes,
                                        loff_t off)
{
        int ret;
        struct blkg_conf_ctx ctx;
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct bfq_group *bfqg;
        u64 v;

        blkg_conf_init(&ctx, buf);

        ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx);
        if (ret)
                goto out;

        if (sscanf(ctx.body, "%llu", &v) == 1) {
                /* require "default" on dfl */
                ret = -ERANGE;
                if (!v)
                        goto out;
        } else if (!strcmp(strim(ctx.body), "default")) {
                v = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        bfqg = blkg_to_bfqg(ctx.blkg);

        ret = -ERANGE;
        if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
                bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
                ret = 0;
        }
out:
        blkg_conf_exit(&ctx);
        return ret ?: nbytes;
}
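/*
 * Illustrative usage (device numbers are examples): on the cgroup v2
 * hierarchy, writing "8:0 300" to io.bfq.weight sets a per-device weight
 * of 300 for the group on device 8:0, while writing "8:0 default"
 * (v == 0 above) clears the per-device weight so that the group-wide
 * weight applies again; a literal 0 is rejected with -ERANGE.
 */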