/* * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue * @q: request queue * @type: the hctx type index * @cpu: CPU
*/ staticinlinestruct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, enum hctx_type type, unsignedint cpu)
{ return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
staticinlineenum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{ enum hctx_type type = HCTX_TYPE_DEFAULT;
/* * The caller ensure that if REQ_POLLED, poll must be enabled.
*/ if (opf & REQ_POLLED)
type = HCTX_TYPE_POLL; elseif ((opf & REQ_OP_MASK) == REQ_OP_READ)
type = HCTX_TYPE_READ; return type;
}
/* * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED). * @ctx: software queue cpu ctx
*/ staticinlinestruct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf, struct blk_mq_ctx *ctx)
{ return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
/* * Default to double of smaller one between hw queue_depth and * 128, since we don't split into sync/async like the old code * did. Additionally, this is a per-hw queue depth.
*/ staticinlineunsignedint blk_mq_default_nr_requests( struct blk_mq_tag_set *set)
{ return 2 * min_t(unsignedint, set->queue_depth, BLKDEV_DEFAULT_RQ);
}
/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	unsigned int cpu = raw_smp_processor_id();

	return __blk_mq_get_ctx(q, cpu);
}
staticinlinebool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{ /* Fast path: hardware queue is not stopped most of the time. */ if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) returnfalse;
/* * This barrier is used to order adding of dispatch list before and * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier * in blk_mq_start_stopped_hw_queue() so that dispatch code could * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not * empty to avoid missing dispatching requests.
*/
smp_mb();
/* Free all requests on the list */ staticinlinevoid blk_mq_free_requests(struct list_head *list)
{ while (!list_empty(list)) { struct request *rq = list_entry_rq(list->next);
/* * For shared tag users, we track the number of currently active users * and attempt to provide a fair share of the tag depth for each of them.
*/ staticinlinebool hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{ unsignedint depth, users;
if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) returntrue;
/* * Don't try dividing an ant
*/ if (bt->sb.depth == 1) returntrue;
if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue;
if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) returntrue;
} else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) returntrue;
}
users = READ_ONCE(hctx->tags->active_queues); if (!users) returntrue;
/* * Allow at least some tags
*/
depth = max((bt->sb.depth + users - 1) / users, 4U); return __blk_mq_active_requests(hctx) < depth;
}
/*
 * Run the code block in @dispatch_ops with an rcu/srcu read lock held.
 * Blocking tag sets (BLK_MQ_F_BLOCKING) use SRCU and may sleep if
 * @check_sleep allows it; non-blocking sets use plain RCU.
 */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)
/*
 * NOTE(review): the following German disclaimer is stray website
 * boilerplate that was appended during extraction; it is not part of
 * this header. Preserved verbatim, commented out so the file compiles:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */