/*
 * Default upper limit for the software max_sectors limit used for regular
 * I/Os. This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)
/* * The code that increments the pm_only counter must ensure that the * counter is globally visible before the queue is unfrozen.
*/ if (blk_queue_pm_only(q) &&
(!pm || queue_rpm_status(q) == RPM_SUSPENDED)) goto fail_put;
/*
 * Wait for an I/O completion, periodically waking up so that a very long
 * I/O does not trip the hung task watchdog.
 */
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (!timeout) {
		wait_for_completion_io(done);
		return;
	}

	/* Re-arm the wait until the completion actually fires. */
	while (!wait_for_completion_io_timeout(done, timeout))
		;
}
/* * Merging adjacent physical pages may not work correctly under KMSAN * if their metadata pages aren't adjacent. Just disable merging.
*/ if (IS_ENABLED(CONFIG_KMSAN)) returnfalse;
if (addr1 + vec1->bv_len != addr2) returnfalse; if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page)) returnfalse; if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask)) returnfalse; returntrue;
}
/* * Check if adding a bio_vec after bprv with offset would create a gap in * the SG list. Most drivers don't care about this, but some do.
*/ staticinlinebool bvec_gap_to_prev(conststruct queue_limits *lim, struct bio_vec *bprv, unsignedint offset)
{ if (!lim->virt_boundary_mask) returnfalse; return __bvec_gap_to_prev(lim, bprv, offset);
}
staticinlinebool rq_mergeable(struct request *rq)
{ if (blk_rq_is_passthrough(rq)) returnfalse;
if (req_op(rq) == REQ_OP_FLUSH) returnfalse;
if (req_op(rq) == REQ_OP_WRITE_ZEROES) returnfalse;
if (req_op(rq) == REQ_OP_ZONE_APPEND) returnfalse;
if (rq->cmd_flags & REQ_NOMERGE_FLAGS) returnfalse; if (rq->rq_flags & RQF_NOMERGE_FLAGS) returnfalse;
returntrue;
}
/* * There are two different ways to handle DISCARD merges: * 1) If max_discard_segments > 1, the driver treats every bio as a range and * send the bios to controller together. The ranges don't need to be * contiguous. * 2) Otherwise, the request will be normal read/write requests. The ranges * need to be contiguous.
*/ staticinlinebool blk_discard_mergable(struct request *req)
{ if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1) returntrue; returnfalse;
}
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
if (unlikely(op == REQ_OP_WRITE_ZEROES)) return q->limits.max_write_zeroes_sectors;
if (rq->cmd_flags & REQ_ATOMIC) return q->limits.atomic_write_max_sectors;
return q->limits.max_sectors;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY void blk_flush_integrity(void); void bio_integrity_free(struct bio *bio);
/* * Integrity payloads can either be owned by the submitter, in which case * bio_uninit will free them, or owned and generated by the block layer, * in which case we'll verify them here (for reads) and free them before * the bio is handed back to the submitted.
*/ bool __bio_integrity_endio(struct bio *bio); staticinlinebool bio_integrity_endio(struct bio *bio)
{ struct bio_integrity_payload *bip = bio_integrity(bio);
if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY)) return __bio_integrity_endio(bio); returntrue;
}
struct bio *bio_split_discard(struct bio *bio, conststruct queue_limits *lim, unsigned *nsegs); struct bio *bio_split_write_zeroes(struct bio *bio, conststruct queue_limits *lim, unsigned *nsegs); struct bio *bio_split_rw(struct bio *bio, conststruct queue_limits *lim, unsigned *nr_segs); struct bio *bio_split_zone_append(struct bio *bio, conststruct queue_limits *lim, unsigned *nr_segs);
/* * All drivers must accept single-segments bios that are smaller than PAGE_SIZE. * * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is * always valid if a bio has data. The check might lead to occasional false * positives when bios are cloned, but compared to the performance impact of * cloned bios themselves the loop below doesn't matter anyway.
*/ staticinlinebool bio_may_need_split(struct bio *bio, conststruct queue_limits *lim)
{ if (lim->chunk_sectors) returntrue; if (bio->bi_vcnt != 1) returntrue; return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
lim->min_segment_size;
}
/** * __bio_split_to_limits - split a bio to fit the queue limits * @bio: bio to be split * @lim: queue limits to split based on * @nr_segs: returns the number of segments in the returned bio * * Check if @bio needs splitting based on the queue limits, and if so split off * a bio fitting the limits from the beginning of @bio and return it. @bio is * shortened to the remainder and re-submitted. * * The split bio is allocated from @q->bio_split, which is provided by the * block layer.
*/ staticinlinestruct bio *__bio_split_to_limits(struct bio *bio, conststruct queue_limits *lim, unsignedint *nr_segs)
{ switch (bio_op(bio)) { case REQ_OP_READ: case REQ_OP_WRITE: if (bio_may_need_split(bio, lim)) return bio_split_rw(bio, lim, nr_segs);
*nr_segs = 1; return bio; case REQ_OP_ZONE_APPEND: return bio_split_zone_append(bio, lim, nr_segs); case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: return bio_split_discard(bio, lim, nr_segs); case REQ_OP_WRITE_ZEROES: return bio_split_write_zeroes(bio, lim, nr_segs); default: /* other operations can't be split */
*nr_segs = 0; return bio;
}
}
/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim:   Request queue limits.
 * @paddr: address of the range to add
 * @len:   maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	unsigned long to_boundary = lim->seg_boundary_mask -
			(lim->seg_boundary_mask & paddr);
	unsigned long seg_max = (unsigned long)lim->max_segment_size - 1;

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len, min(to_boundary, seg_max) + 1);
}
int ll_back_merge_fn(struct request *req, struct bio *bio, unsignedint nr_segs); bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next); unsignedint blk_recalc_rq_segments(struct request *rq); bool blk_rq_merge_ok(struct request *rq, struct bio *bio); enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
int blk_set_default_limits(struct queue_limits *lim); void blk_apply_bdi_limits(struct backing_dev_info *bdi, struct queue_limits *lim); int blk_dev_init(void);
#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);

/* True if this bio is tracked by a zone write plug. */
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}

/* Zone append issued natively or emulated with regular writes. */
static inline bool blk_req_bio_is_zone_append(struct request *rq,
					      struct bio *bio)
{
	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return true;
	return bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}

void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);

static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, struct lock_class_key *lkclass);
/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	/* Only pinned pages need an explicit release here. */
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);
int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);
/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
/* * 0 could very well be a valid time, but rather than flag "this is * a valid timestamp" separately, just accept that we'll do an extra * ktime_get_ns() if we just happen to get 0 as the current time.
*/ if (!plug->cur_ktime) {
plug->cur_ktime = ktime_get_ns();
current->flags |= PF_BLOCK_TS;
} return plug->cur_ktime;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.