// SPDX-License-Identifier: GPL-2.0 /* * NVMe ZNS-ZBD command implementation. * Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/nvme.h> #include <linux/blkdev.h> #include"nvmet.h"
/* * We set the Memory Page Size Minimum (MPSMIN) for target controller to 0 * which gets added by 12 in the nvme_enable_ctrl() which results in 2^12 = 4k * as page_shift value. When calculating the ZASL use shift by 12.
*/ #define NVMET_MPSMIN_SHIFT 12
staticinline u8 nvmet_zasl(unsignedint zone_append_sects)
{ /* * Zone Append Size Limit (zasl) is expressed as a power of 2 value * with the minimum memory page size (i.e. 12) as unit.
*/ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}
staticint validate_conv_zones_cb(struct blk_zone *z, unsignedint i, void *data)
{ if (z->type == BLK_ZONE_TYPE_CONVENTIONAL) return -EOPNOTSUPP; return 0;
}
/*
 * NOTE(review): fragment — these lines are the interior of a zoned
 * namespace enable helper (presumably nvmet_bdev_zns_enable(); confirm
 * against upstream). The function signature and the declarations of
 * ns, zasl, bd_disk and ret were lost in extraction, and whitespace is
 * mangled (e.g. "returnfalse"). Code is kept byte-identical; restore
 * the complete function from the original source before building.
 */
/*
 * If the subsystem already advertises a ZASL, only accept namespaces
 * whose limit is at least that large, so the advertised value stays
 * valid for every namespace in the subsystem.
 */
if (ns->subsys->zasl) { if (ns->subsys->zasl > zasl) returnfalse;
}
/* Record this namespace's (possibly smaller) limit on the subsystem. */
ns->subsys->zasl = zasl;
/* * Generic zoned block devices may have a smaller last zone which is * not supported by ZNS. Exclude zoned drives that have such smaller * last zone.
 */ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1)) returnfalse; /* * ZNS does not define a conventional zone type. Use report zones * to detect if the device has conventional zones and reject it if * it does.
 */
/* Walk all zones; validate_conv_zones_cb aborts on a conventional zone. */
ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL); if (ret < 0) returnfalse;
/*
 * NOTE(review): fragment — interior of a Zone Management Receive
 * validation helper; the enclosing signature was lost in extraction.
 * Code kept byte-identical.
 */
/* Partial Report bit: only the values 0 and 1 are defined. */
switch (req->cmd->zmr.pr) { case 0: case 1: break; default:
/* Point the error location at the offending field and fail the command. */
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/* Zone Receive Action Specific Field: accept only the defined selectors. */
switch (req->cmd->zmr.zrasf) { case NVME_ZRASF_ZONE_REPORT_ALL: case NVME_ZRASF_ZONE_STATE_EMPTY: case NVME_ZRASF_ZONE_STATE_IMP_OPEN: case NVME_ZRASF_ZONE_STATE_EXP_OPEN: case NVME_ZRASF_ZONE_STATE_CLOSED: case NVME_ZRASF_ZONE_STATE_FULL: case NVME_ZRASF_ZONE_STATE_READONLY: case NVME_ZRASF_ZONE_STATE_OFFLINE: break; default:
req->error_loc =
offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/*
 * NOTE(review): fragment — interior of the Zone Management Receive
 * execution path; the enclosing signature and the declarations of
 * status, ret, req_slba_nr_zones, start_sect, rz_data and nr_zones were
 * lost in extraction. Code kept byte-identical.
 */
/* Reject malformed commands before touching the block device. */
status = nvmet_bdev_validate_zone_mgmt_recv(req); if (status) goto out;
/* Nothing to report for the requested SLBA: succeed with no descriptors. */
if (!req_slba_nr_zones) {
status = NVME_SC_SUCCESS; goto out;
}
/* Collect zone descriptors starting at the requested slba. */
ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
nvmet_bdev_report_zone_cb, &rz_data); if (ret < 0) {
status = NVME_SC_INTERNAL; goto out;
}
/* * When partial bit is set nr_zones must indicate the number of zone * descriptors actually transferred.
 */ if (req->cmd->zmr.pr)
rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);
/* Report header: zone count is a 64-bit little-endian field. */
nr_zones = cpu_to_le64(rz_data.nr_zones);
status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
/*
 * NOTE(review): fragment — interior of the "zone management send to all
 * zones" emulation path; the enclosing signature and the declarations of
 * d, ret, nr_zones, bdev, sector, bio and req were lost in extraction,
 * and whitespace is mangled. Code kept byte-identical.
 */
/*
 * One bit per zone; GFP_NOIO because this runs on the I/O path, and
 * allocate on the disk's home NUMA node.
 */
d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
GFP_NOIO, bdev->bd_disk->node_id); if (!d.zbitmap) {
ret = -ENOMEM; goto out;
}
/* Scan and build bitmap of the eligible zones */
/*
 * blkdev_report_zones() returns the number of zones examined; anything
 * other than nr_zones means the scan was cut short.
 */
ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d); if (ret != nr_zones) { if (ret > 0)
ret = -EIO; goto out;
} else { /* We scanned all the zones */
ret = 0;
}
/* Chain one zone-management bio per eligible zone and submit as a batch. */
while (sector < bdev_nr_sectors(bdev)) { if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
bio = blk_next_bio(bio, bdev, 0,
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
bio->bi_iter.bi_sector = sector; /* This may take a while, so be nice to others */
cond_resched();
}
sector += bdev_zone_sectors(bdev);
}
/* bio is non-NULL only if at least one zone matched; wait for the chain. */
if (bio) {
ret = submit_bio_wait(bio);
bio_put(bio);
}
/*
 * NOTE(review): the text below is extraction residue from a web page's
 * disclaimer and is not part of this source file; it should be removed
 * when the file is restored from upstream. English translation of the
 * original German: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * nor correctness, nor quality of the information provided is
 * guaranteed. Note: the colored syntax display and the measurement are
 * still experimental."
 */