/* * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, * NAWUPF, and NACWU are defined for this namespace and should be * used by the host for this namespace instead of the AWUN, AWUPF, * and ACWU fields in the Identify Controller data structure. If * any of these fields are zero that means that the corresponding * field from the identify controller data structure should be used.
*/
id->nsfeat |= 1 << 1;
id->nawun = lpp0b;
id->nawupf = lpp0b;
id->nacwu = lpp0b;
/* * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and * NOWS are defined for this namespace and should be used by * the host for I/O optimization.
*/
id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev)); /* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
id->npdg = to0based(bdev_discard_granularity(bdev) /
bdev_logical_block_size(bdev)); /* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg; /* NOWS = Namespace Optimal Write Size */
id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
/* Set WZDS and DRB if device supports unmapped write zeroes */ if (bdev_write_zeroes_unmap_sectors(bdev))
id->dlfeat = (1 << 3) | 0x1;
}
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{ int ret;
/* * When buffered_io namespace attribute is enabled that means user want * this block device to be used as a file, so block device can take * an advantage of cache.
*/ if (ns->buffered_io) return -ENOTBLK;
ns->bdev_file = bdev_file_open_by_path(ns->device_path,
BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(ns->bdev_file)) {
ret = PTR_ERR(ns->bdev_file); if (ret != -ENOTBLK) {
pr_err("failed to open block device %s: (%d)\n",
ns->device_path, ret);
}
ns->bdev_file = NULL; return ret;
}
ns->bdev = file_bdev(ns->bdev_file);
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ns->pi_type = 0;
ns->metadata_size = 0; if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
nvmet_bdev_ns_enable_integrity(ns);
if (bdev_is_zoned(ns->bdev)) { if (!nvmet_bdev_zns_enable(ns)) {
nvmet_bdev_ns_disable(ns); return -EINVAL;
}
ns->csi = NVME_CSI_ZNS;
}
if (likely(blk_sts == BLK_STS_OK)) return status; /* * Right now there exists M : 1 mapping between block layer error * to the NVMe status code (see nvme_error_status()). For consistency, * when we reverse map we use most appropriate NVMe Status code from * the group of the NVMe status codes used in the nvme_error_status().
*/ switch (blk_sts) { case BLK_STS_NOSPC:
status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET:
status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP:
status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode); break; case BLK_STS_MEDIUM:
status = NVME_SC_ACCESS_DENIED;
req->error_loc = offsetof(struct nvme_rw_command, nsid); break; case BLK_STS_IOERR: default:
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
}
switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write:
req->error_slba = le64_to_cpu(req->cmd->rw.slba); break; case nvme_cmd_write_zeroes:
req->error_slba =
le64_to_cpu(req->cmd->write_zeroes.slba); break; default:
req->error_slba = 0;
} return status;
}
staticvoid nvmet_bio_done(struct bio *bio)
{ struct nvmet_req *req = bio->bi_private;
/*
 * NOTE(review): the following German disclaimer text is extraction residue
 * from a web page and is not part of this source file; retained here,
 * translated, pending removal:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */