/* * Per UEFI 2.7, the minimum size of the Label Storage Area is large * enough to hold 2 index blocks and 2 labels. The minimum index * block size is 256 bytes. The label size is 128 for namespaces * prior to version 1.2 and at minimum 256 for version 1.2 and later.
*/
nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
size = __sizeof_namespace_index(nslot) * 2; if (size <= space && nslot >= 2) return size / 2;
dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
ndd->nsarea.config_size, sizeof_namespace_label(ndd)); return 0;
}
staticint __nd_label_validate(struct nvdimm_drvdata *ndd)
{ /* * On media label format consists of two index blocks followed * by an array of labels. None of these structures are ever * updated in place. A sequence number tracks the current * active index and the next one to write, while labels are * written to free slots. * * +------------+ * | | * | nsindex0 | * | | * +------------+ * | | * | nsindex1 | * | | * +------------+ * | label0 | * +------------+ * | label1 | * +------------+ * | | * ....nslot... * | | * +------------+ * | labelN | * +------------+
*/ struct nd_namespace_index *nsindex[] = {
to_namespace_index(ndd, 0),
to_namespace_index(ndd, 1),
}; constint num_index = ARRAY_SIZE(nsindex); struct device *dev = ndd->dev; bool valid[2] = { 0 }; int i, num_valid = 0;
u32 seq;
for (i = 0; i < num_index; i++) {
u32 nslot;
u8 sig[NSINDEX_SIG_LEN];
u64 sum_save, sum, size; unsignedint version, labelsize;
switch (num_valid) { case 0: break; case 1: for (i = 0; i < num_index; i++) if (valid[i]) return i; /* can't have num_valid > 0 but valid[] = { false, false } */
WARN_ON(1); break; default: /* pick the best index... */
seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
__le32_to_cpu(nsindex[1]->seq)); if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK)) return 1; else return 0; break;
}
return -1;
}
/**
 * nd_label_validate() - probe label sizes and validate the index blocks
 * @ndd: dimm container for the label storage area
 *
 * In order to probe for and validate namespace index blocks we
 * need to know the size of the labels, and we can't trust the
 * size of the labels until we validate the index blocks.
 * Resolve this dependency loop by probing for known label
 * sizes, but default to v1.2 256-byte namespace labels if
 * discovery fails.
 *
 * Returns: 0 or 1 (the valid index block) on success, -1 if no
 * index block validates at any known label size.
 */
static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	/* nslabel_size is left at 256, the v1.2 default */
	return -1;
}
/**
 * nd_label_copy() - copy one namespace index block over another
 * @ndd: dimm container for the label storage area
 * @dst: destination index block, may be NULL
 * @src: source index block, may be NULL
 *
 * Index blocks are a fixed sizeof_namespace_index() bytes for a given
 * dimm, so a flat memcpy suffices.
 */
static void nd_label_copy(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
/** * preamble_index - common variable initialization for nd_label_* routines * @ndd: dimm container for the relevant label set * @idx: namespace_index index * @nsindex_out: on return set to the currently active namespace index * @free: on return set to the free label bitmap in the index * @nslot: on return set to the number of slots in the label space
*/ staticbool preamble_index(struct nvdimm_drvdata *ndd, int idx, struct nd_namespace_index **nsindex_out, unsignedlong **free, u32 *nslot)
{ struct nd_namespace_index *nsindex;
nsindex = to_namespace_index(ndd, idx); if (nsindex == NULL) returnfalse;
/* check that we are written where we expect to be written */ if (slot != nsl_get_slot(ndd, nd_label)) returnfalse;
valid = nsl_validate_checksum(ndd, nd_label); if (!valid)
dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot); return valid;
}
if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
ndd->nsarea.config_size == 0) {
dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
ndd->nsarea.max_xfer, ndd->nsarea.config_size); return -ENXIO;
}
/* * We need to determine the maximum index area as this is the section * we must read and validate before we can start processing labels. * * If the area is too small to contain the two indexes and 2 labels * then we abort. * * Start at a label size of 128 as this should result in the largest * possible namespace index size.
*/
ndd->nslabel_size = 128;
read_size = sizeof_namespace_index(ndd) * 2; if (!read_size) return -ENXIO;
/* Allocate config data */
config_size = ndd->nsarea.config_size;
ndd->data = kvzalloc(config_size, GFP_KERNEL); if (!ndd->data) return -ENOMEM;
/* * We want to guarantee as few reads as possible while conserving * memory. To do that we figure out how much unused space will be left * in the last read, divide that by the total number of reads it is * going to take given our maximum transfer size, and then reduce our * maximum transfer size based on that result.
*/
max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size); if (read_size < max_xfer) { /* trim waste */
max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
DIV_ROUND_UP(config_size, max_xfer); /* make certain we read indexes in exactly 1 read */ if (max_xfer < read_size)
max_xfer = read_size;
}
/* Make our initial read size a multiple of max_xfer size */
read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
config_size);
/* Read the index data */
rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size); if (rc) goto out_err;
/* Validate index data, if not valid assume all labels are invalid */
ndd->ns_current = nd_label_validate(ndd); if (ndd->ns_current < 0) return 0;
/* Record our index values */
ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
/* Copy "current" index on top of the "next" index */
nsindex = to_current_namespace_index(ndd);
nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
/* Determine starting offset for label data */
offset = __le64_to_cpu(nsindex->labeloff);
nslot = __le32_to_cpu(nsindex->nslot);
/* Loop through the free list pulling in any active labels */ for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
size_t label_read_size;
/* zero out the unused labels */ if (test_bit_le(i, nsindex->free)) {
memset(ndd->data + offset, 0, ndd->nslabel_size); continue;
}
/* if we already read past here then just continue */ if (offset + ndd->nslabel_size <= read_size) continue;
/* if we haven't read in a while reset our read_size offset */ if (read_size < offset)
read_size = offset;
/* determine how much more will be read after this next call. */
label_read_size = offset + ndd->nslabel_size - read_size;
label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
max_xfer;
/* truncate last read if needed */ if (read_size + label_read_size > config_size)
label_read_size = config_size - read_size;
/* Read the label data */
rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
read_size, label_read_size); if (rc) goto out_err;
/* push read_size to next read offset */
read_size += label_read_size;
}
memset(nsindex->free, 0xff, nfree / 8); for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
clear_bit_le(nslot + i, free);
}
checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
nsindex->checksum = __cpu_to_le64(checksum);
rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
nsindex, sizeof_namespace_index(ndd)); if (rc < 0) return rc;
if (flags & ND_NSINDEX_INIT) return 0;
/* copy the index we just wrote to the new 'next' */
WARN_ON(index != ndd->ns_next);
nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
WARN_ON(ndd->ns_current == ndd->ns_next);
/* allocate and write the label to the staging (next) index */
slot = nd_label_alloc_slot(ndd); if (slot == UINT_MAX) return -ENXIO;
dev_dbg(ndd->dev, "allocated: %d\n", slot);
/* * We need to preserve all the old labels for the mapping so * they can be garbage collected after writing the new labels.
*/ for (i = old_num_labels; i < num_labels; i++) {
label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL); if (!label_ent) return -ENOMEM;
mutex_lock(&nd_mapping->lock);
list_add_tail(&label_ent->list, &nd_mapping->labels);
mutex_unlock(&nd_mapping->lock);
}
nsindex = to_namespace_index(ndd, 0);
memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
/**
 * nd_pmem_namespace_label_update() - (re)write labels for a pmem namespace
 * @nd_region: region hosting the namespace
 * @nspm: pmem namespace being resized, created, or deleted
 * @size: new namespace size; 0 requests deletion of its labels
 *
 * Labels are first written with NSLABEL_FLAG_UPDATING set on every
 * mapping; only after all mappings are written is the flag cleared in
 * a second pass, per UEFI 2.7 expectations.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			/* zero size == delete this namespace's labels */
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		/* count the pmem allocations backing this mapping */
		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.