/*
 * Check whether the bus behind @dev supports get_config_data commands.
 *
 * Returns 0 when ND_CMD_GET_CONFIG_DATA is available; otherwise -ENXIO
 * for a labeling-capable DIMM (label access expected but unsupported),
 * or -ENOTTY for a DIMM that never advertises label commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	bool has_get_config = nvdimm->cmd_mask &&
		test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask);

	if (has_get_config)
		return 0;

	return test_bit(NDD_LABELING, &nvdimm->flags) ? -ENXIO : -ENOTTY;
}
staticint validate_dimm(struct nvdimm_drvdata *ndd)
{ int rc;
/** * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area * @ndd: dimm to initialize * * Returns: %0 if the area is already valid, -errno on error
*/ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{ struct nd_cmd_get_config_size *cmd = &ndd->nsarea; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); struct nvdimm_bus_descriptor *nd_desc; int rc = validate_dimm(ndd); int cmd_rc = 0;
if (rc) return rc;
if (cmd->config_size) return 0; /* already valid */
/* * The state may be in the process of changing, userspace should * quiesce probing if it wants a static answer
*/
nvdimm_bus_lock(dev);
nvdimm_bus_unlock(dev); return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
? "active" : "idle");
} static DEVICE_ATTR_RO(state);
/* * For the test version we need to poll the "hardware" in order * to get the updated status for unlock testing.
*/ if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags)) return sprintf(buf, "overwrite\n"); if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags)) return sprintf(buf, "disabled\n"); if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) return sprintf(buf, "unlocked\n"); if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags)) return sprintf(buf, "locked\n"); return -ENOTTY;
}
/* * Require all userspace triggered security management to be * done while probing is idle and the DIMM is not in active use * in any region.
*/
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = nvdimm_security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
device_unlock(dev);
if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr) return a->mode; if (!nvdimm->sec.flags) return 0;
if (a == &dev_attr_security.attr) { /* Are there any state mutation ops (make writable)? */ if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
|| nvdimm->sec.ops->change_key
|| nvdimm->sec.ops->erase
|| nvdimm->sec.ops->overwrite) return a->mode; return 0444;
}
if (nvdimm->sec.ops->freeze) return a->mode; return 0;
}
/* We are shutting down. Make state frozen artificially. */
nvdimm_bus_lock(dev);
set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
dev_put = true;
nvdimm_bus_unlock(dev);
cancel_delayed_work_sync(&nvdimm->dwork); if (dev_put)
put_device(dev);
nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);
if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "bus lock required for capacity provision\n")) return 0; if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
% nd_region->ndr_mappings, "invalid region align %#lx mappings: %d\n",
nd_region->align, nd_region->ndr_mappings)) return 0; return nd_region->align / nd_region->ndr_mappings;
}
/** * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max * contiguous unallocated dpa range. * @nd_region: constrain available space check to this reference region * @nd_mapping: container of dpa-resource-root + labels * * Returns: %0 if there is an alignment error, otherwise the max * unallocated dpa range
*/
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping)
{ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nvdimm_bus *nvdimm_bus;
resource_size_t max = 0; struct resource *res; unsignedlong align;
/* if a dimm is disabled the available capacity is zero */ if (!ndd) return 0;
align = dpa_align(nd_region); if (!align) return 0;
if (strcmp(res->name, "pmem-reserve") != 0) continue; /* trim free space relative to current alignment setting */
start = ALIGN(res->start, align);
end = ALIGN_DOWN(res->end + 1, align) - 1; if (end < start) continue; if (end - start + 1 > max)
max = end - start + 1;
}
release_free_pmem(nvdimm_bus, nd_mapping); return max;
}
/** * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa * @nd_mapping: container of dpa-resource-root + labels * @nd_region: constrain available space check to this reference region * * Validate that a PMEM label, if present, aligns with the start of an * interleave set. * * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping)
{ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_start, map_end, busy = 0; struct resource *res; unsignedlong align;
if (!ndd) return 0;
align = dpa_align(nd_region); if (!align) return 0;
WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
res = __request_region(&ndd->dpa, start, n, name, 0); if (!res)
kfree(name); return res;
}
/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 *
 * Returns: sum of the dpa allocated to the label_id
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	/* accumulate the size of every dpa resource tagged with this label */
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	/* fixed: the function previously ended without returning a value */
	return allocated;
}
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{ int count = 0; /* Flush any possible dimm registration failures */
nd_synchronize();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.