/* * Skip all keepouts before the range being accessed. * Keepouts are sorted.
*/ while ((keepout < keepoutend) && (keepout->end <= offset))
keepout++;
while ((offset < end) && (keepout < keepoutend)) { /* Access the valid portion before the keepout. */ if (offset < keepout->start) {
kend = min(end, keepout->start);
ksize = kend - offset; if (write)
rc = __nvmem_reg_write(nvmem, offset, val, ksize); else
rc = __nvmem_reg_read(nvmem, offset, val, ksize);
if (rc) return rc;
offset += ksize;
val += ksize;
}
/* * Now we're aligned to the start of this keepout zone. Go * through it.
*/
kend = min(end, keepout->end);
ksize = kend - offset; if (!write)
memset(val, keepout->value, ksize);
val += ksize;
offset += ksize;
keepout++;
}
/* * If we ran out of keepouts but there's still stuff to do, send it * down directly
*/ if (offset < end) {
ksize = end - offset; if (write) return __nvmem_reg_write(nvmem, offset, val, ksize); else return __nvmem_reg_read(nvmem, offset, val, ksize);
}
/* * If the device has no .reg_write operation, do not allow * configuration as read-write. * If the device is set as read-only by configuration, it * can be forced into read-write mode using the 'force_ro' * attribute.
*/ if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write) return 0; /* Attribute not visible */
return attr->mode;
}
staticstruct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, constchar *id, int index);
/* * nvmem_setup_compat() - Create an additional binary entry in * drivers sys directory, to be backwards compatible with the older * drivers/misc/eeprom drivers.
*/ staticint nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, conststruct nvmem_config *config)
{ int rval;
staticint nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{ struct attribute_group group = {
.name = "cells",
}; struct nvmem_cell_entry *entry; conststruct bin_attribute **pattrs; struct bin_attribute *attrs; unsignedint ncells = 0, i = 0; int ret = 0;
mutex_lock(&nvmem_mutex);
if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) goto unlock_mutex;
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
pattrs = devm_kcalloc(&nvmem->dev, ncells + 1, sizeof(struct bin_attribute *), GFP_KERNEL); if (!pattrs) {
ret = -ENOMEM; goto unlock_mutex;
}
attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL); if (!attrs) {
ret = -ENOMEM; goto unlock_mutex;
}
/* Initialize each attribute to take the name and size of the cell */
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL, "%s@%x,%x", entry->name,
entry->offset,
entry->bit_offset);
attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
attrs[i].private = entry; if (!attrs[i].attr.name) {
ret = -ENOMEM; goto unlock_mutex;
}
pattrs[i] = &attrs[i];
i++;
}
group.bin_attrs = pattrs;
ret = device_add_group(&nvmem->dev, &group); if (ret) goto unlock_mutex;
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev, "cell %s unaligned to nvmem stride %d\n",
cell->name ?: "", nvmem->stride); return -EINVAL;
}
if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
dev_err(&nvmem->dev, "cell %s raw len %zd unaligned to nvmem word size %d\n",
cell->name ?: "", cell->raw_len,
nvmem->word_size);
err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell); if (err) return err;
cell->name = kstrdup_const(info->name, GFP_KERNEL); if (!cell->name) return -ENOMEM;
return 0;
}
/** * nvmem_add_one_cell() - Add one cell information to an nvmem device * * @nvmem: nvmem device to add cells to. * @info: nvmem cell info to add to the device * * Return: 0 or negative error code on failure.
*/ int nvmem_add_one_cell(struct nvmem_device *nvmem, conststruct nvmem_cell_info *info)
{ struct nvmem_cell_entry *cell; int rval;
cell = kzalloc(sizeof(*cell), GFP_KERNEL); if (!cell) return -ENOMEM;
/** * nvmem_add_cells() - Add cell information to an nvmem device * * @nvmem: nvmem device to add cells to. * @info: nvmem cell info to add to the device * @ncells: number of cells in info * * Return: 0 or negative error code on failure.
*/ staticint nvmem_add_cells(struct nvmem_device *nvmem, conststruct nvmem_cell_info *info, int ncells)
{ int i, rval;
for (i = 0; i < ncells; i++) {
rval = nvmem_add_one_cell(nvmem, &info[i]); if (rval) return rval;
}
return 0;
}
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
while (keepout < keepoutend) { /* Ensure keepouts are sorted and don't overlap. */ if (keepout->start < cur) {
dev_err(&nvmem->dev, "Keepout regions aren't sorted or overlap.\n");
return -ERANGE;
}
if (keepout->end < keepout->start) {
dev_err(&nvmem->dev, "Invalid keepout region.\n");
/* Unregister a layout previously registered with nvmem_layout_register(). */
void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
/** * nvmem_register() - Register a nvmem device for given nvmem_config. * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem * * @config: nvmem device configuration with which nvmem device is created. * * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device * on success.
*/
/** * devm_nvmem_register() - Register a managed nvmem device for given * nvmem_config. * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem * * @dev: Device that uses the nvmem device. * @config: nvmem device configuration with which nvmem device is created. * * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device * on success.
*/ struct nvmem_device *devm_nvmem_register(struct device *dev, conststruct nvmem_config *config)
{ struct nvmem_device *nvmem; int ret;
nvmem = nvmem_register(config); if (IS_ERR(nvmem)) return nvmem;
ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem); if (ret) return ERR_PTR(ret);
#if IS_ENABLED(CONFIG_OF) /** * of_nvmem_device_get() - Get nvmem device from a given id * * @np: Device tree node that uses the nvmem device. * @id: nvmem name from nvmem-names property. * * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device * on success.
*/ struct nvmem_device *of_nvmem_device_get(struct device_node *np, constchar *id)
{
struct device_node *nvmem_np; struct nvmem_device *nvmem; int index = 0;
if (id)
index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index); if (!nvmem_np) return ERR_PTR(-ENOENT);
/** * nvmem_device_get() - Get nvmem device from a given id * * @dev: Device that uses the nvmem device. * @dev_name: name of the requested nvmem device. * * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device * on success.
*/ struct nvmem_device *nvmem_device_get(struct device *dev, constchar *dev_name)
{ if (dev->of_node) { /* try dt first */ struct nvmem_device *nvmem;
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
				       int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
/** * devm_nvmem_device_put() - put already got nvmem device * * @dev: Device that uses the nvmem device. * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), * that needs to be released.
*/ void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{ int ret;
ret = devres_release(dev, devm_nvmem_device_release,
devm_nvmem_device_match, nvmem);
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
/** * devm_nvmem_device_get() - Get nvmem device of device from a given id * * @dev: Device that requests the nvmem device. * @id: name id for the requested nvmem device. * * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device * on success. The nvmem_device will be freed by the automatically once the * device is freed.
*/ struct nvmem_device *devm_nvmem_device_get(struct device *dev, constchar *id)
{ struct nvmem_device **ptr, *nvmem;
ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM);
list_for_each_entry(lookup, &nvmem_lookup_list, node) { if ((strcmp(lookup->dev_id, dev_id) == 0) &&
(strcmp(lookup->con_id, con_id) == 0)) { /* This is the right entry. */
nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
device_match_name); if (IS_ERR(nvmem)) { /* Provider may not be registered yet. */
cell = ERR_CAST(nvmem); break;
}
/*
 * Pin the layout driver's module (if a layout is attached) so it cannot
 * be unloaded while this nvmem device uses it.
 *
 * Return: 0 on success (or when no layout is attached), -EPROBE_DEFER
 * when the layout device is not bound to a driver yet or the module
 * reference cannot be taken.
 */
static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	/* No layout attached: nothing to pin. */
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}
/** * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id * * @np: Device tree node that uses the nvmem cell. * @id: nvmem cell name from nvmem-cell-names property, or NULL * for the cell at index 0 (the lone cell with no accompanying * nvmem-cell-names property). * * Return: Will be an ERR_PTR() on error or a valid pointer * to a struct nvmem_cell. The nvmem_cell will be freed by the * nvmem_cell_put().
*/ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, constchar *id)
{ struct device_node *cell_np, *nvmem_np; struct nvmem_device *nvmem; struct nvmem_cell_entry *cell_entry; struct nvmem_cell *cell; struct of_phandle_args cell_spec; int index = 0; int cell_index = 0; int ret;
/* if cell name exists, find index to the name */ if (id)
index = of_property_match_string(np, "nvmem-cell-names", id);
ret = of_parse_phandle_with_optional_args(np, "nvmem-cells", "#nvmem-cell-cells",
index, &cell_spec); if (ret) return ERR_PTR(-ENOENT);
if (cell_spec.args_count > 1) return ERR_PTR(-EINVAL);
cell_np = cell_spec.np; if (cell_spec.args_count)
cell_index = cell_spec.args[0];
nvmem_np = of_get_parent(cell_np); if (!nvmem_np) {
of_node_put(cell_np); return ERR_PTR(-EINVAL);
}
/* nvmem layouts produce cells within the nvmem-layout container */ if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
nvmem_np = of_get_next_parent(nvmem_np); if (!nvmem_np) {
of_node_put(cell_np); return ERR_PTR(-EINVAL);
}
}
/** * nvmem_cell_get() - Get nvmem cell of device from a given cell name * * @dev: Device that requests the nvmem cell. * @id: nvmem cell name to get (this corresponds with the name from the * nvmem-cell-names property for DT systems and with the con_id from * the lookup entry for non-DT systems). * * Return: Will be an ERR_PTR() on error or a valid pointer * to a struct nvmem_cell. The nvmem_cell will be freed by the * nvmem_cell_put().
*/ struct nvmem_cell *nvmem_cell_get(struct device *dev, constchar *id)
{ struct nvmem_cell *cell;
if (dev->of_node) { /* try dt first */
cell = of_nvmem_cell_get(dev->of_node, id); if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) return cell;
}
/* NULL cell id only allowed for device tree; invalid otherwise */ if (!id) return ERR_PTR(-EINVAL);
/** * devm_nvmem_cell_get() - Get nvmem cell of device from a given id * * @dev: Device that requests the nvmem cell. * @id: nvmem cell name id to get. * * Return: Will be an ERR_PTR() on error or a valid pointer * to a struct nvmem_cell. The nvmem_cell will be freed by the * automatically once the device is freed.
*/ struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, constchar *id)
{ struct nvmem_cell **ptr, *cell;
ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM);
if (bit_offset % BITS_PER_BYTE) { /* First shift */
*p = *b++ >> bit_offset;
/* setup rest of the bytes if any */ for (i = 1; i < cell->bytes; i++) { /* Get bits from next byte and shift them towards msb */
*p++ |= *b << (BITS_PER_BYTE - bit_offset);
*p = *b++ >> bit_offset;
}
} elseif (p != b) {
memmove(p, b, cell->bytes - bytes_offset);
p += cell->bytes - 1;
} else { /* point to the msb */
p += cell->bytes - 1;
}
/* result fits in less bytes */
extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); while (--extra >= 0)
*p-- = 0;
/* clear msb bits if any leftover in the last byte */ if (cell->nbits % BITS_PER_BYTE)
*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
/*
 * Apply the cell's optional read_post_process hook to @buf and report
 * the cell size through @len.
 *
 * NOTE(review): this fragment looks truncated by extraction — upstream
 * also performs the raw register read before post-processing; confirm
 * against the original source before relying on it.
 */
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len,
			     const char *id, int index)
{
	int rc;

	/* Let the provider-specific hook transform the raw cell data. */
	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}
/** * nvmem_cell_read() - Read a given nvmem cell * * @cell: nvmem cell to be read. * @len: pointer to length of cell which will be populated on successful read; * can be NULL. * * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The * buffer should be freed by the consumer with a kfree().
*/ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{ struct nvmem_cell_entry *entry = cell->entry; struct nvmem_device *nvmem = entry->nvmem;
u8 *buf; int rc;
if (!nvmem) return ERR_PTR(-EINVAL);
buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM);
/* setup the first byte with lsb bits from nvmem */
rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); if (rc) goto err;
*b++ |= GENMASK(bit_offset - 1, 0) & v;
/* setup rest of the byte if any */ for (i = 1; i < cell->bytes; i++) { /* Get last byte bits and shift them towards lsb */
pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
pbyte = *b;
p = b;
*b <<= bit_offset;
*b++ |= pbits;
}
}
/* if it's not end on byte boundary */ if ((nbits + bit_offset) % BITS_PER_BYTE) { /* setup the last byte with msb bits from nvmem */
rc = nvmem_reg_read(nvmem,
cell->offset + cell->bytes - 1, &v, 1); if (rc) goto err;
*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
if (!nvmem || nvmem->read_only ||
(cell->bit_offset == 0 && len != cell->bytes)) return -EINVAL;
/* * Any cells which have a read_post_process hook are read-only because * we cannot reverse the operation and it might affect other cells, * too.
*/ if (cell->read_post_process) return -EINVAL;
if (cell->bit_offset || cell->nbits) { if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes) return -EINVAL;
buf = nvmem_cell_prepare_write_buffer(cell, buf, len); if (IS_ERR(buf)) return PTR_ERR(buf);
}
/* free the tmp buffer */ if (cell->bit_offset || cell->nbits)
kfree(buf);
if (rc) return rc;
return len;
}
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
/* * If nbits is set then nvmem_cell_read() can significantly exaggerate * the length of the real data. Throw away the extra junk.
*/ if (nbits)
*len = DIV_ROUND_UP(nbits, 8);
if (*len > max_len) {
kfree(buf); return ERR_PTR(-ERANGE);
}
return buf;
}
/** * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number. * * @dev: Device that requests the nvmem cell. * @cell_id: Name of nvmem cell to read. * @val: pointer to output value. * * Return: 0 on success or negative errno.
*/ int nvmem_cell_read_variable_le_u32(struct device *dev, constchar *cell_id,
u32 *val)
{
size_t len; const u8 *buf; int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); if (IS_ERR(buf)) return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0; for (i = 0; i < len; i++)
*val |= buf[i] << (8 * i);
/** * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number. * * @dev: Device that requests the nvmem cell. * @cell_id: Name of nvmem cell to read. * @val: pointer to output value. * * Return: 0 on success or negative errno.
*/ int nvmem_cell_read_variable_le_u64(struct device *dev, constchar *cell_id,
u64 *val)
{
size_t len; const u8 *buf; int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); if (IS_ERR(buf)) return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0; for (i = 0; i < len; i++)
*val |= (uint64_t)buf[i] << (8 * i);
/** * nvmem_device_cell_read() - Read a given nvmem device and cell * * @nvmem: nvmem device to read from. * @info: nvmem cell info to be read. * @buf: buffer pointer which will be populated on successful read. * * Return: length of successful bytes read on success and negative * error code on error.
*/
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf)
{ struct nvmem_cell_entry cell; int rc;
ssize_t len;
if (!nvmem) return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); if (rc) return rc;
/** * nvmem_device_cell_write() - Write cell to a given nvmem device * * @nvmem: nvmem device to be written to. * @info: nvmem cell info to be written. * @buf: buffer to be written to cell. * * Return: length of bytes written or negative error code on failure.
*/ int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf)
{ struct nvmem_cell_entry cell; int rc;
if (!nvmem) return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); if (rc) return rc;
/** * nvmem_device_read() - Read from a given nvmem device * * @nvmem: nvmem device to read from. * @offset: offset in nvmem device. * @bytes: number of bytes to read. * @buf: buffer pointer which will be populated on successful read. * * Return: length of successful bytes read on success and negative * error code on error.
*/ int nvmem_device_read(struct nvmem_device *nvmem, unsignedint offset,
size_t bytes, void *buf)
{ int rc;
/** * nvmem_device_write() - Write cell to a given nvmem device * * @nvmem: nvmem device to be written to. * @offset: offset in nvmem device. * @bytes: number of bytes to write. * @buf: buffer to be written. * * Return: length of bytes written or negative error code on failure.
*/ int nvmem_device_write(struct nvmem_device *nvmem, unsignedint offset,
size_t bytes, void *buf)
{ int rc;
/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	size_t i;	/* size_t: avoids signed/unsigned compare with @nentries */

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	size_t i;	/* size_t: avoids signed/unsigned compare with @nentries */

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
/*
 * Module/boot-time initialization: register the nvmem bus, then the
 * layout bus; tear the former down again if the latter fails.
 */
static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	/* Restored: the original text fell off the end of the function. */
	return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.