if (!regmap_readable(map, reg) && !regmap_cached(map, reg)) returnfalse;
returntrue;
}
/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));

	ret = base;

	/*
	 * NOTE(review): the lookup below was missing from the truncated
	 * original, which also leaked map->cache_lock; reconstructed from
	 * the upstream regmap driver — confirm against your kernel version.
	 *
	 * Find the cache entry containing 'from' and translate the file
	 * offset into a register number.
	 */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		/* Remember the last block in case 'from' is past the end */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM;
regmap_calc_tot_len(map, buf, count);
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
for (i = start_reg; i >= 0 && i <= to;
i = regmap_next_readable_reg(map, i)) {
/* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ if (buf_pos + map->debugfs_tot_len > count) break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
map->debugfs_reg_len, i - from);
buf_pos += map->debugfs_reg_len + 2;
/* Format the value, write all X if we can't read */
ret = regmap_read(map, i, &val); if (ret == 0)
snprintf(buf + buf_pos, count - buf_pos, "%.*x", map->debugfs_val_len, val); else
memset(buf + buf_pos, 'X',
map->debugfs_val_len);
buf_pos += 2 * map->format.val_bytes;
buf[buf_pos++] = '\n';
}
p += map->debugfs_tot_len;
}
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT; goto out;
}
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 *
 * NOTE(review): the body below was entirely missing from the garbled
 * original (only the prologue survived); reconstructed from the
 * upstream regmap driver — confirm against your kernel version.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	/* Copy at most sizeof(buf) - 1 bytes, leaving room for the NUL */
	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/* Parse "<reg> <value>", both in hex */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return count;
}
#else
#define regmap_map_write_file NULL
#endif
/* While we are at it, build the register dump cache * now so the read() operation on the `registers' file * can benefit from using the cache. We do not care * about the file position information that is contained
* in the cache, just about the actual register blocks */
regmap_calc_tot_len(map, buf, count);
regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
/* Reset file pointer as the fixed-format of the `registers'
* file is not compatible with the `range' file */
p = 0;
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
c->base_reg, c->max_reg); if (p >= *ppos) { if (buf_pos + entry_len > count) break;
memcpy(buf + buf_pos, entry, entry_len);
buf_pos += entry_len;
}
p += entry_len;
}
mutex_unlock(&map->cache_lock);
kfree(entry);
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT; goto out_buf;
}
/*
 * seq_file show handler for the `access' debugfs file: one line per
 * register, with readable/writeable/volatile/precious flags.
 *
 * NOTE(review): the loop body after the `continue' and the function's
 * tail were missing from the garbled original; reconstructed from the
 * upstream regmap driver — confirm against your kernel version.
 */
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register and the access flags */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}
/* * Userspace can initiate reads from the hardware over debugfs. * Normally internal regmap structures and buffers are protected with * a mutex or a spinlock, but if the regmap owner decided to disable * all locking mechanisms, this is no longer the case. For safety: * don't create the debugfs entries if locking is disabled.
*/ if (map->debugfs_disable) {
dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n"); return;
}
/* If we don't have the debugfs root yet, postpone init */ if (!regmap_debugfs_root) { struct regmap_debugfs_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return;
node->map = map;
mutex_lock(®map_debugfs_early_lock);
list_add(&node->link, ®map_debugfs_early_list);
mutex_unlock(®map_debugfs_early_lock); return;
}
/* * This could interfere with driver operation. Therefore, don't provide * any real compile time configuration option for this feature. One will * have to modify the source code directly in order to use it.
*/ #undef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS #ifdef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
debugfs_create_bool("force_write_field", 0600, map->debugfs,
&map->force_write_field); #endif
next = rb_first(&map->range_tree); while (next) {
range_node = rb_entry(next, struct regmap_range_node, node);
if (range_node->name)
debugfs_create_file(range_node->name, 0400,
map->debugfs, range_node,
®map_range_fops);
next = rb_next(&range_node->node);
}
if (map->cache_ops && map->cache_ops->debugfs_init)
map->cache_ops->debugfs_init(map);
}
/*
 * Extraction artifact: German website-disclaimer boilerplate from the
 * page this source was scraped from, not part of the driver.  English
 * translation kept for the record:
 *
 * The information on this website has been compiled carefully to the
 * best of our knowledge.  However, neither the completeness, nor the
 * correctness, nor the quality of the information provided is
 * guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */