/*
 * NOTE(review): this fragment starts mid-function — the enclosing helper's
 * header and the declarations of n, o, size, i, config, param, buf, priv,
 * defaults, ret and error are not visible in this chunk.
 */
/* * This could be nicer and more efficient but we shouldn't * super care.
*/ for (i = 0; i < size; i++) while (n[i] == o[i])
get_random_bytes(&n[i], 1);
}
/* Build a reg_default table covering the test window, one entry per stride. */
if (config->num_reg_defaults) {
defaults = kunit_kcalloc(test,
config->num_reg_defaults, sizeof(struct reg_default),
GFP_KERNEL); if (!defaults) goto out_free;
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = param->from_reg + (i * config->reg_stride);
defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
}
}
ret = regmap_init_ram(priv->dev, config, *data); if (IS_ERR(ret)) goto out_free;
/* This calls regmap_exit() on failure, which frees buf and *data */
error = kunit_add_action_or_reset(test, regmap_exit_action, ret); if (error)
ret = ERR_PTR(error);
/*
 * NOTE(review): the fragments below are KUnit test-function bodies whose
 * headers (and local declarations of map, config, data, val, rval, sequence,
 * regs, i) were lost in extraction; each stray '}' closes one such function.
 */
/* Fragment: single write, then single read-back. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* If we write a value to a register we can read it back */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
/* Fragment: bulk write, then read back with single reads. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* * Data written via the bulk API can be read back with single * reads.
*/
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/* Fragment: single writes, then read back via the bulk API. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* Data written as single writes can be read via the bulk API */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/* Fragment: regmap_multi_reg_write(), then single read-backs. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* * Data written via the multi API can be read back with single * reads.
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
sequence[i].reg = i;
sequence[i].def = val[i];
sequence[i].delay_us = 0;
}
KUNIT_EXPECT_EQ(test, 0,
regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/* Fragment: single writes, then read back via regmap_multi_reg_read(). */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* Data written as single writes can be read via the multi API */ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
regs[i] = i;
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
}
KUNIT_EXPECT_EQ(test, 0,
regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/*
 * NOTE(review): the next fragment is truncated — the loop opened after the
 * "While in cache-only" comment is never closed before the following comment
 * block, so braces below do not balance. The missing middle (likely the
 * regmap_read_bypassed() assertions) must be restored from the original file.
 */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
get_random_bytes(&val, sizeof(val));
/* Write some test values */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
regcache_cache_only(map, true);
/* * While in cache-only regmap_read_bypassed() should return the register * value and leave the map in cache-only.
*/ for (i = 0; i < ARRAY_SIZE(val); i++) { /* Put inverted bits in rval to prove we really read the value */
rval = ~val[i];
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
KUNIT_EXPECT_EQ(test, val[i], rval);
/* * Change the underlying register values to prove it is returning * real values not cached values.
*/ for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] = ~val[i];
data->vals[param->from_reg + i] = val[i];
}
for (i = 0; i < ARRAY_SIZE(val); i++) {
rval = ~val[i];
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
KUNIT_EXPECT_NE(test, val[i], rval);
/*
 * NOTE(review): fragment below tests regmap_read_bypassed() with a volatile
 * register set; it is also truncated mid-loop at its end.
 */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
get_random_bytes(&val, sizeof(val));
/* Write some test values */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
regcache_cache_only(map, true);
/* * While in cache-only regmap_read_bypassed() should return the register * value and leave the map in cache-only.
*/ for (i = 0; i < ARRAY_SIZE(val); i++) { /* Register #5 is non-volatile so should read from cache */
KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
regmap_read(map, param->from_reg + i, &rval));
/* Put inverted bits in rval to prove we really read the value */
rval = ~val[i];
KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
KUNIT_EXPECT_EQ(test, val[i], rval);
KUNIT_EXPECT_TRUE(test, map->cache_only);
KUNIT_EXPECT_FALSE(test, map->cache_bypass);
}
/* * Change the underlying register values to prove it is returning * real values not cached values.
*/ for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] = ~val[i];
data->vals[param->from_reg + i] = val[i];
}
for (i = 0; i < ARRAY_SIZE(val); i++) { if (i == 5) continue;
/* Fragment: writes to a read-only register (#5) must fail and not reach HW. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
/* Change the value of all registers, readonly should fail */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
/* Did that match what we see on the device? */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}
/* Fragment: reads of a write-only register (#5) fail unless a flat cache hides it. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[i] = false;
/* * Try to read all the registers, the writeonly one should * fail if we aren't using the flat cache.
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++) { if (config.cache_type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
}
}
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
}
/* Fragment: register defaults are visible via bulk read. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Read back the expected default data */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/* The data should have been read from cache if there was one */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/* Fragment: defaults read back from the device at map creation when cached. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* We should have read the cache defaults back from the map */ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
/* Read back the expected default data */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/* The data should have been read from cache if there was one */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
/*
 * NOTE(review): tail of a register-patch test — the patch application that
 * precedes these assertions is not visible in this chunk.
 */
/* Only the patched registers are written */ for (i = 0; i < BLOCK_TEST_SIZE; i++) { switch (i) { case 2: case 5:
KUNIT_EXPECT_TRUE(test, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1); break; default:
KUNIT_EXPECT_FALSE(test, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]); break;
}
}
}
/* Fragment: register stride — only even addresses are accessible. */
/* * Allow one extra register so that the read/written arrays * are sized big enough to include an entry for the odd * address past the final reg_default register.
*/
config.max_register = BLOCK_TEST_SIZE;
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Only even addresses can be accessed, try both read and write */ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
data->read[i] = false;
data->written[i] = false;
if (i % 2) {
KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
}
/* Fragment: paged/windowed ranges — accesses go via selector + window regs. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
for (i = test_range.range_min; i < test_range.range_max; i++) {
data->read[i] = false;
data->written[i] = false;
}
/* Reset the page to a non-zero value to trigger a change */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
test_range.range_max));
/* Check we set the page and use the window for writes */
data->written[test_range.selector_reg] = false;
data->written[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
/* No physical access triggered in the virtual range */ for (i = test_range.range_min; i < test_range.range_max; i++) {
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
}
/*
 * Try to stress dynamic creation of cache data structures by writing
 * registers in ever-decreasing strides, then verifying reads.
 *
 * NOTE(review): the extraction fused "static void" and "unsigned int"
 * into single tokens (compile errors) — fixed here. The original setup
 * lines that compute buf_sz, allocate and fill vals, and create the map
 * via gen_regmap() were dropped from this chunk, so map/config/data/vals
 * are used below without visible initialization; restore those lines
 * from the original file before relying on this function.
 */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}
/* Fragment: cache bypass — bypassed write reaches HW but not the cache. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
/* Read the bypassed value */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
}
/* Fragment: regcache_sync() after regcache_mark_dirty() rewrites the block. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
memset(data->vals, 0, sizeof(val));
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}
/*
 * NOTE(review): fragment below starts mid-function — the gen_regmap() call
 * and local declarations (val_mask etc.) are not visible in this chunk.
 */
/* Put some data into the cache */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
/* Set cache-only and change the values */
regcache_cache_only(map, true); for (i = 0; i < ARRAY_SIZE(val); ++i)
val[i] = ~val[i] & val_mask;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
/* Exit cache-only and sync the cache without marking hardware registers dirty */
regcache_cache_only(map, false);
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
/* Fragment: with defaults, sync only writes registers that differ from default. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
/* Resync */
regcache_mark_dirty(map); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);
/* Rewrite registers back to their defaults */ for (i = 0; i < config.num_reg_defaults; ++i)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
config.reg_defaults[i].def));
/* * Resync after regcache_mark_dirty() should not write out registers * that are at default value
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
regcache_mark_dirty(map);
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
/*
 * NOTE(review): fragment below starts mid-function — the setup defining
 * orig_val and creating the map is missing, and its tail is truncated.
 */
/* Enter cache-only and change the value of one register */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
/* Exit cache-only and resync, should write out the changed register */
regcache_cache_only(map, false); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Was the register written out? */
KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);
/* Enter cache-only and write register back to its default value */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
/* Resync should write out the new value */
regcache_cache_only(map, false); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
/* Fragment: cache-only writes to read-only register #5 fail; sync skips it. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Read all registers to fill the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
regcache_cache_only(map, false);
/* Resync */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
/*
 * NOTE(review): tail of a cache-patch test — the patch registration and the
 * declarations of rval/val are not visible, and the fragment is truncated
 * before its closing brace.
 */
/* Sync the cache */
regcache_mark_dirty(map); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
/* Fragment: regcache_drop_region() forces dropped registers back to HW reads. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Ensure the data is read from the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
data->read[param->from_reg + i] = false;
}
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Drop some registers */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
param->from_reg + 5));
/* Reread and check only the dropped registers hit the device. */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}
/*
 * NOTE(review): fragment below is the tail of a non-contiguous-ranges drop
 * test — the setup creating the map, filling val[][] and defining num_ranges
 * and rangeidx is not visible in this chunk.
 */
/* Drop part of range 4 */
reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
/* Mark dirty and reset mock registers to 0 */
regcache_mark_dirty(map); for (i = 0; i < config.max_register + 1; i++) {
data->vals[i] = 0;
data->written[i] = false;
}
/* The registers that were dropped from range 4 should now remain at 0 */
val[4 / 2][3] = 0;
val[4 / 2][4] = 0;
val[4 / 2][5] = 0;
/* Sync and check that the expected register ranges were written */
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Check that odd ranges weren't written */ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
}
/* Check that even ranges (except 2 and 4) were written */ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { if ((rangeidx == 2) || (rangeidx == 4)) continue;
reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
/* Check that range 2 wasn't written */
reg = param->from_reg + (2 * BLOCK_TEST_SIZE); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
/* Check that range 4 was partially written */
reg = param->from_reg + (4 * BLOCK_TEST_SIZE); for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
/* Fragment: drop all registers, mark dirty — sync must write nothing. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Ensure the data is read from the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Change all values in cache from defaults */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
/* Drop all registers */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
/* Mark dirty and cache sync should not write anything. */
regcache_mark_dirty(map); for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); for (i = 0; i <= config.max_register; i++)
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
/* Fragment: drop all registers, sync without mark-dirty — nothing written. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Ensure the data is read from the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Change all values in cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
/* Drop all registers */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
/* * Sync cache without marking it dirty. All registers were dropped * so the cache should not have any entries to write out.
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); for (i = 0; i <= config.max_register; i++)
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
/* Fragment: same as above, for a config that has reg_defaults. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Ensure the data is read from the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Change all values in cache from defaults */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
/* Drop all registers */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
/* * Sync cache without marking it dirty. All registers were dropped * so the cache should not have any entries to write out.
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); for (i = 0; i <= config.max_register; i++)
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
/* Fragment: regcache_reg_cached() — registers become cached after first read. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Fill the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Now everything should be cached */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
/* Fragment: a cached zero value behaves like any other value and can be dropped. */
map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */ for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Write a zero value */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
/* Read that zero value back */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_EXPECT_EQ(test, 0, val);
/* From the cache? */
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
/* Try to throw it away */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
}
/*
 * Check that caching the window register works with sync.
 *
 * NOTE(review): the extraction fused "static void" and "unsigned int"
 * into single tokens (compile errors) — fixed here. The config setup
 * that normally precedes gen_regmap() (copying the template config,
 * cache type, ranges) appears to have been dropped from this chunk, so
 * config is passed uninitialized below; restore the dropped lines from
 * the original file. test_range is presumably a file-scope constant —
 * not visible in this chunk.
 */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	/* The last page should be selected after walking the whole range */
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}
/*
 * NOTE(review): fragment of a raw-regmap generator helper — the header,
 * the buf allocation and the declarations of defaults/param/priv/ret are
 * not visible in this chunk.
 */
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i; switch (param->val_endian) { case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]); break; case REGMAP_ENDIAN_BIG:
defaults[i].def = be16_to_cpu(buf[i]); break; default:
ret = ERR_PTR(-EINVAL); goto out_free;
}
}
/* * We use the defaults in the tests but they don't make sense * to the core if there's no cache.
*/ if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(priv->dev, config, *data); if (IS_ERR(ret)) goto out_free;
/* This calls regmap_exit() on failure, which frees buf and *data */
error = kunit_add_action_or_reset(test, regmap_exit_action, ret); if (error)
ret = ERR_PTR(error);
/* Fragment: raw map — defaults readable one register at a time. */
map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Check that we can read the defaults via the API */ for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
}
/*
 * NOTE(review): fragment below starts mid-function — the gen_raw_regmap()
 * call and the declarations of rval/val_len/def are not visible here.
 */
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len)); for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def; if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
} else {
KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
}
}
}
/* Fragment: raw map — single write then read-back. */
map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
get_random_bytes(&val, sizeof(val));
/* If we write a value to a register we can read it back */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
}
/*
 * NOTE(review): fragment below is truncated — it jumps from a raw-write
 * read-back loop into a noinc-register test body without the intervening
 * lines; braces do not balance within this span.
 */
map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
/* Do a raw write */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
/* We should read back the new values, and defaults for the rest */ for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
/* Put some data into the register following the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
/* Write some data to the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array, sizeof(val_array)));
/* We should read back the last value written */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
KUNIT_ASSERT_EQ(test, val_last, val);
/* Make sure we didn't touch the register after the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_ASSERT_EQ(test, val_test, val);
}
/*
 * NOTE(review): raw cache-sync fragment — starts mid-function and its
 * read-back loop is truncated before the endian-translation lines.
 */
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(u16) * 2));
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */ for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
/* * The value written via _write() was translated by the core, * translate the original copy for comparison purposes.
*/ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
val[2] = cpu_to_be16(val[2]); else
val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
/* Do the sync */
regcache_cache_only(map, false);
regcache_mark_dirty(map);
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}
/* Fragment: raw map with paged ranges — selector + window used for writes. */
map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return;
/* Reset the page to a non-zero value to trigger a change */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
test_range.range_max));
/* Check we set the page and use the window for writes */
data->written[test_range.selector_reg] = false;
data->written[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
/* No physical access triggered in the virtual range */ for (i = test_range.range_min; i < test_range.range_max; i++) {
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.