// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */
/*
 * per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj;          /* bare (not embedded) kobject for cache
					* directory */
	struct cache_index_dir *index; /* list of index objects */
};
/*
 * "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;           /* embedded kobject manages lifetime */
	struct cache_index_dir *next;  /* next index in parent directory */
	struct cache *cache;           /* cache object this index describes */
};
/* Template for determining which OF properties to query for a given
* cache type */ struct cache_type_info { constchar *name; constchar *size_prop;
/* Allow for both [di]-cache-line-size and * [di]-cache-block-size properties. According to the PowerPC * Processor binding, -line-size should be provided if it * differs from the cache block size (that which is operated * on by cache instructions), so we look for -line-size first.
* See cache_get_line_size(). */
/* These are used to index the cache_type_info array. */ #define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */ #define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */ #define CACHE_TYPE_INSTRUCTION 2 #define CACHE_TYPE_DATA 3
staticconststruct cache_type_info cache_type_info[] = {
{ /* Embedded systems that use cache-size, cache-block-size,
* etc. for the Unified (typically L2) cache. */
.name = "Unified",
.size_prop = "cache-size",
.line_size_props = { "cache-line-size", "cache-block-size", },
.nr_sets_prop = "cache-sets",
},
{ /* PowerPC Processor binding says the [di]-cache-* * must be equal on unified caches, so just use
* d-cache properties. */
.name = "Unified",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size", "d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
{
.name = "Instruction",
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size", "i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
},
{
.name = "Data",
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size", "d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
/*
 * Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	int group_id;                  /* id of the group of threads that
					* share this cache */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
/* not cache_line_size() because that's a macro in include/linux/cache.h */ staticint cache_get_line_size(conststruct cache *cache, unsignedint *ret)
/* NOTE(review): fragment — the function body after these declarations
 * (the loop over line_size_props and the closing brace) is missing from
 * this chunk, and the "staticint"/"conststruct"/"unsignedint" token
 * fusions look like extraction damage; restore from the original file
 * rather than editing here. */
{ const __be32 *line_size; int i, lim;
/* return the first cache on a local list matching node and thread-group id */ staticstruct cache *cache_lookup_by_node_group(conststruct device_node *node, int group_id)
/* NOTE(review): fragment — the list walk and the return statement are
 * missing from this chunk; the "staticstruct"/"conststruct" token
 * fusions look like extraction damage. Restore from the original file. */
{ struct cache *cache = NULL; struct cache *iter;
/* * Unified caches can have two different sets of tags. Most embedded * use cache-size, etc. for the unified cache size, but open firmware systems * use d-cache-size, etc. Check on initialization for which type we have, and * return the appropriate structure type. Assume it's embedded if it isn't * open firmware. If it's yet a 3rd type, then there will be missing entries * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need * to be extended further.
*/ staticint cache_is_unified_d(conststruct device_node *np)
{ return of_get_property(np,
cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
staticstruct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id, int level)
{
/* NOTE(review): fragment — only this debug trace survives here; the
 * cache allocation/initialisation and the function's closing brace are
 * missing from this chunk ("staticstruct" is an extraction artifact). */
pr_debug("creating L%d ucache for %pOFP\n", level, node);
/* Sanity checks: a cache chain must be instantiated starting from an
 * L1 cache whose OF node is a cpu node. Warn (once) otherwise. */
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}
/* * If sub-groups of threads in a core containing @cpu_id share the * L@level-cache (information obtained via "ibm,thread-groups" * device-tree property), then we identify the group by the first * thread-sibling in the group. We define this to be the group-id. * * In the absence of any thread-group information for L@level-cache, * this function returns -1.
*/ staticint get_group_id(unsignedint cpu_id, int level)
{ if (has_big_cores && level == 1) return cpumask_first(per_cpu(thread_group_l1_cache_map,
cpu_id)); elseif (thread_group_shares_l2 && level == 2) return cpumask_first(per_cpu(thread_group_l2_cache_map,
cpu_id)); elseif (thread_group_shares_l3 && level == 3) return cpumask_first(per_cpu(thread_group_l3_cache_map,
cpu_id)); return -1;
}
/* Attributes which should always be created -- the kobject/sysfs core * does this automatically via kobj_type->default_groups. This is the * minimum data required to uniquely identify a cache.
*/ staticstruct attribute *cache_index_default_attrs[] = {
&cache_type_attr.attr,
&cache_level_attr.attr,
&cache_shared_cpu_map_attr.attr,
&cache_shared_cpu_list_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache_index_default);
/* Attributes which should be created if the cache device node has the * right properties -- see cacheinfo_create_index_opt_attrs
*/ staticstruct kobj_attribute *cache_index_opt_attrs[] = {
&cache_size_attr,
&cache_line_size_attr,
&cache_nr_sets_attr,
&cache_assoc_attr,
};
/* NOTE(review): fragment — this is the tail of
 * cacheinfo_create_index_opt_attrs(); the function header, the buffer
 * allocation, and the declarations of 'i', 'dir', 'cache', 'cache_type'
 * and 'buf' are missing from this chunk. The surviving code is kept
 * verbatim below. */
/* We don't want to create an attribute that can't provide a * meaningful value. Check the return value of each optional * attribute's ->show method before registering the * attribute.
*/ for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) { struct kobj_attribute *attr;
ssize_t rc;
attr = cache_index_opt_attrs[i];
/* probe ->show first: skip attributes that would show nothing */
rc = attr->show(&dir->kobj, attr, buf); if (rc <= 0) {
pr_debug("not creating %s attribute for " "%pOFP(%s) (rc = %zd)\n",
attr->attr.name, cache->ofnode,
cache_type, rc); continue;
} if (sysfs_create_file(&dir->kobj, &attr->attr))
pr_debug("could not create %s attribute for %pOFP(%s)\n",
attr->attr.name, cache->ofnode, cache_type);
}
kfree(buf);
}
staticvoid cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{ struct cache_index_dir *index_dir; int rc;
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL); if (!index_dir) return;
/* NOTE(review): fragment — the remainder of this function (kobject
 * init/registration using 'rc', linking into cache_dir->index, and the
 * error path) is missing from this chunk; "staticvoid" is an
 * extraction artifact. Restore from the original file. */
/* NOTE(review): fragment — these lines are the tail of the CPU-offline
 * teardown path; the enclosing function header and the declarations of
 * 'cache_dir', 'cache' and 'cpu_id' are missing from this chunk. The
 * surviving code is kept verbatim below. */
/* Prevent userspace from seeing inconsistent state - remove
* the sysfs hierarchy first */
cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
/* careful, sysfs population may have failed */ if (cache_dir)
remove_cache_dir(cache_dir);
per_cpu(cache_dir_pcpu, cpu_id) = NULL;
/* clear the CPU's bit in its cache chain, possibly freeing
* cache objects */
cache = cache_lookup_by_cpu(cpu_id); if (cache)
cache_cpu_clear(cache, cpu_id);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.