Diffstat (limited to 'drivers/base/cacheinfo.c')
-rw-r--r--  drivers/base/cacheinfo.c | 177
1 file changed, 145 insertions(+), 32 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 950b22cdb5f7..f3903d002819 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -229,8 +229,71 @@ static int cache_setup_of_node(unsigned int cpu)
	return 0;
}
+
+static int of_count_cache_leaves(struct device_node *np)
+{
+	unsigned int leaves = 0;
+
+	if (of_property_read_bool(np, "cache-size"))
+		++leaves;
+	if (of_property_read_bool(np, "i-cache-size"))
+		++leaves;
+	if (of_property_read_bool(np, "d-cache-size"))
+		++leaves;
+
+	if (!leaves) {
+		/* The '[i-|d-|]cache-size' property is required, but
+		 * if absent, fall back on the 'cache-unified' property.
+		 */
+		if (of_property_read_bool(np, "cache-unified"))
+			return 1;
+		else
+			return 2;
+	}
+
+	return leaves;
+}
+
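The leaf count here is derived purely from which properties the DT node carries: each of 'cache-size', 'i-cache-size' and 'd-cache-size' contributes one leaf, and a node with no size property at all is classified by 'cache-unified' alone, pessimistically assumed split (two leaves) when that is also missing. A stand-alone model of the rule (hypothetical userspace sketch; the booleans stand in for of_property_read_bool() on the node):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of of_count_cache_leaves(); the four flags
 * stand in for of_property_read_bool() results on the DT node. */
static int count_leaves(bool cache_size, bool icache_size,
			bool dcache_size, bool cache_unified)
{
	int leaves = 0;

	if (cache_size)
		++leaves;
	if (icache_size)
		++leaves;
	if (dcache_size)
		++leaves;

	/* No size property at all: classify by "cache-unified",
	 * assuming a split cache (two leaves) when it is absent. */
	if (!leaves)
		return cache_unified ? 1 : 2;

	return leaves;
}

int main(void)
{
	printf("split L1: %d\n", count_leaves(false, true, true, false));     /* 2 */
	printf("unified L2: %d\n", count_leaves(true, false, false, false));  /* 1 */
	printf("bare unified: %d\n", count_leaves(false, false, false, true));/* 1 */
	return 0;
}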
+int init_of_cache_level(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct device_node *np = of_cpu_device_node_get(cpu);
+	struct device_node *prev = NULL;
+	unsigned int levels = 0, leaves, level;
+
+	leaves = of_count_cache_leaves(np);
+	if (leaves > 0)
+		levels = 1;
+
+	prev = np;
+	while ((np = of_find_next_cache_node(np))) {
+		of_node_put(prev);
+		prev = np;
+		if (!of_device_is_compatible(np, "cache"))
+			goto err_out;
+		if (of_property_read_u32(np, "cache-level", &level))
+			goto err_out;
+		if (level <= levels)
+			goto err_out;
+
+		leaves += of_count_cache_leaves(np);
+		levels = level;
+	}
+
+	of_node_put(np);
+	this_cpu_ci->num_levels = levels;
+	this_cpu_ci->num_leaves = leaves;
+
+	return 0;
+
+err_out:
+	of_node_put(np);
+	return -EINVAL;
+}
+
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+int init_of_cache_level(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
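Note the validation in the walk above: every node reached through the cache chain must be compatible with "cache" and carry a strictly increasing 'cache-level', so a malformed DT fails with -EINVAL rather than producing a bogus hierarchy. With the parser exported (and stubbed out for !CONFIG_OF builds), an architecture that describes its caches entirely in the device tree can implement the weak init_cache_level() hook as a thin wrapper; an illustrative sketch of such wiring, not part of this diff:

/* Hypothetical arch-side hook for a DT-only platform: defer to the
 * generic OF parser to fill in num_levels and num_leaves. */
int init_cache_level(unsigned int cpu)
{
	return init_of_cache_level(cpu);
}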
@@ -256,7 +319,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
-	unsigned int index;
+	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
@@ -284,11 +347,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
		if (i == cpu || !sib_cpu_ci->info_list)
			continue;/* skip if itself or no cacheinfo */
-
-		sib_leaf = per_cpu_cacheinfo_idx(i, index);
-		if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
-			cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
-			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+		for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+			sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+				break;
+			}
		}
	}
	/* record the maximum cache line size */
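The removed code paired this_leaf with the sibling's leaf at the same index, which silently assumes every CPU has an identical leaf layout. On a heterogeneous system the leaf counts can differ, so the same index may name a different cache, or not exist at all; scanning the sibling's leaves for a cache_leaves_are_shared() match handles that. A toy illustration with made-up topologies:

#include <stdio.h>

/* Made-up heterogeneous layout: big cores have a private L2,
 * little cores do not, so leaf indices do not line up. */
int main(void)
{
	const char *big[]    = { "L1i", "L1d", "L2", "L3" };	/* 4 leaves */
	const char *little[] = { "L1i", "L1d", "L3" };		/* 3 leaves */

	/* Index pairing would match big[3] ("L3") with little[3],
	 * which does not exist; scanning finds little[2] instead. */
	printf("big %s pairs with little %s\n", big[3], little[2]);
	return 0;
}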
@@ -302,7 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
-	unsigned int sibling, index;
+	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
@@ -313,9 +378,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */

-			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
-			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
-			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+					break;
+				}
+			}
		}
	}
}
@@ -326,10 +396,6 @@ static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
-
-	kfree(per_cpu_cacheinfo(cpu));
-	per_cpu_cacheinfo(cpu) = NULL;
-	cache_leaves(cpu) = 0;
}
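With the kfree() dropped here, the per-CPU info_list and leaf count now outlive CPU offline: free_cache_attributes() only unthreads the CPU from its siblings' shared_cpu_map, and the allocation is reused when the CPU comes back online. A simplified sketch of the resulting offline path (illustrative only, not the exact callback in this file):

/* Simplified sketch of the hotplug-offline effect after this change:
 * only the shared-map bookkeeping is undone; the allocation is kept. */
static int example_cpu_offline(unsigned int cpu)
{
	free_cache_attributes(cpu);	/* now just cache_shared_cpu_map_remove() */
	return 0;
}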
int __weak init_cache_level(unsigned int cpu)
@@ -342,38 +408,85 @@ int __weak populate_cache_leaves(unsigned int cpu)
	return -ENOENT;
}

+static inline
+int allocate_cache_info(int cpu)
+{
+	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+					 sizeof(struct cacheinfo), GFP_ATOMIC);
+	if (!per_cpu_cacheinfo(cpu)) {
+		cache_leaves(cpu) = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci;
+	unsigned int levels = 0, split_levels = 0;
+	int ret;
+
+	if (acpi_disabled) {
+		ret = init_of_cache_level(cpu);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+		if (ret < 0)
+			return ret;
+
+		this_cpu_ci = get_cpu_cacheinfo(cpu);
+		this_cpu_ci->num_levels = levels;
+		/*
+		 * This assumes that:
+		 * - there cannot be any split caches (data/instruction)
+		 *   above a unified cache
+		 * - data/instruction caches come in pairs
+		 */
+		this_cpu_ci->num_leaves = levels + split_levels;
+	}
+	if (!cache_leaves(cpu))
+		return -ENOENT;
+
+	return allocate_cache_info(cpu);
+}
+
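The ACPI branch computes the leaf count from the two values returned by acpi_get_cache_info(): each level contributes one leaf, and each split level contributes one extra (an i-leaf and a d-leaf instead of a single unified one). A worked example under the stated assumptions, for a hypothetical split-L1 / unified-L2 / unified-L3 topology:

#include <stdio.h>

/* Worked example of num_leaves = levels + split_levels for a
 * hypothetical split-L1 / unified-L2 / unified-L3 topology. */
int main(void)
{
	unsigned int levels = 3;	/* L1, L2, L3 */
	unsigned int split_levels = 1;	/* only L1 is split */

	/* L1i, L1d, L2, L3 -> 4 leaves */
	printf("num_leaves = %u\n", levels + split_levels);
	return 0;
}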
int detect_cache_attributes(unsigned int cpu)
{
	int ret;

-	/* Since early detection of the cacheinfo is allowed via this
-	 * function and this also gets called as CPU hotplug callbacks via
-	 * cacheinfo_cpu_online, the initialisation can be skipped and only
-	 * CPU maps can be updated as the CPU online status would be update
-	 * if called via cacheinfo_cpu_online path.
+	/* Since early initialization/allocation of the cacheinfo is allowed
+	 * via fetch_cache_info() and this also gets called as CPU hotplug
+	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+	 * as it will happen only once (the cacheinfo memory is never freed).
+	 * Just populate the cacheinfo.
	 */
	if (per_cpu_cacheinfo(cpu))
-		goto update_cpu_map;
+		goto populate_leaves;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

-	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
-					 sizeof(struct cacheinfo), GFP_ATOMIC);
-	if (per_cpu_cacheinfo(cpu) == NULL) {
-		cache_leaves(cpu) = 0;
-		return -ENOMEM;
-	}
+	ret = allocate_cache_info(cpu);
+	if (ret)
+		return ret;

+populate_leaves:
	/*
-	 * populate_cache_leaves() may completely setup the cache leaves and
-	 * shared_cpu_map or it may leave it partially setup.
+	 * If LLC is valid, the cache leaves were already populated, so just
+	 * go on to update the cpu map.
	 */
-	ret = populate_cache_leaves(cpu);
-	if (ret)
-		goto free_ci;
+	if (!last_level_cache_is_valid(cpu)) {
+		/*
+		 * populate_cache_leaves() may completely setup the cache leaves and
+		 * shared_cpu_map or it may leave it partially setup.
+		 */
+		ret = populate_cache_leaves(cpu);
+		if (ret)
+			goto free_ci;
+	}

-update_cpu_map:
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are