author		Kan Liang <kan.liang@linux.intel.com>	2024-06-14 15:46:24 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2024-06-17 17:57:55 +0200
commit		0007f39325921351b7860a976a730acbb198b9ca (patch)
tree		39d6e9883b6252eeb9cab48d7f8ad5d5840ebc33 /arch/x86/events
parent		perf/x86/amd: Use try_cmpxchg() in events/amd/{un,}core.c (diff)

perf/x86/uncore: Save the unit control address of all units
The unit control address of some CXL units may be wrongly calculated
under some configurations on an EMR machine.

The current implementation only saves the unit control address of the
units from the first die, and of the first unit on each of the other
dies. Perf assumes that the units on the other dies have the same
offsets as the corresponding units on the first die, so the unit control
addresses of the remaining units can be calculated from them. However,
the assumption is wrong, especially for the CXL units.

Introduce an RB tree for each uncore type to save the unit control
address and three kinds of ID information (unit ID, PMU ID, and die ID)
for all units.

The unit ID is the physical ID of a unit.
The PMU ID is a logical ID assigned to a unit. The logical IDs start
from 0 and must be contiguous. The physical ID and the logical ID are
mapped 1:1. Units with the same physical ID on different dies share the
same PMU (see the worked example below).
The die ID indicates which die a unit belongs to.
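
A worked example of the mapping (the physical unit IDs 3 and 5 below are
made up for illustration): suppose one uncore type has two units per
die. During the RB tree setup, uncore_find_add_unit() assigns the PMU
IDs in discovery order:

	die 0, unit ID 3 -> no node with unit ID 3 yet -> PMU ID 0
	die 0, unit ID 5 -> no node with unit ID 5 yet -> PMU ID 1
	die 1, unit ID 3 -> matches the die-0 node with unit ID 3 -> PMU ID 0
	die 1, unit ID 5 -> matches the die-0 node with unit ID 5 -> PMU ID 1

The two units with unit ID 3 are exported through the same PMU (PMU ID
0), while each node keeps its own die ID and unit control address.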

The RB tree can be searched by two different keys (unit ID, or PMU ID +
die ID). During the RB tree setup, the unit ID is used as the key to
look up the RB tree, so that perf can create/assign a proper PMU ID for
each unit. Later, after the RB tree is set up, PMU ID + die ID is used
as the key to look up the RB tree and fill the cpumask of a PMU. That
lookup is used more frequently, so PMU ID + die ID is what unit_less()
compares (an illustrative lookup is sketched below).

uncore_find_unit() has to be O(N), but the RB tree setup only occurs
once, at driver load time, so this should be acceptable.
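
A minimal sketch, not part of this patch, of how the later PMU ID + die
ID lookup could be written with rb_find() from <linux/rbtree.h>, using a
compare callback that follows the same (pmu_idx, die) ordering as
unit_less(); the key struct and the function names are hypothetical:

	struct uncore_unit_key {
		unsigned int pmu_idx;
		unsigned int die;
	};

	static int uncore_unit_cmp(const void *key, const struct rb_node *b)
	{
		const struct uncore_unit_key *k = key;
		const struct intel_uncore_discovery_unit *unit;

		unit = rb_entry(b, struct intel_uncore_discovery_unit, node);

		/* Order by pmu_idx first, then die, matching unit_less(). */
		if (k->pmu_idx != unit->pmu_idx)
			return k->pmu_idx < unit->pmu_idx ? -1 : 1;
		if (k->die != unit->die)
			return k->die < unit->die ? -1 : 1;
		return 0;
	}

	/* Return the saved unit control address, or 0 if no unit matches. */
	static u64 uncore_unit_ctl_addr(struct intel_uncore_discovery_type *type,
					unsigned int pmu_idx, unsigned int die)
	{
		struct uncore_unit_key key = { .pmu_idx = pmu_idx, .die = die };
		struct rb_node *node = rb_find(&key, &type->units, uncore_unit_cmp);

		if (!node)
			return 0;
		return rb_entry(node, struct intel_uncore_discovery_unit, node)->addr;
	}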

Compared with the current implementation, more space is required to save
the information of all units, but the extra size should be acceptable.
For example, EMR has at most 221 units, so on a 2-socket machine the
extra space is at most ~6KB.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240614134631.1092359-2-kan.liang@linux.intel.com
Diffstat (limited to 'arch/x86/events')
-rw-r--r--	arch/x86/events/intel/uncore_discovery.c	79
-rw-r--r--	arch/x86/events/intel/uncore_discovery.h	10
2 files changed, 87 insertions, 2 deletions
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
index 9a698a92962a..ce520e69a3c1 100644
--- a/arch/x86/events/intel/uncore_discovery.c
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -93,6 +93,8 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit)
 	if (!type->box_ctrl_die)
 		goto free_type;
 
+	type->units = RB_ROOT;
+
 	type->access_type = unit->access_type;
 	num_discovered_types[type->access_type]++;
 	type->type = unit->box_type;
@@ -120,10 +122,59 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit)
 	return add_uncore_discovery_type(unit);
 }
 
+static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
+{
+	struct intel_uncore_discovery_unit *a_node, *b_node;
+
+	a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
+	b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);
+
+	if (a_node->pmu_idx < b_node->pmu_idx)
+		return true;
+	if (a_node->pmu_idx > b_node->pmu_idx)
+		return false;
+
+	if (a_node->die < b_node->die)
+		return true;
+	if (a_node->die > b_node->die)
+		return false;
+
+	return 0;
+}
+
+static inline struct intel_uncore_discovery_unit *
+uncore_find_unit(struct rb_root *root, unsigned int id)
+{
+	struct intel_uncore_discovery_unit *unit;
+	struct rb_node *node;
+
+	for (node = rb_first(root); node; node = rb_next(node)) {
+		unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
+		if (unit->id == id)
+			return unit;
+	}
+
+	return NULL;
+}
+
+static void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
+				 struct rb_root *root, u16 *num_units)
+{
+	struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);
+
+	if (unit)
+		node->pmu_idx = unit->pmu_idx;
+	else if (num_units)
+		node->pmu_idx = (*num_units)++;
+
+	rb_add(&node->node, root, unit_less);
+}
+
 static void
 uncore_insert_box_info(struct uncore_unit_discovery *unit,
 		       int die, bool parsed)
 {
+	struct intel_uncore_discovery_unit *node;
 	struct intel_uncore_discovery_type *type;
 	unsigned int *ids;
 	u64 *box_offset;
@@ -136,14 +187,26 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
 		return;
 	}
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return;
+
+	node->die = die;
+	node->id = unit->box_id;
+	node->addr = unit->ctl;
+
 	if (parsed) {
 		type = search_uncore_discovery_type(unit->box_type);
 		if (!type) {
 			pr_info("A spurious uncore type %d is detected, "
 				"Disable the uncore type.\n",
 				unit->box_type);
+			kfree(node);
 			return;
 		}
+
+		uncore_find_add_unit(node, &type->units, &type->num_units);
+
 		/* Store the first box of each die */
 		if (!type->box_ctrl_die[die])
 			type->box_ctrl_die[die] = unit->ctl;
@@ -152,16 +215,18 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
 
 	type = get_uncore_discovery_type(unit);
 	if (!type)
-		return;
+		goto free_node;
 
 	box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL);
 	if (!box_offset)
-		return;
+		goto free_node;
 
 	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
 	if (!ids)
 		goto free_box_offset;
 
+	uncore_find_add_unit(node, &type->units, &type->num_units);
+
 	/* Store generic information for the first box */
 	if (!type->num_boxes) {
 		type->box_ctrl = unit->ctl;
@@ -201,6 +266,8 @@ free_ids:
 free_box_offset:
 	kfree(box_offset);
 
+free_node:
+	kfree(node);
 }
 
 static bool
@@ -339,8 +406,16 @@ err:
 void intel_uncore_clear_discovery_tables(void)
 {
 	struct intel_uncore_discovery_type *type, *next;
+	struct intel_uncore_discovery_unit *pos;
+	struct rb_node *node;
 
 	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
+		while (!RB_EMPTY_ROOT(&type->units)) {
+			node = rb_first(&type->units);
+			pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
+			rb_erase(node, &type->units);
+			kfree(pos);
+		}
 		kfree(type->box_ctrl_die);
 		kfree(type);
 	}
diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h
index 22e769a81103..5190017aba51 100644
--- a/arch/x86/events/intel/uncore_discovery.h
+++ b/arch/x86/events/intel/uncore_discovery.h
@@ -113,17 +113,27 @@ struct uncore_unit_discovery {
 	};
 };
 
+struct intel_uncore_discovery_unit {
+	struct rb_node node;
+	unsigned int pmu_idx;	/* The idx of the corresponding PMU */
+	unsigned int id;	/* Unit ID */
+	unsigned int die;	/* Die ID */
+	u64 addr;		/* Unit Control Address */
+};
+
 struct intel_uncore_discovery_type {
 	struct rb_node node;
 	enum uncore_access_type access_type;
 	u64 box_ctrl;		/* Unit ctrl addr of the first box */
 	u64 *box_ctrl_die;	/* Unit ctrl addr of the first box of each die */
+	struct rb_root units;	/* Unit ctrl addr for all units */
 	u16 type;		/* Type ID of the uncore block */
 	u8 num_counters;
 	u8 counter_width;
 	u8 ctl_offset;		/* Counter Control 0 offset */
 	u8 ctr_offset;		/* Counter 0 offset */
 	u16 num_boxes;		/* number of boxes for the uncore block */
+	u16 num_units;		/* number of units */
 	unsigned int *ids;	/* Box IDs */
 	u64 *box_offset;	/* Box offset */
 };
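
Not part of this patch: a minimal sketch of how the units saved in a
type's RB tree could be walked for debugging, using the same
rb_first()/rb_next() traversal as the cleanup path above (the function
name is hypothetical):

	static void uncore_dump_units(struct intel_uncore_discovery_type *type)
	{
		struct intel_uncore_discovery_unit *unit;
		struct rb_node *node;

		for (node = rb_first(&type->units); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			pr_debug("type %u: unit %u pmu_idx %u die %u ctl 0x%llx\n",
				 type->type, unit->id, unit->pmu_idx, unit->die,
				 unit->addr);
		}
	}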