From 09c1529eb102b486220c35546f2663ca858a2943 Mon Sep 17 00:00:00 2001
From: Kan Liang <kan.liang@linux.intel.com>
Date: Fri, 2 Aug 2024 08:16:39 -0700
Subject: perf/x86/intel/cstate: Clean up cpumask and hotplug

There are three cstate PMUs with different scopes: core, die and module.
The scopes are supported by the generic perf_event subsystem now.

Set the scope for each PMU and remove all the cpumask and hotplug code.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/intel/cstate.c | 142 ++-------------------------------
 include/linux/cpuhotplug.h     |   2 -
 2 files changed, 5 insertions(+), 139 deletions(-)
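
(Not part of the commit, review context only: a minimal sketch of what a
scoped PMU registration reduces to after this cleanup. Only the .scope field
and the PERF_PMU_SCOPE_* constants are taken from this patch; the callback
names and capability flags are as in cstate.c, while the PMU name and init
function below are purely illustrative. It also assumes the generic per-PMU
scope support in the perf core that this patch relies on.)

/*
 * Sketch: with a scope set, the perf core designates one online CPU per
 * core/die/cluster as the event reader and migrates events on hotplug,
 * so the driver keeps no cpumask and registers no CPUHP states.
 */
static struct pmu example_scoped_pmu = {	/* hypothetical PMU */
	.name		= "example_cstate",	/* illustrative name */
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.scope		= PERF_PMU_SCOPE_CORE,	/* or _PKG / _DIE / _CLUSTER */
	.module		= THIS_MODULE,
};

static int __init example_scoped_init(void)
{
	/* No cpuhp_setup_state() pair needed any more; just register. */
	return perf_pmu_register(&example_scoped_pmu, example_scoped_pmu.name, -1);
}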

--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -128,10 +128,6 @@ static ssize_t __cstate_##_var##_show(st
 static struct device_attribute format_attr_##_var =	\
 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
 
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
-				       struct device_attribute *attr,
-				       char *buf);
-
 /* Model -> events mapping */
 struct cstate_model {
 	unsigned long		core_events;
@@ -206,22 +202,9 @@ static struct attribute_group cstate_for
 	.attrs = cstate_format_attrs,
 };
 
-static cpumask_t cstate_core_cpu_mask;
-static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
-
-static struct attribute *cstate_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
-	.attrs = cstate_cpumask_attrs,
-};
-
 static const struct attribute_group *cstate_attr_groups[] = {
 	&cstate_events_attr_group,
 	&cstate_format_attr_group,
-	&cpumask_attr_group,
 	NULL,
 };
 
@@ -269,8 +252,6 @@ static struct perf_msr pkg_msr[] = {
 	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
 };
 
-static cpumask_t cstate_pkg_cpu_mask;
-
 /* cstate_module PMU */
 static struct pmu cstate_module_pmu;
 static bool has_cstate_module;
@@ -291,28 +272,9 @@ static struct perf_msr module_msr[] = {
 	[PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr },
 };
 
-static cpumask_t cstate_module_cpu_mask;
-
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
-				       struct device_attribute *attr,
-				       char *buf)
-{
-	struct pmu *pmu = dev_get_drvdata(dev);
-
-	if (pmu == &cstate_core_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
-	else if (pmu == &cstate_pkg_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
-	else if (pmu == &cstate_module_pmu)
-		return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask);
-	else
-		return 0;
-}
-
 static int cstate_pmu_event_init(struct perf_event *event)
 {
 	u64 cfg = event->attr.config;
-	int cpu;
 
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
@@ -331,20 +293,13 @@ static int cstate_pmu_event_init(struct
 		if (!(core_msr_mask & (1 << cfg)))
 			return -EINVAL;
 		event->hw.event_base = core_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_core_cpu_mask,
-				      topology_sibling_cpumask(event->cpu));
 	} else if (event->pmu == &cstate_pkg_pmu) {
 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
 			return -EINVAL;
 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
 		if (!(pkg_msr_mask & (1 << cfg)))
 			return -EINVAL;
-
-		event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
-
 		event->hw.event_base = pkg_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
-				      topology_die_cpumask(event->cpu));
 	} else if (event->pmu == &cstate_module_pmu) {
 		if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX)
 			return -EINVAL;
@@ -352,16 +307,10 @@ static int cstate_pmu_event_init(struct
 		if (!(module_msr_mask & (1 << cfg)))
 			return -EINVAL;
 		event->hw.event_base = module_msr[cfg].msr;
-		cpu = cpumask_any_and(&cstate_module_cpu_mask,
-				      topology_cluster_cpumask(event->cpu));
 	} else {
 		return -ENOENT;
 	}
 
-	if (cpu >= nr_cpu_ids)
-		return -ENODEV;
-
-	event->cpu = cpu;
 	event->hw.config = cfg;
 	event->hw.idx = -1;
 	return 0;
@@ -412,84 +361,6 @@ static int cstate_pmu_event_add(struct p
 	return 0;
 }
 
-/*
- * Check if exiting cpu is the designated reader. If so migrate the
- * events when there is a valid target available
- */
-static int cstate_cpu_exit(unsigned int cpu)
-{
-	unsigned int target;
-
-	if (has_cstate_core &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
-
-		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_core_cpu_mask);
-			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
-		}
-	}
-
-	if (has_cstate_pkg &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
-
-		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
-			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
-		}
-	}
-
-	if (has_cstate_module &&
-	    cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) {
-
-		target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu);
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &cstate_module_cpu_mask);
-			perf_pmu_migrate_context(&cstate_module_pmu, cpu, target);
-		}
-	}
-	return 0;
-}
-
-static int cstate_cpu_init(unsigned int cpu)
-{
-	unsigned int target;
-
-	/*
-	 * If this is the first online thread of that core, set it in
-	 * the core cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_core_cpu_mask,
-				 topology_sibling_cpumask(cpu));
-
-	if (has_cstate_core && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
-
-	/*
-	 * If this is the first online thread of that package, set it
-	 * in the package cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_pkg_cpu_mask,
-				 topology_die_cpumask(cpu));
-	if (has_cstate_pkg && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-
-	/*
-	 * If this is the first online thread of that cluster, set it
-	 * in the cluster cpu mask as the designated reader.
-	 */
-	target = cpumask_any_and(&cstate_module_cpu_mask,
-				 topology_cluster_cpumask(cpu));
-	if (has_cstate_module && target >= nr_cpu_ids)
-		cpumask_set_cpu(cpu, &cstate_module_cpu_mask);
-
-	return 0;
-}
-
 static const struct attribute_group *core_attr_update[] = {
 	&group_cstate_core_c1,
 	&group_cstate_core_c3,
@@ -526,6 +397,7 @@ static struct pmu cstate_core_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_CORE,
 	.module		= THIS_MODULE,
 };
 
@@ -541,6 +413,7 @@ static struct pmu cstate_pkg_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_PKG,
 	.module		= THIS_MODULE,
 };
 
@@ -556,6 +429,7 @@ static struct pmu cstate_module_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+	.scope		= PERF_PMU_SCOPE_CLUSTER,
 	.module		= THIS_MODULE,
 };
 
@@ -810,9 +684,6 @@ static int __init cstate_probe(const str
 
 static inline void cstate_cleanup(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
-
 	if (has_cstate_core)
 		perf_pmu_unregister(&cstate_core_pmu);
 
@@ -827,11 +698,6 @@ static int __init cstate_init(void)
 {
 	int err;
 
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
-			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
-			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);
-
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
@@ -844,6 +710,8 @@ static int __init cstate_init(void)
 
 	if (has_cstate_pkg) {
 		if (topology_max_dies_per_package() > 1) {
+			/* CLX-AP is multi-die and the cstate is die-scope */
+			cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE;
 			err = perf_pmu_register(&cstate_pkg_pmu,
 						"cstate_die", -1);
 		} else {
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -152,7 +152,6 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-	CPUHP_AP_PERF_X86_CSTATE_STARTING,
 	CPUHP_AP_PERF_XTENSA_STARTING,
 	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
@@ -209,7 +208,6 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
 	CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
 	CPUHP_AP_PERF_X86_RAPL_ONLINE,
-	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
 	CPUHP_AP_PERF_S390_CF_ONLINE,
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,