From fb0a3b5932882f02ed42fcaa6db73aba3eafd6d7 Mon Sep 17 00:00:00 2001
From: Kan Liang <kan.liang@linux.intel.com>
Date: Fri, 2 Aug 2024 08:16:42 -0700
Subject: perf/x86/rapl: Move the pmu allocation out of CPU hotplug

The rapl PMU only needs to be allocated once. Whether the allocation
happens in the CPU hotplug callback or in the global init_rapl_pmus()
makes no functional difference.

Move the PMU allocation into init_rapl_pmus(), so that the generic
hotplug support can be applied.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
 arch/x86/events/rapl.c | 44 +++++++++++++++++++++++++++++-------------
 1 file changed, 31 insertions(+), 13 deletions(-)

--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -568,19 +568,8 @@ static int rapl_cpu_online(unsigned int
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
-	if (!pmu) {
-		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-		if (!pmu)
-			return -ENOMEM;
-
-		raw_spin_lock_init(&pmu->lock);
-		INIT_LIST_HEAD(&pmu->active_list);
-		pmu->pmu = &rapl_pmus->pmu;
-		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-		rapl_hrtimer_init(pmu);
-
-		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
-	}
+	if (!pmu)
+		return -ENOMEM;
 
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
@@ -673,6 +662,32 @@ static const struct attribute_group *rap
 	NULL,
 };
 
+static void __init init_rapl_pmu(void)
+{
+	struct rapl_pmu *pmu;
+	int cpu;
+
+	cpus_read_lock();
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		pmu = cpu_to_rapl_pmu(cpu);
+		if (pmu)
+			continue;
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			continue;
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
+	}
+
+	cpus_read_unlock();
+}
+
 static int __init init_rapl_pmus(void)
 {
 	int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
@@ -693,6 +708,9 @@ static int __init init_rapl_pmus(void)
 	rapl_pmus->pmu.read = rapl_pmu_event_read;
 	rapl_pmus->pmu.module = THIS_MODULE;
 	rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+
+	init_rapl_pmu();
+
 	return 0;
 }
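
The "generic hotplug support" mentioned above refers to the perf core
managing a PMU's CPU context itself, which becomes possible once the
online callback no longer has to allocate memory. As a rough,
hypothetical sketch (not part of this patch), assuming the struct pmu
->scope field and PERF_PMU_SCOPE_DIE from the generic-hotplug series
apply to the per-die RAPL PMU:

/*
 * Sketch only: with every rapl_pmu pre-allocated in init_rapl_pmus(),
 * the driver could declare its scope and let the perf core migrate the
 * PMU context to another online CPU in the same die on hotplug.
 * PERF_PMU_SCOPE_DIE is an assumption here; the actual follow-up may
 * pick a different scope. rapl_pmu_register_sketch() is illustrative
 * and does not exist in the kernel.
 */
static int __init rapl_pmu_register_sketch(struct rapl_pmus *pmus)
{
	pmus->pmu.scope = PERF_PMU_SCOPE_DIE;

	return perf_pmu_register(&pmus->pmu, "power", -1);
}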