From 30e2cd787aeb9cb9c1148e07446aac76765f715e Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Fri, 15 Nov 2024 06:08:00 +0000
Subject: perf/x86/rapl: Rename rapl_pmu variables

Rename struct rapl_pmu variables from "pmu" to "rapl_pmu", to avoid
confusion between variables of the two different structs pmu and
rapl_pmu. struct rapl_pmu also contains a pointer to struct pmu, which
leads to expressions like pmu->pmu that are needlessly confusing. With
this change, that becomes the much more readable rapl_pmu->pmu.
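
To illustrate, a minimal compilable sketch of the collision (the
struct bodies are pared down and the two helper functions are
hypothetical, not code from rapl.c):

	struct pmu { int dummy; };	/* stand-in for the perf core type */

	struct rapl_pmu {
		struct pmu *pmu;	/* pointer to the perf core's struct pmu */
	};

	/* before the rename: "pmu" names a struct rapl_pmu */
	static struct pmu *core_pmu_before(struct rapl_pmu *pmu)
	{
		return pmu->pmu;	/* which pmu is which? */
	}

	/* after the rename: the same access reads unambiguously */
	static struct pmu *core_pmu_after(struct rapl_pmu *rapl_pmu)
	{
		return rapl_pmu->pmu;
	}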

Also rename the "pmus" member in struct rapl_pmus, for the same reason.

No functional change.

Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Zhang Rui <rui.zhang@intel.com>
Tested-by: Zhang Rui <rui.zhang@intel.com>
---
 arch/x86/events/rapl.c | 91 +++++++++++++++++++++---------------------
 1 file changed, 46 insertions(+), 45 deletions(-)

--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -129,7 +129,7 @@ struct rapl_pmu {
 struct rapl_pmus {
 	struct pmu		pmu;
 	unsigned int		nr_rapl_pmu;
-	struct rapl_pmu		*pmus[] __counted_by(nr_rapl_pmu);
+	struct rapl_pmu		*rapl_pmu[] __counted_by(nr_rapl_pmu);
 };
 
 enum rapl_unit_quirk {
@@ -227,34 +227,34 @@ static void rapl_start_hrtimer(struct ra
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
+	struct rapl_pmu *rapl_pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
 	struct perf_event *event;
 	unsigned long flags;
 
-	if (!pmu->n_active)
+	if (!rapl_pmu->n_active)
 		return HRTIMER_NORESTART;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
-	list_for_each_entry(event, &pmu->active_list, active_entry)
+	list_for_each_entry(event, &rapl_pmu->active_list, active_entry)
 		rapl_event_update(event);
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 
-	hrtimer_forward_now(hrtimer, pmu->timer_interval);
+	hrtimer_forward_now(hrtimer, rapl_pmu->timer_interval);
 
 	return HRTIMER_RESTART;
 }
 
-static void rapl_hrtimer_init(struct rapl_pmu *pmu)
+static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu)
 {
-	struct hrtimer *hr = &pmu->hrtimer;
+	struct hrtimer *hr = &rapl_pmu->hrtimer;
 
 	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hr->function = rapl_hrtimer_handle;
 }
 
-static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
+static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu,
 				   struct perf_event *event)
 {
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -262,39 +262,39 @@ static void __rapl_pmu_event_start(struc
 
 	event->hw.state = 0;
 
-	list_add_tail(&event->active_entry, &pmu->active_list);
+	list_add_tail(&event->active_entry, &rapl_pmu->active_list);
 
 	local64_set(&event->hw.prev_count, rapl_read_counter(event));
 
-	pmu->n_active++;
-	if (pmu->n_active == 1)
-		rapl_start_hrtimer(pmu);
+	rapl_pmu->n_active++;
+	if (rapl_pmu->n_active == 1)
+		rapl_start_hrtimer(rapl_pmu);
 }
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
-	__rapl_pmu_event_start(pmu, event);
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
+	__rapl_pmu_event_start(rapl_pmu, event);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 }
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
 	/* mark event as deactivated and stopped */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
-		WARN_ON_ONCE(pmu->n_active <= 0);
-		pmu->n_active--;
-		if (pmu->n_active == 0)
-			hrtimer_cancel(&pmu->hrtimer);
+		WARN_ON_ONCE(rapl_pmu->n_active <= 0);
+		rapl_pmu->n_active--;
+		if (rapl_pmu->n_active == 0)
+			hrtimer_cancel(&rapl_pmu->hrtimer);
 
 		list_del(&event->active_entry);
 
@@ -312,23 +312,23 @@ static void rapl_pmu_event_stop(struct p
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 }
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
 	if (mode & PERF_EF_START)
-		__rapl_pmu_event_start(pmu, event);
+		__rapl_pmu_event_start(rapl_pmu, event);
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 
 	return 0;
 }
@@ -342,7 +342,7 @@ static int rapl_pmu_event_init(struct pe
 {
 	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
 	int bit, ret = 0;
-	struct rapl_pmu *pmu;
+	struct rapl_pmu *rapl_pmu;
 	unsigned int rapl_pmu_idx;
 
 	/* only look at RAPL events */
@@ -375,10 +375,11 @@ static int rapl_pmu_event_init(struct pe
 		return -EINVAL;
 
 	/* must be done before validate_group */
-	pmu = rapl_pmus->pmus[rapl_pmu_idx];
-	if (!pmu)
+	rapl_pmu = rapl_pmus->rapl_pmu[rapl_pmu_idx];
+	if (!rapl_pmu)
 		return -EINVAL;
-	event->pmu_private = pmu;
+
+	event->pmu_private = rapl_pmu;
 	event->hw.event_base = rapl_msrs[bit].msr;
 	event->hw.config = cfg;
 	event->hw.idx = bit;
@@ -605,7 +606,7 @@ static void cleanup_rapl_pmus(void)
 	int i;
 
 	for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
-		kfree(rapl_pmus->pmus[i]);
+		kfree(rapl_pmus->rapl_pmu[i]);
 	kfree(rapl_pmus);
 }
 
@@ -620,27 +621,27 @@ static const struct attribute_group *rap
 
 static int __init init_rapl_pmu(void)
 {
-	struct rapl_pmu *pmu;
+	struct rapl_pmu *rapl_pmu;
 	int idx;
 
 	for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) {
-		pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
-		if (!pmu)
+		rapl_pmu = kzalloc(sizeof(*rapl_pmu), GFP_KERNEL);
+		if (!rapl_pmu)
 			goto free;
 
-		raw_spin_lock_init(&pmu->lock);
-		INIT_LIST_HEAD(&pmu->active_list);
-		pmu->pmu = &rapl_pmus->pmu;
-		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-		rapl_hrtimer_init(pmu);
+		raw_spin_lock_init(&rapl_pmu->lock);
+		INIT_LIST_HEAD(&rapl_pmu->active_list);
+		rapl_pmu->pmu = &rapl_pmus->pmu;
+		rapl_pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(rapl_pmu);
 
-		rapl_pmus->pmus[idx] = pmu;
+		rapl_pmus->rapl_pmu[idx] = rapl_pmu;
 	}
 
 	return 0;
 free:
 	for (; idx > 0; idx--)
-		kfree(rapl_pmus->pmus[idx - 1]);
+		kfree(rapl_pmus->rapl_pmu[idx - 1]);
 
 	return -ENOMEM;
 }
@@ -654,7 +655,7 @@ static int __init init_rapl_pmus(void)
 		rapl_pmu_scope = PERF_PMU_SCOPE_DIE;
 	}
 
-	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
+	rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
 	if (!rapl_pmus)
 		return -ENOMEM;
 