Konstantin Demin
8cbaf1dea2
3rd patches (in alphabetical order): bbr3, ntsync5, openwrt, pf-kernel, xanmod, zen. No configuration changes for now.
From 8c7eb17e722a6a45c4436e5debb9336089b21d9b Mon Sep 17 00:00:00 2001
From: Kan Liang <kan.liang@linux.intel.com>
Date: Fri, 2 Aug 2024 08:16:38 -0700
Subject: perf: Add PERF_EV_CAP_READ_SCOPE

Usually, an event can be read from any CPU of the scope. It doesn't need
to be read from the advertised CPU.

Add a new event cap, PERF_EV_CAP_READ_SCOPE. An event of a PMU with
scope can be read from any active CPU in the scope.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
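
Editor's note (illustration only, not part of the patch): the kernel/events/core.c
hunk below lets a read be served on whatever CPU the reader is already running on,
as long as that CPU lies in the cpumask of the event PMU's scope; only when it does
not is the read bounced to the event's advertised CPU. The standalone user-space
sketch below mimics that selection rule with hypothetical names (pick_read_cpu,
scope_mask); it simulates the decision and is not kernel code.

/*
 * Illustrative user-space sketch (hypothetical names): mimics the CPU
 * selection rule added to __perf_event_read_cpu() in this patch.  If the
 * CPU we are currently running on lies inside the event PMU's scope
 * cpumask, the read can be satisfied locally; otherwise fall back to the
 * event's advertised CPU (which, in the kernel, means an IPI).
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pick_read_cpu(const cpu_set_t *scope_mask, int event_cpu, int local_cpu)
{
	if (scope_mask && CPU_ISSET(local_cpu, scope_mask))
		return local_cpu;	/* PERF_EV_CAP_READ_SCOPE fast path */
	return event_cpu;		/* fall back to the advertised CPU */
}

int main(void)
{
	cpu_set_t scope_mask;
	int local_cpu = sched_getcpu();

	/* Pretend the PMU's scope covers CPUs 0-3 (e.g. one package). */
	CPU_ZERO(&scope_mask);
	for (int cpu = 0; cpu < 4; cpu++)
		CPU_SET(cpu, &scope_mask);

	printf("event advertised on CPU 0, read served from CPU %d\n",
	       pick_read_cpu(&scope_mask, 0, local_cpu));
	return 0;
}

Compile with cc and run it pinned to different CPUs (for example via taskset) to
see the chosen CPU switch between the local CPU and the advertised one.
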
 include/linux/perf_event.h |  3 +++
 kernel/events/core.c       | 14 +++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -633,10 +633,13 @@ typedef void (*perf_overflow_handler_t)(
  * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
  * cannot be a group leader. If an event with this flag is detached from the
  * group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
  */
 #define PERF_EV_CAP_SOFTWARE		BIT(0)
 #define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
 #define PERF_EV_CAP_SIBLING		BIT(2)
+#define PERF_EV_CAP_READ_SCOPE		BIT(3)
 
 #define SWEVENT_HLIST_BITS		8
 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4477,16 +4477,24 @@ struct perf_read_data {
 	int ret;
 };
 
+static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
+
 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
+	int local_cpu = smp_processor_id();
 	u16 local_pkg, event_pkg;
 
 	if ((unsigned)event_cpu >= nr_cpu_ids)
 		return event_cpu;
 
-	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		int local_cpu = smp_processor_id();
+	if (event->group_caps & PERF_EV_CAP_READ_SCOPE) {
+		const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu);
+
+		if (cpumask && cpumask_test_cpu(local_cpu, cpumask))
+			return local_cpu;
+	}
 
+	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
 		event_pkg = topology_physical_package_id(event_cpu);
 		local_pkg = topology_physical_package_id(local_cpu);
 
@@ -11824,7 +11832,7 @@ static int perf_try_init_event(struct pm
 				if (cpu >= nr_cpu_ids)
 					ret = -ENODEV;
 				else
-					event->cpu = cpu;
+					event->event_caps |= PERF_EV_CAP_READ_SCOPE;
 			} else {
 				ret = -ENODEV;
 			}
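
Editor's illustration (not part of the patch above): with the perf_try_init_event()
hunk, any event created on a PMU that declares a scope is tagged with
PERF_EV_CAP_READ_SCOPE automatically, so its read path can take the scope cpumask
shortcut added to __perf_event_read_cpu(). The fragment below is only a hypothetical
sketch of such a driver; the example_* callbacks are placeholders, and it assumes the
struct pmu::scope field and a PERF_PMU_SCOPE_PKG constant from the companion
PMU-scope change rather than anything defined in this patch.

/*
 * Hypothetical driver fragment (sketch only): a package-scoped,
 * uncore-style PMU.  Declaring .scope is what lets perf_try_init_event()
 * (see the last hunk above) tag its events with PERF_EV_CAP_READ_SCOPE,
 * so they can be read from any active CPU of the same package without an
 * IPI to the advertised CPU.
 */
static struct pmu example_uncore_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.scope		= PERF_PMU_SCOPE_PKG,	/* assumed constant from the PMU-scope series */
	.event_init	= example_event_init,	/* placeholder callbacks */
	.add		= example_event_add,
	.del		= example_event_del,
	.start		= example_event_start,
	.stop		= example_event_stop,
	.read		= example_event_read,
};

The point of the sketch is the .scope initializer: nothing else in such a driver
needs to change for its events to become readable from any active CPU in the scope.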