Konstantin Demin
8cbaf1dea2
3rd patches (in alphabetical order): - bbr3 - ntsync5 - openwrt - pf-kernel - xanmod - zen no configuration changes for now
189 lines
5.1 KiB
Diff
189 lines
5.1 KiB
Diff
From f91da33af8295b4b3d73a2083225f69e1d5ff301 Mon Sep 17 00:00:00 2001
|
|
From: Kan Liang <kan.liang@linux.intel.com>
|
|
Date: Fri, 2 Aug 2024 08:16:40 -0700
|
|
Subject: iommu/vt-d: Clean up cpumask and hotplug for perfmon
|
|
|
|
The iommu PMU is system-wide scope, which is supported by the generic
|
|
perf_event subsystem now.
|
|
|
|
Set the scope for the iommu PMU and remove all the cpumask and hotplug
|
|
codes.
|
|
|
|
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
|
|
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
|
|
Cc: David Woodhouse <dwmw2@infradead.org>
|
|
Cc: Joerg Roedel <joro@8bytes.org>
|
|
Cc: Will Deacon <will@kernel.org>
|
|
Cc: iommu@lists.linux.dev
|
|
---
|
|
drivers/iommu/intel/iommu.h | 2 -
|
|
drivers/iommu/intel/perfmon.c | 111 +---------------------------------
|
|
2 files changed, 2 insertions(+), 111 deletions(-)
|
|
|
|
--- a/drivers/iommu/intel/iommu.h
|
|
+++ b/drivers/iommu/intel/iommu.h
|
|
@@ -687,8 +687,6 @@ struct iommu_pmu {
|
|
DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
|
|
struct perf_event *event_list[IOMMU_PMU_IDX_MAX];
|
|
unsigned char irq_name[16];
|
|
- struct hlist_node cpuhp_node;
|
|
- int cpu;
|
|
};
|
|
|
|
#define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED)
|
|
--- a/drivers/iommu/intel/perfmon.c
|
|
+++ b/drivers/iommu/intel/perfmon.c
|
|
@@ -34,28 +34,9 @@ static struct attribute_group iommu_pmu_
|
|
.attrs = attrs_empty,
|
|
};
|
|
|
|
-static cpumask_t iommu_pmu_cpu_mask;
|
|
-
|
|
-static ssize_t
|
|
-cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|
-{
|
|
- return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask);
|
|
-}
|
|
-static DEVICE_ATTR_RO(cpumask);
|
|
-
|
|
-static struct attribute *iommu_pmu_cpumask_attrs[] = {
|
|
- &dev_attr_cpumask.attr,
|
|
- NULL
|
|
-};
|
|
-
|
|
-static struct attribute_group iommu_pmu_cpumask_attr_group = {
|
|
- .attrs = iommu_pmu_cpumask_attrs,
|
|
-};
|
|
-
|
|
static const struct attribute_group *iommu_pmu_attr_groups[] = {
|
|
&iommu_pmu_format_attr_group,
|
|
&iommu_pmu_events_attr_group,
|
|
- &iommu_pmu_cpumask_attr_group,
|
|
NULL
|
|
};
|
|
|
|
@@ -565,6 +546,7 @@ static int __iommu_pmu_register(struct i
|
|
iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
|
|
iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
|
|
iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
|
|
+ iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
|
|
iommu_pmu->pmu.module = THIS_MODULE;
|
|
|
|
return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
|
|
@@ -773,89 +755,6 @@ static void iommu_pmu_unset_interrupt(st
|
|
iommu->perf_irq = 0;
|
|
}
|
|
|
|
-static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
|
|
-{
|
|
- struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
|
|
-
|
|
- if (cpumask_empty(&iommu_pmu_cpu_mask))
|
|
- cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
|
|
-
|
|
- if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
|
|
- iommu_pmu->cpu = cpu;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
|
|
-{
|
|
- struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
|
|
- int target = cpumask_first(&iommu_pmu_cpu_mask);
|
|
-
|
|
- /*
|
|
- * The iommu_pmu_cpu_mask has been updated when offline the CPU
|
|
- * for the first iommu_pmu. Migrate the other iommu_pmu to the
|
|
- * new target.
|
|
- */
|
|
- if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
|
|
- perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
|
|
- iommu_pmu->cpu = target;
|
|
- return 0;
|
|
- }
|
|
-
|
|
- if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
|
|
- return 0;
|
|
-
|
|
- target = cpumask_any_but(cpu_online_mask, cpu);
|
|
-
|
|
- if (target < nr_cpu_ids)
|
|
- cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
|
|
- else
|
|
- return 0;
|
|
-
|
|
- perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
|
|
- iommu_pmu->cpu = target;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int nr_iommu_pmu;
|
|
-static enum cpuhp_state iommu_cpuhp_slot;
|
|
-
|
|
-static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- if (!nr_iommu_pmu) {
|
|
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
|
|
- "driver/iommu/intel/perfmon:online",
|
|
- iommu_pmu_cpu_online,
|
|
- iommu_pmu_cpu_offline);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
- iommu_cpuhp_slot = ret;
|
|
- }
|
|
-
|
|
- ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
|
|
- if (ret) {
|
|
- if (!nr_iommu_pmu)
|
|
- cpuhp_remove_multi_state(iommu_cpuhp_slot);
|
|
- return ret;
|
|
- }
|
|
- nr_iommu_pmu++;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
|
|
-{
|
|
- cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
|
|
-
|
|
- if (--nr_iommu_pmu)
|
|
- return;
|
|
-
|
|
- cpuhp_remove_multi_state(iommu_cpuhp_slot);
|
|
-}
|
|
-
|
|
void iommu_pmu_register(struct intel_iommu *iommu)
|
|
{
|
|
struct iommu_pmu *iommu_pmu = iommu->pmu;
|
|
@@ -866,17 +765,12 @@ void iommu_pmu_register(struct intel_iom
|
|
if (__iommu_pmu_register(iommu))
|
|
goto err;
|
|
|
|
- if (iommu_pmu_cpuhp_setup(iommu_pmu))
|
|
- goto unregister;
|
|
-
|
|
/* Set interrupt for overflow */
|
|
if (iommu_pmu_set_interrupt(iommu))
|
|
- goto cpuhp_free;
|
|
+ goto unregister;
|
|
|
|
return;
|
|
|
|
-cpuhp_free:
|
|
- iommu_pmu_cpuhp_free(iommu_pmu);
|
|
unregister:
|
|
perf_pmu_unregister(&iommu_pmu->pmu);
|
|
err:
|
|
@@ -892,6 +786,5 @@ void iommu_pmu_unregister(struct intel_i
|
|
return;
|
|
|
|
iommu_pmu_unset_interrupt(iommu);
|
|
- iommu_pmu_cpuhp_free(iommu_pmu);
|
|
perf_pmu_unregister(&iommu_pmu->pmu);
|
|
}
|