From 6f601cdcd33be8fc0da98c6bab777575af3260b8 Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@surriel.com>
Date: Wed, 5 Feb 2025 23:43:29 -0500
Subject: x86/mm: do targeted broadcast flushing from tlbbatch code

Instead of doing a system-wide TLB flush from arch_tlbbatch_flush,
queue up asynchronous, targeted flushes from arch_tlbbatch_add_pending.

This also allows us to avoid adding the CPUs of processes using broadcast
flushing to the batch->cpumask, and will hopefully further reduce TLB
flushing from the reclaim and compaction paths.

Signed-off-by: Rik van Riel <riel@surriel.com>
Tested-by: Manali Shukla <Manali.Shukla@amd.com>
---
 arch/x86/include/asm/invlpgb.h  | 21 +++++----
 arch/x86/include/asm/tlbflush.h | 17 ++++---
 arch/x86/mm/tlb.c               | 80 +++++++++++++++++++++++++++++++--
 3 files changed, 95 insertions(+), 23 deletions(-)
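
The core pattern introduced here: fire INVLPGB flushes asynchronously,
record in per-CPU state that a TLBSYNC is owed, and pay that debt at the
next synchronization point (context switch, lazy-TLB entry, or the end of
arch_tlbbatch_flush). Below is a minimal user-space sketch of that
bookkeeping, not the kernel code itself: the two helpers are hypothetical
printf stand-ins for the real INVLPGB/TLBSYNC instructions, and a
thread-local flag stands in for the per-CPU cpu_tlbstate.need_tlbsync.

	/*
	 * Toy model of the deferred-TLBSYNC bookkeeping in this patch.
	 * issue_flush_nosync() and tlbsync_if_needed() are hypothetical
	 * stand-ins for INVLPGB and TLBSYNC; need_tlbsync models the
	 * per-CPU cpu_tlbstate.need_tlbsync as a thread-local.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static _Thread_local bool need_tlbsync;

	/* Queue one asynchronous flush and remember a sync is owed. */
	static void issue_flush_nosync(unsigned long addr)
	{
		printf("queued async flush of %#lx\n", addr);
		need_tlbsync = true;
	}

	/* Wait for queued flushes, but only if any were issued. */
	static void tlbsync_if_needed(void)
	{
		if (!need_tlbsync)
			return;
		printf("TLBSYNC: waiting for completion\n");
		need_tlbsync = false;
	}

	int main(void)
	{
		/* Batch several invalidations, then sync exactly once. */
		issue_flush_nosync(0x1000);
		issue_flush_nosync(0x2000);
		tlbsync_if_needed();	/* e.g. at the next context switch */
		tlbsync_if_needed();	/* no-op: nothing outstanding */
		return 0;
	}

The same shape appears three times in the patch below: the *_nosync
wrappers set need_tlbsync, tlbsync() tests and clears it, and the calls
added to switch_mm_irqs_off() and enter_lazy_tlb() ensure a task never
leaves a CPU with a sync still owed.
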
--- a/arch/x86/include/asm/invlpgb.h
+++ b/arch/x86/include/asm/invlpgb.h
@@ -31,9 +31,8 @@ static inline void __invlpgb(unsigned lo
 }

 /* Wait for INVLPGB originated by this CPU to complete. */
-static inline void tlbsync(void)
+static inline void __tlbsync(void)
 {
-	cant_migrate();
 	/* TLBSYNC: supported in binutils >= 0.36. */
 	asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory");
 }
@@ -61,19 +60,19 @@ static inline void invlpgb_flush_user(un
 				       unsigned long addr)
 {
 	__invlpgb(0, pcid, addr, 0, 0, INVLPGB_PCID | INVLPGB_VA);
-	tlbsync();
+	__tlbsync();
 }

-static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
-						unsigned long addr,
-						u16 nr,
-						bool pmd_stride)
+static inline void __invlpgb_flush_user_nr_nosync(unsigned long pcid,
+						  unsigned long addr,
+						  u16 nr,
+						  bool pmd_stride)
 {
 	__invlpgb(0, pcid, addr, nr - 1, pmd_stride, INVLPGB_PCID | INVLPGB_VA);
 }

 /* Flush all mappings for a given PCID, not including globals. */
-static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
+static inline void __invlpgb_flush_single_pcid_nosync(unsigned long pcid)
 {
 	__invlpgb(0, pcid, 0, 0, 0, INVLPGB_PCID);
 }
@@ -82,11 +81,11 @@ static inline void invlpgb_flush_single_
 static inline void invlpgb_flush_all(void)
 {
 	__invlpgb(0, 0, 0, 0, 0, INVLPGB_INCLUDE_GLOBAL);
-	tlbsync();
+	__tlbsync();
 }

 /* Flush addr, including globals, for all PCIDs. */
-static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
+static inline void __invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
 {
 	__invlpgb(0, 0, addr, nr - 1, 0, INVLPGB_INCLUDE_GLOBAL);
 }
@@ -95,7 +94,7 @@ static inline void invlpgb_flush_addr_no
 static inline void invlpgb_flush_all_nonglobals(void)
 {
 	__invlpgb(0, 0, 0, 0, 0, 0);
-	tlbsync();
+	__tlbsync();
 }

 #endif /* _ASM_X86_INVLPGB */
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -106,6 +106,7 @@ struct tlb_state {
 	 * need to be invalidated.
 	 */
 	bool invalidate_other;
+	bool need_tlbsync;

 #ifdef CONFIG_ADDRESS_MASKING
 	/*
@@ -310,6 +311,10 @@ static inline void broadcast_tlb_flush(s
 static inline void consider_global_asid(struct mm_struct *mm)
 {
 }
+
+static inline void tlbsync(void)
+{
+}
 #endif

 #ifdef CONFIG_PARAVIRT
@@ -359,21 +364,15 @@ static inline u64 inc_mm_tlb_gen(struct
 	return atomic64_inc_return(&mm->context.tlb_gen);
 }

-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-					     struct mm_struct *mm,
-					     unsigned long uaddr)
-{
-	inc_mm_tlb_gen(mm);
-	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
-	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
-}
-
 static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 {
 	flush_tlb_mm(mm);
 }

 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+				      struct mm_struct *mm,
+				      unsigned long uaddr);

 static inline bool pte_flags_need_flush(unsigned long oldflags,
 					unsigned long newflags,
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -488,6 +488,37 @@ static void finish_asid_transition(struc
 	WRITE_ONCE(mm->context.asid_transition, false);
 }

+static inline void tlbsync(void)
+{
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		return;
+	__tlbsync();
+	this_cpu_write(cpu_tlbstate.need_tlbsync, false);
+}
+
+static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
+						unsigned long addr,
+						u16 nr, bool pmd_stride)
+{
+	__invlpgb_flush_user_nr_nosync(pcid, addr, nr, pmd_stride);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
+static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
+{
+	__invlpgb_flush_single_pcid_nosync(pcid);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
+static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
+{
+	__invlpgb_flush_addr_nosync(addr, nr);
+	if (!this_cpu_read(cpu_tlbstate.need_tlbsync))
+		this_cpu_write(cpu_tlbstate.need_tlbsync, true);
+}
+
 static void broadcast_tlb_flush(struct flush_tlb_info *info)
 {
 	bool pmd = info->stride_shift == PMD_SHIFT;
@@ -794,6 +825,8 @@ void switch_mm_irqs_off(struct mm_struct
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());

+	tlbsync();
+
 	/*
 	 * Verify that CR3 is what we think it is. This will catch
 	 * hypothetical buggy code that directly switches to swapper_pg_dir
@@ -976,6 +1009,8 @@ reload_tlb:
  */
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+	tlbsync();
+
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
 		return;

@@ -1650,9 +1685,7 @@ void arch_tlbbatch_flush(struct arch_tlb
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
-		invlpgb_flush_all_nonglobals();
-	} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
 		flush_tlb_multi(&batch->cpumask, info);
 	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
 		lockdep_assert_irqs_enabled();
@@ -1661,12 +1694,53 @@ void arch_tlbbatch_flush(struct arch_tlb
 		local_irq_enable();
 	}

+	/*
+	 * If we issued (asynchronous) INVLPGB flushes, wait for them here.
+	 * The cpumask above contains only CPUs that were running tasks
+	 * not using broadcast TLB flushing.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+		tlbsync();
+
 	cpumask_clear(&batch->cpumask);

 	put_flush_tlb_info();
 	put_cpu();
 }

+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm,
+			       unsigned long uaddr)
+{
+	u16 asid = mm_global_asid(mm);
+
+	if (asid) {
+		invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false);
+
+		/*
+		 * Some CPUs might still be using a local ASID for this
+		 * process, and require IPIs, while others are using the
+		 * global ASID.
+		 *
+		 * In this corner case we need to do both the broadcast
+		 * TLB invalidation, and send IPIs. The IPIs will help
+		 * stragglers transition to the broadcast ASID.
+		 */
+		if (in_asid_transition(mm))
+			asid = 0;
+	}
+
+	if (!asid) {
+		inc_mm_tlb_gen(mm);
+		cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+	}
+
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
+}
+
 /*
  * Blindly accessing user memory from NMI context can be dangerous
  * if we're in the middle of switching the current user task or