From 647727eaa06fc61fbc55de4c09ab0c0fe7bc7263 Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@surriel.com>
Date: Wed, 22 Jan 2025 23:23:29 -0500
Subject: x86/mm: do targeted broadcast flushing from tlbbatch code

Instead of doing a system-wide TLB flush from arch_tlbbatch_flush,
queue up asynchronous, targeted flushes from arch_tlbbatch_add_pending.

This also allows us to avoid adding the CPUs of processes using broadcast
flushing to the batch->cpumask, and will hopefully further reduce TLB
flushing from the reclaim and compaction paths.

Signed-off-by: Rik van Riel <riel@surriel.com>
---
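For review, here is a rough, userspace-only sketch of the control flow this
patch sets up. It is not kernel code: queue_broadcast_invalidate(),
wait_for_broadcasts() and send_flush_ipis() are illustrative stand-ins for
invlpgb_flush_user_nr_nosync(), TLBSYNC and flush_tlb_multi(), and the
asid_transition corner case handled in the real patch is omitted.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for struct arch_tlbflush_unmap_batch. */
	struct batch {
		bool used_invlpgb;	/* at least one broadcast was queued */
		unsigned long cpumask;	/* stand-in for struct cpumask */
	};

	/* Stand-ins for INVLPGB, TLBSYNC and flush_tlb_multi(). */
	static void queue_broadcast_invalidate(unsigned long addr)
	{
		printf("INVLPGB (nosync) for %#lx\n", addr);
	}
	static void wait_for_broadcasts(void)
	{
		printf("TLBSYNC\n");
	}
	static void send_flush_ipis(unsigned long mask)
	{
		if (mask)
			printf("IPI flush, cpumask=%#lx\n", mask);
	}

	/* Models arch_tlbbatch_add_pending(): broadcast vs. IPI batching. */
	static void add_pending(struct batch *b, bool has_global_asid,
				unsigned long mm_cpumask, unsigned long addr)
	{
		if (has_global_asid) {
			/* Async broadcast; the TLBSYNC happens later in flush(). */
			b->used_invlpgb = true;
			queue_broadcast_invalidate(addr);
			return;
		}
		/* No global ASID: accumulate CPUs for an IPI-based flush. */
		b->cpumask |= mm_cpumask;
	}

	/* Models arch_tlbbatch_flush(): one IPI flush, one wait for broadcasts. */
	static void flush(struct batch *b)
	{
		send_flush_ipis(b->cpumask);
		if (b->used_invlpgb)
			wait_for_broadcasts();
		b->cpumask = 0;
		b->used_invlpgb = false;
	}

	int main(void)
	{
		struct batch b = { 0 };

		add_pending(&b, true,  0x3, 0x7f1000);	/* broadcast path */
		add_pending(&b, false, 0xc, 0x400000);	/* IPI path */
		flush(&b);
		return 0;
	}
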
 arch/x86/include/asm/tlbbatch.h |  1 +
 arch/x86/include/asm/tlbflush.h | 12 ++-----
 arch/x86/mm/tlb.c               | 57 +++++++++++++++++++++++++++++++--
 3 files changed, 58 insertions(+), 12 deletions(-)

--- a/arch/x86/include/asm/tlbbatch.h
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -10,6 +10,7 @@ struct arch_tlbflush_unmap_batch {
 	 * the PFNs being flushed..
 	 */
 	struct cpumask cpumask;
+	bool used_invlpgb;
 };
 
 #endif /* _ARCH_X86_TLBBATCH_H */
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -358,21 +358,15 @@ static inline u64 inc_mm_tlb_gen(struct
 	return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-					     struct mm_struct *mm,
-					     unsigned long uaddr)
-{
-	inc_mm_tlb_gen(mm);
-	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
-	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
-}
-
 static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 {
 	flush_tlb_mm(mm);
 }
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+				      struct mm_struct *mm,
+				      unsigned long uaddr);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
 					unsigned long newflags,
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1612,9 +1612,7 @@ void arch_tlbbatch_flush(struct arch_tlb
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
-		invlpgb_flush_all_nonglobals();
-	} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
 		flush_tlb_multi(&batch->cpumask, info);
 	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
 		lockdep_assert_irqs_enabled();
@@ -1623,12 +1621,65 @@ void arch_tlbbatch_flush(struct arch_tlb
 		local_irq_enable();
 	}
 
+	/*
+	 * If we issued (asynchronous) INVLPGB flushes, wait for them here.
+	 * The cpumask above contains only CPUs that were running tasks
+	 * not using broadcast TLB flushing.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB) && batch->used_invlpgb) {
+		tlbsync();
+		migrate_enable();
+		batch->used_invlpgb = false;
+	}
+
 	cpumask_clear(&batch->cpumask);
 
 	put_flush_tlb_info();
 	put_cpu();
 }
 
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm,
+			       unsigned long uaddr)
+{
+	u16 asid = mm_global_asid(mm);
+
+	if (asid) {
+		/*
+		 * Queue up an asynchronous invalidation. The corresponding
+		 * TLBSYNC is done in arch_tlbbatch_flush(), and must be done
+		 * on the same CPU.
+		 */
+		if (!batch->used_invlpgb) {
+			batch->used_invlpgb = true;
+			migrate_disable();
+		}
+		invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false);
+
+		/*
+		 * Some CPUs might still be using a local ASID for this
+		 * process, and require IPIs, while others are using the
+		 * global ASID.
+		 *
+		 * In this corner case we need to do both the broadcast
+		 * TLB invalidation, and send IPIs. The IPIs will help
+		 * stragglers transition to the broadcast ASID.
+		 */
+		if (READ_ONCE(mm->context.asid_transition))
+			asid = 0;
+	}
+
+	if (!asid) {
+		inc_mm_tlb_gen(mm);
+		cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+	}
+
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
+}
+
 /*
  * Blindly accessing user memory from NMI context can be dangerous
  * if we're in the middle of switching the current user task or