release 6.14.7
debian/patches/patchset-pf/invlpgb/0003-x86-mm-Consolidate-full-flush-threshold-decision.patch | 87 additions (vendored, normal file)
@@ -0,0 +1,87 @@
From 170f37d1499a28f7a1902e007111867c7cf0147f Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@surriel.com>
Date: Tue, 25 Feb 2025 22:00:36 -0500
Subject: x86/mm: Consolidate full flush threshold decision

Reduce code duplication by consolidating the decision point for whether to do
individual invalidations or a full flush inside get_flush_tlb_info().

Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Link: https://lore.kernel.org/r/20250226030129.530345-2-riel@surriel.com
---
 arch/x86/mm/tlb.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1019,6 +1019,15 @@ static struct flush_tlb_info *get_flush_
 	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
 #endif
 
+	/*
+	 * If the number of flushes is so large that a full flush
+	 * would be faster, do a full flush.
+	 */
+	if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
+		start = 0;
+		end = TLB_FLUSH_ALL;
+	}
+
 	info->start		= start;
 	info->end		= end;
 	info->mm		= mm;
@@ -1045,17 +1054,8 @@ void flush_tlb_mm_range(struct mm_struct
 				bool freed_tables)
 {
 	struct flush_tlb_info *info;
+	int cpu = get_cpu();
 	u64 new_tlb_gen;
-	int cpu;
-
-	cpu = get_cpu();
-
-	/* Should we flush just the requested range? */
-	if ((end == TLB_FLUSH_ALL) ||
-	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
-		start = 0;
-		end = TLB_FLUSH_ALL;
-	}
 
 	/* This is also a barrier that synchronizes with switch_mm(). */
 	new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1108,22 +1108,19 @@ static void do_kernel_range_flush(void *
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	/* Balance as user space task's flush, a bit conservative */
-	if (end == TLB_FLUSH_ALL ||
-	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
-		on_each_cpu(do_flush_tlb_all, NULL, 1);
-	} else {
-		struct flush_tlb_info *info;
+	struct flush_tlb_info *info;
 
-		preempt_disable();
-		info = get_flush_tlb_info(NULL, start, end, 0, false,
-					  TLB_GENERATION_INVALID);
+	guard(preempt)();
 
+	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+				  TLB_GENERATION_INVALID);
+
+	if (info->end == TLB_FLUSH_ALL)
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else
 		on_each_cpu(do_kernel_range_flush, info, 1);
 
-		put_flush_tlb_info();
-		preempt_enable();
-	}
+	put_flush_tlb_info();
 }
 
 /*
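
The heuristic this patch centralizes in get_flush_tlb_info() can be illustrated with a small standalone C sketch. Everything below is demo code, not part of the vendored patch and not kernel API: decide_flush_range() and FLUSH_CEILING are made-up names, and the ceiling of 33 merely mirrors the default that tlb_single_page_flush_ceiling is understood to use.

/*
 * Illustrative sketch only: if flushing a range one page at a time would
 * exceed a ceiling, fall back to a full TLB flush, mirroring the
 * consolidated check that the patch moves into get_flush_tlb_info().
 */
#include <stdio.h>

#define TLB_FLUSH_ALL	(~0UL)	/* sentinel meaning "flush everything" */
#define FLUSH_CEILING	33UL	/* assumed default ceiling for this demo */

struct flush_range {
	unsigned long start;
	unsigned long end;
};

/* Decide between a ranged flush and a full flush (demo helper). */
static struct flush_range decide_flush_range(unsigned long start,
					     unsigned long end,
					     unsigned int stride_shift)
{
	struct flush_range r = { start, end };

	/* Too many per-page invalidations? Then flush everything. */
	if ((end - start) >> stride_shift > FLUSH_CEILING) {
		r.start = 0;
		r.end = TLB_FLUSH_ALL;
	}
	return r;
}

int main(void)
{
	/* 16 pages of 4 KiB stay a ranged flush. */
	struct flush_range a = decide_flush_range(0x1000, 0x11000, 12);
	/* 64 MiB of 4 KiB pages exceeds the ceiling and becomes a full flush. */
	struct flush_range b = decide_flush_range(0, 64UL << 20, 12);

	printf("small range -> full flush? %d\n", a.end == TLB_FLUSH_ALL);
	printf("large range -> full flush? %d\n", b.end == TLB_FLUSH_ALL);
	return 0;
}

With the decision made inside the helper, callers such as flush_tlb_mm_range() and flush_tlb_kernel_range() only have to check whether the returned end is TLB_FLUSH_ALL, which is why the duplicated threshold checks removed in the hunks above were no longer needed.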