94 lines
2.7 KiB
Diff
From 8b2bd3f69b50cfe59eee4506413715878bcbb901 Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@surriel.com>
Date: Wed, 22 Jan 2025 23:23:22 -0500
Subject: x86/mm: consolidate full flush threshold decision

Reduce code duplication by consolidating the decision point
for whether to do individual invalidations or a full flush
inside get_flush_tlb_info.

Signed-off-by: Rik van Riel <riel@surriel.com>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
---
 arch/x86/mm/tlb.c | 43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -981,6 +981,15 @@ static struct flush_tlb_info *get_flush_
 	info->new_tlb_gen	= new_tlb_gen;
 	info->initiating_cpu	= smp_processor_id();
 
+	/*
+	 * If the number of flushes is so large that a full flush
+	 * would be faster, do a full flush.
+	 */
+	if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
+		info->start = 0;
+		info->end = TLB_FLUSH_ALL;
+	}
+
 	return info;
 }
 
@@ -998,17 +1007,8 @@ void flush_tlb_mm_range(struct mm_struct
 				bool freed_tables)
 {
 	struct flush_tlb_info *info;
+	int cpu = get_cpu();
 	u64 new_tlb_gen;
-	int cpu;
-
-	cpu = get_cpu();
-
-	/* Should we flush just the requested range? */
-	if ((end == TLB_FLUSH_ALL) ||
-	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
-		start = 0;
-		end = TLB_FLUSH_ALL;
-	}
 
 	/* This is also a barrier that synchronizes with switch_mm(). */
 	new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1060,22 +1060,19 @@ static void do_kernel_range_flush(void *
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	/* Balance as user space task's flush, a bit conservative */
-	if (end == TLB_FLUSH_ALL ||
-	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
-		on_each_cpu(do_flush_tlb_all, NULL, 1);
-	} else {
-		struct flush_tlb_info *info;
+	struct flush_tlb_info *info;
 
-		preempt_disable();
-		info = get_flush_tlb_info(NULL, start, end, 0, false,
-					  TLB_GENERATION_INVALID);
+	guard(preempt)();
+
+	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+				  TLB_GENERATION_INVALID);
 
+	if (info->end == TLB_FLUSH_ALL)
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else
 		on_each_cpu(do_kernel_range_flush, info, 1);
 
-		put_flush_tlb_info();
-		preempt_enable();
-	}
+	put_flush_tlb_info();
 }
 
 /*
@@ -1247,7 +1244,7 @@ void arch_tlbbatch_flush(struct arch_tlb
 
 	int cpu = get_cpu();
 
-	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
+	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, PAGE_SHIFT, false,
 				  TLB_GENERATION_INVALID);
 	/*
 	 * flush_tlb_multi() is not optimized for the common case in which only