release 6.12.16

2025-02-21 18:45:20 +03:00
parent 92542309fb
commit 40abc00198
22 changed files with 64 additions and 36 deletions

View File

@@ -75,7 +75,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
.mmu.notify_page_enc_status_changed = paravirt_nop,
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
-@@ -2137,7 +2137,6 @@ static const typeof(pv_ops) xen_mmu_ops
+@@ -2189,7 +2189,6 @@ static const typeof(pv_ops) xen_mmu_ops
.flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_multi = xen_flush_tlb_multi,

View File

@@ -15,7 +15,7 @@ Suggested-by: Dave Hansen <dave.hansen@intel.com>
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
-@@ -973,14 +973,32 @@ static struct flush_tlb_info *get_flush_
+@@ -1000,8 +1000,13 @@ static struct flush_tlb_info *get_flush_
BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif
@@ -31,8 +31,9 @@ Suggested-by: Dave Hansen <dave.hansen@intel.com>
info->mm = mm;
info->stride_shift = stride_shift;
info->freed_tables = freed_tables;
info->new_tlb_gen = new_tlb_gen;
@@ -1009,6 +1014,19 @@ static struct flush_tlb_info *get_flush_
info->initiating_cpu = smp_processor_id();
info->trim_cpumask = 0;
+ WARN_ONCE(start != info->start || end != info->end,
+ "TLB flush not stride %x aligned. Start %lx, end %lx\n",
@@ -50,7 +51,7 @@ Suggested-by: Dave Hansen <dave.hansen@intel.com>
return info;
}
-@@ -998,17 +1016,8 @@ void flush_tlb_mm_range(struct mm_struct
+@@ -1026,17 +1044,8 @@ void flush_tlb_mm_range(struct mm_struct
bool freed_tables)
{
struct flush_tlb_info *info;
@@ -69,7 +70,7 @@ Suggested-by: Dave Hansen <dave.hansen@intel.com>
/* This is also a barrier that synchronizes with switch_mm(). */
new_tlb_gen = inc_mm_tlb_gen(mm);
-@@ -1060,22 +1069,19 @@ static void do_kernel_range_flush(void *
+@@ -1089,22 +1098,19 @@ static void do_kernel_range_flush(void *
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
@@ -101,7 +102,7 @@ Suggested-by: Dave Hansen <dave.hansen@intel.com>
}
/*
-@@ -1247,7 +1253,7 @@ void arch_tlbbatch_flush(struct arch_tlb
+@@ -1276,7 +1282,7 @@ void arch_tlbbatch_flush(struct arch_tlb
int cpu = get_cpu();
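
Editor's note: the WARN_ONCE added above fires when a caller hands get_flush_tlb_info() a range that is not aligned to the flush stride. A minimal standalone sketch of that alignment rule (the rounding helpers below are illustrative stand-ins, not the kernel's):

#include <stdio.h>

/* Round a flush range outward to the stride the hunk above checks:
 * stride_shift is 12 (PAGE_SHIFT) for 4K pages, 21 (PMD_SHIFT) for 2M. */
static unsigned long stride_round_down(unsigned long addr, unsigned int shift)
{
	return addr & ~((1UL << shift) - 1);
}

static unsigned long stride_round_up(unsigned long addr, unsigned int shift)
{
	return (addr + (1UL << shift) - 1) & ~((1UL << shift) - 1);
}

int main(void)
{
	unsigned long start = 0x1234, end = 0x5678;
	unsigned int stride_shift = 12;

	/* An unaligned request like this would trip the WARN_ONCE;
	 * the rounded range below would not. */
	printf("aligned range: %#lx-%#lx\n",
	       stride_round_down(start, stride_shift),
	       stride_round_up(end, stride_shift));
	return 0;
}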

View File

@@ -15,7 +15,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
-@@ -1057,6 +1057,30 @@ void flush_tlb_all(void)
+@@ -1086,6 +1086,30 @@ void flush_tlb_all(void)
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
@@ -46,7 +46,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
static void do_kernel_range_flush(void *info)
{
struct flush_tlb_info *f = info;
-@@ -1076,7 +1100,9 @@ void flush_tlb_kernel_range(unsigned lon
+@@ -1105,7 +1129,9 @@ void flush_tlb_kernel_range(unsigned lon
info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
TLB_GENERATION_INVALID);
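
Editor's note: in the upstream INVLPGB series this patch tracks, the ~24 lines added near flush_tlb_all() introduce a broadcast path for kernel-range flushes, letting flush_tlb_kernel_range() avoid cross-CPU IPIs. A simplified sketch under that assumption; invlpgb_flush_addr_nosync() and tlbsync() are the series' primitives and are assumed here, not quoted from the patch:

/* Sketch: issue one no-wait broadcast invalidation per page in the
 * range, then a single TLBSYNC to wait for all of them at once. */
static void broadcast_kernel_range_flush(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* TLBSYNC must run on the CPU that issued the INVLPGBs. */
	preempt_disable();
	for (addr = start; addr < end; addr += PAGE_SIZE)
		invlpgb_flush_addr_nosync(addr, 1);	/* assumed primitive */
	tlbsync();					/* wait once, at the end */
	preempt_enable();
}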

View File

@@ -14,7 +14,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
-@@ -1045,6 +1045,19 @@ void flush_tlb_mm_range(struct mm_struct
+@@ -1074,6 +1074,19 @@ void flush_tlb_mm_range(struct mm_struct
}
@@ -34,7 +34,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-@@ -1053,6 +1066,8 @@ static void do_flush_tlb_all(void *info)
+@@ -1082,6 +1095,8 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{

View File

@@ -17,7 +17,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
-@@ -1301,7 +1301,9 @@ void arch_tlbbatch_flush(struct arch_tlb
+@@ -1330,7 +1330,9 @@ void arch_tlbbatch_flush(struct arch_tlb
* a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case.
*/

View File

@@ -22,7 +22,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
-@@ -67,6 +67,12 @@ typedef struct {
+@@ -69,6 +69,12 @@ typedef struct {
u16 pkey_allocation_map;
s16 execute_only_pkey;
#endif
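
Editor's note: the six lines added to mm_context_t here carry the per-mm broadcast-flush state. The field names below are inferred from the mm_global_asid()/in_asid_transition() helpers visible later in this diff, and the config symbol is likewise assumed; treat this as a sketch, not the exact hunk:

#ifdef CONFIG_BROADCAST_TLB_FLUSH
	/* Non-zero when the mm uses one ASID on every CPU, enabling
	 * hardware-assisted remote invalidation (AMD INVLPGB). */
	u16 global_asid;

	/* Set while the mm is switching over to its global ASID. */
	bool asid_transition;
#endif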
@@ -46,7 +46,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
-@@ -160,6 +162,14 @@ static inline int init_new_context(struc
+@@ -161,6 +163,14 @@ static inline int init_new_context(struc
mm->context.execute_only_pkey = -1;
}
#endif
@@ -61,7 +61,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
-@@ -169,6 +179,10 @@ static inline int init_new_context(struc
+@@ -170,6 +180,10 @@ static inline int init_new_context(struc
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
@@ -82,7 +82,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
-@@ -238,6 +239,78 @@ void flush_tlb_one_kernel(unsigned long
+@@ -239,6 +240,78 @@ void flush_tlb_one_kernel(unsigned long
void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
@@ -578,7 +578,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
f->new_tlb_gen <= local_tlb_gen)) {
/*
-@@ -926,7 +1243,7 @@ STATIC_NOPV void native_flush_tlb_multi(
+@@ -953,7 +1270,7 @@ STATIC_NOPV void native_flush_tlb_multi(
* up on the new contents of what used to be page tables, while
* doing a speculative memory access.
*/
@@ -586,8 +586,8 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
+ if (info->freed_tables || in_asid_transition(info->mm))
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
-@@ -1030,8 +1347,11 @@ void flush_tlb_mm_range(struct mm_struct
on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
+@@ -1058,9 +1375,12 @@ void flush_tlb_mm_range(struct mm_struct
* a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case.
*/
@@ -595,6 +595,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
+ if (mm_global_asid(mm)) {
+ broadcast_tlb_flush(info);
+ } else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
info->trim_cpumask = should_trim_cpumask(mm);
flush_tlb_multi(mm_cpumask(mm), info);
+ consider_global_asid(mm);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
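
Editor's note: putting the refreshed hunk together, flush_tlb_mm_range() now picks between three paths. A consolidated sketch of that decision ladder, built from the lines visible above (the body of the final branch is elided here):

if (mm_global_asid(mm)) {
	/* One INVLPGB broadcast invalidates every CPU's entries
	 * for this mm; no IPIs are sent. */
	broadcast_tlb_flush(info);
} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
	/* Other CPUs may hold entries: fall back to IPI-based
	 * flushing, then check whether the mm is busy enough to
	 * deserve a global ASID next time. */
	info->trim_cpumask = should_trim_cpumask(mm);
	flush_tlb_multi(mm_cpumask(mm), info);
	consider_global_asid(mm);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
	/* Only this CPU uses the mm: flush locally. */
}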

View File

@@ -90,7 +90,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
#ifdef CONFIG_ADDRESS_MASKING
/*
-@@ -309,6 +310,10 @@ static inline void broadcast_tlb_flush(s
+@@ -310,6 +311,10 @@ static inline void broadcast_tlb_flush(s
static inline void consider_global_asid(struct mm_struct *mm)
{
}
@@ -101,7 +101,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
#endif
#ifdef CONFIG_PARAVIRT
-@@ -358,21 +363,15 @@ static inline u64 inc_mm_tlb_gen(struct
+@@ -359,21 +364,15 @@ static inline u64 inc_mm_tlb_gen(struct
return atomic64_inc_return(&mm->context.tlb_gen);
}
@@ -184,7 +184,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
-@@ -1621,9 +1656,7 @@ void arch_tlbbatch_flush(struct arch_tlb
+@@ -1650,9 +1685,7 @@ void arch_tlbbatch_flush(struct arch_tlb
* a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case.
*/
@@ -195,7 +195,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
flush_tlb_multi(&batch->cpumask, info);
} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
lockdep_assert_irqs_enabled();
-@@ -1632,12 +1665,53 @@ void arch_tlbbatch_flush(struct arch_tlb
+@@ -1661,12 +1694,53 @@ void arch_tlbbatch_flush(struct arch_tlb
local_irq_enable();
}

View File

@@ -65,7 +65,7 @@ Tested-by: Manali Shukla <Manali.Shukla@amd.com>
addr += nr << info->stride_shift;
} while (addr < info->end);
-@@ -1686,10 +1687,10 @@ void arch_tlbbatch_add_pending(struct ar
+@@ -1715,10 +1716,10 @@ void arch_tlbbatch_add_pending(struct ar
u16 asid = mm_global_asid(mm);
if (asid) {

View File

@@ -61,7 +61,7 @@ Subject: ZEN: drm/amdgpu/pm: Allow override of min_power_limit with
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
-@@ -2792,7 +2792,10 @@ int smu_get_power_limit(void *handle,
+@@ -2793,7 +2793,10 @@ int smu_get_power_limit(void *handle,
*limit = smu->max_power_limit;
break;
case SMU_PPT_LIMIT_MIN:
@@ -73,7 +73,7 @@ Subject: ZEN: drm/amdgpu/pm: Allow override of min_power_limit with
break;
default:
return -EINVAL;
-@@ -2816,7 +2819,14 @@ static int smu_set_power_limit(void *han
+@@ -2817,7 +2820,14 @@ static int smu_set_power_limit(void *han
if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
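
Editor's note: the subject line above is truncated, so the actual override's name is not visible in this diff. Assuming a hypothetical module flag amdgpu_override_min_pcap, the get-side change (+3 lines at SMU_PPT_LIMIT_MIN) plausibly looks like this sketch:

	case SMU_PPT_LIMIT_MIN:
		/* With the (hypothetical) override flag set, advertise a
		 * minimum of 0 so userspace may set limits below the
		 * vendor floor; otherwise keep the stock behaviour. */
		if (amdgpu_override_min_pcap)
			*limit = 0;
		else
			*limit = smu->min_power_limit;
		break;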