From 8ad4520fc849262ab23adbabebd366d4755035bc Mon Sep 17 00:00:00 2001
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Date: Tue, 3 Jun 2025 14:14:45 +0300
Subject: Revert "mm/execmem: Unify early execmem_cache behaviour"

The commit d6d1e3e6580c ("mm/execmem: Unify early execmem_cache
behaviour") changed the early behaviour of the execmem ROX cache to
allow its usage in early x86 code that allocates text pages when
CONFIG_MITIGATION_ITS is enabled.

The permission management of the pages allocated from execmem for the
ITS mitigation is now completely contained in
arch/x86/kernel/alternatives.c, and therefore there is no need to
special-case early allocations in execmem.

This reverts commit d6d1e3e6580ca35071ad474381f053cbf1fb6414.

Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20250603111446.2609381-6-rppt@kernel.org
---
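For context, the revert leaves permission management entirely to the
execmem callers, using the helpers that stay in include/linux/execmem.h
(see the hunk below): allocate from the ROX cache, temporarily make the
range writable, patch it, then seal it again. Below is a minimal sketch
of that pattern; the function name, the PAGE_SIZE-sized allocation and
the error handling are illustrative assumptions, not the actual ITS code
in arch/x86/kernel/alternatives.c.

/*
 * Illustrative sketch only -- not the actual alternatives.c code.  It
 * shows a caller doing its own permission management with the execmem
 * helpers this patch keeps in include/linux/execmem.h; the function
 * name is hypothetical.
 */
#include <linux/execmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void *example_alloc_rox_text(const void *insns, size_t len)
{
	void *p;

	if (len > PAGE_SIZE)
		return NULL;

	/* With EXECMEM_ROX_CACHE in use, this returns read-only + exec memory. */
	p = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
	if (!p)
		return NULL;

	/* Temporarily make the page writable so the text can be copied in... */
	if (execmem_make_temp_rw(p, PAGE_SIZE))
		goto err;

	memcpy(p, insns, len);

	/* ...and seal it back to read-only + executable. */
	if (execmem_restore_rox(p, PAGE_SIZE))
		goto err;

	return p;

err:
	execmem_free(p);
	return NULL;
}

With the revert applied, execmem_cache_populate() always maps its
backing pages with set_memory_rox(), so the flow above is the same
whether it runs before or after mark_rodata_ro().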
 arch/x86/mm/init_32.c   |  3 ---
 arch/x86/mm/init_64.c   |  3 ---
 include/linux/execmem.h |  8 +-------
 mm/execmem.c            | 40 +++-------------------------------------
 4 files changed, 4 insertions(+), 50 deletions(-)

--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -30,7 +30,6 @@
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
-#include <linux/execmem.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
@@ -756,8 +755,6 @@ void mark_rodata_ro(void)
 	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -34,7 +34,6 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
-#include <linux/execmem.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -1392,8 +1391,6 @@ void mark_rodata_ro(void)
 	       (end - start) >> 10);
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 	/*
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -54,7 +54,7 @@ enum execmem_range_flags {
 	EXECMEM_ROX_CACHE	= (1 << 1),
 };
 
-#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM)
+#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
 /**
  * execmem_fill_trapping_insns - set memory to contain instructions that
  *                               will trap
@@ -94,15 +94,9 @@ int execmem_make_temp_rw(void *ptr, size
  * Return: 0 on success or negative error code on failure.
  */
 int execmem_restore_rox(void *ptr, size_t size);
-
-/*
- * Called from mark_readonly(), where the system transitions to ROX.
- */
-void execmem_cache_make_ro(void);
 #else
 static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; }
 static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
-static inline void execmem_cache_make_ro(void) { }
 #endif
 
 /**
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -254,34 +254,6 @@ out_unlock:
 	return ptr;
 }
 
-static bool execmem_cache_rox = false;
-
-void execmem_cache_make_ro(void)
-{
-	struct maple_tree *free_areas = &execmem_cache.free_areas;
-	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
-	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
-	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
-	struct mutex *mutex = &execmem_cache.mutex;
-	void *area;
-
-	execmem_cache_rox = true;
-
-	mutex_lock(mutex);
-
-	mas_for_each(&mas_free, area, ULONG_MAX) {
-		unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT;
-		set_memory_ro(mas_free.index, pages);
-	}
-
-	mas_for_each(&mas_busy, area, ULONG_MAX) {
-		unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT;
-		set_memory_ro(mas_busy.index, pages);
-	}
-
-	mutex_unlock(mutex);
-}
-
 static int execmem_cache_populate(struct execmem_range *range, size_t size)
 {
 	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
@@ -302,15 +274,9 @@ static int execmem_cache_populate(struct
 	/* fill memory with instructions that will trap */
 	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);
 
-	if (execmem_cache_rox) {
-		err = set_memory_rox((unsigned long)p, vm->nr_pages);
-		if (err)
-			goto err_free_mem;
-	} else {
-		err = set_memory_x((unsigned long)p, vm->nr_pages);
-		if (err)
-			goto err_free_mem;
-	}
+	err = set_memory_rox((unsigned long)p, vm->nr_pages);
+	if (err)
+		goto err_free_mem;
 
 	err = execmem_cache_add(p, alloc_size);
 	if (err)