Konstantin Demin
c3d09a3e94
imported from https://salsa.debian.org/kernel-team/linux.git commit 9d5cc9d9d6501d7f1dd7e194d4b245bd0b6c6a22 version 6.11.4-1
61 lines
2.1 KiB
Diff
61 lines
2.1 KiB
Diff
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
|
Date: Fri, 4 Aug 2023 13:30:38 +0200
|
|
Subject: [PATCH 2/3] softirq: Add function to preempt serving softirqs.
|
|
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.11/older/patches-6.11-rt7.tar.xz
|
|
|
|
Add functionality for the softirq handler to preempt its current work
|
|
if needed. The softirq core has no particular state. It reads and resets
|
|
the pending softirq bits and then processes one after the other.
|
|
It can already be preempted while it invokes a certain softirq handler.
|
|
|
|
By enabling the BH the softirq core releases the per-CPU bh lock which
|
|
serializes all softirq handlers. It is safe to do as long as the code
|
|
does not expect any serialisation in between. A typical scenario would be
|
|
after the invocation of a callback where no state needs to be preserved
|
|
before the next callback is invoked.
|
|
|
|
Add functionality to preempt the serving softirqs.
|
|
|
|
Link: https://lore.kernel.org/r/20230804113039.419794-3-bigeasy@linutronix.de
|
|
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
|
---
|
|
include/linux/bottom_half.h | 2 ++
|
|
kernel/softirq.c | 13 +++++++++++++
|
|
2 files changed, 15 insertions(+)
|
|
|
|
--- a/include/linux/bottom_half.h
|
|
+++ b/include/linux/bottom_half.h
|
|
@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
|
|
|
|
#ifdef CONFIG_PREEMPT_RT
|
|
extern bool local_bh_blocked(void);
|
|
+extern void softirq_preempt(void);
|
|
#else
|
|
static inline bool local_bh_blocked(void) { return false; }
|
|
+static inline void softirq_preempt(void) { }
|
|
#endif
|
|
|
|
#endif /* _LINUX_BH_H */
|
|
--- a/kernel/softirq.c
|
|
+++ b/kernel/softirq.c
|
|
@@ -248,6 +248,19 @@ void __local_bh_enable_ip(unsigned long
|
|
}
|
|
EXPORT_SYMBOL(__local_bh_enable_ip);
|
|
|
|
+void softirq_preempt(void)
|
|
+{
|
|
+ if (WARN_ON_ONCE(!preemptible()))
|
|
+ return;
|
|
+
|
|
+ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
|
|
+ return;
|
|
+
|
|
+ __local_bh_enable(SOFTIRQ_OFFSET, true);
|
|
+ /* preemption point */
|
|
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
|
|
+}
|
|
+
|
|
/*
|
|
* Invoked from ksoftirqd_run() outside of the interrupt disabled section
|
|
* to acquire the per CPU local lock for reentrancy protection.
|