From: Frederic Weisbecker <frederic@kernel.org>
Date: Tue, 5 Apr 2022 03:07:51 +0200
Subject: [PATCH] rcutorture: Also force sched priority to timersd on
 boosting test.
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.11/older/patches-6.11-rt7.tar.xz

ksoftirqd is statically boosted to the priority level right above the
one of rcu_torture_boost() so that timers, which torture readers rely on,
get a chance to run while rcu_torture_boost() is polling.

However timers processing got split from ksoftirqd into their own kthread
(timersd) that isn't boosted. It has the same SCHED_FIFO low prio as
rcu_torture_boost() and therefore timers can't preempt it and may
starve.

The issue can be triggered in practice on v5.17.1-rt17 using:

	./kvm.sh --allcpus --configs TREE04 --duration 10m --kconfig "CONFIG_EXPERT=y CONFIG_PREEMPT_RT=y"

Fix this with statically boosting timersd just like is done with
ksoftirqd in commit
ea6d962e80b61 ("rcutorture: Judge RCU priority boosting on grace periods, not callbacks")

Suggested-by: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lkml.kernel.org/r/20220405010752.1347437-1-frederic@kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/interrupt.h | 1 +
 kernel/rcu/rcutorture.c   | 6 ++++++
 kernel/softirq.c          | 2 +-
 3 files changed, 8 insertions(+), 1 deletion(-)

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -617,6 +617,7 @@ extern void raise_softirq_irqoff(unsigne
 extern void raise_softirq(unsigned int nr);
 
 #ifdef CONFIG_PREEMPT_RT
+DECLARE_PER_CPU(struct task_struct *, timersd);
 extern void raise_timer_softirq(void);
 extern void raise_hrtimer_softirq(void);
 
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2425,6 +2425,12 @@ static int rcutorture_booster_init(unsig
 	WARN_ON_ONCE(!t);
 	sp.sched_priority = 2;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#ifdef CONFIG_PREEMPT_RT
+	t = per_cpu(timersd, cpu);
+	WARN_ON_ONCE(!t);
+	sp.sched_priority = 2;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#endif
 }
 
 /* Don't allow time recalculation while creating a new task. */
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -625,7 +625,7 @@ static inline void tick_irq_exit(void)
 }
 
 #ifdef CONFIG_PREEMPT_RT
-static DEFINE_PER_CPU(struct task_struct *, timersd);
+DEFINE_PER_CPU(struct task_struct *, timersd);
 static DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
 
 static unsigned int local_pending_timers(void)