Hello
> The same bpf plague bug...
> +bpf mailing list
>
It is perhaps due to the one-way trip of irq flags introduced in
commit dd934aa8ad1f ("hrtimer: Use irqsave/irqrestore around __run_hrtimer()"):
the flags are restored when the lock is dropped around the timer callback,
but the lock is then reacquired with a plain irq-disable rather than irqsave.
Make that trip a round trip by passing the flags down by pointer and
reacquiring with raw_spin_lock_irqsave().
Hillf
---
kernel/time/hrtimer.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 41dfff2..7ea6b97 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1349,7 +1349,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
struct hrtimer *timer, ktime_t *now,
- unsigned long flags)
+ unsigned long *flags)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
@@ -1384,11 +1384,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
* protected against migration to a different CPU even if the lock
* is dropped.
*/
- raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, *flags);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- raw_spin_lock_irq(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, *flags);
/*
* Note: We clear the running state after enqueue_hrtimer and
@@ -1417,7 +1417,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
}
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
- unsigned long flags, unsigned int active_mask)
+ unsigned long *flags, unsigned int active_mask)
{
struct hrtimer_clock_base *base;
unsigned int active = cpu_base->active_bases & active_mask;
@@ -1462,7 +1462,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
+ __hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_SOFT);
cpu_base->softirq_activated = 0;
hrtimer_update_softirq_timer(cpu_base, true);
@@ -1506,7 +1506,7 @@ retry:
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
- __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ __hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
@@ -1619,7 +1619,7 @@ void hrtimer_run_queues(void)
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
- __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ __hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
--