sched/fair: Avoid rq->lock bouncing in sched_balance_newidle()
While poking at this code recently, I noticed we do a pointless unlock+lock cycle in sched_balance_newidle(): we drop rq->lock (so we can balance), but then instantly grab the same rq->lock again in sched_balance_update_blocked_averages().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251127154725.532469061@infradead.org
commit 45e0922508
parent 089d84203a
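To make the bounce concrete: sched_balance_newidle() is entered with rq->lock held, releases it so it can pull tasks from other runqueues, and the very next thing it did was call sched_balance_update_blocked_averages(), which immediately re-acquired that same lock. A minimal userspace sketch of the before/after shape, using a pthread mutex in place of rq->lock (the function names are illustrative stand-ins, not the kernel's):

#include <pthread.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_blocked_averages_locked(void)
{
        /* stand-in for the real update work; caller holds the lock */
}

/* Old shape: unlock, then immediately re-lock inside the helper. */
static void newidle_old(void)
{
        /* entered with rq_lock held */
        pthread_mutex_unlock(&rq_lock);    /* drop it to go balance... */

        pthread_mutex_lock(&rq_lock);      /* ...and instantly re-take it */
        update_blocked_averages_locked();
        pthread_mutex_unlock(&rq_lock);
}

/* New shape: do the update while the lock is still held, drop it once. */
static void newidle_new(void)
{
        /* entered with rq_lock held */
        update_blocked_averages_locked();
        pthread_mutex_unlock(&rq_lock);
}

int main(void)
{
        pthread_mutex_lock(&rq_lock);
        newidle_old();
        pthread_mutex_lock(&rq_lock);
        newidle_new();
        return 0;
}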
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9905,15 +9905,11 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif /* !CONFIG_FAIR_GROUP_SCHED */
 
-static void sched_balance_update_blocked_averages(int cpu)
+static void __sched_balance_update_blocked_averages(struct rq *rq)
 {
         bool decayed = false, done = true;
-        struct rq *rq = cpu_rq(cpu);
-        struct rq_flags rf;
 
-        rq_lock_irqsave(rq, &rf);
         update_blocked_load_tick(rq);
-        update_rq_clock(rq);
 
         decayed |= __update_blocked_others(rq, &done);
         decayed |= __update_blocked_fair(rq, &done);
@@ -9921,7 +9917,15 @@ static void sched_balance_update_blocked_averages(int cpu)
         update_blocked_load_status(rq, !done);
         if (decayed)
                 cpufreq_update_util(rq, 0);
-        rq_unlock_irqrestore(rq, &rf);
+}
+
+static void sched_balance_update_blocked_averages(int cpu)
+{
+        struct rq *rq = cpu_rq(cpu);
+
+        guard(rq_lock_irqsave)(rq);
+        update_rq_clock(rq);
+        __sched_balance_update_blocked_averages(rq);
 }
 
 /********** Helpers for sched_balance_find_src_group ************************/
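The rewritten sched_balance_update_blocked_averages() uses guard(rq_lock_irqsave)(rq) from the kernel's cleanup.h guard infrastructure: the lock is taken at the declaration and released automatically when the enclosing scope ends, which is why no explicit unlock remains. The mechanism underneath is the GCC/Clang cleanup attribute; a standalone sketch of the same idea against a pthread mutex (the macro and names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* A crude analogue of guard(): lock now, unlock at end of scope. */
#define scoped_guard_lock(m) \
        pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup))) = \
                (pthread_mutex_lock(m), (m))

static int counter;

static void increment(void)
{
        scoped_guard_lock(&lock);
        counter++;
        /* no explicit unlock: the cleanup handler fires here */
}

int main(void)
{
        increment();
        printf("%d\n", counter);
        return 0;
}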
@@ -12868,12 +12872,17 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
         }
         rcu_read_unlock();
 
+        /*
+         * Include sched_balance_update_blocked_averages() in the cost
+         * calculation because it can be quite costly -- this ensures we skip
+         * it when avg_idle gets to be very low.
+         */
+        t0 = sched_clock_cpu(this_cpu);
+        __sched_balance_update_blocked_averages(this_rq);
+
+        rq_modified_clear(this_rq);
         raw_spin_rq_unlock(this_rq);
 
-        t0 = sched_clock_cpu(this_cpu);
-        sched_balance_update_blocked_averages(this_cpu);
-
         rcu_read_lock();
         for_each_domain(this_cpu, sd) {
                 u64 domain_cost;
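With the update now done under the still-held rq->lock, it also lands inside the t0-based cost accounting that sched_balance_newidle() keeps per balancing step, so work whose recorded cost exceeds the CPU's expected idle time (avg_idle) gets skipped. A rough userspace analogue of that measure-and-skip pattern (timing source, names, and thresholds are illustrative):

#include <stdint.h>
#include <time.h>

static uint64_t clock_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t max_cost_ns;    /* analogue of the recorded max cost */

static void expensive_update(void)
{
        /* stand-in for the blocked-averages update */
}

/* Skip the update when the expected idle window is shorter than its cost. */
static void maybe_update(uint64_t avg_idle_ns)
{
        uint64_t t0, cost;

        if (avg_idle_ns < max_cost_ns)
                return;         /* not enough idle time left: skip it */

        t0 = clock_ns();
        expensive_update();
        cost = clock_ns() - t0;
        if (cost > max_cost_ns)
                max_cost_ns = cost;     /* remember the worst cost seen */
}

int main(void)
{
        maybe_update(1000000);  /* pretend 1ms of expected idle time */
        return 0;
}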