mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
sched: Add deadline tracepoints
Add the following tracepoints:
* sched_dl_throttle(dl_se, cpu, type):
Called when a deadline entity is throttled
* sched_dl_replenish(dl_se, cpu, type):
Called when a deadline entity's runtime is replenished
* sched_dl_update(dl_se, cpu, type):
Called when a deadline entity updates without throttle or replenish
* sched_dl_server_start(dl_se, cpu, type):
Called when a deadline server is started
* sched_dl_server_stop(dl_se, cpu, type):
Called when a deadline server is stopped
These tracepoints are useful for validating the deadline scheduler with
RV (runtime verification) and are not exported to tracefs.
Reviewed-by: Phil Auld <pauld@redhat.com>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20260330111010.153663-11-gmonaco@redhat.com
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
This commit is contained in:
parent
da282bf7fa
commit
820725b0eb
|
|
@ -896,6 +896,32 @@ DECLARE_TRACE(sched_set_need_resched,
|
|||
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
|
||||
TP_ARGS(tsk, cpu, tif));
|
||||
|
||||
#define DL_OTHER 0
|
||||
#define DL_TASK 1
|
||||
#define DL_SERVER_FAIR 2
|
||||
#define DL_SERVER_EXT 3
|
||||
|
||||
DECLARE_TRACE(sched_dl_throttle,
|
||||
TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
|
||||
TP_ARGS(dl_se, cpu, type));
|
||||
|
||||
DECLARE_TRACE(sched_dl_replenish,
|
||||
TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
|
||||
TP_ARGS(dl_se, cpu, type));
|
||||
|
||||
/* Call to update_curr_dl_se not involving throttle or replenish */
|
||||
DECLARE_TRACE(sched_dl_update,
|
||||
TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
|
||||
TP_ARGS(dl_se, cpu, type));
|
||||
|
||||
DECLARE_TRACE(sched_dl_server_start,
|
||||
TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
|
||||
TP_ARGS(dl_se, cpu, type));
|
||||
|
||||
DECLARE_TRACE(sched_dl_server_stop,
|
||||
TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
|
||||
TP_ARGS(dl_se, cpu, type));
|
||||
|
||||
#endif /* _TRACE_SCHED_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
|
|
|||
|
|
@ -122,6 +122,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
|
|||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_throttle_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_replenish_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_update_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_start_tp);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_stop_tp);
|
||||
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
|
||||
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
|
||||
|
|
|
|||
|
|
@ -115,6 +115,19 @@ static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
|
|||
}
|
||||
#endif /* !CONFIG_RT_MUTEXES */
|
||||
|
||||
static inline u8 dl_get_type(struct sched_dl_entity *dl_se, struct rq *rq)
|
||||
{
|
||||
if (!dl_server(dl_se))
|
||||
return DL_TASK;
|
||||
if (dl_se == &rq->fair_server)
|
||||
return DL_SERVER_FAIR;
|
||||
#ifdef CONFIG_SCHED_CLASS_EXT
|
||||
if (dl_se == &rq->ext_server)
|
||||
return DL_SERVER_EXT;
|
||||
#endif
|
||||
return DL_OTHER;
|
||||
}
|
||||
|
||||
static inline struct dl_bw *dl_bw_of(int i)
|
||||
{
|
||||
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
|
||||
|
|
@ -733,6 +746,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
|
|||
dl_se->dl_throttled = 1;
|
||||
dl_se->dl_defer_armed = 1;
|
||||
}
|
||||
trace_sched_dl_replenish_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -848,6 +862,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
|
|||
if (dl_se->dl_throttled)
|
||||
dl_se->dl_throttled = 0;
|
||||
|
||||
trace_sched_dl_replenish_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
|
||||
/*
|
||||
* If this is the replenishment of a deferred reservation,
|
||||
* clear the flag and return.
|
||||
|
|
@ -1345,6 +1361,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
|
|||
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
|
||||
if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
|
||||
return;
|
||||
trace_sched_dl_throttle_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
dl_se->dl_throttled = 1;
|
||||
if (dl_se->runtime > 0)
|
||||
dl_se->runtime = 0;
|
||||
|
|
@ -1508,6 +1525,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
|
|||
|
||||
throttle:
|
||||
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
|
||||
trace_sched_dl_throttle_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
dl_se->dl_throttled = 1;
|
||||
|
||||
/* If requested, inform the user about runtime overruns. */
|
||||
|
|
@ -1532,6 +1550,8 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
|
|||
|
||||
if (!is_leftmost(dl_se, &rq->dl))
|
||||
resched_curr(rq);
|
||||
} else {
|
||||
trace_sched_dl_update_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -1810,6 +1830,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
|
|||
if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
|
||||
return;
|
||||
|
||||
trace_sched_dl_server_start_tp(dl_se, cpu_of(rq), dl_get_type(dl_se, rq));
|
||||
dl_se->dl_server_active = 1;
|
||||
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
|
||||
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
|
||||
|
|
@ -1821,6 +1842,8 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
|
|||
if (!dl_server(dl_se) || !dl_server_active(dl_se))
|
||||
return;
|
||||
|
||||
trace_sched_dl_server_stop_tp(dl_se, cpu_of(dl_se->rq),
|
||||
dl_get_type(dl_se, dl_se->rq));
|
||||
dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
|
||||
hrtimer_try_to_cancel(&dl_se->dl_timer);
|
||||
dl_se->dl_defer_armed = 0;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user