perf: Skip pmu_ctx based on event_type

To optimize the cgroup context switch, the perf_event_pmu_context
iteration skips PMUs without cgroup events. A bool cgroup was
introduced to indicate that case. It works, but it is hard to extend
to other cases, e.g. skipping non-mediated PMUs; it doesn't make
sense to keep adding bool variables.

Pass the event_type instead of a dedicated bool variable. Check both
the event_type and the relevant pmu_ctx fields to decide whether to
skip a PMU.

Event flags, e.g. EVENT_CGROUP, should be cleared from
ctx->is_active. Add EVENT_FLAGS to cover such flag bits.

No functional change.
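
For reference, the key pieces of the new scheme, pulled together from
the diff below (unrelated enum entries elided):

enum event_type_t {
	/* ... */
	EVENT_CGROUP	= 0x20,
	/* flag bits: steer the pmu_ctx iteration, never set in ctx->is_active */
	EVENT_FLAGS	= EVENT_CGROUP,
	/* ... */
};

static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
			      enum event_type_t event_type)
{
	/* Cgroup switch: skip PMUs that have no cgroup events. */
	if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
		return true;

	return false;
}

#define for_each_epc(_epc, _ctx, _pmu, _event_type)			\
	list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
		if (perf_skip_pmu_ctx(_epc, _event_type))		\
			continue;					\
		else if (_pmu && _epc->pmu != _pmu)			\
			continue;					\
		else

Callers that only care about cgroup events, e.g. perf_cgroup_switch(),
pass EVENT_CGROUP; plain context switches pass 0 and iterate every PMU.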

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://patch.msgid.link/20251206001720.468579-2-seanjc@google.com
Kan Liang 2025-12-05 16:16:37 -08:00 committed by Peter Zijlstra
parent 7ac422cf7b
commit b825444b61


@@ -165,7 +165,7 @@ enum event_type_t {
/* see ctx_resched() for details */
EVENT_CPU = 0x10,
EVENT_CGROUP = 0x20,
EVENT_FLAGS = EVENT_CGROUP,
/* compound helpers */
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
EVENT_TIME_FROZEN = EVENT_TIME | EVENT_FROZEN,
@@ -779,27 +779,37 @@ do { \
___p; \
})
#define for_each_epc(_epc, _ctx, _pmu, _cgroup) \
static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
enum event_type_t event_type)
{
if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
return true;
return false;
}
#define for_each_epc(_epc, _ctx, _pmu, _event_type) \
list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
if (_cgroup && !_epc->nr_cgroups) \
if (perf_skip_pmu_ctx(_epc, _event_type)) \
continue; \
else if (_pmu && _epc->pmu != _pmu) \
continue; \
else
static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
static void perf_ctx_disable(struct perf_event_context *ctx,
enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
for_each_epc(pmu_ctx, ctx, NULL, cgroup)
for_each_epc(pmu_ctx, ctx, NULL, event_type)
perf_pmu_disable(pmu_ctx->pmu);
}
static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
static void perf_ctx_enable(struct perf_event_context *ctx,
enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
for_each_epc(pmu_ctx, ctx, NULL, cgroup)
for_each_epc(pmu_ctx, ctx, NULL, event_type)
perf_pmu_enable(pmu_ctx->pmu);
}
@@ -964,8 +974,7 @@ static void perf_cgroup_switch(struct task_struct *task)
return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_disable(&cpuctx->ctx, true);
perf_ctx_disable(&cpuctx->ctx, EVENT_CGROUP);
ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
/*
@@ -981,7 +990,7 @@ static void perf_cgroup_switch(struct task_struct *task)
*/
ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
perf_ctx_enable(&cpuctx->ctx, true);
perf_ctx_enable(&cpuctx->ctx, EVENT_CGROUP);
}
static int perf_cgroup_ensure_storage(struct perf_event *event,
@@ -2902,11 +2911,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
event_type &= EVENT_ALL;
for_each_epc(epc, &cpuctx->ctx, pmu, false)
for_each_epc(epc, &cpuctx->ctx, pmu, 0)
perf_pmu_disable(epc->pmu);
if (task_ctx) {
for_each_epc(epc, task_ctx, pmu, false)
for_each_epc(epc, task_ctx, pmu, 0)
perf_pmu_disable(epc->pmu);
task_ctx_sched_out(task_ctx, pmu, event_type);
@@ -2926,11 +2935,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_event_sched_in(cpuctx, task_ctx, pmu);
for_each_epc(epc, &cpuctx->ctx, pmu, false)
for_each_epc(epc, &cpuctx->ctx, pmu, 0)
perf_pmu_enable(epc->pmu);
if (task_ctx) {
for_each_epc(epc, task_ctx, pmu, false)
for_each_epc(epc, task_ctx, pmu, 0)
perf_pmu_enable(epc->pmu);
}
}
@@ -3479,11 +3488,10 @@ static void
ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
enum event_type_t active_type = event_type & ~EVENT_FLAGS;
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
bool cgroup = event_type & EVENT_CGROUP;
event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -3514,7 +3522,7 @@ ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
* see __load_acquire() in perf_event_time_now()
*/
barrier();
ctx->is_active &= ~event_type;
ctx->is_active &= ~active_type;
if (!(ctx->is_active & EVENT_ALL)) {
/*
@@ -3535,7 +3543,7 @@ ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
is_active ^= ctx->is_active; /* changed bits */
for_each_epc(pmu_ctx, ctx, pmu, cgroup)
for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_out(pmu_ctx, is_active);
}
@@ -3691,7 +3699,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
perf_ctx_disable(ctx, false);
perf_ctx_disable(ctx, 0);
/* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
if (local_read(&ctx->nr_no_switch_fast) ||
@@ -3715,7 +3723,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
perf_ctx_sched_task_cb(ctx, task, false);
perf_ctx_enable(ctx, false);
perf_ctx_enable(ctx, 0);
/*
* RCU_INIT_POINTER here is safe because we've not
@@ -3739,13 +3747,13 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
if (do_switch) {
raw_spin_lock(&ctx->lock);
perf_ctx_disable(ctx, false);
perf_ctx_disable(ctx, 0);
inside_switch:
perf_ctx_sched_task_cb(ctx, task, false);
task_ctx_sched_out(ctx, NULL, EVENT_ALL);
perf_ctx_enable(ctx, false);
perf_ctx_enable(ctx, 0);
raw_spin_unlock(&ctx->lock);
}
}
@@ -4054,11 +4062,9 @@ static void
ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
enum event_type_t active_type = event_type & ~EVENT_FLAGS;
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
bool cgroup = event_type & EVENT_CGROUP;
event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -4076,7 +4082,7 @@ ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
barrier();
}
ctx->is_active |= (event_type | EVENT_TIME);
ctx->is_active |= active_type | EVENT_TIME;
if (ctx->task) {
if (!(is_active & EVENT_ALL))
cpuctx->task_ctx = ctx;
@@ -4091,13 +4097,13 @@ ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED) {
for_each_epc(pmu_ctx, ctx, pmu, cgroup)
for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED);
}
/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE) {
for_each_epc(pmu_ctx, ctx, pmu, cgroup)
for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE);
}
}
@@ -4114,11 +4120,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (cpuctx->task_ctx == ctx) {
perf_ctx_lock(cpuctx, ctx);
perf_ctx_disable(ctx, false);
perf_ctx_disable(ctx, 0);
perf_ctx_sched_task_cb(ctx, task, true);
perf_ctx_enable(ctx, false);
perf_ctx_enable(ctx, 0);
perf_ctx_unlock(cpuctx, ctx);
goto rcu_unlock;
}
@@ -4131,7 +4137,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (!ctx->nr_events)
goto unlock;
perf_ctx_disable(ctx, false);
perf_ctx_disable(ctx, 0);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -4141,7 +4147,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
* events, no need to flip the cpuctx's events around.
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
perf_ctx_disable(&cpuctx->ctx, false);
perf_ctx_disable(&cpuctx->ctx, 0);
ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
}
@@ -4150,9 +4156,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
perf_ctx_enable(&cpuctx->ctx, false);
perf_ctx_enable(&cpuctx->ctx, 0);
perf_ctx_enable(ctx, false);
perf_ctx_enable(ctx, 0);
unlock:
perf_ctx_unlock(cpuctx, ctx);