linux/net/sched/sch_red.c
Jamal Hadi Salim 458d561527 net/sched: sch_red: Replace direct dequeue call with peek and qdisc_dequeue_peeked
When the red qdisc has a child (e.g. a qfq qdisc) whose peek() callback is
qdisc_peek_dequeued(), we could get a kernel panic. When the parent of such
a qdisc (e.g. tbf, as illustrated in patch #3) wants to retrieve an skb from
its child (red in this case), it does the following (see the sketch of
qdisc_peek_dequeued() after this list):
 1a. do a peek() - and when sensing there's an skb the child can offer, then
     - the child in this case (red) calls its own child's (qfq) peek().
       qfq does the right thing and will return the packet queued on
       gso_skb. Note: if there wasn't a gso_skb entry, qfq will dequeue
       a packet and store it there.
 1b. invoke a dequeue() on the child (red). And herein lies the problem.
     - red will call its child's (qfq) dequeue(), which will essentially
       just try to grab something off qfq's queue - bypassing the packet
       that peek() stashed on gso_skb.
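
For reference, qdisc_peek_dequeued() (include/net/sch_generic.h) behaves
roughly like this - a paraphrased sketch for illustration, not the verbatim
kernel source:

	static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
	{
		struct sk_buff *skb = skb_peek(&sch->gso_skb);

		/* Nothing stashed yet: really dequeue a packet and park it
		 * on gso_skb, keeping it accounted as still queued.
		 */
		if (!skb) {
			skb = sch->dequeue(sch);
			if (skb) {
				__skb_queue_head(&sch->gso_skb, skb);
				qdisc_qstats_backlog_inc(sch, skb);
				sch->q.qlen++;
			}
		}
		return skb;
	}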

[   78.667668][  T363] KASAN: null-ptr-deref in range [0x0000000000000048-0x000000000000004f]
[   78.667927][  T363] CPU: 1 UID: 0 PID: 363 Comm: ping Not tainted 7.1.0-rc1-00033-g46f74a3f7d57-dirty #790 PREEMPT(full)
[   78.668263][  T363] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[   78.668486][  T363] RIP: 0010:qfq_dequeue+0x446/0xc90 [sch_qfq]
[   78.668718][  T363] Code: 54 c0 e8 dd 90 00 f1 48 c7 c7 e0 03 54 c0 48 89 de e8 ce 90 00 f1 48 8d 7b 48 b8 ff ff 37 00 48 89 fa 48 c1 e0 2a 48 c1 ea 03 <80> 3c 02 00 74 05 e8 ef a1 e1 f1 48 8b 7b 48 48 8d 54 24 58 48 8d
[   78.669312][  T363] RSP: 0018:ffff88810de573e0 EFLAGS: 00010216
[   78.669533][  T363] RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000
[   78.669790][  T363] RDX: 0000000000000009 RSI: 0000000000000004 RDI: 0000000000000048
[   78.670044][  T363] RBP: ffff888110dc4000 R08: ffffffffb1b0885a R09: fffffbfff6ba9078
[   78.670297][  T363] R10: 0000000000000003 R11: ffff888110e31c80 R12: 0000001880000000
[   78.670560][  T363] R13: ffff888110dc4150 R14: ffff888110dc42b8 R15: 0000000000000200
[   78.670814][  T363] FS:  00007f66a8f09c40(0000) GS:ffff888163428000(0000) knlGS:0000000000000000
[   78.671110][  T363] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   78.671324][  T363] CR2: 000055db4c6a30a8 CR3: 000000010da67000 CR4: 0000000000750ef0
[   78.671585][  T363] PKRU: 55555554
[   78.671713][  T363] Call Trace:
[   78.671843][  T363]  <TASK>
[   78.671936][  T363]  ? __pfx_qfq_dequeue+0x10/0x10 [sch_qfq]
[   78.672148][  T363]  ? __pfx__printk+0x10/0x10
[   78.672322][  T363]  ? srso_alias_return_thunk+0x5/0xfbef5
[   78.672496][  T363]  ? lockdep_hardirqs_on_prepare+0xa8/0x1a0
[   78.672706][  T363]  ? srso_alias_return_thunk+0x5/0xfbef5
[   78.672875][  T363]  ? trace_hardirqs_on+0x19/0x1a0
[   78.673047][  T363]  red_dequeue+0x65/0x270 [sch_red]
[   78.673217][  T363]  ? srso_alias_return_thunk+0x5/0xfbef5
[   78.673385][  T363]  tbf_dequeue.cold+0xb0/0x70c [sch_tbf]
[   78.673566][  T363]  __qdisc_run+0x169/0x1900

The right thing to do in #1b is to grab the skb off the gso_skb queue.
This patchset fixes the issue by changing #1b to use the
qdisc_dequeue_peeked() helper instead, as sketched below.
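
qdisc_dequeue_peeked() consumes the packet a previous peek() stashed on
gso_skb before falling back to the child's dequeue() - again a paraphrased
sketch for illustration:

	static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
	{
		struct sk_buff *skb = skb_peek(&sch->gso_skb);

		if (skb) {
			/* A previous peek() parked this packet; hand it
			 * out and fix up the queue accounting.
			 */
			skb = __skb_dequeue(&sch->gso_skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		} else {
			skb = sch->dequeue(sch);
		}
		return skb;
	}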

Fixes: 77be155cba ("pkt_sched: Add peek emulation for non-work-conserving qdiscs.")
Reported-by: Manas <ghandatmanas@gmail.com>
Reported-by: Rakshit Awasthi <rakshitawasthi17@gmail.com>
Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260430152957.194015-2-jhs@mojatatu.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2026-05-02 10:20:55 -07:00


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithms behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	enum qdisc_drop_reason reason = QDISC_DROP_CONGESTED;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);
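
	/* Classic RED: below qth_min pass the packet through; between
	 * qth_min and qth_max mark/drop with a probability that grows
	 * with the average queue size; above qth_max mark/drop
	 * unconditionally.
	 */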
	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			WRITE_ONCE(q->stats.prob_drop,
				   q->stats.prob_drop + 1);
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			WRITE_ONCE(q->stats.prob_mark,
				   q->stats.prob_mark + 1);
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			WRITE_ONCE(q->stats.prob_drop,
				   q->stats.prob_drop + 1);
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		reason = QDISC_DROP_OVERLIMIT;
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			WRITE_ONCE(q->stats.forced_drop,
				   q->stats.forced_drop + 1);
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			WRITE_ONCE(q->stats.forced_mark,
				   q->stats.forced_mark + 1);
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			WRITE_ONCE(q->stats.forced_drop,
				   q->stats.forced_drop + 1);
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		WRITE_ONCE(q->stats.pdrop,
			   q->stats.pdrop + 1);
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

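	/* A peek() on this qdisc may already have pulled a packet out of
	 * the child and stashed it on the child's gso_skb list (this is
	 * what qdisc_peek_dequeued() does). Consume that stashed packet
	 * first rather than calling the child's dequeue() directly, which
	 * would bypass it.
	 */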
	skb = qdisc_dequeue_peeked(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}
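
/* Defer to the child's peek(); for non-work-conserving children this is
 * typically qdisc_peek_dequeued(), which dequeues a packet and parks it
 * on the child's gso_skb list until the next dequeue.
 */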
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	timer_delete_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = nla_get_u32_default(tb[TCA_RED_MAX_P], 0);

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;
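
	/* A non-zero limit (re)creates the default bfifo child below;
	 * limit == 0 keeps the current child qdisc.
	 */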
	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_purge_queue(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	timer_delete(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}
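
/* Adaptative RED: when TC_RED_ADAPTATIVE is set, this timer fires roughly
 * every 500ms and re-tunes the marking probability based on the observed
 * average queue size (see red_adaptative_algo()).
 */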
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = timer_container_of(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = READ_ONCE(q->stats.prob_drop) +
		   READ_ONCE(q->stats.forced_drop);
	st.pdrop = READ_ONCE(q->stats.pdrop);
	st.marked = READ_ONCE(q->stats.prob_mark) +
		    READ_ONCE(q->stats.forced_mark);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.find		= red_find,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("red");

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Early Detection qdisc");