mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
Merge branch 'net-reduce-sk_filter-and-friends-bloat'
Eric Dumazet says: ==================== net: reduce sk_filter() (and friends) bloat Some functions return an error by value, and a drop_reason by an output parameter. This extra parameter can force stack canaries. A drop_reason is enough and more efficient. This series reduces bloat by 678 bytes on x86_64: $ scripts/bloat-o-meter -t vmlinux.old vmlinux.final add/remove: 0/0 grow/shrink: 3/18 up/down: 79/-757 (-678) Function old new delta vsock_queue_rcv_skb 50 79 +29 ipmr_cache_report 1290 1315 +25 ip6mr_cache_report 1322 1347 +25 tcp_v6_rcv 3169 3167 -2 packet_rcv_spkt 329 327 -2 unix_dgram_sendmsg 1731 1726 -5 netlink_unicast 957 945 -12 netlink_dump 1372 1359 -13 sk_filter_trim_cap 889 858 -31 netlink_broadcast_filtered 1633 1595 -38 tcp_v4_rcv 3152 3111 -41 raw_rcv_skb 122 80 -42 ping_queue_rcv_skb 109 61 -48 ping_rcv 215 162 -53 rawv6_rcv_skb 278 224 -54 __sk_receive_skb 690 632 -58 raw_rcv 591 527 -64 udpv6_queue_rcv_one_skb 935 869 -66 udp_queue_rcv_one_skb 919 853 -66 tun_net_xmit 1146 1074 -72 sock_queue_rcv_skb_reason 166 76 -90 Total: Before=29722890, After=29722212, chg -0.00% Future conversions from sock_queue_rcv_skb() to sock_queue_rcv_skb_reason() can be done later. ==================== Link: https://patch.msgid.link/20260409145625.2306224-1-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
9336854a59
|
|
@@ -1031,9 +1031,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
goto drop;
|
||||
}
|
||||
|
||||
if (tfile->socket.sk->sk_filter &&
|
||||
sk_filter_reason(tfile->socket.sk, skb, &drop_reason))
|
||||
goto drop;
|
||||
if (tfile->socket.sk->sk_filter) {
|
||||
drop_reason = sk_filter_reason(tfile->socket.sk, skb);
|
||||
if (drop_reason)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
len = run_ebpf_filter(tun, skb, len);
|
||||
if (len == 0) {
|
||||
|
|
|
|||
|
|
@@ -1092,20 +1092,21 @@ bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
|||
return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
|
||||
enum skb_drop_reason *reason);
|
||||
enum skb_drop_reason
|
||||
sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
|
||||
|
||||
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
enum skb_drop_reason ignore_reason;
|
||||
enum skb_drop_reason drop_reason;
|
||||
|
||||
return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
|
||||
drop_reason = sk_filter_trim_cap(sk, skb, 1);
|
||||
return drop_reason ? -EPERM : 0;
|
||||
}
|
||||
|
||||
static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
|
||||
enum skb_drop_reason *reason)
|
||||
static inline enum skb_drop_reason
|
||||
sk_filter_reason(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
return sk_filter_trim_cap(sk, skb, 1, reason);
|
||||
return sk_filter_trim_cap(sk, skb, 1);
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
|
||||
|
|
|
|||
|
|
@@ -2502,12 +2502,23 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
|
|||
struct sk_buff *skb));
|
||||
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
|
||||
|
||||
int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
|
||||
enum skb_drop_reason *reason);
|
||||
enum skb_drop_reason
|
||||
sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb);
|
||||
|
||||
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
return sock_queue_rcv_skb_reason(sk, skb, NULL);
|
||||
enum skb_drop_reason drop_reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
|
||||
switch (drop_reason) {
|
||||
case SKB_DROP_REASON_SOCKET_RCVBUFF:
|
||||
return -ENOMEM;
|
||||
case SKB_DROP_REASON_PROTO_MEM:
|
||||
return -ENOBUFS;
|
||||
case 0:
|
||||
return 0;
|
||||
default:
|
||||
return -EPERM;
|
||||
}
|
||||
}
|
||||
|
||||
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
|
||||
|
|
|
|||
|
|
@@ -1682,12 +1682,12 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
|
|||
|
||||
enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
|
||||
|
||||
static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
|
||||
enum skb_drop_reason *reason)
|
||||
static inline enum skb_drop_reason
|
||||
tcp_filter(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
const struct tcphdr *th = (const struct tcphdr *)skb->data;
|
||||
|
||||
return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
|
||||
return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th));
|
||||
}
|
||||
|
||||
void tcp_set_state(struct sock *sk, int state);
|
||||
|
|
|
|||
|
|
@@ -363,7 +363,6 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
|
|||
struct sockaddr_can *addr;
|
||||
struct sock *sk = op->sk;
|
||||
unsigned int datalen = head->nframes * op->cfsiz;
|
||||
int err;
|
||||
unsigned int *pflags;
|
||||
enum skb_drop_reason reason;
|
||||
|
||||
|
|
@@ -420,8 +419,8 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
|
|||
addr->can_family = AF_CAN;
|
||||
addr->can_ifindex = op->rx_ifindex;
|
||||
|
||||
err = sock_queue_rcv_skb_reason(sk, skb, &reason);
|
||||
if (err < 0) {
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason) {
|
||||
struct bcm_sock *bo = bcm_sk(sk);
|
||||
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
|
|
|
|||
|
|
@@ -291,7 +291,8 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
|
|||
addr->can_family = AF_CAN;
|
||||
addr->can_ifindex = skb->dev->ifindex;
|
||||
|
||||
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason)
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -333,7 +333,8 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
|
|||
if (skb->sk)
|
||||
skcb->msg_flags |= MSG_DONTROUTE;
|
||||
|
||||
if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0)
|
||||
reason = sock_queue_rcv_skb_reason(&jsk->sk, skb);
|
||||
if (reason)
|
||||
sk_skb_reason_drop(&jsk->sk, skb, reason);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -207,7 +207,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
|
|||
if (oskb->sk == sk)
|
||||
*pflags |= MSG_CONFIRM;
|
||||
|
||||
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason)
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -121,20 +121,20 @@ EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
|
|||
* @sk: sock associated with &sk_buff
|
||||
* @skb: buffer to filter
|
||||
* @cap: limit on how short the eBPF program may trim the packet
|
||||
* @reason: record drop reason on errors (negative return value)
|
||||
*
|
||||
* Run the eBPF program and then cut skb->data to correct size returned by
|
||||
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
|
||||
* than pkt_len we keep whole skb->data. This is the socket level
|
||||
* wrapper to bpf_prog_run. It returns 0 if the packet should
|
||||
* be accepted or -EPERM if the packet should be tossed.
|
||||
* be accepted or a drop_reason if the packet should be tossed.
|
||||
*
|
||||
*/
|
||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
|
||||
unsigned int cap, enum skb_drop_reason *reason)
|
||||
enum skb_drop_reason
|
||||
sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
|
||||
{
|
||||
int err;
|
||||
enum skb_drop_reason drop_reason;
|
||||
struct sk_filter *filter;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* If the skb was allocated from pfmemalloc reserves, only
|
||||
|
|
@@ -143,21 +143,17 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
|
|||
*/
|
||||
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
|
||||
NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
|
||||
*reason = SKB_DROP_REASON_PFMEMALLOC;
|
||||
return -ENOMEM;
|
||||
return SKB_DROP_REASON_PFMEMALLOC;
|
||||
}
|
||||
err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
|
||||
if (err) {
|
||||
*reason = SKB_DROP_REASON_SOCKET_FILTER;
|
||||
return err;
|
||||
}
|
||||
if (err)
|
||||
return SKB_DROP_REASON_SOCKET_FILTER;
|
||||
|
||||
err = security_sock_rcv_skb(sk, skb);
|
||||
if (err) {
|
||||
*reason = SKB_DROP_REASON_SECURITY_HOOK;
|
||||
return err;
|
||||
}
|
||||
if (err)
|
||||
return SKB_DROP_REASON_SECURITY_HOOK;
|
||||
|
||||
drop_reason = 0;
|
||||
rcu_read_lock();
|
||||
filter = rcu_dereference(sk->sk_filter);
|
||||
if (filter) {
|
||||
|
|
@@ -169,11 +165,11 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
|
|||
skb->sk = save_sk;
|
||||
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
|
||||
if (err)
|
||||
*reason = SKB_DROP_REASON_SOCKET_FILTER;
|
||||
drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return err;
|
||||
return drop_reason;
|
||||
}
|
||||
EXPORT_SYMBOL(sk_filter_trim_cap);
|
||||
|
||||
|
|
|
|||
|
|
@@ -520,43 +520,36 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
}
|
||||
EXPORT_SYMBOL(__sock_queue_rcv_skb);
|
||||
|
||||
int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
|
||||
enum skb_drop_reason *reason)
|
||||
enum skb_drop_reason
|
||||
sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
enum skb_drop_reason drop_reason;
|
||||
int err;
|
||||
|
||||
err = sk_filter_reason(sk, skb, &drop_reason);
|
||||
if (err)
|
||||
goto out;
|
||||
drop_reason = sk_filter_reason(sk, skb);
|
||||
if (drop_reason)
|
||||
return drop_reason;
|
||||
|
||||
err = __sock_queue_rcv_skb(sk, skb);
|
||||
switch (err) {
|
||||
case -ENOMEM:
|
||||
drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
|
||||
break;
|
||||
return SKB_DROP_REASON_SOCKET_RCVBUFF;
|
||||
case -ENOBUFS:
|
||||
drop_reason = SKB_DROP_REASON_PROTO_MEM;
|
||||
break;
|
||||
default:
|
||||
drop_reason = SKB_NOT_DROPPED_YET;
|
||||
break;
|
||||
return SKB_DROP_REASON_PROTO_MEM;
|
||||
}
|
||||
out:
|
||||
if (reason)
|
||||
*reason = drop_reason;
|
||||
return err;
|
||||
return SKB_NOT_DROPPED_YET;
|
||||
}
|
||||
EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
|
||||
|
||||
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
|
||||
const int nested, unsigned int trim_cap, bool refcounted)
|
||||
{
|
||||
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
|
||||
enum skb_drop_reason reason;
|
||||
int rc = NET_RX_SUCCESS;
|
||||
int err;
|
||||
|
||||
if (sk_filter_trim_cap(sk, skb, trim_cap, &reason))
|
||||
reason = sk_filter_trim_cap(sk, skb, trim_cap);
|
||||
if (reason)
|
||||
goto discard_and_relse;
|
||||
|
||||
skb->dev = NULL;
|
||||
|
|
|
|||
|
|
@@ -935,7 +935,8 @@ static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
|
|||
|
||||
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
|
||||
inet_sk(sk), inet_sk(sk)->inet_num, skb);
|
||||
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason) {
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
pr_debug("ping_queue_rcv_skb -> failed\n");
|
||||
return reason;
|
||||
|
|
|
|||
|
|
@@ -300,7 +300,8 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
/* Charge it to the socket. */
|
||||
|
||||
ipv4_pktinfo_prepare(sk, skb, true);
|
||||
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason) {
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -2160,7 +2160,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
|
|||
}
|
||||
refcounted = true;
|
||||
nsk = NULL;
|
||||
if (!tcp_filter(sk, skb, &drop_reason)) {
|
||||
drop_reason = tcp_filter(sk, skb);
|
||||
if (!drop_reason) {
|
||||
th = (const struct tcphdr *)skb->data;
|
||||
iph = ip_hdr(skb);
|
||||
tcp_v4_fill_cb(skb, iph, th);
|
||||
|
|
@@ -2221,7 +2222,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
|
|||
|
||||
nf_reset_ct(skb);
|
||||
|
||||
if (tcp_filter(sk, skb, &drop_reason))
|
||||
drop_reason = tcp_filter(sk, skb);
|
||||
if (drop_reason)
|
||||
goto discard_and_relse;
|
||||
|
||||
th = (const struct tcphdr *)skb->data;
|
||||
|
|
|
|||
|
|
@@ -2392,7 +2392,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
|
|||
udp_lib_checksum_complete(skb))
|
||||
goto csum_error;
|
||||
|
||||
if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
|
||||
drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
|
||||
if (drop_reason)
|
||||
goto drop;
|
||||
|
||||
udp_csum_pull_header(skb);
|
||||
|
|
|
|||
|
|
@@ -369,7 +369,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
|
||||
/* Charge it to the socket. */
|
||||
skb_dst_drop(skb);
|
||||
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
|
||||
reason = sock_queue_rcv_skb_reason(sk, skb);
|
||||
if (reason) {
|
||||
sk_skb_reason_drop(sk, skb, reason);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1794,7 +1794,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
|
|||
}
|
||||
refcounted = true;
|
||||
nsk = NULL;
|
||||
if (!tcp_filter(sk, skb, &drop_reason)) {
|
||||
drop_reason = tcp_filter(sk, skb);
|
||||
if (!drop_reason) {
|
||||
th = (const struct tcphdr *)skb->data;
|
||||
hdr = ipv6_hdr(skb);
|
||||
tcp_v6_fill_cb(skb, hdr, th);
|
||||
|
|
@@ -1855,7 +1856,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
|
|||
|
||||
nf_reset_ct(skb);
|
||||
|
||||
if (tcp_filter(sk, skb, &drop_reason))
|
||||
drop_reason = tcp_filter(sk, skb);
|
||||
if (drop_reason)
|
||||
goto discard_and_relse;
|
||||
|
||||
th = (const struct tcphdr *)skb->data;
|
||||
|
|
|
|||
|
|
@@ -853,7 +853,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
|
|||
udp_lib_checksum_complete(skb))
|
||||
goto csum_error;
|
||||
|
||||
if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
|
||||
drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
|
||||
if (drop_reason)
|
||||
goto drop;
|
||||
|
||||
udp_csum_pull_header(skb);
|
||||
|
|
|
|||
|
|
@@ -101,7 +101,6 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
|
|||
*/
|
||||
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
|
||||
{
|
||||
enum skb_drop_reason dr; /* ignored */
|
||||
struct rose_sock *rose = rose_sk(sk);
|
||||
int queued = 0;
|
||||
|
||||
|
|
@@ -163,7 +162,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
|
|||
rose_frames_acked(sk, nr);
|
||||
if (ns == rose->vr) {
|
||||
rose_start_idletimer(sk);
|
||||
if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN, &dr) &&
|
||||
if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) &&
|
||||
__sock_queue_rcv_skb(sk, skb) == 0) {
|
||||
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
|
||||
queued = 1;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user