mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
If skb_unshare() fails to unshare a packet due to allocation failure in
rxrpc_input_packet(), the skb pointer in the parent (rxrpc_io_thread())
will be NULL'd out. This will likely cause the call to
trace_rxrpc_rx_done() to oops.
Fix this by moving the unsharing down to where rxrpc_input_call_event()
calls rxrpc_input_call_packet(). There are a number of places prior to
that where we ignore DATA packets for a variety of reasons (such as the
call already being complete) for which an unshare is then avoided.
And with that, rxrpc_input_packet() doesn't need to take a pointer to the
pointer to the packet, so change that to just a pointer.
Fixes: 2d1faf7a0c ("rxrpc: Simplify skbuff accounting in receive path")
Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Jeffrey Altman <jaltman@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
cc: stable@kernel.org
Link: https://patch.msgid.link/20260422161438.2593376-4-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
75 lines
1.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/* Socket buffer accounting
|
|
*
|
|
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*/
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/net.h>
|
|
#include <linux/skbuff.h>
|
|
#include <net/sock.h>
|
|
#include <net/af_rxrpc.h>
|
|
#include "ar-internal.h"
|
|
|
|
#define select_skb_count(skb) (&rxrpc_n_rx_skbs)
|
|
|
|
/*
|
|
* Note the allocation or reception of a socket buffer.
|
|
*/
|
|
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
|
{
|
|
int n = atomic_inc_return(select_skb_count(skb));
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
|
}
|
|
|
|
/*
|
|
* Note the re-emergence of a socket buffer from a queue or buffer.
|
|
*/
|
|
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
|
{
|
|
if (skb) {
|
|
int n = atomic_read(select_skb_count(skb));
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Note the addition of a ref on a socket buffer.
|
|
*/
|
|
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
|
{
|
|
int n = atomic_inc_return(select_skb_count(skb));
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
|
skb_get(skb);
|
|
}
|
|
|
|
/*
|
|
* Note the destruction of a socket buffer.
|
|
*/
|
|
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
|
{
|
|
if (skb) {
|
|
int n = atomic_dec_return(select_skb_count(skb));
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
|
consume_skb(skb);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Clear a queue of socket buffers.
|
|
*/
|
|
void rxrpc_purge_queue(struct sk_buff_head *list)
|
|
{
|
|
struct sk_buff *skb;
|
|
|
|
while ((skb = skb_dequeue((list))) != NULL) {
|
|
int n = atomic_dec_return(select_skb_count(skb));
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
|
|
rxrpc_skb_put_purge);
|
|
consume_skb(skb);
|
|
}
|
|
}
|