netfilter pull request 26-05-01

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEjF9xRqF1emXiQiqU1w0aZmrPKyEFAmn0hZ8ACgkQ1w0aZmrP
 KyFzNg//ZVbSZyMag+CJoIJv3sMFDJ7uLSEko9mR0nNvo6hPZDWAysCNychhPCDl
 w9yiar5wM9W1zcSWvtlBFozZUcS55mQbcqCHNEyJdSjQ1zTr7C9Dl9zDU3jDJEoK
 aplUk5VvFYFqEp4Bqy7EA1VGY5uc2WzmbsCAf9Z2pjprTQKD/E5tzyx0RFEPksKU
 0pSvsC8VfOES6mJs3KIng6TfvnaC/TWilOtjXC/1y1jl+WftXgwb0gwIVnWKjZnc
 yEJ6h4VOiW2NjwcW+gcaaqvt0c1T4EO/bDvuVnCJzwxDZKI2W9KOs8yQytO2hNTo
 jrAyjTB0F3yDxcnDP1AO8ipkJzu42wOfZblrZKvSmC4Kwwqq8QlsXqD1HMh3oMqv
 JGNJSB8rNbIqt9RTMB+A5wiAZvZbSGZc3qH+y7Z5z/2Zl7u0+Zwl20YZ1r7RqM9Z
 Ay/+QzZIyRAyKmQDr8nSoqmBy2i0wfw79NovvhgPDl9qak8Cfc8Df8wkd59t3z33
 0VzPO9kieTWW6aqW19l88C7dtspsd93IsMZz3He3Lvy5e4dpPG+2OdLKpPkTYHBg
 17KY4Qs7gYM0m5baHlcmana4bZHWcBz146dmIMUuhoj3gPyjgV+s/Hum3YxD/P43
 PNA6X8pI38R8O97VkPXYg1aoQIRLt9YsGwVTYxPXv2gZgLD0Acw=
 =ASC0
 -----END PGP SIGNATURE-----

Merge tag 'nf-26-05-01' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following batch contains Netfilter fixes for net:

1) Replace skb_try_make_writable() by skb_ensure_writable() in
   nft_fwd_netdev and the flowtable to deal with uncloned packets
   having their network header in paged fragments.

2) Drop packet if output device does not exist and ensure sufficient
   headroom in nft_fwd_netdev before transmitting the skb.

3) Use the existing dup recursion counter in nft_fwd_netdev for the
   neigh_xmit variant, from Weiming Shi.

4) Add .check_hooks interface to x_tables to detach the control plane
   hook check based on the match/target configuration. Then, update
   nft_compat to call .check_hooks from the .validate path; this fixes
   missing hook validation for several matches/targets.

5) Fix incorrect .usersize in xt_CT, from Florian Westphal.

6) Fix a memleak with netdev tables in dormant state,
   from Florian Westphal.

7) Several patches to detect non-first fragments and skip layer 4
   inspection for them, covering x_tables, nf_tables and the common
   nf_socket infrastructure. The xt_hashlimit match drops such
   fragments, consistent with its existing behaviour when the layer 4
   protocol header cannot be parsed.

8) Ensure sufficient headroom in the flowtable before transmitting
   the skb.

9) Fix the flowtable inline vlan handling for double-tagged vlan:
   Reverse the iteration over .encap[], since it records the
   encapsulation as seen from the ingress path (outermost header
   first), so egress must push headers in the opposite order to
   rebuild the original stacking. Postpone pushing the layer 2 header
   so the output device is available to calculate the needed headroom.
   Finally, add and use nf_flow_vlan_push().

10) Fix flowtable inline pppoe with GSO packets. Moreover, use
    FLOW_OFFLOAD_XMIT_DIRECT to fill in the destination hardware
    address, since no neighbour cache entry exists for pppoe.

11) Use skb_pull_rcsum() to decapsulate vlan and pppoe headers, so
    that skb->csum stays consistent on CHECKSUM_COMPLETE packets;
    double-tagged vlan in particular benefits from this.

More notes regarding 9-11):

- sashiko is also asking to use it for IPIP headers, but that needs
  more adjustments, such as setting skb->protocol after removing the
  IPIP header; this will follow up in a separate patch.
- I plan to submit selftests to cover double-tagged vlan. As for pppoe,
  it should be possible but would require a few userspace dependencies.
  This has been semi-automatically tested by me and by reporters who
  described broken double-tagged vlan and pppoe handling in the
  flowtable.

* tag 'nf-26-05-01' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: flowtable: use skb_pull_rcsum() to pop vlan/pppoe header
  netfilter: flowtable: fix inline pppoe encapsulation in xmit path
  netfilter: flowtable: fix inline vlan encapsulation in xmit path
  netfilter: flowtable: ensure sufficient headroom in xmit path
  netfilter: xtables: fix L4 header parsing for non-first fragments
  netfilter: nf_tables: skip L4 header parsing for non-first fragments
  netfilter: nf_socket: skip socket lookup for non-first fragments
  netfilter: nf_tables: fix netdev hook allocation memleak with dormant tables
  netfilter: xt_CT: fix usersize for v1 and v2 revision
  netfilter: nft_compat: run xt_check_hooks_{match,target}() from .validate
  netfilter: x_tables: add .check_hooks to matches and targets
  netfilter: nft_fwd_netdev: use recursion counter in neigh egress path
  netfilter: nft_fwd_netdev: add device and headroom validate with neigh forwarding
  netfilter: replace skb_try_make_writable() by skb_ensure_writable()
====================

Link: https://patch.msgid.link/20260501122237.296262-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

View File

@ -146,6 +146,9 @@ struct xt_match {
/* Called when user tries to insert an entry of this type. */
int (*checkentry)(const struct xt_mtchk_param *);
/* Called to validate hooks based on the match configuration. */
int (*check_hooks)(const struct xt_mtchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
@ -187,6 +190,9 @@ struct xt_target {
/* Should return 0 on success or an error code otherwise (-Exxxx). */
int (*checkentry)(const struct xt_tgchk_param *);
/* Called to validate hooks based on the target configuration. */
int (*check_hooks)(const struct xt_tgchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
@ -279,8 +285,10 @@ bool xt_find_jump_offset(const unsigned int *offsets,
int xt_check_proc_name(const char *name, unsigned int size);
int xt_check_hooks_match(struct xt_mtchk_param *par);
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto,
bool inv_proto);
int xt_check_hooks_target(struct xt_tgchk_param *par);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto,
bool inv_proto);
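
For illustration, a minimal sketch of how a match module can wire up the
new callback, splitting hook validation out of .checkentry so that
nft_compat can run it from .validate as well. The foo_* names are
hypothetical; the pattern follows the xt_devgroup/xt_policy conversions
further down in this series:

	/* hook validation detached from .checkentry */
	static int foo_mt_check_hooks(const struct xt_mtchk_param *par)
	{
		/* e.g. reject use outside PREROUTING/INPUT */
		if (par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
				       (1 << NF_INET_LOCAL_IN)))
			return -EINVAL;
		return 0;
	}

	static int foo_mt_checkentry(const struct xt_mtchk_param *par)
	{
		return 0;	/* configuration-only checks stay here */
	}

	static struct xt_match foo_mt_reg __read_mostly = {
		.name		= "foo",
		.family		= NFPROTO_UNSPEC,
		.check_hooks	= foo_mt_check_hooks,
		.checkentry	= foo_mt_checkentry,
		/* .match, .matchsize etc. omitted for brevity */
		.me		= THIS_MODULE,
	};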

View File

@ -3,10 +3,23 @@
#define _NF_DUP_NETDEV_H_
#include <net/netfilter/nf_tables.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif);
void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif);
#define NF_RECURSION_LIMIT 2
static inline u8 *nf_get_nf_dup_skb_recursion(void)
{
#ifndef CONFIG_PREEMPT_RT
return this_cpu_ptr(&softnet_data.xmit.nf_dup_skb_recursion);
#else
return &current->net_xmit.nf_dup_skb_recursion;
#endif
}
struct nft_offload_ctx;
struct nft_flow_rule;
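
A condensed sketch of how the counter is meant to be used by the egress
paths (mirroring the nf_dup_netdev/nft_fwd_netdev hunks in this series;
the nf_fwd_example() name is made up):

	static void nf_fwd_example(struct sk_buff *skb, struct net_device *dev)
	{
		u8 *recursion = nf_get_nf_dup_skb_recursion();

		/* A forwarded packet can traverse the netdev hooks again
		 * and be forwarded once more; the per-CPU (or per-task on
		 * PREEMPT_RT) counter bounds that re-entrancy.
		 */
		if (*recursion > NF_RECURSION_LIMIT) {
			kfree_skb(skb);
			return;
		}

		(*recursion)++;
		skb->dev = dev;
		dev_queue_xmit(skb);
		(*recursion)--;
	}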

View File

@ -148,9 +148,10 @@ struct flow_offload_tuple {
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
u8 dir:2,
u16 dir:2,
xmit_type:3,
encap_num:2,
needs_gso_segment:1,
tun_num:2,
in_vlan_ingress:2;
u16 mtu;
@ -232,6 +233,7 @@ struct nf_flow_route {
u32 hw_ifindex;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
u8 needs_gso_segment:1;
} out;
enum flow_offload_xmit_type xmit_type;
} tuple[FLOW_OFFLOAD_DIR_MAX];
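
A quick tally of why the bitfield base type grows in the hunk above:

	/* dir:2 + xmit_type:3 + encap_num:2 + needs_gso_segment:1 +
	 * tun_num:2 + in_vlan_ingress:2 = 12 bits, which no longer
	 * fits in a u8, hence the switch to u16.
	 */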

View File

@ -94,6 +94,9 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
#endif
int doff = 0;
if (ntohs(iph->frag_off) & IP_OFFSET)
return NULL;
if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
struct tcphdr _hdr;
struct udphdr *hp;

View File

@ -100,6 +100,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
const struct in6_addr *daddr = NULL, *saddr = NULL;
struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
struct sk_buff *data_skb = NULL;
unsigned short fragoff = 0;
int doff = 0;
int thoff = 0, tproto;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
@ -107,8 +108,8 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
struct nf_conn const *ct;
#endif
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
tproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
if (tproto < 0 || fragoff) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
return NULL;
}
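
The non-first-fragment test distilled from these two hunks; the same
pattern then reappears in nft_tproxy, xt_TPROXY, xt_ecn, xt_tcpmss and
xt_hashlimit below (a sketch, error handling trimmed):

	/* IPv4: a non-zero fragment offset means this is not the first
	 * fragment, so no layer 4 header is present in this packet.
	 */
	if (ntohs(ip_hdr(skb)->frag_off) & IP_OFFSET)
		return NULL;

	/* IPv6: ask ipv6_find_hdr() to report the fragment offset and
	 * bail out on non-first fragments before the layer 4 lookup.
	 */
	tproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
	if (tproto < 0 || fragoff)
		return NULL;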

View File

@ -13,22 +13,6 @@
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
#define NF_RECURSION_LIMIT 2
#ifndef CONFIG_PREEMPT_RT
static u8 *nf_get_nf_dup_skb_recursion(void)
{
return this_cpu_ptr(&softnet_data.xmit.nf_dup_skb_recursion);
}
#else
static u8 *nf_get_nf_dup_skb_recursion(void)
{
return &current->net_xmit.nf_dup_skb_recursion;
}
#endif
static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev,
enum nf_dev_hooks hook)
{

View File

@ -122,6 +122,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
flow_tuple->tun = route->tuple[dir].in.tun;
flow_tuple->encap_num = route->tuple[dir].in.num_encaps;
flow_tuple->needs_gso_segment = route->tuple[dir].out.needs_gso_segment;
flow_tuple->tun_num = route->tuple[dir].in.num_tuns;
switch (route->tuple[dir].xmit_type) {

View File

@ -445,13 +445,13 @@ static void nf_flow_encap_pop(struct nf_flowtable_ctx *ctx,
switch (skb->protocol) {
case htons(ETH_P_8021Q):
vlan_hdr = (struct vlan_hdr *)skb->data;
__skb_pull(skb, VLAN_HLEN);
skb_pull_rcsum(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vlan_hdr);
skb_reset_network_header(skb);
break;
case htons(ETH_P_PPP_SES):
skb->protocol = __nf_flow_pppoe_proto(skb);
skb_pull(skb, PPPOE_SES_HLEN);
skb_pull_rcsum(skb, PPPOE_SES_HLEN);
skb_reset_network_header(skb);
break;
}
@ -462,23 +462,6 @@ static void nf_flow_encap_pop(struct nf_flowtable_ctx *ctx,
nf_flow_ip_tunnel_pop(ctx, skb);
}
struct nf_flow_xmit {
const void *dest;
const void *source;
struct net_device *outdev;
};
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
struct nf_flow_xmit *xmit)
{
skb->dev = xmit->outdev;
dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
xmit->dest, xmit->source, skb->len);
dev_queue_xmit(skb);
return NF_STOLEN;
}
static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
struct nf_flowtable *flow_table, struct sk_buff *skb)
@ -524,7 +507,7 @@ static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
return 0;
}
if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
if (skb_ensure_writable(skb, thoff + ctx->hdrsize))
return -1;
flow_offload_refresh(flow_table, flow, false);
@ -544,7 +527,34 @@ static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
return 1;
}
static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id)
/* Similar to skb_vlan_push. */
static int nf_flow_vlan_push(struct sk_buff *skb, __be16 proto, u16 id,
u32 needed_headroom)
{
if (skb_vlan_tag_present(skb)) {
struct vlan_hdr *vhdr;
if (skb_cow_head(skb, needed_headroom + VLAN_HLEN))
return -1;
__skb_push(skb, VLAN_HLEN);
if (skb_mac_header_was_set(skb))
skb->mac_header -= VLAN_HLEN;
vhdr = (struct vlan_hdr *)skb->data;
skb->network_header -= VLAN_HLEN;
vhdr->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
vhdr->h_vlan_encapsulated_proto = skb->protocol;
skb->protocol = skb->vlan_proto;
skb_postpush_rcsum(skb, skb->data, VLAN_HLEN);
}
__vlan_hwaccel_put_tag(skb, proto, id);
return 0;
}
static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id,
u32 needed_headroom)
{
int data_len = skb->len + sizeof(__be16);
struct ppp_hdr {
@ -553,7 +563,7 @@ static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id)
} *ph;
__be16 proto;
if (skb_cow_head(skb, PPPOE_SES_HLEN))
if (skb_cow_head(skb, needed_headroom + PPPOE_SES_HLEN))
return -1;
switch (skb->protocol) {
@ -730,21 +740,24 @@ static int nf_flow_tunnel_v6_push(struct net *net, struct sk_buff *skb,
}
static int nf_flow_encap_push(struct sk_buff *skb,
struct flow_offload_tuple *tuple)
struct flow_offload_tuple *tuple,
struct net_device *outdev)
{
u32 needed_headroom = LL_RESERVED_SPACE(outdev);
int i;
for (i = 0; i < tuple->encap_num; i++) {
for (i = tuple->encap_num - 1; i >= 0; i--) {
switch (tuple->encap[i].proto) {
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
skb_reset_mac_header(skb);
if (skb_vlan_push(skb, tuple->encap[i].proto,
tuple->encap[i].id) < 0)
if (nf_flow_vlan_push(skb, tuple->encap[i].proto,
tuple->encap[i].id,
needed_headroom) < 0)
return -1;
break;
case htons(ETH_P_PPP_SES):
if (nf_flow_pppoe_push(skb, tuple->encap[i].id) < 0)
if (nf_flow_pppoe_push(skb, tuple->encap[i].id,
needed_headroom) < 0)
return -1;
break;
}
@ -753,6 +766,76 @@ static int nf_flow_encap_push(struct sk_buff *skb,
return 0;
}
struct nf_flow_xmit {
const void *dest;
const void *source;
struct net_device *outdev;
struct flow_offload_tuple *tuple;
bool needs_gso_segment;
};
static void __nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
struct nf_flow_xmit *xmit)
{
struct net_device *dev = xmit->outdev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb)
return;
}
skb->dev = dev;
dev_hard_header(skb, dev, ntohs(skb->protocol),
xmit->dest, xmit->source, skb->len);
dev_queue_xmit(skb);
}
static unsigned int nf_flow_encap_gso_xmit(struct net *net, struct sk_buff *skb,
struct nf_flow_xmit *xmit)
{
struct sk_buff *segs, *nskb;
segs = skb_gso_segment(skb, 0);
if (IS_ERR(segs))
return NF_DROP;
if (segs)
consume_skb(skb);
else
segs = skb;
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
if (nf_flow_encap_push(segs, xmit->tuple, xmit->outdev) < 0) {
kfree_skb(segs);
kfree_skb_list(nskb);
return NF_STOLEN;
}
__nf_flow_queue_xmit(net, segs, xmit);
}
return NF_STOLEN;
}
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
struct nf_flow_xmit *xmit)
{
if (xmit->tuple->encap_num) {
if (skb_is_gso(skb) && xmit->needs_gso_segment)
return nf_flow_encap_gso_xmit(net, skb, xmit);
if (nf_flow_encap_push(skb, xmit->tuple, xmit->outdev) < 0)
return NF_DROP;
}
__nf_flow_queue_xmit(net, skb, xmit);
return NF_STOLEN;
}
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@ -797,9 +880,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_tunnel_v4_push(state->net, skb, other_tuple, &ip_daddr) < 0)
return NF_DROP;
if (nf_flow_encap_push(skb, other_tuple) < 0)
return NF_DROP;
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
rt = dst_rtable(tuplehash->tuple.dst_cache);
@ -829,6 +909,8 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
WARN_ON_ONCE(1);
return NF_DROP;
}
xmit.tuple = other_tuple;
xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
return nf_flow_queue_xmit(state->net, skb, &xmit);
}
@ -1037,7 +1119,7 @@ static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
return 0;
}
if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
if (skb_ensure_writable(skb, thoff + ctx->hdrsize))
return -1;
flow_offload_refresh(flow_table, flow, false);
@ -1119,9 +1201,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
&ip6_daddr, encap_limit) < 0)
return NF_DROP;
if (nf_flow_encap_push(skb, other_tuple) < 0)
return NF_DROP;
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
rt = dst_rt6_info(tuplehash->tuple.dst_cache);
@ -1151,6 +1230,8 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
WARN_ON_ONCE(1);
return NF_DROP;
}
xmit.tuple = other_tuple;
xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
return nf_flow_queue_xmit(state->net, skb, &xmit);
}
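
The headroom guarantee added in this file boils down to one reusable
pattern, also used by the nft_fwd_netdev hunk below (a distilled sketch,
not a new helper introduced by the series):

	/* Make sure the skb can hold the output device's link layer
	 * header before dev_hard_header() writes it.  skb_expand_head()
	 * consumes the skb on failure, so only the returned pointer may
	 * be used afterwards.
	 */
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb)
			return NF_STOLEN;
	}

Note also the ordering in nf_flow_queue_xmit(): GSO packets are
segmented first, then each segment gets its vlan/pppoe headers pushed
and is transmitted individually.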

View File

@ -86,6 +86,7 @@ struct nft_forward_info {
u8 ingress_vlans;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
bool needs_gso_segment;
enum flow_offload_xmit_type xmit_type;
};
@ -138,8 +139,11 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
path->encap.proto;
info->num_encaps++;
}
if (path->type == DEV_PATH_PPPOE)
if (path->type == DEV_PATH_PPPOE) {
memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
info->needs_gso_segment = 1;
}
break;
case DEV_PATH_BRIDGE:
if (is_zero_ether_addr(info->h_source))
@ -279,6 +283,7 @@ static void nft_dev_forward_path(const struct nft_pktinfo *pkt,
memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
route->tuple[dir].xmit_type = info.xmit_type;
}
route->tuple[dir].out.needs_gso_segment = info.needs_gso_segment;
}
int nft_flow_route(const struct nft_pktinfo *pkt, const struct nf_conn *ct,

View File

@ -407,6 +407,7 @@ static void nft_netdev_unregister_trans_hook(struct net *net,
}
static void nft_netdev_unregister_hooks(struct net *net,
const struct nft_table *table,
struct list_head *hook_list,
bool release_netdev)
{
@ -414,8 +415,10 @@ static void nft_netdev_unregister_hooks(struct net *net,
struct nf_hook_ops *ops;
list_for_each_entry_safe(hook, next, hook_list, list) {
list_for_each_entry(ops, &hook->ops_list, list)
nf_unregister_net_hook(net, ops);
if (!(table->flags & NFT_TABLE_F_DORMANT)) {
list_for_each_entry(ops, &hook->ops_list, list)
nf_unregister_net_hook(net, ops);
}
if (release_netdev)
nft_netdev_hook_unlink_free_rcu(hook);
}
@ -452,20 +455,25 @@ static void __nf_tables_unregister_hook(struct net *net,
struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
if (!nft_is_base_chain(chain))
return;
basechain = nft_base_chain(chain);
ops = &basechain->ops;
/* must also be called for dormant tables */
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
nft_netdev_unregister_hooks(net, table, &basechain->hook_list,
release_netdev);
return;
}
if (table->flags & NFT_TABLE_F_DORMANT)
return;
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
nft_netdev_unregister_hooks(net, &basechain->hook_list,
release_netdev);
else
nf_unregister_net_hook(net, &basechain->ops);
nf_unregister_net_hook(net, &basechain->ops);
}
static void nf_tables_unregister_hook(struct net *net,
@ -4205,6 +4213,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
struct nft_chain *chain;
struct nft_ctx ctx = {
.net = net,
.table = (struct nft_table *)table,
.family = table->family,
};
int err = 0;
@ -11281,11 +11290,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
if (!(table->flags & NFT_TABLE_F_DORMANT)) {
nft_netdev_unregister_hooks(net,
&nft_trans_chain_hooks(trans),
true);
}
nft_netdev_unregister_hooks(net, table,
&nft_trans_chain_hooks(trans),
true);
free_percpu(nft_trans_chain_stats(trans));
kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
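
In condensed form, the unregister path now separates "remove the hook
from the stack" from "release the hook object" (a sketch of the
resulting logic):

	/* Dormant tables never registered their netdev hooks, so only
	 * nf_unregister_net_hook() is skipped -- the hook objects must
	 * still be unlinked and freed, otherwise they leak (the bug
	 * fixed here).
	 */
	list_for_each_entry_safe(hook, next, hook_list, list) {
		if (!(table->flags & NFT_TABLE_F_DORMANT)) {
			list_for_each_entry(ops, &hook->ops_list, list)
				nf_unregister_net_hook(net, ops);
		}
		if (release_netdev)
			nft_netdev_hook_unlink_free_rcu(hook);
	}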

View File

@ -153,7 +153,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
ptr = skb_network_header(skb) + pkt->nhoff;
else {
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
return false;
ptr = skb->data + nft_thoff(pkt);
}

View File

@ -261,10 +261,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return ret;
}
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
nft_compat_wait_for_destructors(ctx->net);
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
ret = xt_check_target(&par, size, proto, inv);
if (ret < 0) {
if (ret == -ENOENT) {
@ -353,8 +353,6 @@ static int nft_target_dump(struct sk_buff *skb,
static int nft_target_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
unsigned int hook_mask = 0;
int ret;
if (ctx->family != NFPROTO_IPV4 &&
@ -377,11 +375,21 @@ static int nft_target_validate(const struct nft_ctx *ctx,
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
unsigned int hook_mask = 1 << ops->hooknum;
struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
struct xt_tgchk_param par;
union nft_entry e = {};
hook_mask = 1 << ops->hooknum;
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
nft_target_set_tgchk_param(&par, ctx, target, info, &e, 0, false);
ret = xt_check_hooks_target(&par);
if (ret < 0)
return ret;
ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
@ -515,10 +523,10 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return ret;
}
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
nft_compat_wait_for_destructors(ctx->net);
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
return xt_check_match(&par, size, proto, inv);
}
@ -614,8 +622,6 @@ static int nft_match_large_dump(struct sk_buff *skb,
static int nft_match_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct xt_match *match = expr->ops->data;
unsigned int hook_mask = 0;
int ret;
if (ctx->family != NFPROTO_IPV4 &&
@ -638,11 +644,30 @@ static int nft_match_validate(const struct nft_ctx *ctx,
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
const struct nf_hook_ops *ops = &basechain->ops;
unsigned int hook_mask = 1 << ops->hooknum;
struct xt_match *match = expr->ops->data;
size_t size = XT_ALIGN(match->matchsize);
struct xt_mtchk_param par;
union nft_entry e = {};
void *info;
hook_mask = 1 << ops->hooknum;
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
if (NFT_EXPR_SIZE(size) > NFT_MATCH_LARGE_THRESH) {
struct nft_xt_match_priv *priv = nft_expr_priv(expr);
info = priv->info;
} else {
info = nft_expr_priv(expr);
}
nft_match_set_mtchk_param(&par, ctx, match, info, &e, 0, false);
ret = xt_check_hooks_match(&par);
if (ret < 0)
return ret;
ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
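
After this change both entry points funnel through the same hook
validation; a condensed view of the call order (see the x_tables.c
hunks below):

	/* iptables path:  xt_check_match()
	 *                   -> xt_check_match_common()  size/proto/table
	 *                   -> xt_check_hooks_match()   ->check_hooks(par)
	 *                   -> xt_checkentry_match()    ->checkentry(par)
	 *
	 * nft_compat path: nft_match_validate()
	 *                   -> xt_check_hooks_match()   same hook checks
	 * (and likewise for targets via xt_check_hooks_target())
	 */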

View File

@ -376,7 +376,7 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
if (pkt->tprot != IPPROTO_SCTP)
if (pkt->tprot != IPPROTO_SCTP || pkt->fragoff)
goto err;
do {

View File

@ -95,12 +95,15 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
u8 *nf_dup_skb_recursion = nf_get_nf_dup_skb_recursion();
struct nft_fwd_neigh *priv = nft_expr_priv(expr);
void *addr = &regs->data[priv->sreg_addr];
int oif = regs->data[priv->sreg_dev];
unsigned int verdict = NF_STOLEN;
struct sk_buff *skb = pkt->skb;
int nhoff = skb_network_offset(skb);
struct net_device *dev;
unsigned int hh_len;
int neigh_table;
switch (priv->nfproto) {
@ -111,7 +114,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
verdict = NFT_BREAK;
goto out;
}
if (skb_try_make_writable(skb, sizeof(*iph))) {
if (skb_ensure_writable(skb, nhoff + sizeof(*iph))) {
verdict = NF_DROP;
goto out;
}
@ -132,7 +135,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
verdict = NFT_BREAK;
goto out;
}
if (skb_try_make_writable(skb, sizeof(*ip6h))) {
if (skb_ensure_writable(skb, nhoff + sizeof(*ip6h))) {
verdict = NF_DROP;
goto out;
}
@ -151,13 +154,31 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
goto out;
}
if (*nf_dup_skb_recursion > NF_RECURSION_LIMIT) {
verdict = NF_DROP;
goto out;
}
dev = dev_get_by_index_rcu(nft_net(pkt), oif);
if (dev == NULL)
return;
if (dev == NULL) {
verdict = NF_DROP;
goto out;
}
hh_len = LL_RESERVED_SPACE(dev);
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb) {
verdict = NF_STOLEN;
goto out;
}
}
skb->dev = dev;
skb_clear_tstamp(skb);
(*nf_dup_skb_recursion)++;
neigh_xmit(neigh_table, dev, addr, skb);
(*nf_dup_skb_recursion)--;
out:
regs->verdict.code = verdict;
}

View File

@ -33,7 +33,7 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
return;
}
if (pkt->tprot != IPPROTO_TCP) {
if (pkt->tprot != IPPROTO_TCP || pkt->fragoff) {
regs->verdict.code = NFT_BREAK;
return;
}

View File

@ -30,8 +30,8 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
__be16 tport = 0;
struct sock *sk;
if (pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) {
if ((pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) || pkt->fragoff) {
regs->verdict.code = NFT_BREAK;
return;
}
@ -97,8 +97,8 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
memset(&taddr, 0, sizeof(taddr));
if (pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) {
if ((pkt->tprot != IPPROTO_TCP &&
pkt->tprot != IPPROTO_UDP) || pkt->fragoff) {
regs->verdict.code = NFT_BREAK;
return;
}

View File

@ -477,11 +477,9 @@ int xt_check_proc_name(const char *name, unsigned int size)
}
EXPORT_SYMBOL(xt_check_proc_name);
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
static int xt_check_match_common(struct xt_mtchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
@ -530,6 +528,14 @@ int xt_check_match(struct xt_mtchk_param *par,
par->match->proto);
return -EINVAL;
}
return 0;
}
static int xt_checkentry_match(struct xt_mtchk_param *par)
{
int ret;
if (par->match->checkentry != NULL) {
ret = par->match->checkentry(par);
if (ret < 0)
@ -538,8 +544,34 @@ int xt_check_match(struct xt_mtchk_param *par,
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
int xt_check_hooks_match(struct xt_mtchk_param *par)
{
if (par->match->check_hooks != NULL)
return par->match->check_hooks(par);
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_hooks_match);
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
ret = xt_check_match_common(par, size, proto, inv_proto);
if (ret < 0)
return ret;
ret = xt_check_hooks_match(par);
if (ret < 0)
return ret;
return xt_checkentry_match(par);
}
EXPORT_SYMBOL_GPL(xt_check_match);
/** xt_check_entry_match - check that matches end before start of target
@ -1012,11 +1044,9 @@ bool xt_find_jump_offset(const unsigned int *offsets,
}
EXPORT_SYMBOL(xt_find_jump_offset);
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
static int xt_check_target_common(struct xt_tgchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
xt_prefix[par->family], par->target->name,
@ -1061,6 +1091,23 @@ int xt_check_target(struct xt_tgchk_param *par,
par->target->proto);
return -EINVAL;
}
return 0;
}
int xt_check_hooks_target(struct xt_tgchk_param *par)
{
if (par->target->check_hooks != NULL)
return par->target->check_hooks(par);
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_hooks_target);
static int xt_checkentry_target(struct xt_tgchk_param *par)
{
int ret;
if (par->target->checkentry != NULL) {
ret = par->target->checkentry(par);
if (ret < 0)
@ -1071,6 +1118,22 @@ int xt_check_target(struct xt_tgchk_param *par,
}
return 0;
}
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u16 proto, bool inv_proto)
{
int ret;
ret = xt_check_target_common(par, size, proto, inv_proto);
if (ret < 0)
return ret;
ret = xt_check_hooks_target(par);
if (ret < 0)
return ret;
return xt_checkentry_target(par);
}
EXPORT_SYMBOL_GPL(xt_check_target);
/**

View File

@ -354,7 +354,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.revision = 1,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v1,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
@ -366,7 +366,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.revision = 2,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v2,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
@ -398,7 +398,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
.family = NFPROTO_IPV6,
.revision = 1,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v1,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
@ -410,7 +410,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
.family = NFPROTO_IPV6,
.revision = 2,
.targetsize = sizeof(struct xt_ct_target_info_v1),
.usersize = offsetof(struct xt_ct_target_info, ct),
.usersize = offsetof(struct xt_ct_target_info_v1, ct),
.checkentry = xt_ct_tg_check_v2,
.destroy = xt_ct_tg_destroy_v1,
.target = xt_ct_target_v1,
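
Why the struct used in offsetof() matters here: revision 1 inserted a
timeout[] field before the kernel-private ct pointer, so taking the
offset in the v0 struct yields a usersize that is 32 bytes short
(layouts abridged from the xt_CT uapi header):

	struct xt_ct_target_info {		/* revision 0 */
		__u16 flags, zone;
		__u32 ct_events, exp_events;
		char helper[16];
		struct nf_conn *ct;		/* kernel-private tail */
	};

	struct xt_ct_target_info_v1 {		/* revisions 1 and 2 */
		__u16 flags, zone;
		__u32 ct_events, exp_events;
		char helper[16];
		char timeout[32];		/* new in v1 */
		struct nf_conn *ct;		/* kernel-private tail */
	};

	/* .usersize tells x_tables how many leading bytes are copied
	 * back to userspace when dumping rules; with the v0 offset the
	 * timeout[] bytes of v1/v2 entries were truncated.
	 */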

View File

@ -247,6 +247,21 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
}
#endif
static int tcpmss_tg4_check_hooks(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
return 0;
}
/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
@ -262,17 +277,9 @@ static inline bool find_syn_match(const struct xt_entry_match *m)
static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
const struct xt_entry_match *ematch;
if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
if (par->nft_compat)
return 0;
@ -286,17 +293,9 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
const struct xt_entry_match *ematch;
if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
if (par->nft_compat)
return 0;
@ -312,6 +311,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = {
{
.family = NFPROTO_IPV4,
.name = "TCPMSS",
.check_hooks = tcpmss_tg4_check_hooks,
.checkentry = tcpmss_tg4_check,
.target = tcpmss_tg4,
.targetsize = sizeof(struct xt_tcpmss_info),
@ -322,6 +322,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = {
{
.family = NFPROTO_IPV6,
.name = "TCPMSS",
.check_hooks = tcpmss_tg4_check_hooks,
.checkentry = tcpmss_tg6_check,
.target = tcpmss_tg6,
.targetsize = sizeof(struct xt_tcpmss_info),

View File

@ -86,6 +86,9 @@ tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tproxy_target_info *tgi = par->targinfo;
if (par->fragoff)
return NF_DROP;
return tproxy_tg4(xt_net(par), skb, tgi->laddr, tgi->lport,
tgi->mark_mask, tgi->mark_value);
}
@ -95,6 +98,9 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
if (par->fragoff)
return NF_DROP;
return tproxy_tg4(xt_net(par), skb, tgi->laddr.ip, tgi->lport,
tgi->mark_mask, tgi->mark_value);
}
@ -106,6 +112,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct xt_tproxy_target_info_v1 *tgi = par->targinfo;
unsigned short fragoff = 0;
struct udphdr _hdr, *hp;
struct sock *sk;
const struct in6_addr *laddr;
@ -113,8 +120,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
int thoff = 0;
int tproto;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0)
tproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
if (tproto < 0 || fragoff)
return NF_DROP;
hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);

View File

@ -153,14 +153,10 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
return ret;
}
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
static int addrtype_mt_check_hooks(const struct xt_mtchk_param *par)
{
const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
struct xt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
goto err;
const char *errmsg;
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) &&
@ -176,6 +172,21 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
goto err;
}
return 0;
err:
pr_info_ratelimited("%s\n", errmsg);
return -EINVAL;
}
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
{
const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
struct xt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
goto err;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (par->family == NFPROTO_IPV6) {
if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
@ -211,6 +222,7 @@ static struct xt_match addrtype_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.revision = 1,
.match = addrtype_mt_v1,
.check_hooks = addrtype_mt_check_hooks,
.checkentry = addrtype_mt_checkentry_v1,
.matchsize = sizeof(struct xt_addrtype_info_v1),
.me = THIS_MODULE
@ -221,6 +233,7 @@ static struct xt_match addrtype_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV6,
.revision = 1,
.match = addrtype_mt_v1,
.check_hooks = addrtype_mt_check_hooks,
.checkentry = addrtype_mt_checkentry_v1,
.matchsize = sizeof(struct xt_addrtype_info_v1),
.me = THIS_MODULE

View File

@ -33,14 +33,10 @@ static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
return true;
}
static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
static int devgroup_mt_check_hooks(const struct xt_mtchk_param *par)
{
const struct xt_devgroup_info *info = par->matchinfo;
if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
return -EINVAL;
if (info->flags & XT_DEVGROUP_MATCH_SRC &&
par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
@ -56,9 +52,21 @@ static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
return 0;
}
static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
{
const struct xt_devgroup_info *info = par->matchinfo;
if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
return -EINVAL;
return 0;
}
static struct xt_match devgroup_mt_reg __read_mostly = {
.name = "devgroup",
.match = devgroup_mt,
.check_hooks = devgroup_mt_check_hooks,
.checkentry = devgroup_mt_checkentry,
.matchsize = sizeof(struct xt_devgroup_info),
.family = NFPROTO_UNSPEC,

View File

@ -30,6 +30,10 @@ static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
struct tcphdr _tcph;
const struct tcphdr *th;
/* this is fine for IPv6 as ecn_mt_check6() enforces -p tcp */
if (par->fragoff)
return false;
/* In practice, TCP match does this, so can't fail. But let's
* be good citizens.
*/

View File

@ -658,6 +658,8 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
if (!(hinfo->cfg.mode &
(XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
return 0;
if (ntohs(ip_hdr(skb)->frag_off) & IP_OFFSET)
return -1;
nexthdr = ip_hdr(skb)->protocol;
break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@ -681,7 +683,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
if ((int)protoff < 0)
if ((int)protoff < 0 || ntohs(frag_off) & IP6_OFFSET)
return -1;
break;
}

View File

@ -27,6 +27,9 @@
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
if (p->fragoff)
return false;
return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
xt_out(p), p->matchinfo, xt_net(p), nf_osf_fingers);
}

View File

@ -91,6 +91,21 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
}
static int physdev_mt_check_hooks(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
return -EINVAL;
}
return 0;
}
static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
@ -99,13 +114,6 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
return -EINVAL;
if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
return -EINVAL;
}
#define X(memb) strnlen(info->memb, sizeof(info->memb)) >= sizeof(info->memb)
if (info->bitmask & XT_PHYSDEV_OP_IN) {
@ -141,6 +149,7 @@ static struct xt_match physdev_mt_reg[] __read_mostly = {
{
.name = "physdev",
.family = NFPROTO_IPV4,
.check_hooks = physdev_mt_check_hooks,
.checkentry = physdev_mt_check,
.match = physdev_mt,
.matchsize = sizeof(struct xt_physdev_info),
@ -149,6 +158,7 @@ static struct xt_match physdev_mt_reg[] __read_mostly = {
{
.name = "physdev",
.family = NFPROTO_IPV6,
.check_hooks = physdev_mt_check_hooks,
.checkentry = physdev_mt_check,
.match = physdev_mt,
.matchsize = sizeof(struct xt_physdev_info),

View File

@ -126,13 +126,10 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
return ret;
}
static int policy_mt_check(const struct xt_mtchk_param *par)
static int policy_mt_check_hooks(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
const char *errmsg = "neither incoming nor outgoing policy selected";
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
goto err;
const char *errmsg;
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
@ -144,6 +141,21 @@ static int policy_mt_check(const struct xt_mtchk_param *par)
errmsg = "input policy not valid in POSTROUTING and OUTPUT";
goto err;
}
return 0;
err:
pr_info_ratelimited("%s\n", errmsg);
return -EINVAL;
}
static int policy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
const char *errmsg = "neither incoming nor outgoing policy selected";
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
goto err;
if (info->len > XT_POLICY_MAX_ELEM) {
errmsg = "too many policy elements";
goto err;
@ -158,6 +170,7 @@ static struct xt_match policy_mt_reg[] __read_mostly = {
{
.name = "policy",
.family = NFPROTO_IPV4,
.check_hooks = policy_mt_check_hooks,
.checkentry = policy_mt_check,
.match = policy_mt,
.matchsize = sizeof(struct xt_policy_info),
@ -166,6 +179,7 @@ static struct xt_match policy_mt_reg[] __read_mostly = {
{
.name = "policy",
.family = NFPROTO_IPV6,
.check_hooks = policy_mt_check_hooks,
.checkentry = policy_mt_check,
.match = policy_mt,
.matchsize = sizeof(struct xt_policy_info),

View File

@ -430,6 +430,29 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
static int
set_target_v3_check_hooks(const struct xt_tgchk_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
if (info->map_set.index != IPSET_INVALID_ID) {
if (strncmp(par->table, "mangle", 7)) {
pr_info_ratelimited("--map-set only usable from mangle table\n");
return -EINVAL;
}
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
(info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
(par->hook_mask & ~(1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
return -EINVAL;
}
}
return 0;
}
static int
set_target_v3_checkentry(const struct xt_tgchk_param *par)
{
@ -459,20 +482,6 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
}
if (info->map_set.index != IPSET_INVALID_ID) {
if (strncmp(par->table, "mangle", 7)) {
pr_info_ratelimited("--map-set only usable from mangle table\n");
ret = -EINVAL;
goto cleanup_del;
}
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
(info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
(par->hook_mask & ~(1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
ret = -EINVAL;
goto cleanup_del;
}
index = ip_set_nfnl_get_byindex(par->net,
info->map_set.index);
if (index == IPSET_INVALID_ID) {
@ -672,6 +681,7 @@ static struct xt_target set_targets[] __read_mostly = {
.family = NFPROTO_IPV4,
.target = set_target_v3,
.targetsize = sizeof(struct xt_set_info_target_v3),
.check_hooks = set_target_v3_check_hooks,
.checkentry = set_target_v3_checkentry,
.destroy = set_target_v3_destroy,
.me = THIS_MODULE
@ -682,6 +692,7 @@ static struct xt_target set_targets[] __read_mostly = {
.family = NFPROTO_IPV6,
.target = set_target_v3,
.targetsize = sizeof(struct xt_set_info_target_v3),
.check_hooks = set_target_v3_check_hooks,
.checkentry = set_target_v3_checkentry,
.destroy = set_target_v3_destroy,
.me = THIS_MODULE

View File

@ -32,6 +32,10 @@ tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par)
u8 _opt[15 * 4 - sizeof(_tcph)];
unsigned int i, optlen;
/* this is fine for IPv6 as xt_tcpmss enforces -p tcp */
if (par->fragoff)
return false;
/* If we don't have the whole header, drop packet. */
th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
if (th == NULL)