#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
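+/* Upper bound on the number of flow_action entries a single offload rule may carry. */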
+#define NF_FLOW_RULE_ACTION_MAX 24
+
static struct workqueue_struct *nf_flow_offload_add_wq;
static struct workqueue_struct *nf_flow_offload_del_wq;
static struct workqueue_struct *nf_flow_offload_stats_wq;
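+/*
+ * Return the next free action entry, or NULL once the rule already holds
+ * NF_FLOW_RULE_ACTION_MAX entries. Every caller must check for NULL.
+ */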
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
- int i = flow_rule->rule->action.num_entries++;
+ int i;
+
+ if (unlikely(flow_rule->rule->action.num_entries >= NF_FLOW_RULE_ACTION_MAX))
+ return NULL;
+
+ i = flow_rule->rule->action.num_entries++;
return &flow_rule->rule->action.entries[i];
}
u32 mask, val;
u16 val16;
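+ /* no free action slots left: this rule cannot be expressed, bail out */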
+ if (!entry0 || !entry1)
+ return -E2BIG;
+
this_tuple = &flow->tuplehash[dir].tuple;
switch (this_tuple->xmit_type) {
u8 nud_state;
u16 val16;
+ if (!entry0 || !entry1)
+ return -E2BIG;
+
this_tuple = &flow->tuplehash[dir].tuple;
switch (this_tuple->xmit_type) {
return 0;
}
-static void flow_offload_ipv4_snat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff);
__be32 addr;
u32 offset;
+ if (!entry)
+ return -E2BIG;
+
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
offset = offsetof(struct iphdr, daddr);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
&addr, &mask);
+ return 0;
}
-static void flow_offload_ipv4_dnat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff);
__be32 addr;
u32 offset;
+ if (!entry)
+ return -E2BIG;
+
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
offset = offsetof(struct iphdr, saddr);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
&addr, &mask);
+ return 0;
}
-static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
- unsigned int offset,
- const __be32 *addr, const __be32 *mask)
+static int flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ unsigned int offset,
+ const __be32 *addr, const __be32 *mask)
{
for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -E2BIG;
+
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
offset + i * sizeof(u32), &addr[i], mask);
}
+
+ return 0;
}
-static void flow_offload_ipv6_snat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv6_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const __be32 *addr;
offset = offsetof(struct ipv6hdr, daddr);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
- flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
+ return flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
-static void flow_offload_ipv6_dnat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv6_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
u32 mask = ~htonl(0xffffffff);
const __be32 *addr;
offset = offsetof(struct ipv6hdr, saddr);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
- flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
+ return flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static int flow_offload_l4proto(const struct flow_offload *flow)
return type;
}
-static void flow_offload_port_snat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_port_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask, port;
u32 offset;
+ if (!entry)
+ return -E2BIG;
+
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
mask = ~htonl(0xffff0000);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
+ return 0;
}
-static void flow_offload_port_dnat(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_port_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask, port;
u32 offset;
+ if (!entry)
+ return -E2BIG;
+
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
mask = ~htonl(0xffff);
break;
default:
- return;
+ return -EOPNOTSUPP;
}
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
+ return 0;
}
-static void flow_offload_ipv4_checksum(struct net *net,
- const struct flow_offload *flow,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_checksum(struct net *net,
+ const struct flow_offload *flow,
+ struct nf_flow_rule *flow_rule)
{
u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -E2BIG;
+
entry->id = FLOW_ACTION_CSUM;
entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
break;
}
+
+ return 0;
}
-static void flow_offload_redirect(struct net *net,
- const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_redirect(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *this_tuple, *other_tuple;
struct flow_action_entry *entry;
ifindex = other_tuple->iifidx;
break;
default:
- return;
+ return -EOPNOTSUPP;
}
dev = dev_get_by_index(net, ifindex);
if (!dev)
- return;
+ return -ENODEV;
entry = flow_action_entry_next(flow_rule);
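+ /* on failure, drop the reference taken by dev_get_by_index() */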
+ if (!entry) {
+ dev_put(dev);
+ return -E2BIG;
+ }
+
entry->id = FLOW_ACTION_REDIRECT;
entry->dev = dev;
+
+ return 0;
}
-static void flow_offload_encap_tunnel(const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_encap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *this_tuple;
struct flow_action_entry *entry;
this_tuple = &flow->tuplehash[dir].tuple;
if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
- return;
+ return 0;
dst = this_tuple->dst_cache;
if (dst && dst->lwtstate) {
tun_info = lwt_tun_info(dst->lwtstate);
if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -E2BIG;
entry->id = FLOW_ACTION_TUNNEL_ENCAP;
entry->tunnel = tun_info;
}
}
+
+ return 0;
}
-static void flow_offload_decap_tunnel(const struct flow_offload *flow,
- enum flow_offload_tuple_dir dir,
- struct nf_flow_rule *flow_rule)
+static int flow_offload_decap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
{
const struct flow_offload_tuple *other_tuple;
struct flow_action_entry *entry;
other_tuple = &flow->tuplehash[!dir].tuple;
if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
- return;
+ return 0;
dst = other_tuple->dst_cache;
if (dst && dst->lwtstate) {
tun_info = lwt_tun_info(dst->lwtstate);
if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -E2BIG;
entry->id = FLOW_ACTION_TUNNEL_DECAP;
}
}
+
+ return 0;
}
static int
const struct flow_offload_tuple *tuple;
int i;
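+ /* stop building the rule as soon as any action helper fails */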
- flow_offload_decap_tunnel(flow, dir, flow_rule);
- flow_offload_encap_tunnel(flow, dir, flow_rule);
+ if (flow_offload_decap_tunnel(flow, dir, flow_rule) < 0 ||
+ flow_offload_encap_tunnel(flow, dir, flow_rule) < 0)
+ return -1;
if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -1;
entry->id = FLOW_ACTION_VLAN_POP;
}
}
continue;
entry = flow_action_entry_next(flow_rule);
+ if (!entry)
+ return -1;
switch (other_tuple->encap[i].proto) {
case htons(ETH_P_PPP_SES):
return -1;
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
- flow_offload_ipv4_snat(net, flow, dir, flow_rule);
- flow_offload_port_snat(net, flow, dir, flow_rule);
+ if (flow_offload_ipv4_snat(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_port_snat(net, flow, dir, flow_rule) < 0)
+ return -1;
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
- flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
- flow_offload_port_dnat(net, flow, dir, flow_rule);
+ if (flow_offload_ipv4_dnat(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_port_dnat(net, flow, dir, flow_rule) < 0)
+ return -1;
}
if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
- test_bit(NF_FLOW_DNAT, &flow->flags))
- flow_offload_ipv4_checksum(net, flow, flow_rule);
+ test_bit(NF_FLOW_DNAT, &flow->flags)) {
+ if (flow_offload_ipv4_checksum(net, flow, flow_rule) < 0)
+ return -1;
+ }
- flow_offload_redirect(net, flow, dir, flow_rule);
+ if (flow_offload_redirect(net, flow, dir, flow_rule) < 0)
+ return -1;
return 0;
}
return -1;
if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
- flow_offload_ipv6_snat(net, flow, dir, flow_rule);
- flow_offload_port_snat(net, flow, dir, flow_rule);
+ if (flow_offload_ipv6_snat(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_port_snat(net, flow, dir, flow_rule) < 0)
+ return -1;
}
if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
- flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
- flow_offload_port_dnat(net, flow, dir, flow_rule);
+ if (flow_offload_ipv6_dnat(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_port_dnat(net, flow, dir, flow_rule) < 0)
+ return -1;
}
- flow_offload_redirect(net, flow, dir, flow_rule);
+ if (flow_offload_redirect(net, flow, dir, flow_rule) < 0)
+ return -1;
return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
-#define NF_FLOW_RULE_ACTION_MAX 16
-
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
const struct flow_offload_work *offload,