author     David S. Miller <davem@davemloft.net>  2017-04-21 21:11:28 +0200
committer  David S. Miller <davem@davemloft.net>  2017-04-21 21:11:28 +0200
commit     6b633e82b0f902a4cceb9bcdcb5bb31d04ca6264 (patch)
tree       15f7953de0e6c05106313c6e01ee06cc2b3389f6 /net/xfrm
parent     MAINTAINERS: Add new IPsec offloading files. (diff)
parent     esp4/6: Fix GSO path for non-GSO SW-crypto packets (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-04-20

This adds the basic infrastructure for IPsec hardware offloading,
it creates a configuration API and adjusts the packet path.

1) Add the needed netdev features to configure IPsec offloads.

2) Add the IPsec hardware offloading API.

3) Prepare the ESP packet path for hardware offloading.

4) Add gso handlers for esp4 and esp6, this implements the
   software fallback for GSO packets.

5) Add xfrm replay handler functions for offloading.

6) Change ESP to use a synchronous crypto algorithm on offloading,
   we don't have the option for asynchronous returns when we handle
   IPsec at layer2.

7) Add a xfrm validate function to validate_xmit_skb. This
   implements the software fallback for non GSO packets.

8) Set the inner_network and inner_transport members of the SKB,
   as well as encapsulation, to reflect the actual positions of
   these headers, and removes them only once encryption is done
   on the payload. From Ilan Tayari.

9) Prepare the ESP GRO codepath for hardware offloading.

10) Fix incorrect null pointer check in esp6. From Colin Ian King.

11) Fix for the GSO software fallback path to detect the fallback
    correctly. From Ilan Tayari.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
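The driver-facing half of this API is the dev->xfrmdev_ops callback table
together with the NETIF_F_HW_ESP / NETIF_F_HW_ESP_TX_CSUM features:
xfrm_dev_state_add() below calls xdo_dev_state_add() to push a SA into the
NIC, and xfrm_dev_offload_ok() asks the driver per packet via
xdo_dev_offload_ok(). The following is a minimal sketch of the driver glue,
not part of this commit: the mydrv_* names are hypothetical, and the exact
layout of struct xfrmdev_ops (including xdo_dev_state_delete, which backs
xfrm_dev_state_delete() in xfrm_state.c) is assumed from later mainline
headers rather than shown in this diff.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/xfrm.h>

/* Hypothetical NIC driver glue for the offload hooks used by this series.
 * Only xdo_dev_state_add() and xdo_dev_offload_ok() are called from the
 * xfrm code in this diff; everything beyond that is an assumption.
 */
static int mydrv_xdo_dev_state_add(struct xfrm_state *x)
{
	/* Program the SA (algorithm, keys, SPI, addresses) into hardware.
	 * A non-zero return propagates out of xfrm_dev_state_add(), so
	 * state creation fails when offload was explicitly requested via
	 * the XFRMA_OFFLOAD_DEV attribute.
	 */
	return -EOPNOTSUPP;	/* placeholder: no hardware programmed yet */
}

static void mydrv_xdo_dev_state_delete(struct xfrm_state *x)
{
	/* Remove the SA from hardware when the xfrm state is deleted. */
}

static bool mydrv_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Per-packet capability check invoked from xfrm_dev_offload_ok();
	 * returning false keeps this packet on the normal software ESP
	 * path in xfrm_output().
	 */
	return true;
}

static const struct xfrmdev_ops mydrv_xfrmdev_ops = {
	.xdo_dev_state_add	= mydrv_xdo_dev_state_add,
	.xdo_dev_state_delete	= mydrv_xdo_dev_state_delete,
	.xdo_dev_offload_ok	= mydrv_xdo_dev_offload_ok,
};

static void mydrv_setup_ipsec(struct net_device *netdev)
{
	/* xfrm_dev_register() rejects NETIF_F_HW_ESP without xfrmdev_ops,
	 * and NETIF_F_HW_ESP_TX_CSUM without NETIF_F_HW_ESP.
	 */
	netdev->xfrmdev_ops  = &mydrv_xfrmdev_ops;
	netdev->features    |= NETIF_F_HW_ESP;
	netdev->hw_features |= NETIF_F_HW_ESP;
}

On the configuration side, userspace attaches a struct xfrm_user_offload
(ifindex plus flags such as XFRM_OFFLOAD_INBOUND) to a state via the new
XFRMA_OFFLOAD_DEV attribute handled in xfrm_user.c below; iproute2 later
exposes this as "ip xfrm state add ... offload dev <dev> dir in|out".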
Diffstat (limited to 'net/xfrm')
-rw-r--r--  net/xfrm/Makefile         1
-rw-r--r--  net/xfrm/xfrm_device.c  208
-rw-r--r--  net/xfrm/xfrm_input.c    41
-rw-r--r--  net/xfrm/xfrm_output.c   46
-rw-r--r--  net/xfrm/xfrm_policy.c   27
-rw-r--r--  net/xfrm/xfrm_replay.c  162
-rw-r--r--  net/xfrm/xfrm_state.c   147
-rw-r--r--  net/xfrm/xfrm_user.c     28
8 files changed, 628 insertions, 32 deletions
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index c0e961983f17..abf81b329dc1 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
xfrm_input.o xfrm_output.o \
xfrm_sysctl.o xfrm_replay.o
+obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
new file mode 100644
index 000000000000..8ec8a3fcf8d4
--- /dev/null
+++ b/net/xfrm/xfrm_device.c
@@ -0,0 +1,208 @@
+/*
+ * xfrm_device.c - IPsec device offloading code.
+ *
+ * Copyright (c) 2015 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <net/dst.h>
+#include <net/xfrm.h>
+#include <linux/notifier.h>
+
+int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+{
+ int err;
+ struct xfrm_state *x;
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
+ if (skb_is_gso(skb))
+ return 0;
+
+ if (xo) {
+ x = skb->sp->xvec[skb->sp->len - 1];
+ if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ return 0;
+
+ x->outer_mode->xmit(x, skb);
+
+ err = x->type_offload->xmit(x, skb, features);
+ if (err) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ return err;
+ }
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
+
+int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ struct xfrm_user_offload *xuo)
+{
+ int err;
+ struct dst_entry *dst;
+ struct net_device *dev;
+ struct xfrm_state_offload *xso = &x->xso;
+ xfrm_address_t *saddr;
+ xfrm_address_t *daddr;
+
+ if (!x->type_offload)
+ return 0;
+
+ /* We don't yet support UDP encapsulation, TFC padding and ESN. */
+ if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
+ return 0;
+
+ dev = dev_get_by_index(net, xuo->ifindex);
+ if (!dev) {
+ if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
+ saddr = &x->props.saddr;
+ daddr = &x->id.daddr;
+ } else {
+ saddr = &x->id.daddr;
+ daddr = &x->props.saddr;
+ }
+
+ dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, x->props.family);
+ if (IS_ERR(dst))
+ return 0;
+
+ dev = dst->dev;
+
+ dev_hold(dev);
+ dst_release(dst);
+ }
+
+ if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+ dev_put(dev);
+ return 0;
+ }
+
+ xso->dev = dev;
+ xso->num_exthdrs = 1;
+ xso->flags = xuo->flags;
+
+ err = dev->xfrmdev_ops->xdo_dev_state_add(x);
+ if (err) {
+ dev_put(dev);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
+
+bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ int mtu;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct net_device *dev = x->xso.dev;
+
+ if (!x->type_offload || x->encap)
+ return false;
+
+ if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
+ !dst->child->xfrm && x->type->get_mtu) {
+ mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
+
+ if (skb->len <= mtu)
+ goto ok;
+
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+ goto ok;
+ }
+
+ return false;
+
+ok:
+ if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
+ return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+int xfrm_dev_register(struct net_device *dev)
+{
+ if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
+ return NOTIFY_BAD;
+ if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
+ !(dev->features & NETIF_F_HW_ESP))
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
+static int xfrm_dev_unregister(struct net_device *dev)
+{
+ return NOTIFY_DONE;
+}
+
+static int xfrm_dev_feat_change(struct net_device *dev)
+{
+ if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
+ return NOTIFY_BAD;
+ else if (!(dev->features & NETIF_F_HW_ESP))
+ dev->xfrmdev_ops = NULL;
+
+ if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
+ !(dev->features & NETIF_F_HW_ESP))
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
+static int xfrm_dev_down(struct net_device *dev)
+{
+ if (dev->hw_features & NETIF_F_HW_ESP)
+ xfrm_dev_state_flush(dev_net(dev), dev, true);
+
+ xfrm_garbage_collect(dev_net(dev));
+
+ return NOTIFY_DONE;
+}
+
+static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ return xfrm_dev_register(dev);
+
+ case NETDEV_UNREGISTER:
+ return xfrm_dev_unregister(dev);
+
+ case NETDEV_FEAT_CHANGE:
+ return xfrm_dev_feat_change(dev);
+
+ case NETDEV_DOWN:
+ return xfrm_dev_down(dev);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xfrm_dev_notifier = {
+ .notifier_call = xfrm_dev_event,
+};
+
+void __net_init xfrm_dev_init(void)
+{
+ register_netdevice_notifier(&xfrm_dev_notifier);
+}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 46bdb4fbed0b..21c6cc965402 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -107,6 +107,8 @@ struct sec_path *secpath_dup(struct sec_path *src)
sp->len = 0;
sp->olen = 0;
+ memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));
+
if (src) {
int i;
@@ -207,8 +209,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
unsigned int family;
int decaps = 0;
int async = 0;
- struct xfrm_offload *xo;
bool xfrm_gro = false;
+ bool crypto_done = false;
+ struct xfrm_offload *xo = xfrm_offload(skb);
if (encap_type < 0) {
x = xfrm_input_state(skb);
@@ -220,9 +223,40 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
seq = XFRM_SKB_CB(skb)->seq.input.low;
goto resume;
}
+
/* encap_type < -1 indicates a GRO call. */
encap_type = 0;
seq = XFRM_SPI_SKB_CB(skb)->seq;
+
+ if (xo && (xo->flags & CRYPTO_DONE)) {
+ crypto_done = true;
+ x = xfrm_input_state(skb);
+ family = XFRM_SPI_SKB_CB(skb)->family;
+
+ if (!(xo->status & CRYPTO_SUCCESS)) {
+ if (xo->status &
+ (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
+ CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
+ CRYPTO_TUNNEL_AH_AUTH_FAILED |
+ CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
+
+ xfrm_audit_state_icvfail(x, skb,
+ x->type->proto);
+ x->stats.integrity_failed++;
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
+ goto drop;
+ }
+
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+ goto drop;
+ }
+
+ if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ goto drop;
+ }
+ }
+
goto lock;
}
@@ -311,7 +345,10 @@ lock:
skb_dst_force(skb);
dev_hold(skb->dev);
- nexthdr = x->type->input(x, skb);
+ if (crypto_done)
+ nexthdr = x->type_offload->input_tail(x, skb);
+ else
+ nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
return 0;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 8ba29fe58352..8c0b6722aaa8 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -99,12 +99,13 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
skb_dst_force(skb);
- /* Inner headers are invalid now. */
- skb->encapsulation = 0;
-
- err = x->type->output(x, skb);
- if (err == -EINPROGRESS)
- goto out;
+ if (xfrm_offload(skb)) {
+ x->type_offload->encap(x, skb);
+ } else {
+ err = x->type->output(x, skb);
+ if (err == -EINPROGRESS)
+ goto out;
+ }
resume:
if (err) {
@@ -200,8 +201,40 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
int err;
+ secpath_reset(skb);
+ skb->encapsulation = 0;
+
+ if (xfrm_dev_offload_ok(skb, x)) {
+ struct sec_path *sp;
+
+ sp = secpath_dup(skb->sp);
+ if (!sp) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ if (skb->sp)
+ secpath_put(skb->sp);
+ skb->sp = sp;
+ skb->encapsulation = 1;
+
+ sp->olen++;
+ sp->xvec[skb->sp->len++] = x;
+ xfrm_state_hold(x);
+
+ if (skb_is_gso(skb)) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+
+ return xfrm_output2(net, sk, skb);
+ }
+
+ if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
+ goto out;
+ }
+
if (skb_is_gso(skb))
return xfrm_output_gso(net, sk, skb);
@@ -214,6 +247,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
}
}
+out:
return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 236cbbc0ab9c..dd44ddc1aea5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -116,11 +116,10 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa
return afinfo;
}
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
- int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- int family)
+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ int family)
{
const struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
@@ -135,6 +134,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
return dst;
}
+EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
int tos, int oif,
@@ -2929,21 +2929,6 @@ void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
-static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- switch (event) {
- case NETDEV_DOWN:
- xfrm_garbage_collect(dev_net(dev));
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block xfrm_dev_notifier = {
- .notifier_call = xfrm_dev_event,
-};
-
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
@@ -3020,7 +3005,7 @@ static int __net_init xfrm_policy_init(struct net *net)
INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
if (net_eq(net, &init_net))
- register_netdevice_notifier(&xfrm_dev_notifier);
+ xfrm_dev_init();
return 0;
out_bydst:
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index cdc2e2e71bff..8b23c5bcf8e8 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -45,7 +45,8 @@ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
return seq_hi;
}
-
+EXPORT_SYMBOL(xfrm_replay_seqhi);
+;
static void xfrm_replay_notify(struct xfrm_state *x, int event)
{
struct km_event c;
@@ -558,6 +559,158 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
+#ifdef CONFIG_XFRM_OFFLOAD
+static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct net *net = xs_net(x);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ __u32 oseq = x->replay.oseq;
+
+ if (!xo)
+ return xfrm_replay_overflow(x, skb);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ if (!skb_is_gso(skb)) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
+ xo->seq.low = oseq;
+ } else {
+ XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
+ xo->seq.low = oseq + 1;
+ oseq += skb_shinfo(skb)->gso_segs;
+ }
+
+ XFRM_SKB_CB(skb)->seq.output.hi = 0;
+ xo->seq.hi = 0;
+ if (unlikely(oseq < x->replay.oseq)) {
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ }
+
+ x->replay.oseq = oseq;
+
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ struct net *net = xs_net(x);
+ __u32 oseq = replay_esn->oseq;
+
+ if (!xo)
+ return xfrm_replay_overflow_bmp(x, skb);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ if (!skb_is_gso(skb)) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
+ xo->seq.low = oseq;
+ } else {
+ XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
+ xo->seq.low = oseq + 1;
+ oseq += skb_shinfo(skb)->gso_segs;
+ }
+
+ XFRM_SKB_CB(skb)->seq.output.hi = 0;
+ xo->seq.hi = 0;
+ if (unlikely(oseq < replay_esn->oseq)) {
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ } else {
+ replay_esn->oseq = oseq;
+ }
+
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ struct net *net = xs_net(x);
+ __u32 oseq = replay_esn->oseq;
+ __u32 oseq_hi = replay_esn->oseq_hi;
+
+ if (!xo)
+ return xfrm_replay_overflow_esn(x, skb);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ if (!skb_is_gso(skb)) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
+ XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
+ xo->seq.low = oseq;
+ xo->seq.hi = oseq_hi;
+ } else {
+ XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
+ XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
+ xo->seq.low = oseq = oseq + 1;
+ xo->seq.hi = oseq_hi;
+ oseq += skb_shinfo(skb)->gso_segs;
+ }
+
+ if (unlikely(oseq < replay_esn->oseq)) {
+ XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
+ xo->seq.hi = oseq_hi;
+
+ if (replay_esn->oseq_hi == 0) {
+ replay_esn->oseq--;
+ replay_esn->oseq_hi--;
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ }
+ }
+
+ replay_esn->oseq = oseq;
+ replay_esn->oseq_hi = oseq_hi;
+
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static const struct xfrm_replay xfrm_replay_legacy = {
+ .advance = xfrm_replay_advance,
+ .check = xfrm_replay_check,
+ .recheck = xfrm_replay_check,
+ .notify = xfrm_replay_notify,
+ .overflow = xfrm_replay_overflow_offload,
+};
+
+static const struct xfrm_replay xfrm_replay_bmp = {
+ .advance = xfrm_replay_advance_bmp,
+ .check = xfrm_replay_check_bmp,
+ .recheck = xfrm_replay_check_bmp,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_offload_bmp,
+};
+
+static const struct xfrm_replay xfrm_replay_esn = {
+ .advance = xfrm_replay_advance_esn,
+ .check = xfrm_replay_check_esn,
+ .recheck = xfrm_replay_recheck_esn,
+ .notify = xfrm_replay_notify_esn,
+ .overflow = xfrm_replay_overflow_offload_esn,
+};
+#else
static const struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
@@ -581,6 +734,7 @@ static const struct xfrm_replay xfrm_replay_esn = {
.notify = xfrm_replay_notify_esn,
.overflow = xfrm_replay_overflow_esn,
};
+#endif
int xfrm_init_replay(struct xfrm_state *x)
{
@@ -595,10 +749,12 @@ int xfrm_init_replay(struct xfrm_state *x)
if (replay_esn->replay_window == 0)
return -EINVAL;
x->repl = &xfrm_replay_esn;
- } else
+ } else {
x->repl = &xfrm_replay_bmp;
- } else
+ }
+ } else {
x->repl = &xfrm_replay_legacy;
+ }
return 0;
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5a597dbbe564..fc3c5aa38754 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -251,6 +251,75 @@ static void xfrm_put_type(const struct xfrm_type *type)
module_put(type->owner);
}
+static DEFINE_SPINLOCK(xfrm_type_offload_lock);
+int xfrm_register_type_offload(const struct xfrm_type_offload *type,
+ unsigned short family)
+{
+ struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
+ const struct xfrm_type_offload **typemap;
+ int err = 0;
+
+ if (unlikely(afinfo == NULL))
+ return -EAFNOSUPPORT;
+ typemap = afinfo->type_offload_map;
+ spin_lock_bh(&xfrm_type_offload_lock);
+
+ if (likely(typemap[type->proto] == NULL))
+ typemap[type->proto] = type;
+ else
+ err = -EEXIST;
+ spin_unlock_bh(&xfrm_type_offload_lock);
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(xfrm_register_type_offload);
+
+int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
+ unsigned short family)
+{
+ struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
+ const struct xfrm_type_offload **typemap;
+ int err = 0;
+
+ if (unlikely(afinfo == NULL))
+ return -EAFNOSUPPORT;
+ typemap = afinfo->type_offload_map;
+ spin_lock_bh(&xfrm_type_offload_lock);
+
+ if (unlikely(typemap[type->proto] != type))
+ err = -ENOENT;
+ else
+ typemap[type->proto] = NULL;
+ spin_unlock_bh(&xfrm_type_offload_lock);
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(xfrm_unregister_type_offload);
+
+static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family)
+{
+ struct xfrm_state_afinfo *afinfo;
+ const struct xfrm_type_offload **typemap;
+ const struct xfrm_type_offload *type;
+
+ afinfo = xfrm_state_get_afinfo(family);
+ if (unlikely(afinfo == NULL))
+ return NULL;
+ typemap = afinfo->type_offload_map;
+
+ type = typemap[proto];
+ if ((type && !try_module_get(type->owner)))
+ type = NULL;
+
+ rcu_read_unlock();
+ return type;
+}
+
+static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
+{
+ module_put(type->owner);
+}
+
static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
@@ -365,10 +434,13 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
xfrm_put_mode(x->inner_mode_iaf);
if (x->outer_mode)
xfrm_put_mode(x->outer_mode);
+ if (x->type_offload)
+ xfrm_put_type_offload(x->type_offload);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
}
+ xfrm_dev_state_free(x);
security_xfrm_state_free(x);
kfree(x);
}
@@ -538,6 +610,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
net->xfrm.state_num--;
spin_unlock(&net->xfrm.xfrm_state_lock);
+ xfrm_dev_state_delete(x);
+
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
* is what we are dropping here.
@@ -582,12 +656,41 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
return err;
}
+
+static inline int
+xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
+{
+ int i, err = 0;
+
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ struct xfrm_state *x;
+ struct xfrm_state_offload *xso;
+
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
+ xso = &x->xso;
+
+ if (xso->dev == dev &&
+ (err = security_xfrm_state_delete(x)) != 0) {
+ xfrm_audit_state_delete(x, 0, task_valid);
+ return err;
+ }
+ }
+ }
+
+ return err;
+}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
return 0;
}
+
+static inline int
+xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
+{
+ return 0;
+}
#endif
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
@@ -630,6 +733,48 @@ out:
}
EXPORT_SYMBOL(xfrm_state_flush);
+int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
+{
+ int i, err = 0, cnt = 0;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
+ if (err)
+ goto out;
+
+ err = -ESRCH;
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ struct xfrm_state *x;
+ struct xfrm_state_offload *xso;
+restart:
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
+ xso = &x->xso;
+
+ if (!xfrm_state_kern(x) && xso->dev == dev) {
+ xfrm_state_hold(x);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ err = xfrm_state_delete(x);
+ xfrm_audit_state_delete(x, err ? 0 : 1,
+ task_valid);
+ xfrm_state_put(x);
+ if (!err)
+ cnt++;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ goto restart;
+ }
+ }
+ }
+ if (cnt)
+ err = 0;
+
+out:
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ return err;
+}
+EXPORT_SYMBOL(xfrm_dev_state_flush);
+
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -2077,6 +2222,8 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
if (x->type == NULL)
goto error;
+ x->type_offload = xfrm_get_type_offload(x->id.proto, family);
+
err = x->type->init_state(x);
if (err)
goto error;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5f691fd53a6c..ba74e5eeeeef 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -595,6 +595,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
+ if (attrs[XFRMA_OFFLOAD_DEV] &&
+ xfrm_dev_state_add(net, x, nla_data(attrs[XFRMA_OFFLOAD_DEV])))
+ goto error;
+
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
@@ -779,6 +783,23 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
return 0;
}
+static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
+{
+ struct xfrm_user_offload *xuo;
+ struct nlattr *attr;
+
+ attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
+ if (attr == NULL)
+ return -EMSGSIZE;
+
+ xuo = nla_data(attr);
+
+ xuo->ifindex = xso->dev->ifindex;
+ xuo->flags = xso->flags;
+
+ return 0;
+}
+
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
@@ -869,6 +890,10 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
&x->replay);
if (ret)
goto out;
+ if(x->xso.dev)
+ ret = copy_user_offload(&x->xso, skb);
+ if (ret)
+ goto out;
if (x->security)
ret = copy_sec_ctx(x->security, skb);
out:
@@ -2406,6 +2431,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
+ [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
};
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2623,6 +2649,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
+ if (x->xso.dev)
+ l += nla_total_size(sizeof(x->xso));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));