author    Steffen Klassert <steffen.klassert@secunet.com>    2022-12-08 10:25:30 +0100
committer Steffen Klassert <steffen.klassert@secunet.com>    2022-12-08 10:25:30 +0100
commit    e8a292d6a786cab048a83496687d56f121277b88 (patch)
tree      4c07e0bb50e3efc4a7817a58edf82189700b2c9c
parent    Merge branch 'Extend XFRM core to allow packet offload configuration' (diff)
parent    net/mlx5e: Generalize creation of default IPsec miss group and rule (diff)
Merge branch 'mlx5 IPsec packet offload support (Part I)'
Leon Romanovsky says:

============
This series follows the previously sent "Extend XFRM core to allow
packet offload configuration" series [1]. It is the first part, with
refactoring to mlx5 that allows us to natively extend the mlx5 IPsec
logic to support both crypto and packet offloads.
============

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                      |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c             |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c          |  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h          |  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c       | 629
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c  | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c                 |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h                 |   3
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                                     |  53
9 files changed, 543 insertions, 356 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65790ff58a74..2d77fb8a8a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index be74e1403328..25cd449e8aad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
aso_ctrl = &aso_wqe->aso_ctrl;
- memset(aso_ctrl, 0, sizeof(*aso_ctrl));
aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
MLX5_ASO_ALWAYS_TRUE << 4;
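The memset() removal above is only safe because the WQE getter now clears the descriptor itself; see the lib/aso.c hunk near the end of this diff, where mlx5_aso_get_wqe() gains the memset(). A minimal stand-alone model of that contract, with hypothetical names:

#include <string.h>

struct aso_wqe { unsigned char data[64]; };

static struct aso_wqe ring[8];
static unsigned int pc;

/* New contract: the getter hands back a zeroed WQE, so per-opcode
 * setup code (flow meter, IPsec) no longer needs its own memset().
 */
static struct aso_wqe *aso_get_wqe(void)
{
    struct aso_wqe *wqe = &ring[pc++ & 7];

    memset(wqe, 0, sizeof(*wqe));
    return wqe;
}

int main(void)
{
    return aso_get_wqe()->data[0];  /* always 0 after the getter */
}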
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index e6411533f911..c5bccc0df60d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -162,28 +162,21 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* esn */
if (sa_entry->esn_state.trigger) {
- attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+ attrs->esn_trigger = true;
attrs->esn = sa_entry->esn_state.esn;
- if (sa_entry->esn_state.overlap)
- attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+ attrs->esn_overlap = sa_entry->esn_state.overlap;
+ attrs->replay_window = x->replay_esn->replay_window;
}
- /* action */
- attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
- MLX5_ACCEL_ESP_ACTION_ENCRYPT :
- MLX5_ACCEL_ESP_ACTION_DECRYPT;
- /* flags */
- attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
- MLX5_ACCEL_ESP_FLAGS_TUNNEL;
-
+ attrs->dir = x->xso.dir;
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->is_ipv6 = (x->props.family != AF_INET);
+ attrs->family = x->props.family;
+ attrs->type = x->xso.type;
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -257,6 +250,17 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Unsupported xfrm offload type\n");
return -EINVAL;
}
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ x->replay_esn->replay_window != 64 &&
+ x->replay_esn->replay_window != 128 &&
+ x->replay_esn->replay_window != 256) {
+ netdev_info(netdev,
+ "Unsupported replay window size %u\n",
+ x->replay_esn->replay_window);
+ return -EINVAL;
+ }
+ }
return 0;
}
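For reference, the check above admits only the four window sizes the ASO replay-protection context can express; the accepted value is later programmed into the ASO context as replay_window / 64 (see the ipsec_offload.c hunk below). A minimal stand-alone version of the same rule:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the driver's rule: packet offload accepts only these
 * ESN replay window sizes.
 */
static bool replay_window_valid(unsigned int w)
{
    return w == 32 || w == 64 || w == 128 || w == 256;
}

int main(void)
{
    const unsigned int sizes[] = { 32, 48, 64, 128, 256, 512 };

    for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("%u: %s\n", sizes[i],
               replay_window_valid(sizes[i]) ? "ok" : "unsupported");
    return 0;
}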
@@ -303,7 +307,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (err)
goto err_xfrm;
- err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
+ err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
goto err_hw_ctx;
@@ -321,7 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
goto out;
err_add_rule:
- mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
@@ -341,10 +345,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
cancel_work_sync(&sa_entry->modify_work.work);
- mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
@@ -371,15 +374,26 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
if (!ipsec->wq)
goto err_wq;
+ if (mlx5_ipsec_device_caps(priv->mdev) &
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
+ ret = mlx5e_ipsec_aso_init(ipsec);
+ if (ret)
+ goto err_aso;
+ }
+
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
+ ipsec->fs = priv->fs;
priv->ipsec = ipsec;
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return;
err_fs_init:
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ mlx5e_ipsec_aso_cleanup(ipsec);
+err_aso:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
@@ -395,6 +409,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
priv->ipsec = NULL;
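The init path above gains a capability-gated ASO stage, and the error unwinding mirrors it in reverse. A condensed stand-alone model of that ordering (the stub helpers are hypothetical, not the driver's functions):

#include <stdio.h>

struct ipsec { int aso_ready; int fs_ready; };

static int aso_init(struct ipsec *i)     { i->aso_ready = 1; return 0; }
static void aso_cleanup(struct ipsec *i) { i->aso_ready = 0; }
static int fs_init(struct ipsec *i)      { i->fs_ready = 1; return 0; }

static int ipsec_init(struct ipsec *i, int pkt_offload_cap)
{
    int err;

    if (pkt_offload_cap) {      /* MLX5_IPSEC_CAP_PACKET_OFFLOAD */
        err = aso_init(i);
        if (err)
            return err;
    }

    err = fs_init(i);
    if (err)
        goto err_fs_init;
    return 0;

err_fs_init:
    if (pkt_offload_cap)        /* unwind in reverse order */
        aso_cleanup(i);
    return err;
}

int main(void)
{
    struct ipsec i = { 0 };

    return ipsec_init(&i, 1);
}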
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 4c47347d0ee2..990378d52fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -39,22 +39,11 @@
#include <linux/mlx5/device.h>
#include <net/xfrm.h>
#include <linux/idr.h>
+#include "lib/aso.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
struct aes_gcm_keymat {
u64 seq_iv;
@@ -66,7 +55,6 @@ struct aes_gcm_keymat {
};
struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
u32 esn;
u32 spi;
u32 flags;
@@ -82,12 +70,18 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
- u8 is_ipv6;
+ u8 dir : 2;
+ u8 esn_overlap : 1;
+ u8 esn_trigger : 1;
+ u8 type : 2;
+ u8 family;
+ u32 replay_window;
};
enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
};
struct mlx5e_priv;
@@ -102,17 +96,26 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_trailer;
};
-struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_tx;
+struct mlx5e_ipsec_aso {
+ u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ dma_addr_t dma_addr;
+ struct mlx5_aso *aso;
+};
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
struct workqueue_struct *wq;
- struct mlx5e_accel_fs_esp *rx_fs;
- struct mlx5e_ipsec_tx *tx_fs;
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_ipsec_rx *rx_ipv4;
+ struct mlx5e_ipsec_rx *rx_ipv6;
+ struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec_aso *aso;
};
struct mlx5e_ipsec_esn_state {
@@ -123,7 +126,7 @@ struct mlx5e_ipsec_esn_state {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *set_modify_hdr;
+ struct mlx5_modify_hdr *modify_hdr;
};
struct mlx5e_ipsec_modify_state_work {
@@ -155,10 +158,8 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -168,6 +169,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
+
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
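The header change above replaces the MLX5_ACCEL_ESP_FLAGS_* bits and the action enum with fields copied verbatim from the xfrm state (x->xso.dir, x->xso.type, x->props.family). A stand-alone illustration of the new tail of mlx5_accel_esp_xfrm_attrs, showing that the four bitfields pack into a single byte (bitfield layout is compiler-defined; this is a sketch, not the kernel struct):

#include <stdint.h>
#include <stdio.h>

struct attrs_tail {
    uint8_t dir : 2;        /* holds XFRM_DEV_OFFLOAD_IN / _OUT */
    uint8_t esn_overlap : 1;
    uint8_t esn_trigger : 1;
    uint8_t type : 2;       /* holds XFRM_DEV_OFFLOAD_CRYPTO / _PACKET */
    uint8_t family;         /* AF_INET / AF_INET6 */
    uint32_t replay_window;
};

int main(void)
{
    struct attrs_tail t = { .dir = 2, .type = 1, .replay_window = 64 };

    /* 2+1+1+2 bits share one byte; family and padding follow */
    printf("sizeof = %zu, dir=%u type=%u win=%u\n",
           sizeof(t), t.dir, t.type, t.replay_window);
    return 0;
}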
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index b859e4a4c744..5bc6f9d1f5a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -9,53 +9,57 @@
#define NUM_IPSEC_FTE BIT(15)
-enum accel_fs_esp_type {
- ACCEL_FS_ESP4,
- ACCEL_FS_ESP6,
- ACCEL_FS_ESP_NUM_TYPES,
+struct mlx5e_ipsec_ft {
+ struct mutex mutex; /* Protect changes to this struct */
+ struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *status;
+ u32 refcnt;
};
-struct mlx5e_ipsec_rx_err {
- struct mlx5_flow_table *ft;
+struct mlx5e_ipsec_miss {
+ struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *copy_modify_hdr;
};
-struct mlx5e_accel_fs_esp_prot {
- struct mlx5_flow_table *ft;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
- struct mlx5_flow_destination default_dest;
- struct mlx5e_ipsec_rx_err rx_err;
- u32 refcnt;
- struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
-};
-
-struct mlx5e_accel_fs_esp {
- struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
};
struct mlx5e_ipsec_tx {
+ struct mlx5e_ipsec_ft ft;
struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft;
- struct mutex mutex; /* Protect IPsec TX steering */
- u32 refcnt;
};
/* IPsec RX flow steering */
-static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+static enum mlx5_traffic_types family2tt(u32 family)
{
- if (i == ACCEL_FS_ESP4)
+ if (family == AF_INET)
return MLX5_TT_IPV4_IPSEC_ESP;
return MLX5_TT_IPV6_IPSEC_ESP;
}
-static int rx_err_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot,
- struct mlx5e_ipsec_rx_err *rx_err)
+static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
+ int level, int prio,
+ int max_num_groups)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = max_num_groups;
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+
+ return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
+static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
@@ -79,8 +83,8 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
- netdev_err(priv->netdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+ mlx5_core_err(mdev,
+ "fail to alloc ipsec copy modify_header_id err=%d\n", err);
goto out_spec;
}
@@ -88,17 +92,16 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
- &fs_prot->default_dest, 1);
+ fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
- netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+ mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
goto out;
}
kvfree(spec);
- rx_err->rule = fte;
- rx_err->copy_modify_hdr = modify_hdr;
+ rx->status.rule = fte;
+ rx->status.modify_hdr = modify_hdr;
return 0;
out:
@@ -108,13 +111,12 @@ out_spec:
return err;
}
-static int rx_fs_create(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot)
+static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_table *ft,
+ struct mlx5e_ipsec_miss *miss,
+ struct mlx5_flow_destination *dest)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = fs_prot->ft;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
@@ -130,359 +132,384 @@ static int rx_fs_create(struct mlx5e_priv *priv,
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- miss_group = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(miss_group)) {
- err = PTR_ERR(miss_group);
- netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
+ miss->group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss->group)) {
+ err = PTR_ERR(miss->group);
+ mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
+ err);
goto out;
}
- fs_prot->miss_group = miss_group;
/* Create miss rule */
- miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
- if (IS_ERR(miss_rule)) {
- mlx5_destroy_flow_group(fs_prot->miss_group);
- err = PTR_ERR(miss_rule);
- netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
+ miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(miss->rule)) {
+ mlx5_destroy_flow_group(miss->group);
+ err = PTR_ERR(miss->rule);
+ mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
+ err);
goto out;
}
- fs_prot->miss_rule = miss_rule;
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
-static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
-
- accel_esp = priv->ipsec->rx_fs;
-
- /* The netdev unreg already happened, so all offloaded rule are already removed */
- fs_prot = &accel_esp->fs_prot[type];
+ mlx5_del_flow_rules(rx->sa.rule);
+ mlx5_destroy_flow_group(rx->sa.group);
+ mlx5_destroy_flow_table(rx->ft.sa);
- mlx5_del_flow_rules(fs_prot->miss_rule);
- mlx5_destroy_flow_group(fs_prot->miss_group);
- mlx5_destroy_flow_table(fs_prot->ft);
-
- mlx5_del_flow_rules(fs_prot->rx_err.rule);
- mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
- mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_destroy_flow_table(rx->ft.status);
}
-static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
- struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5_flow_destination dest;
struct mlx5_flow_table *ft;
int err;
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- fs_prot->default_dest =
- mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
-
- ft_attr.max_fte = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_NIC_PRIO, 1);
if (IS_ERR(ft))
return PTR_ERR(ft);
- fs_prot->rx_err.ft = ft;
- err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
+ rx->ft.status = ft;
+
+ dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ err = ipsec_status_rule(mdev, rx, &dest);
if (err)
goto err_add;
/* Create FT */
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
+ 1);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
- fs_prot->ft = ft;
+ rx->ft.sa = ft;
- err = rx_fs_create(priv, fs_prot);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &dest);
if (err)
goto err_fs;
return 0;
err_fs:
- mlx5_destroy_flow_table(fs_prot->ft);
+ mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
- mlx5_del_flow_rules(fs_prot->rx_err.rule);
- mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
- mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+ mlx5_destroy_flow_table(rx->ft.status);
return err;
}
-static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec, u32 family)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5e_accel_fs_esp_prot *fs_prot;
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination dest = {};
- struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5e_ipsec_rx *rx;
int err = 0;
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- mutex_lock(&fs_prot->prot_mutex);
- if (fs_prot->refcnt)
+ if (family == AF_INET)
+ rx = ipsec->rx_ipv4;
+ else
+ rx = ipsec->rx_ipv6;
+
+ mutex_lock(&rx->ft.mutex);
+ if (rx->ft.refcnt)
goto skip;
/* create FT */
- err = rx_create(priv, type);
+ err = rx_create(mdev, ipsec, rx, family);
if (err)
goto out;
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
+ dest.ft = rx->ft.sa;
+ mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
skip:
- fs_prot->refcnt++;
+ rx->ft.refcnt++;
out:
- mutex_unlock(&fs_prot->prot_mutex);
- return err;
+ mutex_unlock(&rx->ft.mutex);
+ if (err)
+ return ERR_PTR(err);
+ return rx;
}
-static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ u32 family)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
-
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- mutex_lock(&fs_prot->prot_mutex);
- fs_prot->refcnt--;
- if (fs_prot->refcnt)
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5e_ipsec_rx *rx;
+
+ if (family == AF_INET)
+ rx = ipsec->rx_ipv4;
+ else
+ rx = ipsec->rx_ipv6;
+
+ mutex_lock(&rx->ft.mutex);
+ rx->ft.refcnt--;
+ if (rx->ft.refcnt)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
/* remove FT */
- rx_destroy(priv, type);
+ rx_destroy(mdev, rx);
out:
- mutex_unlock(&fs_prot->prot_mutex);
+ mutex_unlock(&rx->ft.mutex);
}
/* IPsec TX flow steering */
-static int tx_create(struct mlx5e_priv *priv)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5e_ipsec *ipsec = priv->ipsec;
struct mlx5_flow_table *ft;
- int err;
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
- return err;
- }
- ipsec->tx_fs->ft = ft;
+ ft = ipsec_ft_create(tx->ns, 0, 0, 1);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ tx->ft.sa = ft;
return 0;
}
-static int tx_ft_get(struct mlx5e_priv *priv)
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
int err = 0;
- mutex_lock(&tx_fs->mutex);
- if (tx_fs->refcnt)
+ mutex_lock(&tx->ft.mutex);
+ if (tx->ft.refcnt)
goto skip;
- err = tx_create(priv);
+ err = tx_create(mdev, tx);
if (err)
goto out;
skip:
- tx_fs->refcnt++;
+ tx->ft.refcnt++;
out:
- mutex_unlock(&tx_fs->mutex);
- return err;
+ mutex_unlock(&tx->ft.mutex);
+ if (err)
+ return ERR_PTR(err);
+ return tx;
}
-static void tx_ft_put(struct mlx5e_priv *priv)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
- mutex_lock(&tx_fs->mutex);
- tx_fs->refcnt--;
- if (tx_fs->refcnt)
+ mutex_lock(&tx->ft.mutex);
+ tx->ft.refcnt--;
+ if (tx->ft.refcnt)
goto out;
- mlx5_destroy_flow_table(tx_fs->ft);
+ mlx5_destroy_flow_table(tx->ft.sa);
out:
- mutex_unlock(&tx_fs->mutex);
+ mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
{
- u8 ip_version = attrs->is_ipv6 ? 6 : 4;
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- /* ip_version */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
- /* Non fragmented */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+}
+static void setup_fte_esp(struct mlx5_flow_spec *spec)
+{
/* ESP header */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+}
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+{
/* SPI number */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+}
+
+static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+{
+ /* Non fragmented */
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+}
+
+static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
+{
+ /* Add IPsec indicator in metadata_reg_a */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters.outer_esp_spi, attrs->spi);
-
- if (ip_version == 4) {
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &attrs->saddr.a4, 4);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &attrs->daddr.a4, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- } else {
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &attrs->saddr.a6, 16);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &attrs->daddr.a6, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- 0xff, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, 16);
+ misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
+static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+ struct mlx5_flow_act *flow_act)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_modify_hdr *modify_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ switch (dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+ break;
+ default:
+ return -EINVAL;
}
- flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
- flow_act->crypto.obj_id = ipsec_obj_id;
- flow_act->flags |= FLOW_ACT_NO_APPEND;
+ MLX5_SET(set_action_in, action, data, val);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
+ PTR_ERR(modify_hdr));
+ return PTR_ERR(modify_hdr);
+ }
+
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ return 0;
}
-static int rx_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
- struct mlx5_modify_hdr *modify_hdr = NULL;
- struct mlx5e_accel_fs_esp_prot *fs_prot;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_destination dest = {};
- struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
- enum accel_fs_esp_type type;
struct mlx5_flow_spec *spec;
- int err = 0;
+ struct mlx5e_ipsec_rx *rx;
+ int err;
- accel_esp = priv->ipsec->rx_fs;
- type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
- fs_prot = &accel_esp->fs_prot[type];
-
- err = rx_ft_get(priv, type);
- if (err)
- return err;
+ rx = rx_ft_get(mdev, ipsec, attrs->family);
+ if (IS_ERR(rx))
+ return PTR_ERR(rx);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- goto out_err;
+ goto err_alloc;
}
- setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- /* Set bit[31] ipsec marker */
- /* Set bit[23-0] ipsec_obj_id */
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ setup_fte_spi(spec, attrs->spi);
+ setup_fte_esp(spec);
+ setup_fte_no_frags(spec);
- modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
- netdev_err(priv->netdev,
- "fail to alloc ipsec set modify_header_id err=%d\n", err);
- modify_hdr = NULL;
- goto out_err;
- }
+ err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
+ if (err)
+ goto err_mod_header;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- flow_act.modify_hdr = modify_hdr;
- dest.ft = fs_prot->rx_err.ft;
- rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ dest.ft = rx->ft.status;
+ rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- attrs->action, err);
- goto out_err;
+ mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
+ goto err_add_flow;
}
+ kvfree(spec);
ipsec_rule->rule = rule;
- ipsec_rule->set_modify_hdr = modify_hdr;
- goto out;
-
-out_err:
- if (modify_hdr)
- mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
- rx_ft_put(priv, type);
+ ipsec_rule->modify_hdr = flow_act.modify_hdr;
+ return 0;
-out:
+err_add_flow:
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
kvfree(spec);
+err_alloc:
+ rx_ft_put(mdev, ipsec, attrs->family);
return err;
}
-static int tx_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct mlx5e_ipsec_tx *tx;
int err = 0;
- err = tx_ft_get(priv);
- if (err)
- return err;
+ tx = tx_ft_get(mdev, ipsec);
+ if (IS_ERR(tx))
+ return PTR_ERR(tx);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -490,23 +517,25 @@ static int tx_add_rule(struct mlx5e_priv *priv,
goto out;
}
- setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
- &flow_act);
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- /* Add IPsec indicator in metadata_reg_a */
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_IPSEC);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_IPSEC);
+ setup_fte_spi(spec, attrs->spi);
+ setup_fte_esp(spec);
+ setup_fte_no_frags(spec);
+ setup_fte_reg_a(spec);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
- rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+ rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- sa_entry->attrs.action, err);
+ mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
goto out;
}
@@ -515,65 +544,55 @@ static int tx_add_rule(struct mlx5e_priv *priv,
out:
kvfree(spec);
if (err)
- tx_ft_put(priv);
+ tx_ft_put(ipsec);
return err;
}
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- return tx_add_rule(priv, sa_entry);
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+ return tx_add_rule(sa_entry);
- return rx_add_rule(priv, sa_entry);
+ return rx_add_rule(sa_entry);
}
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_del_flow_rules(ipsec_rule->rule);
- if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
- tx_ft_put(priv);
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
+ tx_ft_put(sa_entry->ipsec);
return;
}
- mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
- rx_ft_put(priv,
- sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+ rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
}
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
- enum accel_fs_esp_type i;
-
- if (!ipsec->rx_fs)
+ if (!ipsec->tx)
return;
- mutex_destroy(&ipsec->tx_fs->mutex);
- WARN_ON(ipsec->tx_fs->refcnt);
- kfree(ipsec->tx_fs);
+ mutex_destroy(&ipsec->tx->ft.mutex);
+ WARN_ON(ipsec->tx->ft.refcnt);
+ kfree(ipsec->tx);
- accel_esp = ipsec->rx_fs;
- for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
- fs_prot = &accel_esp->fs_prot[i];
- mutex_destroy(&fs_prot->prot_mutex);
- WARN_ON(fs_prot->refcnt);
- }
- kfree(ipsec->rx_fs);
+ mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
+ WARN_ON(ipsec->rx_ipv4->ft.refcnt);
+ kfree(ipsec->rx_ipv4);
+
+ mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
+ WARN_ON(ipsec->rx_ipv6->ft.refcnt);
+ kfree(ipsec->rx_ipv6);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_namespace *ns;
- enum accel_fs_esp_type i;
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -581,26 +600,28 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
if (!ns)
return -EOPNOTSUPP;
- ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
- if (!ipsec->tx_fs)
+ ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
+ if (!ipsec->tx)
return -ENOMEM;
- ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
- if (!ipsec->rx_fs)
- goto err_rx;
+ ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
+ if (!ipsec->rx_ipv4)
+ goto err_rx_ipv4;
- mutex_init(&ipsec->tx_fs->mutex);
- ipsec->tx_fs->ns = ns;
+ ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
+ if (!ipsec->rx_ipv6)
+ goto err_rx_ipv6;
- accel_esp = ipsec->rx_fs;
- for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
- fs_prot = &accel_esp->fs_prot[i];
- mutex_init(&fs_prot->prot_mutex);
- }
+ mutex_init(&ipsec->tx->ft.mutex);
+ mutex_init(&ipsec->rx_ipv4->ft.mutex);
+ mutex_init(&ipsec->rx_ipv6->ft.mutex);
+ ipsec->tx->ns = ns;
return 0;
-err_rx:
- kfree(ipsec->tx_fs);
+err_rx_ipv6:
+ kfree(ipsec->rx_ipv4);
+err_rx_ipv4:
+ kfree(ipsec->tx);
return err;
}
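The RX and TX paths above share one pattern: rx_ft_get()/tx_ft_get() build and connect the flow tables on first use and only bump a refcount afterwards, while the matching put tears everything down on the last reference; the getters now also return the rx/tx object (or ERR_PTR) instead of an int, so rule-add paths can hold it directly. A stand-alone model of the refcounting alone (pthreads, hypothetical names, error paths omitted):

#include <pthread.h>
#include <stdio.h>

struct ft {
    pthread_mutex_t mutex;  /* protects refcnt, like ft.mutex above */
    unsigned int refcnt;
};

static void ft_get(struct ft *ft)
{
    pthread_mutex_lock(&ft->mutex);
    if (!ft->refcnt)
        puts("create table, connect steering");
    ft->refcnt++;
    pthread_mutex_unlock(&ft->mutex);
}

static void ft_put(struct ft *ft)
{
    pthread_mutex_lock(&ft->mutex);
    if (!--ft->refcnt)
        puts("disconnect steering, destroy table");
    pthread_mutex_unlock(&ft->mutex);
}

int main(void)
{
    struct ft ft = { .mutex = PTHREAD_MUTEX_INITIALIZER };

    ft_get(&ft);    /* first SA: builds the table */
    ft_get(&ft);    /* second SA: reuses it */
    ft_put(&ft);
    ft_put(&ft);    /* last SA: tears it down */
    return 0;
}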
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 792724ce7336..fc88454aaf8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
+#include "en.h"
#include "ipsec.h"
#include "lib/mlx5.h"
@@ -31,6 +32,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
if (!caps)
return 0;
@@ -46,6 +53,38 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
+static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ void *aso_ctx;
+
+ aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+ if (attrs->esn_trigger) {
+ MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
+
+ if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+ MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+ attrs->replay_window / 64);
+ MLX5_SET(ipsec_aso, aso_ctx, mode,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+ }
+ }
+
+ /* ASO context */
+ MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
+ MLX5_SET(ipsec_obj, obj, full_offload, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
+ /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
+ * in flow steering to perform matching against. Please be
+ * aware that this register was chosen arbitrarily and can't
+ * be used in other places as long as IPsec packet offload is
+ * active.
+ */

+ MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+}
+
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -54,6 +93,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
+ struct mlx5e_hw_objs *res;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
@@ -66,11 +106,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+ if (attrs->esn_trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
}
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -81,6 +120,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ res = &mdev->mlx5e_res.hw_objs;
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
sa_entry->ipsec_obj_id =
@@ -152,7 +195,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
void *obj;
int err;
- if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+ if (!attrs->esn_trigger)
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -183,8 +226,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -203,3 +245,56 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct mlx5e_hw_objs *res;
+ struct device *pdev;
+ int err;
+
+ aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
+ if (!aso)
+ return -ENOMEM;
+
+ res = &mdev->mlx5e_res.hw_objs;
+
+ pdev = mlx5_core_dma_dev(mdev);
+ aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(pdev, aso->dma_addr);
+ if (err)
+ goto err_dma;
+
+ aso->aso = mlx5_aso_create(mdev, res->pdn);
+ if (IS_ERR(aso->aso)) {
+ err = PTR_ERR(aso->aso);
+ goto err_aso_create;
+ }
+
+ ipsec->aso = aso;
+ return 0;
+
+err_aso_create:
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+err_dma:
+ kfree(aso);
+ return err;
+}
+
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct device *pdev;
+
+ aso = ipsec->aso;
+ pdev = mlx5_core_dma_dev(mdev);
+
+ mlx5_aso_destroy(aso->aso);
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+ kfree(aso);
+}
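mlx5e_ipsec_packet_setup() above encodes a small decision table: ESN arming when the trigger is set, replay protection (with window_sz = replay_window / 64) only on the RX side, and sequence-number increment only on the TX side. A stand-alone model of those branches, with prints standing in for the MLX5_SET() calls:

#include <stdbool.h>
#include <stdio.h>

enum dir { DIR_IN, DIR_OUT };   /* stands in for XFRM_DEV_OFFLOAD_IN/OUT */

static void packet_setup(enum dir dir, bool esn_trigger, unsigned int window)
{
    if (esn_trigger) {
        puts("esn_event_arm = 1");
        if (dir == DIR_IN)
            printf("mode = REPLAY_PROTECTION, window_sz = %u\n",
                   window / 64);
    }

    puts("valid = 1, aso_return_reg = REG_C_4_5");
    if (dir == DIR_OUT)
        puts("mode = INC_SN");
}

int main(void)
{
    packet_setup(DIR_IN, true, 128);
    packet_setup(DIR_OUT, true, 0);
    return 0;
}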
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 0f9e4f01c85a..5a80fb7dbbca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -353,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
cseg->general_id = cpu_to_be32(obj_id);
}
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
+ struct mlx5_aso_wqe *wqe;
u16 pi;
pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
- return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+ wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+ memset(wqe, 0, sizeof(*wqe));
+ return wqe;
}
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
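Beyond moving the memset() into the getter, the return type changes from void * to struct mlx5_aso_wqe *, which turns silent pointer reinterpretation at call sites into a compiler diagnostic. A stand-alone illustration with hypothetical types:

struct wqe { unsigned long ctrl[4]; };
struct ctrl_seg { int opcode; };

static struct wqe slot;

static void *get_wqe_untyped(void)       { return &slot; }
static struct wqe *get_wqe_typed(void)   { return &slot; }

int main(void)
{
    struct ctrl_seg *oops = get_wqe_untyped(); /* compiles silently */
    struct wqe *ok = get_wqe_typed();
    /* struct ctrl_seg *bad = get_wqe_typed();   now a warning */

    (void)oops;
    (void)ok;
    return 0;
}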
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index 2d40dcf9d42e..c8fc3c838642 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -71,13 +71,14 @@ enum {
};
enum {
+ MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
struct mlx5_aso_wqe *aso_wqe,
u32 obj_id, u32 opc_mode);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5a4e914e2a6f..300b56ea5ff4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -445,7 +445,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x6];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reserved_at_41[0x2];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reserved_at_44[0x2];
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
@@ -638,8 +641,10 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x8];
u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 reserved_at_1b8[0x8];
- u8 reserved_at_1b0[0x50];
+ u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -6384,6 +6389,9 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -11563,6 +11571,41 @@ enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
};
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
+};
+
struct mlx5_ifc_ipsec_obj_bits {
u8 modify_field_select[0x40];
u8 full_offload[0x1];
@@ -11584,7 +11627,11 @@ struct mlx5_ifc_ipsec_obj_bits {
u8 implicit_iv[0x40];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
};
struct mlx5_ifc_create_ipsec_obj_in_bits {
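The field widths in mlx5_ifc_ipsec_aso_bits are PRM-style bit counts; they sum to 512 bits, i.e. the 64-byte ASO context that mlx5e_ipsec_aso_init() sizes with MLX5_ST_SZ_BYTES(ipsec_aso) and DMA-maps. A quick stand-alone check of that arithmetic (assuming widths are in bits, as elsewhere in mlx5_ifc.h):

#include <stdio.h>

int main(void)
{
    unsigned int bits = 1 + 1 + 2 + 2 + 1 + 1 + 1 + 1 + 0x16 /* flags */
                      + 0x20   /* remove_flow_pkt_cnt */
                      + 0x20   /* remove_flow_soft_lft */
                      + 0x80   /* reserved */
                      + 0x20   /* mode_parameter */
                      + 0x100; /* replay_protection_window */

    printf("ipsec_aso = %u bits = %u bytes\n", bits, bits / 8);
    return 0;
}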