-rw-r--r--  Documentation/networking/device_drivers/mellanox/mlx5.rst | 33
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c | 130
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c | 21
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 109
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 112
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 223
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1588
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 480
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 395
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 570
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 770
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 1243
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 976
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2308
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 294
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 1060
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 600
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 604
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 212
-rw-r--r--  include/linux/mlx5/device.h | 7
-rw-r--r--  include/linux/mlx5/driver.h | 14
-rw-r--r--  include/linux/mlx5/eswitch.h | 8
-rw-r--r--  include/linux/mlx5/fs.h | 33
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 235
44 files changed, 12464 insertions, 371 deletions
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index b30a63dbf4b7..d071c6b49e1f 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -11,6 +11,7 @@ Contents
- `Enabling the driver and kconfig options`_
- `Devlink info`_
+- `Devlink parameters`_
- `Devlink health reporters`_
- `mlx5 tracepoints`_
@@ -122,6 +123,38 @@ User command example::
stored:
fw.version 16.26.0100
+Devlink parameters
+==================
+
+flow_steering_mode: Device flow steering mode
+---------------------------------------------
+The flow steering mode parameter controls the flow steering mode of the driver.
+Two modes are supported:
+1. 'dmfs' - Device managed flow steering.
+2. 'smfs' - Software/Driver managed flow steering.
+
+In DMFS mode, the HW steering entities are created and managed through the
+firmware.
+In SMFS mode, the HW steering entities are created and managed by the
+driver directly in hardware, without firmware intervention.
+
+SMFS mode is faster and provides a better rule insertion rate than the default DMFS mode.
+
+User command examples:
+
+- Set SMFS flow steering mode::
+
+ $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
+
+- Read device flow steering mode::
+
+ $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
+ pci/0000:06:00.0:
+ name flow_steering_mode type driver-specific
+ values:
+ cmode runtime value smfs
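+
+- Set the default DMFS flow steering mode (shown for completeness; uses the
+  same command form as above)::
+
+ $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "dmfs" cmode runtime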
+
+
Devlink health reporters
========================
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 6c8645033102..4937947400cd 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -186,136 +186,6 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
return err;
}
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t *addr, u32 *obj_id)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
- u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
- unsigned long *block_map;
- u64 icm_start_addr;
- u32 log_icm_size;
- u32 num_blocks;
- u32 max_blocks;
- u64 block_idx;
- void *sw_icm;
- int ret;
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
- MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
- steering_sw_icm_start_address);
- log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
- block_map = dm->steering_sw_icm_alloc_blocks;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
- header_modify_sw_icm_start_address);
- log_icm_size = MLX5_CAP_DEV_MEM(dev,
- log_header_modify_sw_icm_size);
- block_map = dm->header_modify_sw_icm_alloc_blocks;
- break;
- default:
- return -EINVAL;
- }
-
- num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
- spin_lock(&dm->lock);
- block_idx = bitmap_find_next_zero_area(block_map,
- max_blocks,
- 0,
- num_blocks, 0);
-
- if (block_idx < max_blocks)
- bitmap_set(block_map,
- block_idx, num_blocks);
-
- spin_unlock(&dm->lock);
-
- if (block_idx >= max_blocks)
- return -ENOMEM;
-
- sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
- icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
- icm_start_addr);
- MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
-
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- spin_lock(&dm->lock);
- bitmap_clear(block_map,
- block_idx, num_blocks);
- spin_unlock(&dm->lock);
-
- return ret;
- }
-
- *addr = icm_start_addr;
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
- return 0;
-}
-
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t addr, u32 obj_id)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
- u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
- unsigned long *block_map;
- u32 num_blocks;
- u64 start_idx;
- int err;
-
- num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- start_idx =
- (addr - MLX5_CAP64_DEV_MEM(
- dev, steering_sw_icm_start_address)) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- block_map = dm->steering_sw_icm_alloc_blocks;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- start_idx =
- (addr -
- MLX5_CAP64_DEV_MEM(
- dev, header_modify_sw_icm_start_address)) >>
- MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
- block_map = dm->header_modify_sw_icm_alloc_blocks;
- break;
- default:
- return -EINVAL;
- }
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- spin_lock(&dm->lock);
- bitmap_clear(block_map,
- start_idx, num_blocks);
- spin_unlock(&dm->lock);
-
- return 0;
-}
-
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 0572dcba6eae..169cab4915e3 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -65,8 +65,4 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t *addr, u32 *obj_id);
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
- u16 uid, phys_addr_t addr, u32 obj_id);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b8841355fcd5..1c8f04abee0c 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -322,11 +322,11 @@ void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
switch (maction->flow_action_raw.sub_type) {
case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
- maction->flow_action_raw.action_id);
+ maction->flow_action_raw.modify_hdr);
break;
case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
- maction->flow_action_raw.action_id);
+ maction->flow_action_raw.pkt_reformat);
break;
case MLX5_IB_FLOW_ACTION_DECAP:
break;
@@ -352,10 +352,11 @@ mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
if (!maction)
return ERR_PTR(-ENOMEM);
- ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
- &maction->flow_action_raw.action_id);
+ maction->flow_action_raw.modify_hdr =
+ mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
- if (ret) {
+ if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
+ ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
kfree(maction);
return ERR_PTR(ret);
}
@@ -479,11 +480,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
if (ret)
return ret;
- ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
- in, namespace,
- &maction->flow_action_raw.action_id);
- if (ret)
+ maction->flow_action_raw.pkt_reformat =
+ mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
+ in, namespace);
+ if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
+ ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
return ret;
+ }
maction->flow_action_raw.sub_type =
MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0569bcab02d4..4e9f1507ffd9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2280,6 +2280,7 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
return -EOPNOTSUPP;
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
if (!capable(CAP_SYS_RAWIO) ||
!capable(CAP_NET_RAW))
return -EPERM;
@@ -2344,20 +2345,20 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
struct uverbs_attr_bundle *attrs,
int type)
{
- struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
u64 act_size;
int err;
/* Allocation size must be a multiple of the basic block size
* and a power of 2.
*/
- act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
- err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
- to_mucontext(ctx)->devx_uid, &dm->dev_addr,
- &dm->icm_dm.obj_id);
+ err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+ to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+ &dm->icm_dm.obj_id);
if (err)
return err;
@@ -2365,9 +2366,9 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
&dm->dev_addr, sizeof(dm->dev_addr));
if (err)
- mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
- to_mucontext(ctx)->devx_uid,
- dm->dev_addr, dm->icm_dm.obj_id);
+ mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
+ to_mucontext(ctx)->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
return err;
}
@@ -2407,8 +2408,14 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
attrs);
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ err = handle_alloc_dm_sw_icm(context, dm,
+ attr, attrs,
+ MLX5_SW_ICM_TYPE_STEERING);
+ break;
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+ err = handle_alloc_dm_sw_icm(context, dm,
+ attr, attrs,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY);
break;
default:
err = -EOPNOTSUPP;
@@ -2428,6 +2435,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
struct mlx5_ib_dm *dm = to_mdm(ibdm);
u32 page_idx;
@@ -2439,19 +2447,23 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
if (ret)
return ret;
- page_idx = (dm->dev_addr -
- pci_resource_start(dm_db->dev->pdev, 0) -
- MLX5_CAP64_DEV_MEM(dm_db->dev,
- memic_bar_start_addr)) >>
- PAGE_SHIFT;
+ page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
+ MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
+ PAGE_SHIFT;
bitmap_clear(ctx->dm_pages, page_idx,
DIV_ROUND_UP(dm->size, PAGE_SIZE));
break;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
+ dm->size, ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
+ if (ret)
+ return ret;
+ break;
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
- ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
+ ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ dm->size, ctx->devx_uid, dm->dev_addr,
+ dm->icm_dm.obj_id);
if (ret)
return ret;
break;
@@ -2646,7 +2658,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return -EINVAL;
action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- action->modify_id = maction->flow_action_raw.action_id;
+ action->modify_hdr =
+ maction->flow_action_raw.modify_hdr;
return 0;
}
if (maction->flow_action_raw.sub_type ==
@@ -2663,8 +2676,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
return -EINVAL;
action->action |=
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- action->reformat_id =
- maction->flow_action_raw.action_id;
+ action->pkt_reformat =
+ maction->flow_action_raw.pkt_reformat;
return 0;
}
/* fall through */
@@ -6096,8 +6109,6 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
- struct mlx5_core_dev *mdev = dev->mdev;
-
mlx5_ib_cleanup_multiport_master(dev);
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
srcu_barrier(&dev->mr_srcu);
@@ -6105,29 +6116,11 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
}
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
-
- WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
- !bitmap_empty(
- dev->dm.steering_sw_icm_alloc_blocks,
- BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
- kfree(dev->dm.steering_sw_icm_alloc_blocks);
-
- WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
- !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
- BIT(MLX5_CAP_DEV_MEM(
- mdev, log_header_modify_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
- kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}
static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- u64 header_modify_icm_blocks = 0;
- u64 steering_icm_blocks = 0;
int err;
int i;
@@ -6174,51 +6167,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
- if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
- if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
- steering_icm_blocks =
- BIT(MLX5_CAP_DEV_MEM(mdev,
- log_steering_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
- dev->dm.steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
- if (!dev->dm.steering_sw_icm_alloc_blocks)
- goto err_mp;
- }
-
- if (MLX5_CAP64_DEV_MEM(mdev,
- header_modify_sw_icm_start_address)) {
- header_modify_icm_blocks = BIT(
- MLX5_CAP_DEV_MEM(
- mdev, log_header_modify_sw_icm_size) -
- MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
- dev->dm.header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
- if (!dev->dm.header_modify_sw_icm_alloc_blocks)
- goto err_dm;
- }
- }
-
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
err = init_srcu_struct(&dev->mr_srcu);
if (err)
- goto err_dm;
+ goto err_mp;
}
return 0;
-err_dm:
- kfree(dev->dm.steering_sw_icm_alloc_blocks);
- kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
-
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 9ae587b74b12..125a507c10ed 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -868,7 +868,10 @@ struct mlx5_ib_flow_action {
struct {
struct mlx5_ib_dev *dev;
u32 sub_type;
- u32 action_id;
+ union {
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ };
} flow_action_raw;
};
};
@@ -881,8 +884,6 @@ struct mlx5_dm {
*/
spinlock_t lock;
DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
- unsigned long *steering_sw_icm_alloc_blocks;
- unsigned long *header_modify_sw_icm_alloc_blocks;
};
struct mlx5_read_counters_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 37fef8cd25e3..0d8dd885b7d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -154,3 +154,10 @@ config MLX5_EN_TLS
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
+
+config MLX5_SW_STEERING
+ bool "Mellanox Technologies software-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for software-managed steering in the NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index f4de9ccb5df1..5708fcc079ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
- lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \
+ lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o
#
@@ -67,3 +67,10 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
en_accel/ktls.o en_accel/ktls_tx.o
+
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
+ steering/dr_matcher.o steering/dr_rule.o \
+ steering/dr_icm_pool.o steering/dr_crc32.o \
+ steering/dr_ste.o steering/dr_send.o \
+ steering/dr_cmd.o steering/dr_fw.o \
+ steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index a400f4430c28..7bf7b6fbc776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -4,6 +4,7 @@
#include <devlink.h>
#include "mlx5_core.h"
+#include "fs_core.h"
#include "eswitch.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
@@ -107,12 +108,121 @@ void mlx5_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
+static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ char *value = val.vstr;
+ int err = 0;
+
+ if (!strcmp(value, "dmfs")) {
+ return 0;
+ } else if (!strcmp(value, "smfs")) {
+ u8 eswitch_mode;
+ bool smfs_cap;
+
+ eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch);
+ smfs_cap = mlx5_fs_dr_is_supported(dev);
+
+ if (!smfs_cap) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported by current device");
+ }
+
+ else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported when eswitch offlaods enabled.");
+ err = -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum mlx5_flow_steering_mode mode;
+
+ if (!strcmp(ctx->val.vstr, "smfs"))
+ mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ dev->priv.steering->mode = mode;
+
+ return 0;
+}
+
+static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ strcpy(ctx->val.vstr, "smfs");
+ else
+ strcpy(ctx->val.vstr, "dmfs");
+ return 0;
+}
+
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+};
+
+static const struct devlink_param mlx5_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
+ mlx5_devlink_fs_mode_validate),
+};
+
+static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
+ strcpy(value.vstr, "dmfs");
+ else
+ strcpy(value.vstr, "smfs");
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ value);
+}
+
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
- return devlink_register(devlink, dev);
+ int err;
+
+ err = devlink_register(devlink, dev);
+ if (err)
+ return err;
+
+ err = devlink_params_register(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
+ if (err)
+ goto params_reg_err;
+ mlx5_devlink_set_params_init_values(devlink);
+ devlink_params_publish(devlink);
+ return 0;
+
+params_reg_err:
+ devlink_unregister(devlink);
+ return err;
}
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 4c4620db3d31..f8ee18b4da6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -291,14 +291,14 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
*/
goto out;
}
-
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
goto destroy_neigh_entry;
+ }
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
@@ -407,13 +407,14 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
goto destroy_neigh_entry;
+ }
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index a0ae5069d8c3..8e512216deb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -161,7 +161,7 @@ struct mlx5e_encap_entry {
*/
struct hlist_node encap_hlist;
struct list_head flows;
- u32 encap_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5581a8045ede..30d26eba75a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,7 +61,7 @@
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
u32 hairpin_tirn;
u8 match_level;
struct mlx5_flow_table *hairpin_ft;
@@ -201,7 +201,7 @@ struct mlx5e_mod_hdr_entry {
struct mod_hdr_key key;
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
refcount_t refcnt;
struct completion res_ready;
@@ -334,7 +334,7 @@ static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
WARN_ON(!list_empty(&mh->flows));
if (mh->compl_result > 0)
- mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+ mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
kfree(mh);
}
@@ -395,11 +395,11 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
mutex_unlock(&tbl->lock);
- err = mlx5_modify_header_alloc(priv->mdev, namespace,
- mh->key.num_actions,
- mh->key.actions,
- &mh->mod_hdr_id);
- if (err) {
+ mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
+ mh->key.num_actions,
+ mh->key.actions);
+ if (IS_ERR(mh->modify_hdr)) {
+ err = PTR_ERR(mh->modify_hdr);
mh->compl_result = err;
goto alloc_header_err;
}
@@ -412,9 +412,9 @@ attach_flow:
list_add(&flow->mod_hdr, &mh->flows);
spin_unlock(&mh->flows_lock);
if (mlx5e_is_eswitch_flow(flow))
- flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
+ flow->esw_attr->modify_hdr = mh->modify_hdr;
else
- flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
+ flow->nic_attr->modify_hdr = mh->modify_hdr;
return 0;
@@ -906,7 +906,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
- .reformat_id = 0,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
@@ -947,7 +946,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- flow_act.modify_id = attr->mod_hdr_id;
+ flow_act.modify_hdr = attr->modify_hdr;
kfree(parse_attr->mod_hdr_actions);
if (err)
return err;
@@ -1304,14 +1303,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
- err);
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
+ PTR_ERR(e->pkt_reformat));
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -1326,7 +1324,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
esw_attr = flow->esw_attr;
spec = &esw_attr->parse_attr->spec;
- esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
+ esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
@@ -1395,7 +1393,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -1561,7 +1559,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
kfree(e->encap_header);
@@ -1896,7 +1894,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
} else if (*match_level != MLX5_MATCH_NONE) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ /* cvlan_tag enabled in match criteria and
+ * disabled in match value means both S & C tags
+ * don't exist (untagged of both)
+ */
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
@@ -3045,7 +3046,7 @@ attach_flow:
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->dests[out_index].encap_id = e->encap_id;
+ attr->dests[out_index].pkt_reformat = e->pkt_reformat;
attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index aba9e7a6ad3c..6bd6f5895244 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -69,7 +69,7 @@ struct vport_ingress {
struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp;
- int modify_metadata_id;
+ struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
@@ -153,6 +153,7 @@ struct mlx5_eswitch_fdb {
} legacy;
struct offloads_fdb {
+ struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *peer_miss_grp;
@@ -385,11 +386,11 @@ struct mlx5_esw_flow_attr {
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
+ struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
- u32 encap_id;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
- u32 mod_hdr_id;
+ struct mlx5_modify_hdr *modify_hdr;
u8 inner_match_level;
u8 outer_match_level;
struct mlx5_fc *counter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7d3582ee66b7..afa623b15a38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -190,10 +190,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.reformat_id = attr->dests[j].encap_id;
+ flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.reformat_id =
- attr->dests[j].encap_id;
+ dest[i].vport.pkt_reformat =
+ attr->dests[j].pkt_reformat;
}
i++;
}
@@ -213,7 +213,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- flow_act.modify_id = attr->mod_hdr_id;
+ flow_act.modify_hdr = attr->modify_hdr;
fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
if (IS_ERR(fdb)) {
@@ -276,7 +276,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.reformat_id = attr->dests[i].encap_id;
+ dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -1068,6 +1068,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
err = -EOPNOTSUPP;
goto ns_err;
}
+ esw->fdb_table.offloads.ns = root_ns;
+ err = mlx5_flow_namespace_set_mode(root_ns,
+ esw->dev->priv.steering->mode);
+ if (err) {
+ esw_warn(dev, "Failed to set FDB namespace steering mode\n");
+ goto ns_err;
+ }
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
@@ -1207,6 +1214,8 @@ send_vport_err:
esw_destroy_offloads_fast_fdb_tables(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
+ /* Holds true only as long as DMFS is the default */
+ mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
kvfree(flow_group_in);
return err;
@@ -1226,6 +1235,9 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
esw_destroy_offloads_fast_fdb_tables(esw);
+ /* Holds true only as long as DMFS is the default */
+ mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
+ MLX5_FLOW_STEERING_MODE_DMFS);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
@@ -1623,13 +1635,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
esw_del_fdb_peer_miss_rules(esw);
}
+static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw,
+ bool pair)
+{
+ struct mlx5_flow_root_namespace *peer_ns;
+ struct mlx5_flow_root_namespace *ns;
+ int err;
+
+ peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
+ ns = esw->dev->priv.steering->fdb_root_ns;
+
+ if (pair) {
+ err = mlx5_flow_namespace_set_peer(ns, peer_ns);
+ if (err)
+ return err;
+
+ err = mlx5_flow_namespace_set_peer(peer_ns, ns);
+ if (err) {
+ mlx5_flow_namespace_set_peer(ns, NULL);
+ return err;
+ }
+ } else {
+ mlx5_flow_namespace_set_peer(ns, NULL);
+ mlx5_flow_namespace_set_peer(peer_ns, NULL);
+ }
+
+ return 0;
+}
+
static int mlx5_esw_offloads_devcom_event(int event,
void *my_data,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
- struct mlx5_eswitch *peer_esw = event_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ struct mlx5_eswitch *peer_esw = event_data;
int err;
switch (event) {
@@ -1638,9 +1679,12 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
- err = mlx5_esw_offloads_pair(esw, peer_esw);
+ err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
+ err = mlx5_esw_offloads_pair(esw, peer_esw);
+ if (err)
+ goto err_peer;
err = mlx5_esw_offloads_pair(peer_esw, esw);
if (err)
@@ -1656,6 +1700,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
mlx5_esw_offloads_unpair(peer_esw);
mlx5_esw_offloads_unpair(esw);
+ mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
break;
}
@@ -1663,7 +1708,8 @@ static int mlx5_esw_offloads_devcom_event(int event,
err_pair:
mlx5_esw_offloads_unpair(esw);
-
+err_peer:
+ mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
event, err);
@@ -1734,7 +1780,7 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
if (vport->ingress.modify_metadata_rule) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_id = vport->ingress.modify_metadata_id;
+ flow_act.modify_hdr = vport->ingress.modify_metadata;
}
vport->ingress.allow_rule =
@@ -1770,9 +1816,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
MLX5_SET(set_action_in, action, data,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
- err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- 1, action, &vport->ingress.modify_metadata_id);
- if (err) {
+ vport->ingress.modify_metadata =
+ mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ 1, action);
+ if (IS_ERR(vport->ingress.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.modify_metadata);
esw_warn(esw->dev,
"failed to alloc modify header for vport %d ingress acl (%d)\n",
vport->vport, err);
@@ -1780,7 +1828,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_id = vport->ingress.modify_metadata_id;
+ flow_act.modify_hdr = vport->ingress.modify_metadata;
vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
&spec, &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.modify_metadata_rule)) {
@@ -1794,7 +1842,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
out:
if (err)
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
return err;
}
@@ -1803,7 +1851,7 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
{
if (vport->ingress.modify_metadata_rule) {
mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
vport->ingress.modify_metadata_rule = NULL;
}
@@ -2113,9 +2161,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ mlx5_rdma_enable_roce(esw->dev);
err = esw_offloads_steering_init(esw);
if (err)
- return err;
+ goto err_steering_init;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
@@ -2130,8 +2179,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
-
return 0;
err_reps:
@@ -2139,6 +2186,8 @@ err_reps:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
+err_steering_init:
+ mlx5_rdma_disable_roce(esw->dev);
return err;
}
@@ -2163,12 +2212,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
- mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ mlx5_rdma_disable_roce(esw->dev);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1e3381604b3d..579c306caa7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -107,6 +107,50 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
return 0;
}
+static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+}
+
+static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+}
+
+static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -412,11 +456,13 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
} else {
MLX5_SET(flow_context, in_flow_context, action,
fte->action.action);
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- fte->action.reformat_id);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
}
- MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_id);
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
@@ -468,7 +514,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
MLX5_SET(extended_dest_format, in_dests,
packet_reformat_id,
- dst->dest_attr.vport.reformat_id);
+ dst->dest_attr.vport.pkt_reformat->id);
}
break;
default:
@@ -643,14 +689,15 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
- enum mlx5_flow_namespace_type namespace,
- u32 *packet_reformat_id)
+static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
{
u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
void *reformat;
@@ -693,35 +740,36 @@ int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
- out, packet_reformat_id);
+ pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
+ out, packet_reformat_id);
kfree(in);
return err;
}
-EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
-void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- u32 packet_reformat_id)
+static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
{
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
memset(in, 0, sizeof(in));
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
- packet_reformat_id);
+ pkt_reformat->id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id)
+static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
{
u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
int max_actions, actions_size, inlen, err;
+ struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
u8 table_type;
u32 *in;
@@ -772,26 +820,26 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
kfree(in);
return err;
}
-EXPORT_SYMBOL(mlx5_modify_header_alloc);
-void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
{
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+ struct mlx5_core_dev *dev = ns->dev;
memset(in, 0, sizeof(in));
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
- modify_header_id);
+ modify_hdr->id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
@@ -803,6 +851,13 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.update_fte = mlx5_cmd_update_fte,
.delete_fte = mlx5_cmd_delete_fte,
.update_root_ft = mlx5_cmd_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -815,9 +870,16 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.update_fte = mlx5_cmd_stub_update_fte,
.delete_fte = mlx5_cmd_stub_delete_fte,
.update_root_ft = mlx5_cmd_stub_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
};
-static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
return &mlx5_flow_cmds;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index bc4606306009..d62de642eca9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -75,6 +75,30 @@ struct mlx5_flow_cmds {
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);
+
+ int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ void (*packet_reformat_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ int (*modify_header_alloc)(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ int (*set_peer)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
+
+ int (*create_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
@@ -90,5 +114,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 7bdec442f0ac..3bbb49354829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1415,7 +1415,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
+ (d1->vport.pkt_reformat->id ==
+ d2->vport.pkt_reformat->id) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -2888,3 +2889,160 @@ out:
return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
+
+static struct mlx5_flow_root_namespace
+*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_namespace *ns;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
+ ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ else
+ ns = mlx5_get_flow_namespace(dev, ns_type);
+ if (!ns)
+ return NULL;
+
+ return find_root(&ns->node);
+}
+
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
+ if (!modify_hdr)
+ return ERR_PTR(-ENOMEM);
+
+ modify_hdr->ns_type = ns_type;
+ err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
+ modify_actions, modify_hdr);
+ if (err) {
+ kfree(modify_hdr);
+ return ERR_PTR(err);
+ }
+
+ return modify_hdr;
+}
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, modify_hdr->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->modify_header_dealloc(root, modify_hdr);
+ kfree(modify_hdr);
+}
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_flow_root_namespace *root;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
+ if (!pkt_reformat)
+ return ERR_PTR(-ENOMEM);
+
+ pkt_reformat->ns_type = ns_type;
+ pkt_reformat->reformat_type = reformat_type;
+ err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
+ reformat_data, ns_type,
+ pkt_reformat);
+ if (err) {
+ kfree(pkt_reformat);
+ return ERR_PTR(err);
+ }
+
+ return pkt_reformat;
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
+
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, pkt_reformat->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->packet_reformat_dealloc(root, pkt_reformat);
+ kfree(pkt_reformat);
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ if (peer_ns && ns->mode != peer_ns->mode) {
+ mlx5_core_err(ns->dev,
+ "Can't peer namespace of different steering mode\n");
+ return -EINVAL;
+ }
+
+ return ns->cmds->set_peer(ns, peer_ns);
+}
+
+/* This function should be called only at init stage of the namespace.
+ * It is not safe to call this function while steering operations
+ * are executed in the namespace.
+ */
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode)
+{
+ struct mlx5_flow_root_namespace *root;
+ const struct mlx5_flow_cmds *cmds;
+ int err;
+
+ root = find_root(&ns->node);
+ if (&root->ns != ns)
+ /* Can't set cmds to non root namespace */
+ return -EINVAL;
+
+ if (root->table_type != FS_FT_FDB)
+ return -EOPNOTSUPP;
+
+ if (root->mode == mode)
+ return 0;
+
+ if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ cmds = mlx5_fs_cmd_get_dr_cmds();
+ else
+ cmds = mlx5_fs_cmd_get_fw_cmds();
+ if (!cmds)
+ return -EOPNOTSUPP;
+
+ err = cmds->create_ns(root);
+ if (err) {
+ mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
+ err);
+ return err;
+ }
+
+ root->cmds->destroy_ns(root);
+ root->cmds = cmds;
+ root->mode = mode;
+
+ return 0;
+}
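
For orientation, the control flow this series introduces (devlink "flow_steering_mode" parameter -> dev->priv.steering->mode -> per-namespace command set) condenses to roughly the sketch below. The wrapper name fdb_use_configured_steering_mode() is hypothetical and error handling is trimmed; the functions it calls are the ones added in the hunks above.

static int fdb_use_configured_steering_mode(struct mlx5_eswitch *esw,
					    struct mlx5_flow_namespace *fdb_ns)
{
	int err;

	/* steering->mode is MLX5_FLOW_STEERING_MODE_SMFS or _DMFS,
	 * as selected through the "flow_steering_mode" devlink parameter.
	 */
	err = mlx5_flow_namespace_set_mode(fdb_ns,
					   esw->dev->priv.steering->mode);
	if (err)
		return err;

	/* From this point, table/group/rule commands issued on this root
	 * namespace are dispatched either to the FW command set
	 * (mlx5_fs_cmd_get_fw_cmds) or to the SW steering command set
	 * (mlx5_fs_cmd_get_dr_cmds), depending on the chosen mode.
	 */
	return 0;
}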
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 0d16b4b5ab83..00717eba2256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -37,6 +37,24 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
+#include <steering/fs_dr.h>
+
+struct mlx5_modify_hdr {
+ enum mlx5_flow_namespace_type ns_type;
+ union {
+ struct mlx5_fs_dr_action action;
+ u32 id;
+ };
+};
+
+struct mlx5_pkt_reformat {
+ enum mlx5_flow_namespace_type ns_type;
+ int reformat_type; /* from mlx5_ifc */
+ union {
+ struct mlx5_fs_dr_action action;
+ u32 id;
+ };
+};
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
* and those are in parallel to one another when going over them to connect
@@ -80,9 +98,15 @@ enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
+enum mlx5_flow_steering_mode {
+ MLX5_FLOW_STEERING_MODE_DMFS,
+ MLX5_FLOW_STEERING_MODE_SMFS
+};
+
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
- struct kmem_cache *fgs_cache;
+ enum mlx5_flow_steering_mode mode;
+ struct kmem_cache *fgs_cache;
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
@@ -128,6 +152,7 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
+ struct mlx5_fs_dr_table fs_dr_table;
u32 id;
u16 vport;
unsigned int max_fte;
@@ -168,6 +193,7 @@ struct mlx5_ft_underlay_qp {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
+ struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 index;
@@ -203,6 +229,7 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -214,6 +241,8 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
+ enum mlx5_flow_steering_mode mode;
+ struct mlx5_fs_dr_domain fs_dr_domain;
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -231,6 +260,14 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
+
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode);
+
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
new file mode 100644
index 000000000000..e065c2f68f5a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+
+struct mlx5_dm {
+ /* protect access to icm bitmask */
+ spinlock_t lock;
+ unsigned long *steering_sw_icm_alloc_blocks;
+ unsigned long *header_modify_sw_icm_alloc_blocks;
+};
+
+struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
+{
+ u64 header_modify_icm_blocks = 0;
+ u64 steering_icm_blocks = 0;
+ struct mlx5_dm *dm;
+
+ if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
+ return NULL;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&dm->lock);
+
+ if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) {
+ steering_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->steering_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(steering_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dm->steering_sw_icm_alloc_blocks)
+ goto err_steering;
+ }
+
+ if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) {
+ header_modify_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_sw_icm_alloc_blocks =
+ kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dm->header_modify_sw_icm_alloc_blocks)
+ goto err_modify_hdr;
+ }
+
+ return dm;
+
+err_modify_hdr:
+ kfree(dm->steering_sw_icm_alloc_blocks);
+
+err_steering:
+ kfree(dm);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_dm *dm = dev->dm;
+
+ if (!dev->dm)
+ return;
+
+ if (dm->steering_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ kfree(dm->steering_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ kfree(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ kfree(dm);
+}
+
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
+{
+ u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
+ struct mlx5_dm *dm = dev->dm;
+ unsigned long *block_map;
+ u64 icm_start_addr;
+ u32 log_icm_size;
+ u32 max_blocks;
+ u64 block_idx;
+ void *sw_icm;
+ int ret;
+
+ if (!dev->dm)
+ return -EOPNOTSUPP;
+
+ if (!length || (length & (length - 1)) ||
+ length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1))
+ return -EINVAL;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ switch (type) {
+ case MLX5_SW_ICM_TYPE_STEERING:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_sw_icm_size);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!block_map)
+ return -EOPNOTSUPP;
+
+ max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+ spin_lock(&dm->lock);
+ block_idx = bitmap_find_next_zero_area(block_map,
+ max_blocks,
+ 0,
+ num_blocks, 0);
+
+ if (block_idx < max_blocks)
+ bitmap_set(block_map,
+ block_idx, num_blocks);
+
+ spin_unlock(&dm->lock);
+
+ if (block_idx >= max_blocks)
+ return -ENOMEM;
+
+ sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
+ icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
+ icm_start_addr);
+ MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(block_map,
+ block_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return ret;
+ }
+
+ *addr = icm_start_addr;
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc);
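+
+/* Worked example (hypothetical numbers; the block size is device dependent):
+ * with MLX5_SW_ICM_BLOCK_SIZE(dev) == 4KB, a 16KB request (power of two and
+ * block aligned, as required above) needs num_blocks = 4. If the bitmap scan
+ * returns block_idx = 8, the object covers blocks 8..11 and
+ * *addr = icm_start_addr + (8 << 12) = icm_start_addr + 0x8000.
+ */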
+
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id)
+{
+ u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ struct mlx5_dm *dm = dev->dm;
+ unsigned long *block_map;
+ u64 icm_start_addr;
+ u64 start_idx;
+ int err;
+
+ if (!dev->dm)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case MLX5_SW_ICM_TYPE_STEERING:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
+ block_map = dm->steering_sw_icm_alloc_blocks;
+ break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
+ block_map = dm->header_modify_sw_icm_alloc_blocks;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ spin_lock(&dm->lock);
+ bitmap_clear(block_map,
+ start_idx, num_blocks);
+ spin_unlock(&dm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index dee1a8658c87..9648c2297803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -876,6 +876,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eswitch_cleanup;
}
+ dev->dm = mlx5_dm_create(dev);
+ if (IS_ERR(dev->dm))
+ mlx5_core_warn(dev, "Failed to init device memory, err %ld\n",
+ PTR_ERR(dev->dm));
+
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -910,6 +914,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_dm_cleanup(dev);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_sriov_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 87b75b2207c4..b100489dc85c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -198,6 +198,9 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
+void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 18af6981e0be..0fc7de4aa572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -14,9 +14,6 @@ static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
- if (!roce->ft)
- return;
-
mlx5_del_flow_rules(roce->allow_rule);
mlx5_destroy_flow_group(roce->fg);
mlx5_destroy_flow_table(roce->ft);
@@ -145,6 +142,11 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+
+ if (!roce->ft)
+ return;
+
mlx5_rdma_disable_roce_steering(dev);
mlx5_rdma_del_roce_addr(dev);
mlx5_nic_vport_disable_roce(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
new file mode 100644
index 000000000000..a02f87f85c17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -0,0 +1,1588 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+enum dr_action_domain {
+ DR_ACTION_DOMAIN_NIC_INGRESS,
+ DR_ACTION_DOMAIN_NIC_EGRESS,
+ DR_ACTION_DOMAIN_FDB_INGRESS,
+ DR_ACTION_DOMAIN_FDB_EGRESS,
+ DR_ACTION_DOMAIN_MAX,
+};
+
+enum dr_action_valid_state {
+ DR_ACTION_STATE_ERR,
+ DR_ACTION_STATE_NO_ACTION,
+ DR_ACTION_STATE_REFORMAT,
+ DR_ACTION_STATE_MODIFY_HDR,
+ DR_ACTION_STATE_MODIFY_VLAN,
+ DR_ACTION_STATE_NON_TERM,
+ DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_MAX,
+};
+
+static const enum dr_action_valid_state
+next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
+ [DR_ACTION_DOMAIN_NIC_INGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_NIC_EGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_FDB_INGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+ [DR_ACTION_DOMAIN_FDB_EGRESS] = {
+ [DR_ACTION_STATE_NO_ACTION] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_HDR] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_NON_TERM] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_TERM] = {
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
+ },
+ },
+};
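+
+/* The table above is a state machine indexed by [domain][current state][next
+ * action]; missing entries default to DR_ACTION_STATE_ERR (0) and are
+ * rejected. Example walk for a NIC ingress rule (illustrative only):
+ *
+ *	TAG        : NO_ACTION  -> NON_TERM
+ *	CTR        : NON_TERM   -> NON_TERM
+ *	MODIFY_HDR : NON_TERM   -> MODIFY_HDR
+ *	FT         : MODIFY_HDR -> TERM
+ *
+ * whereas MODIFY_HDR followed by TNL_L2_TO_L2 has no entry and is rejected
+ * with -EOPNOTSUPP by dr_action_validate_and_get_next_state().
+ */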
+
+struct dr_action_modify_field_conv {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
+static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+#define MAX_VLANS 2
+struct dr_action_vlan_info {
+ int count;
+ u32 headers[MAX_VLANS];
+};
+
+struct dr_action_apply_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct dr_action_vlan_info vlans;
+};
+
+static int
+dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
+ enum mlx5dr_action_type *action_type)
+{
+ switch (reformat_type) {
+ case DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2:
+ *action_type = DR_ACTION_TYP_TNL_L2_TO_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2:
+ *action_type = DR_ACTION_TYP_L2_TO_TNL_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2:
+ *action_type = DR_ACTION_TYP_TNL_L3_TO_L2;
+ break;
+ case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
+ *action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dr_actions_init_next_ste(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
+}
+
+static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+ /* We want to make sure the modify header comes before L2
+ * encapsulation. The reason for that is that we support
+ * modify headers for outer headers only
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+ /* Modify header and encapsulation require different STEs,
+ * since the modify header STE format doesn't support the
+ * encapsulation tunneling_action.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+ /* Whenever prio_tag_required is enabled, we can be sure that the
+ * previous table (ACL) already pushed a vlan onto our packet.
+ * Due to a HW limitation we need to set this bit, otherwise
+ * push vlan + reformat will not work.
+ */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ mlx5dr_ste_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
+}
+
+static void dr_actions_apply_rx(u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ mlx5dr_ste_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ mlx5dr_ste_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ mlx5dr_ste_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_actions_init_next_ste(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+}
+
+/* Apply the actions on the rule STE array starting from the last_ste.
+ * Actions might require more than one STE; new_num_stes returns the new
+ * size of the STE array once the actions have been applied.
+ */
+static void dr_actions_apply(struct mlx5dr_domain *dmn,
+ enum mlx5dr_ste_entry_type ste_type,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct dr_action_apply_attr *attr,
+ u32 *new_num_stes)
+{
+ u32 added_stes = 0;
+
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ else
+ dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+
+ last_ste += added_stes * DR_STE_SIZE;
+ *new_num_stes += added_stes;
+
+ mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
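+
+/* Example (illustrative): for an RX rule doing L3 decap followed by a modify
+ * header, dr_actions_apply_rx() turns the last STE into a MODIFY_PKT entry
+ * for the decap and then allocates one extra STE for the rewrite, so
+ * added_stes == 1 and the hit gvmi/address are written to the new last STE.
+ */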
+
+static enum dr_action_domain
+dr_action_get_action_domain(enum mlx5dr_domain_type domain,
+ enum mlx5dr_ste_entry_type ste_type)
+{
+ switch (domain) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ return DR_ACTION_DOMAIN_NIC_INGRESS;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ return DR_ACTION_DOMAIN_NIC_EGRESS;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ return DR_ACTION_DOMAIN_FDB_INGRESS;
+ return DR_ACTION_DOMAIN_FDB_EGRESS;
+ default:
+ WARN_ON(true);
+ return DR_ACTION_DOMAIN_MAX;
+ }
+}
+
+static
+int dr_action_validate_and_get_next_state(enum dr_action_domain action_domain,
+ u32 action_type,
+ u32 *state)
+{
+ u32 cur_state = *state;
+
+ /* Check action state machine is valid */
+ *state = next_action_state[action_domain][cur_state][action_type];
+
+ if (*state == DR_ACTION_STATE_ERR)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *dest_action,
+ u64 *final_icm_addr)
+{
+ int ret;
+
+ switch (dest_action->action_type) {
+ case DR_ACTION_TYP_FT:
+ /* Allow destination flow table only if table is a terminating
+ * table, since there is an *assumption* that in such case FW
+ * will recalculate the CS.
+ */
+ if (dest_action->dest_tbl.is_fw_tbl) {
+ *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr;
+ } else {
+ mlx5dr_dbg(dmn,
+ "Destination FT should be terminating when modify TTL is used\n");
+ return -EINVAL;
+ }
+ break;
+
+ case DR_ACTION_TYP_VPORT:
+ /* If destination is vport we will get the FW flow table
+ * that recalculates the CS and forwards to the vport.
+ */
+ ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
+ dest_action->vport.num,
+ final_icm_addr);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define WITH_VLAN_NUM_HW_ACTIONS 6
+
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_action *actions[],
+ u32 num_actions,
+ u8 *ste_arr,
+ u32 *new_hw_ste_arr_sz)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_action *dest_action = NULL;
+ u32 state = DR_ACTION_STATE_NO_ACTION;
+ struct dr_action_apply_attr attr = {};
+ enum dr_action_domain action_domain;
+ bool recalc_cs_required = false;
+ u8 *last_ste;
+ int i, ret;
+
+ attr.gvmi = dmn->info.caps.gvmi;
+ attr.hit_gvmi = dmn->info.caps.gvmi;
+ attr.final_icm_addr = nic_dmn->default_icm_addr;
+ action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
+
+ for (i = 0; i < num_actions; i++) {
+ struct mlx5dr_action *action;
+ int max_actions_type = 1;
+ u32 action_type;
+
+ action = actions[i];
+ action_type = action->action_type;
+
+ switch (action_type) {
+ case DR_ACTION_TYP_DROP:
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ break;
+ case DR_ACTION_TYP_FT:
+ dest_action = action;
+ if (!action->dest_tbl.is_fw_tbl) {
+ if (action->dest_tbl.tbl->dmn != dmn) {
+ mlx5dr_dbg(dmn,
+ "Destination table belongs to a different domain\n");
+ goto out_invalid_arg;
+ }
+ if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ mlx5dr_dbg(dmn,
+ "Destination table level should be higher than source table\n");
+ goto out_invalid_arg;
+ }
+ attr.final_icm_addr = rx_rule ?
+ action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
+ } else {
+ struct mlx5dr_cmd_query_flow_table_details output;
+ int ret;
+
+ /* get the relevant addresses */
+ if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
+ ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
+ action->dest_tbl.fw_tbl.ft->type,
+ action->dest_tbl.fw_tbl.ft->id,
+ &output);
+ if (!ret) {
+ action->dest_tbl.fw_tbl.tx_icm_addr =
+ output.sw_owner_icm_root_1;
+ action->dest_tbl.fw_tbl.rx_icm_addr =
+ output.sw_owner_icm_root_0;
+ } else {
+ mlx5dr_dbg(dmn,
+ "Failed mlx5_cmd_query_flow_table ret: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ attr.final_icm_addr = rx_rule ?
+ action->dest_tbl.fw_tbl.rx_icm_addr :
+ action->dest_tbl.fw_tbl.tx_icm_addr;
+ }
+ break;
+ case DR_ACTION_TYP_QP:
+ mlx5dr_info(dmn, "Domain doesn't support QP\n");
+ goto out_invalid_arg;
+ case DR_ACTION_TYP_CTR:
+ attr.ctr_id = action->ctr.ctr_id +
+ action->ctr.offeset;
+ break;
+ case DR_ACTION_TYP_TAG:
+ attr.flow_tag = action->flow_tag;
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ attr.decap_index = action->rewrite.index;
+ attr.decap_actions = action->rewrite.num_of_actions;
+ attr.decap_with_vlan =
+ attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ attr.modify_index = action->rewrite.index;
+ attr.modify_actions = action->rewrite.num_of_actions;
+ recalc_cs_required = action->rewrite.modify_ttl;
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ attr.reformat_size = action->reformat.reformat_size;
+ attr.reformat_id = action->reformat.reformat_id;
+ break;
+ case DR_ACTION_TYP_VPORT:
+ attr.hit_gvmi = action->vport.caps->vhca_gvmi;
+ dest_action = action;
+ if (rx_rule) {
+ /* Loopback on WIRE vport is not supported */
+ if (action->vport.num == WIRE_PORT)
+ goto out_invalid_arg;
+
+ attr.final_icm_addr = action->vport.caps->icm_address_rx;
+ } else {
+ attr.final_icm_addr = action->vport.caps->icm_address_tx;
+ }
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ max_actions_type = MAX_VLANS;
+ attr.vlans.count++;
+ break;
+ case DR_ACTION_TYP_PUSH_VLAN:
+ max_actions_type = MAX_VLANS;
+ if (attr.vlans.count == MAX_VLANS)
+ return -EINVAL;
+
+ attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
+ break;
+ default:
+ goto out_invalid_arg;
+ }
+
+ /* Check action duplication */
+ if (++action_type_set[action_type] > max_actions_type) {
+ mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+ action_type, max_actions_type);
+ goto out_invalid_arg;
+ }
+
+ /* Check action state machine is valid */
+ if (dr_action_validate_and_get_next_state(action_domain,
+ action_type,
+ &state)) {
+ mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
+ last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
+
+ /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect
+ * checksum calculation. In this case we will use a FW table to
+ * recalculate.
+ */
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+ rx_rule && recalc_cs_required && dest_action) {
+ ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
+ if (ret) {
+ mlx5dr_dbg(dmn,
+ "Failed to handle checksum recalculation err %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ dr_actions_apply(dmn,
+ nic_dmn->ste_type,
+ action_type_set,
+ last_ste,
+ &attr,
+ new_hw_ste_arr_sz);
+
+ return 0;
+
+out_invalid_arg:
+ return -EINVAL;
+}
+
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+#define HDR_LEN_L2_ONLY 14
+#define HDR_LEN_L2_VLAN 18
+#define REWRITE_HW_ACTION_NUM 6
+
+static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action,
+ void *data, size_t data_sz)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u64 ops[REWRITE_HW_ACTION_NUM] = {};
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+ int i = 0;
+ int ret;
+
+ vlan = (data_sz != HDR_LEN_L2_ONLY);
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_4b);
+ i++;
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_4b);
+ i++;
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ i++;
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
+ }
+ i++;
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ i++;
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, ops + i,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, ops + i,
+ destination_left_shifter, 0);
+ i++;
+ }
+
+ action->rewrite.data = (void *)ops;
+ action->rewrite.num_of_actions = i;
+ action->rewrite.chunk->byte_size = i * sizeof(*ops);
+
+ ret = mlx5dr_send_postsend_action(dmn, action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5dr_action *
+dr_action_create_generic(enum mlx5dr_action_type action_type)
+{
+ struct mlx5dr_action *action;
+
+ action = kzalloc(sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ action->action_type = action_type;
+ refcount_set(&action->refcount, 1);
+
+ return action;
+}
+
+struct mlx5dr_action *mlx5dr_action_create_drop(void)
+{
+ return dr_action_create_generic(DR_ACTION_TYP_DROP);
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
+{
+ struct mlx5dr_action *action;
+
+ refcount_inc(&tbl->refcount);
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ goto dec_ref;
+
+ action->dest_tbl.tbl = tbl;
+
+ return action;
+
+dec_ref:
+ refcount_dec(&tbl->refcount);
+ return NULL;
+}
+
+struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ return NULL;
+
+ action->dest_tbl.is_fw_tbl = 1;
+ action->dest_tbl.fw_tbl.ft = ft;
+ action->dest_tbl.fw_tbl.mdev = mdev;
+
+ return action;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ action->ctr.ctr_id = counter_id;
+
+ return action;
+}
+
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ action->flow_tag = tag_value & 0xffffff;
+
+ return action;
+}
+
+static int
+dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
+ struct mlx5dr_domain *dmn,
+ size_t data_sz,
+ void *data)
+{
+ if ((!data && data_sz) || (data && !data_sz) || reformat_type >
+ DR_ACTION_TYP_L2_TO_TNL_L3) {
+ mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
+ goto out_err;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+ return 0;
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ if (reformat_type != DR_ACTION_TYP_TNL_L2_TO_L2 &&
+ reformat_type != DR_ACTION_TYP_TNL_L3_TO_L2) {
+ mlx5dr_dbg(dmn, "Action reformat type not support on RX domain\n");
+ goto out_err;
+ }
+ } else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ if (reformat_type != DR_ACTION_TYP_L2_TO_TNL_L2 &&
+ reformat_type != DR_ACTION_TYP_L2_TO_TNL_L3) {
+ mlx5dr_dbg(dmn, "Action reformat type not support on TX domain\n");
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ return -EINVAL;
+}
+
+#define ACTION_CACHE_LINE_SIZE 64
+
+static int
+dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ size_t data_sz, void *data,
+ struct mlx5dr_action *action)
+{
+ u32 reformat_id;
+ int ret;
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ {
+ enum mlx5dr_action_type rt;
+
+ if (action->action_type == DR_ACTION_TYP_L2_TO_TNL_L2)
+ rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ else
+ rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
+ &reformat_id);
+ if (ret)
+ return ret;
+
+ action->reformat.reformat_id = reformat_id;
+ action->reformat.reformat_size = data_sz;
+ return 0;
+ }
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ {
+ return 0;
+ }
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ {
+ /* Only an Ethernet L2 header is supported, with VLAN (18 bytes) or without (14 bytes) */
+ if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
+ return -EINVAL;
+
+ action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+ DR_CHUNK_SIZE_8);
+ if (!action->rewrite.chunk)
+ return -ENOMEM;
+
+ action->rewrite.index = (action->rewrite.chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
+
+ ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ if (ret) {
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ return ret;
+ }
+ return 0;
+ }
+ default:
+ mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
+ return -EINVAL;
+ }
+}
+
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
+{
+ return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
+}
+
+struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
+ __be32 vlan_hdr)
+{
+ u32 vlan_hdr_h = ntohl(vlan_hdr);
+ u16 ethertype = vlan_hdr_h >> 16;
+ struct mlx5dr_action *action;
+
+ if (ethertype != SVLAN_ETHERTYPE && ethertype != CVLAN_ETHERTYPE) {
+ mlx5dr_dbg(dmn, "Invalid vlan ethertype\n");
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ action->push_vlan.vlan_hdr = vlan_hdr_h;
+ return action;
+}
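+
+/* The 32-bit VLAN header is expected with the ethertype in the upper 16 bits
+ * and the TCI (PCP/DEI/VID) in the lower 16 bits, in network byte order.
+ * Illustrative caller-side sketch (the TCI value is made up):
+ *
+ *	u16 tci = 100;                            // VID 100, PCP 0
+ *	__be32 hdr = cpu_to_be32((CVLAN_ETHERTYPE << 16) | tci);
+ *	action = mlx5dr_action_create_push_vlan(dmn, hdr);
+ */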
+
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data)
+{
+ enum mlx5dr_action_type action_type;
+ struct mlx5dr_action *action;
+ int ret;
+
+ refcount_inc(&dmn->refcount);
+
+ /* General checks */
+ ret = dr_action_reformat_to_action_type(reformat_type, &action_type);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Invalid reformat_type provided\n");
+ goto dec_ref;
+ }
+
+ ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data);
+ if (ret)
+ goto dec_ref;
+
+ action = dr_action_create_generic(action_type);
+ if (!action)
+ goto dec_ref;
+
+ action->reformat.dmn = dmn;
+
+ ret = dr_action_create_reformat_action(dmn,
+ data_sz,
+ data,
+ action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating reformat action %d\n", ret);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
+
+static const struct dr_action_modify_field_conv *
+dr_action_modify_get_hw_info(u16 sw_field)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+
+ if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
+ goto not_found;
+
+ hw_action_info = &dr_action_conv_arr[sw_field];
+ if (!hw_action_info->end && !hw_action_info->start)
+ goto not_found;
+
+ return hw_action_info;
+
+not_found:
+ return NULL;
+}
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u8 offset, length, max_length, action;
+ u16 sw_field;
+ u8 hw_opcode;
+ u32 data;
+
+ /* Get SW modify action data */
+ action = MLX5_GET(set_action_in, sw_action, action_type);
+ length = MLX5_GET(set_action_in, sw_action, length);
+ offset = MLX5_GET(set_action_in, sw_action, offset);
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ data = MLX5_GET(set_action_in, sw_action, data);
+
+ /* Convert SW data to HW modify action format */
+ hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ if (!hw_action_info) {
+ mlx5dr_dbg(dmn, "Modify action invalid field given\n");
+ return -EINVAL;
+ }
+
+ max_length = hw_action_info->end - hw_action_info->start + 1;
+
+ switch (action) {
+ case MLX5_ACTION_TYPE_SET:
+ hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
+ /* The PRM defines length zero as a special case meaning a full 32-bit field */
+ if (!length)
+ length = 32;
+
+ if (length + offset > max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
+ }
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
+ offset = 0;
+ length = max_length;
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ hw_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+ hw_action_info->start + offset);
+
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+ length == 32 ? 0 : length);
+
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+ *ret_hw_info = hw_action_info;
+
+ return 0;
+}
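+
+/* Conversion example (illustrative): a SW "set TTL to 64" action, i.e. a
+ * set_action_in with action_type = MLX5_ACTION_TYPE_SET,
+ * field = MLX5_ACTION_IN_FIELD_OUT_IP_TTL, offset = 0, length = 8, data = 64,
+ * maps through dr_action_conv_arr to FLD_L3_1 (start 8, end 15) and becomes a
+ * dr_action_hw_set with destination_field_code = MLX5DR_ACTION_MDFY_HW_FLD_L3_1,
+ * destination_left_shifter = 8, destination_length = 8, inline_data = 64.
+ */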
+
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
+ const __be64 *sw_action)
+{
+ u16 sw_field;
+ u8 action;
+
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ action = MLX5_GET(set_action_in, sw_action, action_type);
+
+ /* Check if SW field is supported in current domain (RX/TX) */
+ if (action == MLX5_ACTION_TYPE_SET) {
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+ } else if (action == MLX5_ACTION_TYPE_ADD) {
+ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
+ return -EINVAL;
+ }
+ } else {
+ mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool
+dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+
+ return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+}
+
+static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+ u32 max_hw_actions,
+ u32 num_sw_actions,
+ __be64 sw_actions[],
+ __be64 hw_actions[],
+ u32 *num_hw_actions,
+ bool *modify_ttl)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
+ u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
+ u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ int ret, i, hw_idx = 0;
+ __be64 *sw_action;
+ __be64 hw_action;
+
+ *modify_ttl = false;
+
+ for (i = 0; i < num_sw_actions; i++) {
+ sw_action = &sw_actions[i];
+
+ ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+ if (ret)
+ return ret;
+
+ if (!(*modify_ttl))
+ *modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
+
+ /* Convert SW action to HW action */
+ ret = dr_action_modify_sw_to_hw(dmn,
+ sw_action,
+ &hw_action,
+ &hw_action_info);
+ if (ret)
+ return ret;
+
+ /* Due to a HW limitation we cannot modify 2 different L3 types */
+ if (l3_type && hw_action_info->l3_type &&
+ hw_action_info->l3_type != l3_type) {
+ mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
+ return -EINVAL;
+ }
+ if (hw_action_info->l3_type)
+ l3_type = hw_action_info->l3_type;
+
+ /* Due to a HW limitation we cannot modify two different L4 types */
+ if (l4_type && hw_action_info->l4_type &&
+ hw_action_info->l4_type != l4_type) {
+ mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
+ return -EINVAL;
+ }
+ if (hw_action_info->l4_type)
+ l4_type = hw_action_info->l4_type;
+
+ /* HW reads and executes two actions at once; this means we
+ * need to create a gap if two consecutive actions access the
+ * same field.
+ */
+ if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+ /* Check that after gap insertion the total number of HW
+ * modify actions doesn't exceed the limit
+ */
+ hw_idx++;
+ if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
+ mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
+ return -EINVAL;
+ }
+ }
+ hw_field = hw_action_info->hw_field;
+
+ hw_actions[hw_idx] = hw_action;
+ hw_idx++;
+ }
+
+ *num_hw_actions = hw_idx;
+
+ return 0;
+}
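+
+/* Gap example (illustrative): SMAC_47_16 and SMAC_15_0 both map to
+ * MLX5DR_ACTION_MDFY_HW_FLD_L2_1. The first SET lands in hw_actions[0]; the
+ * second would land in slot 1 of the same executed pair, so a gap is inserted
+ * and it is written to hw_actions[2] instead, leaving each executed pair with
+ * at most one write to that field.
+ */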
+
+static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
+ size_t actions_sz,
+ __be64 actions[],
+ struct mlx5dr_action *action)
+{
+ struct mlx5dr_icm_chunk *chunk;
+ u32 max_hw_actions;
+ u32 num_hw_actions;
+ u32 num_sw_actions;
+ __be64 *hw_actions;
+ bool modify_ttl;
+ int ret;
+
+ num_sw_actions = actions_sz / DR_MODIFY_ACTION_SIZE;
+ max_hw_actions = mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_16);
+
+ if (num_sw_actions > max_hw_actions) {
+ mlx5dr_dbg(dmn, "Max number of actions %d exceeds limit %d\n",
+ num_sw_actions, max_hw_actions);
+ return -EINVAL;
+ }
+
+ chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16);
+ if (!chunk)
+ return -ENOMEM;
+
+ hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!hw_actions) {
+ ret = -ENOMEM;
+ goto free_chunk;
+ }
+
+ ret = dr_actions_convert_modify_header(dmn,
+ max_hw_actions,
+ num_sw_actions,
+ actions,
+ hw_actions,
+ &num_hw_actions,
+ &modify_ttl);
+ if (ret)
+ goto free_hw_actions;
+
+ action->rewrite.chunk = chunk;
+ action->rewrite.modify_ttl = modify_ttl;
+ action->rewrite.data = (u8 *)hw_actions;
+ action->rewrite.num_of_actions = num_hw_actions;
+ action->rewrite.index = (chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
+
+ ret = mlx5dr_send_postsend_action(dmn, action);
+ if (ret)
+ goto free_hw_actions;
+
+ return 0;
+
+free_hw_actions:
+ kfree(hw_actions);
+free_chunk:
+ mlx5dr_icm_free_chunk(chunk);
+ return ret;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[])
+{
+ struct mlx5dr_action *action;
+ int ret = 0;
+
+ refcount_inc(&dmn->refcount);
+
+ if (actions_sz % DR_MODIFY_ACTION_SIZE) {
+ mlx5dr_dbg(dmn, "Invalid modify actions size provided\n");
+ goto dec_ref;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_MODIFY_HDR);
+ if (!action)
+ goto dec_ref;
+
+ action->rewrite.dmn = dmn;
+
+ ret = dr_action_create_modify_action(dmn,
+ actions_sz,
+ actions,
+ action);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating modify header action %d\n", ret);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
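+
+/* Caller-side sketch (illustrative, mirroring the set_action_in layout used
+ * by dr_action_modify_sw_to_hw() above and assuming DR_MODIFY_ACTION_SIZE is
+ * the size of one set_action_in, i.e. 8 bytes):
+ *
+ *	__be64 modify[1] = {};
+ *
+ *	MLX5_SET(set_action_in, modify, action_type, MLX5_ACTION_TYPE_SET);
+ *	MLX5_SET(set_action_in, modify, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
+ *	MLX5_SET(set_action_in, modify, length, 8);
+ *	MLX5_SET(set_action_in, modify, data, 64);
+ *	action = mlx5dr_action_create_modify_header(dmn, 0, sizeof(modify), modify);
+ */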
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id)
+{
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *vport_dmn;
+ struct mlx5dr_action *action;
+ u8 peer_vport;
+
+ peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi);
+ vport_dmn = peer_vport ? dmn->peer_dmn : dmn;
+ if (!vport_dmn) {
+ mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
+ return NULL;
+ }
+
+ if (vport_dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5dr_dbg(dmn, "Domain doesn't support vport actions\n");
+ return NULL;
+ }
+
+ vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ if (!vport_cap) {
+ mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ action->vport.dmn = vport_dmn;
+ action->vport.caps = vport_cap;
+
+ return action;
+}
+
+int mlx5dr_action_destroy(struct mlx5dr_action *action)
+{
+ if (refcount_read(&action->refcount) > 1)
+ return -EBUSY;
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_FT:
+ if (!action->dest_tbl.is_fw_tbl)
+ refcount_dec(&action->dest_tbl.tbl->refcount);
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
+ action->reformat.reformat_id);
+ refcount_dec(&action->reformat.dmn->refcount);
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ refcount_dec(&action->rewrite.dmn->refcount);
+ break;
+ default:
+ break;
+ }
+
+ kfree(action);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
new file mode 100644
index 000000000000..41662c4e2664
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+ bool other_vport,
+ u16 vport_number,
+ u64 *icm_address_rx,
+ u64 *icm_address_tx)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ int err;
+
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *icm_address_rx =
+ MLX5_GET64(query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+ *icm_address_tx =
+ MLX5_GET64(query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ return 0;
+}
+
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
+ u16 vport_number, u16 *gvmi)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
+ MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+ return 0;
+}
+
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_esw_caps *caps)
+{
+ caps->drop_icm_address_rx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_fdb_action_drop_icm_address_rx);
+ caps->drop_icm_address_tx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_fdb_action_drop_icm_address_tx);
+ caps->uplink_icm_address_rx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ caps->uplink_icm_address_tx =
+ MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ caps->sw_owner =
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
+ sw_owner);
+
+ return 0;
+}
+
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ struct mlx5dr_cmd_caps *caps)
+{
+ caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
+ caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
+ caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+
+ if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
+ caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
+ }
+
+ if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
+ caps->flex_parser_id_icmpv6_dw0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
+ caps->flex_parser_id_icmpv6_dw1 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
+ }
+
+ caps->nic_rx_drop_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
+ caps->nic_tx_drop_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
+ caps->nic_tx_allow_address =
+ MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
+
+ caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+ caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+
+ caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+
+ caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
+ caps->hdr_modify_icm_addr =
+ MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
+
+ caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+
+ return 0;
+}
+
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type,
+ u32 table_id,
+ struct mlx5dr_cmd_query_flow_table_details *output)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
+ int err;
+
+ MLX5_SET(query_flow_table_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE);
+
+ MLX5_SET(query_flow_table_in, in, table_type, type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ output->status = MLX5_GET(query_flow_table_out, out, status);
+ output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
+
+ output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
+ flow_table_context.sw_owner_icm_root_1);
+ output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
+ flow_table_context.sw_owner_icm_root_0);
+
+ return 0;
+}
+
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
+{
+ u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
+
+ MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ u32 modify_header_id,
+ u32 vport_id)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ void *in_flow_context;
+ unsigned int inlen;
+ void *in_dests;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+ 1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
+ MLX5_SET(flow_context, in_flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u8 num_of_actions,
+ u64 *actions,
+ u32 *modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
+ void *p_actions;
+ u32 inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
+ num_of_actions * sizeof(u64);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+ MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
+ p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+ memcpy(p_actions, actions, num_of_actions * sizeof(u64));
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+out:
+ kvfree(in);
+ return err;
+}
+
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
+
+ MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+ modify_header_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, table_id);
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {};
+
+	MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u64 icm_addr_rx,
+ u64 icm_addr_tx,
+ u8 level,
+ bool sw_owner,
+ bool term_tbl,
+ u64 *fdb_rx_icm_addr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
+ void *ft_mdev;
+ int err;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, table_type);
+
+ ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
+ MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
+ MLX5_SET(flow_table_context, ft_mdev, level, level);
+
+ if (sw_owner) {
+		/* icm_addr_0 used for FDB RX / NIC RX / NIC TX
+ * icm_addr_1 used for FDB TX
+ */
+ if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_rx);
+ } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_tx);
+ } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_0, icm_addr_rx);
+ MLX5_SET64(flow_table_context, ft_mdev,
+ sw_owner_icm_root_1, icm_addr_tx);
+ }
+ }
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+ if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+ *fdb_rx_icm_addr =
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
+ (u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
+
+ return 0;
+}
+
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ u32 table_type)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+ enum mlx5_reformat_ctx_type rt,
+ size_t reformat_size,
+ void *reformat_data,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
+ size_t inlen, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int err;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
+ packet_reformat_context.reformat_data);
+ inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
+ memcpy(pdata, reformat_data, reformat_size);
+
+	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+	if (!err)
+		*reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
+					out, packet_reformat_id);
+
+	kvfree(in);	/* free the command buffer on success and error alike */
+
+	return err;
+}
+
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+ reformat_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+ u16 index, struct mlx5dr_cmd_gid_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
+ int err;
+
+ MLX5_SET(query_roce_address_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
+
+ MLX5_SET(query_roce_address_in, in, roce_address_index, index);
+ MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ memcpy(&attr->gid,
+ MLX5_ADDR_OF(query_roce_address_out,
+ out, roce_address.source_l3_address),
+ sizeof(attr->gid));
+ memcpy(attr->mac,
+ MLX5_ADDR_OF(query_roce_address_out, out,
+ roce_address.source_mac_47_32),
+ sizeof(attr->mac));
+
+ if (MLX5_GET(query_roce_address_out, out,
+ roce_address.roce_version) == MLX5_ROCE_VERSION_2)
+ attr->roce_ver = MLX5_ROCE_VERSION_2;
+ else
+ attr->roce_ver = MLX5_ROCE_VERSION_1;
+
+ return 0;
+}
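
A hedged call sketch for mlx5dr_cmd_create_flow_table() above (the helper name and the level value are illustrative only): an SW-owned FDB table supplies both ICM roots, with icm_addr_rx landing in sw_owner_icm_root_0 and icm_addr_tx in sw_owner_icm_root_1, while fdb_rx_icm_addr is only filled in for FW-owned FDB tables:

	static int example_create_sw_fdb_table(struct mlx5_core_dev *mdev,
					       u64 icm_addr_rx, u64 icm_addr_tx,
					       u32 *table_id)
	{
		u64 fdb_rx_icm_addr; /* left untouched: only set when !sw_owner */

		return mlx5dr_cmd_create_flow_table(mdev, MLX5_FLOW_TABLE_TYPE_FDB,
						    icm_addr_rx, icm_addr_tx,
						    1,     /* level (illustrative) */
						    true,  /* sw_owner */
						    false, /* term_tbl */
						    &fdb_rx_icm_addr, table_id);
	}
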
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
new file mode 100644
index 000000000000..9e2eccbb1eb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
+ * Slicing-by-16 contributed by Bulat Ziganshin
+ *
+ * This software is provided 'as-is', without any express or implied warranty.
+ * In no event will the author be held liable for any damages arising from the
+ * use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software.
+ * 2. If you use this software in a product, an acknowledgment in the product
+ * documentation would be appreciated but is not required.
+ * 3. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ *
+ * Taken from http://create.stephan-brumme.com/crc32/ and adapted.
+ */
+
+#include "dr_types.h"
+
+#define DR_STE_CRC_POLY 0xEDB88320L
+
+static u32 dr_ste_crc_tab32[8][256];
+
+static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
+{
+ tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
+}
+
+void mlx5dr_crc32_init_table(void)
+{
+ u32 crc, i, j;
+
+ for (i = 0; i < 256; i++) {
+ crc = i;
+ for (j = 0; j < 8; j++) {
+ if (crc & 0x00000001L)
+ crc = (crc >> 1) ^ DR_STE_CRC_POLY;
+ else
+ crc = crc >> 1;
+ }
+ dr_ste_crc_tab32[0][i] = crc;
+ }
+
+ /* Init CRC lookup tables according to crc_slice_8 algorithm */
+ for (i = 0; i < 256; i++) {
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
+ dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
+ }
+}
+
+/* Compute CRC32 (Slicing-by-8 algorithm) */
+u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
+{
+ const u32 *curr = (const u32 *)input_data;
+ const u8 *curr_char;
+ u32 crc = 0, one, two;
+
+ if (!input_data)
+ return 0;
+
+ /* Process eight bytes at once (Slicing-by-8) */
+ while (length >= 8) {
+ one = *curr++ ^ crc;
+ two = *curr++;
+
+ crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
+ ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
+ ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
+ ^ dr_ste_crc_tab32[3][two & 0xff]
+ ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
+ ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
+ ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
+ ^ dr_ste_crc_tab32[7][one & 0xff];
+
+ length -= 8;
+ }
+
+ curr_char = (const u8 *)curr;
+ /* Remaining 1 to 7 bytes (standard algorithm) */
+ while (length-- != 0)
+ crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
+ ^ *curr_char++];
+
+ return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+ ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
+}
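
A short usage sketch for the CRC helpers above (the index helper is hypothetical and not part of this patch): the lookup table is initialized once, at domain creation, and the slice-by-8 result is then masked down to a power-of-two hash-table index:

	/* Hypothetical helper; assumes mlx5dr_crc32_init_table() already ran. */
	static u32 example_htbl_index(const u8 *tag, size_t tag_sz, u32 num_entries)
	{
		u32 crc = mlx5dr_crc32_slice8_calc(tag, tag_sz);

		/* num_entries is assumed to be a power of two */
		return crc & (num_entries - 1);
	}
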
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
new file mode 100644
index 000000000000..3b9cf0bccf4d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/mlx5/eswitch.h>
+#include "dr_types.h"
+
+static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+{
+ /* Per vport cached FW FT for checksum recalculation, this
+ * recalculation is needed due to a HW bug.
+ */
+ dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
+ sizeof(dmn->cache.recalc_cs_ft[0]),
+ GFP_KERNEL);
+ if (!dmn->cache.recalc_cs_ft)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+{
+ int i;
+
+ for (i = 0; i < dmn->info.caps.num_vports; i++) {
+ if (!dmn->cache.recalc_cs_ft[i])
+ continue;
+
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ }
+
+ kfree(dmn->cache.recalc_cs_ft);
+}
+
+int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u32 vport_num,
+ u64 *rx_icm_addr)
+{
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+
+ recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ if (!recalc_cs_ft) {
+ /* Table not in cache, need to allocate a new one */
+ recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
+ if (!recalc_cs_ft)
+ return -EINVAL;
+
+ dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ }
+
+ *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
+
+ return 0;
+}
+
+static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+ return ret;
+ }
+
+ dmn->uar = mlx5_get_uars_page(dmn->mdev);
+ if (!dmn->uar) {
+ mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+ goto clean_pd;
+ }
+
+ dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
+ if (!dmn->ste_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get icm memory for %s\n",
+ dev_name(dmn->mdev->device));
+ goto clean_uar;
+ }
+
+ dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
+ if (!dmn->action_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n",
+ dev_name(dmn->mdev->device));
+ goto free_ste_icm_pool;
+ }
+
+ ret = mlx5dr_send_ring_alloc(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create send-ring for %s\n",
+ dev_name(dmn->mdev->device));
+ goto free_action_icm_pool;
+ }
+
+ return 0;
+
+free_action_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+free_ste_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+clean_uar:
+ mlx5_put_uars_page(dmn->mdev, dmn->uar);
+clean_pd:
+ mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+
+ return ret;
+}
+
+static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_send_ring_free(dmn, dmn->send_ring);
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ mlx5_put_uars_page(dmn->mdev, dmn->uar);
+ mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+}
+
+static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
+ bool other_vport,
+ u16 vport_number)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ int ret;
+
+ vport_caps = &dmn->info.caps.vports_caps[vport_number];
+
+ ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
+ other_vport,
+ vport_number,
+ &vport_caps->icm_address_rx,
+ &vport_caps->icm_address_tx);
+ if (ret)
+ return ret;
+
+ ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
+ other_vport,
+ vport_number,
+ &vport_caps->vport_gvmi);
+ if (ret)
+ return ret;
+
+ vport_caps->num = vport_number;
+ vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
+
+ return 0;
+}
+
+static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+ struct mlx5dr_cmd_vport_cap *wire_vport;
+ int vport;
+ int ret;
+
+ /* Query vports (except wire vport) */
+ for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
+ ret = dr_domain_query_vport(dmn, !!vport, vport);
+ if (ret)
+ return ret;
+ }
+
+ /* Last vport is the wire port */
+ wire_vport = &dmn->info.caps.vports_caps[vport];
+ wire_vport->num = WIRE_PORT;
+ wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ wire_vport->vport_gvmi = 0;
+ wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+
+ return 0;
+}
+
+static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ if (!dmn->info.caps.eswitch_manager)
+ return -EOPNOTSUPP;
+
+ ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
+ if (ret)
+ return ret;
+
+ dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+ dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
+ dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
+
+ dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
+ sizeof(dmn->info.caps.vports_caps[0]),
+ GFP_KERNEL);
+ if (!dmn->info.caps.vports_caps)
+ return -ENOMEM;
+
+ ret = dr_domain_query_vports(dmn);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+ goto free_vports_caps;
+ }
+
+ dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+
+ return 0;
+
+free_vports_caps:
+ kfree(dmn->info.caps.vports_caps);
+ dmn->info.caps.vports_caps = NULL;
+ return ret;
+}
+
+static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ int ret;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
+ mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+ return -EOPNOTSUPP;
+ }
+
+ dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
+
+ ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
+ if (ret)
+ return ret;
+
+ ret = dr_domain_query_fdb_caps(mdev, dmn);
+ if (ret)
+ return ret;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ if (!dmn->info.caps.rx_sw_owner)
+			return -EOPNOTSUPP;
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
+ dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
+ dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ if (!dmn->info.caps.tx_sw_owner)
+			return -EOPNOTSUPP;
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
+ dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ if (!dmn->info.caps.eswitch_manager)
+			return -EOPNOTSUPP;
+
+		if (!dmn->info.caps.fdb_sw_owner)
+			return -EOPNOTSUPP;
+
+ dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
+ dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
+ if (!vport_cap) {
+ mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+ return -ENOENT;
+ }
+
+ dmn->info.supp_sw_steering = true;
+ dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
+ dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
+ dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
+ dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
+ break;
+ default:
+ mlx5dr_dbg(dmn, "Invalid domain\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
+{
+ kfree(dmn->info.caps.vports_caps);
+}
+
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
+{
+ struct mlx5dr_domain *dmn;
+ int ret;
+
+ if (type > MLX5DR_DOMAIN_TYPE_FDB)
+ return NULL;
+
+ dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
+ if (!dmn)
+ return NULL;
+
+ dmn->mdev = mdev;
+ dmn->type = type;
+ refcount_set(&dmn->refcount, 1);
+ mutex_init(&dmn->mutex);
+
+ if (dr_domain_caps_init(mdev, dmn)) {
+ mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+ goto free_domain;
+ }
+
+ dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
+ dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
+ dmn->info.caps.log_icm_size);
+
+ if (!dmn->info.supp_sw_steering) {
+ mlx5dr_err(dmn, "SW steering not supported for %s\n",
+ dev_name(mdev->device));
+ goto uninit_caps;
+ }
+
+ /* Allocate resources */
+ ret = dr_domain_init_resources(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed init domain resources for %s\n",
+ dev_name(mdev->device));
+ goto uninit_caps;
+ }
+
+ ret = dr_domain_init_cache(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed initialize domain cache\n");
+ goto uninit_resourses;
+ }
+
+ /* Init CRC table for htbl CRC calculation */
+ mlx5dr_crc32_init_table();
+
+ return dmn;
+
+uninit_resources:
+ dr_domain_uninit_resources(dmn);
+uninit_caps:
+ dr_domain_caps_uninit(dmn);
+free_domain:
+ kfree(dmn);
+ return NULL;
+}
+
+/* Ensure the device steering tables are in sync with updates made by SW
+ * rule insertion.
+ */
+int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
+ mutex_lock(&dmn->mutex);
+ ret = mlx5dr_send_ring_force_drain(dmn);
+ mutex_unlock(&dmn->mutex);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
+ ret = mlx5dr_cmd_sync_steering(dmn->mdev);
+
+ return ret;
+}
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
+{
+ if (refcount_read(&dmn->refcount) > 1)
+ return -EBUSY;
+
+ /* make sure resources are not used by the hardware */
+ mlx5dr_cmd_sync_steering(dmn->mdev);
+ dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_resources(dmn);
+ dr_domain_caps_uninit(dmn);
+ mutex_destroy(&dmn->mutex);
+ kfree(dmn);
+ return 0;
+}
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn)
+{
+ mutex_lock(&dmn->mutex);
+
+ if (dmn->peer_dmn)
+ refcount_dec(&dmn->peer_dmn->refcount);
+
+ dmn->peer_dmn = peer_dmn;
+
+ if (dmn->peer_dmn)
+ refcount_inc(&dmn->peer_dmn->refcount);
+
+ mutex_unlock(&dmn->mutex);
+}
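
A minimal sketch of the sync API above, assuming a valid domain: the SW flag drains the send ring so pending STE writes reach ICM, and the HW flag then asks the device to invalidate its steering caches:

	static int example_sync_domain(struct mlx5dr_domain *dmn)
	{
		return mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
					       MLX5DR_DOMAIN_SYNC_FLAGS_HW);
	}
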
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
new file mode 100644
index 000000000000..60ef6e6171e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include "dr_types.h"
+
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+{
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ u32 table_id, group_id, modify_hdr_id;
+ u64 rx_icm_addr, modify_ttl_action;
+ int ret;
+
+ recalc_cs_ft = kzalloc(sizeof(*recalc_cs_ft), GFP_KERNEL);
+ if (!recalc_cs_ft)
+ return NULL;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+ 0, 0, dmn->info.caps.max_ft_level - 1,
+ false, true, &rx_icm_addr, &table_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
+ goto free_ttl_tbl;
+ }
+
+ ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, &group_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret);
+ goto destroy_flow_table;
+ }
+
+ /* Modify TTL action by adding zero to trigger CS recalculation */
+ modify_ttl_action = 0;
+ MLX5_SET(set_action_in, &modify_ttl_action, action_type, MLX5_ACTION_TYPE_ADD);
+ MLX5_SET(set_action_in, &modify_ttl_action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
+
+ ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,
+ &modify_ttl_action,
+ &modify_hdr_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret);
+ goto destroy_flow_group;
+ }
+
+ ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, group_id, modify_hdr_id,
+ vport_num);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret);
+ goto dealloc_modify_header;
+ }
+
+ recalc_cs_ft->modify_hdr_id = modify_hdr_id;
+ recalc_cs_ft->rx_icm_addr = rx_icm_addr;
+ recalc_cs_ft->table_id = table_id;
+ recalc_cs_ft->group_id = group_id;
+
+ return recalc_cs_ft;
+
+dealloc_modify_header:
+ mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id);
+destroy_flow_group:
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ table_id, group_id);
+destroy_flow_table:
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB);
+free_ttl_tbl:
+ kfree(recalc_cs_ft);
+ return NULL;
+}
+
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft)
+{
+ mlx5dr_cmd_del_flow_table_entry(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ recalc_cs_ft->table_id);
+ mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id);
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ recalc_cs_ft->table_id,
+ recalc_cs_ft->group_id);
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev,
+ recalc_cs_ft->table_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+
+ kfree(recalc_cs_ft);
+}
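
A hedged sketch tying this file to the domain cache above it (the wrapper name is illustrative): callers normally go through the per-vport cache, which creates the TTL workaround table lazily and returns the RX ICM address a forwarding STE should point to; the create/destroy pair here is what backs that cache:

	static int example_recalc_cs_addr(struct mlx5dr_domain *dmn,
					  u32 vport_num, u64 *rx_icm_addr)
	{
		/* Lazily creates the FW table on first use and caches it per vport */
		return mlx5dr_domain_cache_get_recalc_cs_ft_addr(dmn, vport_num,
								 rx_icm_addr);
	}
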
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
new file mode 100644
index 000000000000..e76f61e7555e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
+#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)
+
+struct mlx5dr_icm_pool;
+
+struct mlx5dr_icm_bucket {
+ struct mlx5dr_icm_pool *pool;
+
+	/* Chunks that aren't visible to HW, neither directly nor via the cache */
+ struct list_head free_list;
+ unsigned int free_list_count;
+
+ /* Used chunks, HW may be accessing this memory */
+ struct list_head used_list;
+ unsigned int used_list_count;
+
+ /* HW may be accessing this memory but at some future,
+ * undetermined time, it might cease to do so. Before deciding to call
+ * sync_ste, this list is moved to sync_list
+ */
+ struct list_head hot_list;
+ unsigned int hot_list_count;
+
+ /* Pending sync list, entries from the hot list are moved to this list.
+ * sync_ste is executed and then sync_list is concatenated to the free list
+ */
+ struct list_head sync_list;
+ unsigned int sync_list_count;
+
+ u32 total_chunks;
+ u32 num_of_entries;
+ u32 entry_size;
+ /* protect the ICM bucket */
+ struct mutex mutex;
+};
+
+struct mlx5dr_icm_pool {
+ struct mlx5dr_icm_bucket *buckets;
+ enum mlx5dr_icm_type icm_type;
+ enum mlx5dr_icm_chunk_size max_log_chunk_sz;
+ enum mlx5dr_icm_chunk_size num_of_buckets;
+ struct list_head icm_mr_list;
+ /* protect the ICM MR list */
+ struct mutex mr_mutex;
+ struct mlx5dr_domain *dmn;
+};
+
+struct mlx5dr_icm_dm {
+ u32 obj_id;
+ enum mlx5_sw_icm_type type;
+ u64 addr;
+ size_t length;
+};
+
+struct mlx5dr_icm_mr {
+ struct mlx5dr_icm_pool *pool;
+ struct mlx5_core_mkey mkey;
+ struct mlx5dr_icm_dm dm;
+ size_t used_length;
+ size_t length;
+ u64 icm_start_addr;
+ struct list_head mr_list;
+};
+
+static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
+ u32 pd, u64 length, u64 start_addr, int mode,
+ struct mlx5_core_mkey *mkey)
+{
+ u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ }
+
+ MLX5_SET64(mkc, mkc, len, length);
+ MLX5_SET(mkc, mkc, pd, pd);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+
+ return mlx5_core_create_mkey(mdev, mkey, in, inlen);
+}
+
+static struct mlx5dr_icm_mr *
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
+ enum mlx5_sw_icm_type type,
+ size_t align_base)
+{
+ struct mlx5_core_dev *mdev = pool->dmn->mdev;
+ struct mlx5dr_icm_mr *icm_mr;
+ size_t align_diff;
+ int err;
+
+ icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
+ if (!icm_mr)
+ return NULL;
+
+ icm_mr->pool = pool;
+ INIT_LIST_HEAD(&icm_mr->mr_list);
+
+ icm_mr->dm.type = type;
+
+ /* 2^log_biggest_table * entry-size * double-for-alignment */
+ icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) * 2;
+
+ err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
+ &icm_mr->dm.addr, &icm_mr->dm.obj_id);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
+ goto free_icm_mr;
+ }
+
+ /* Register device memory */
+ err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
+ icm_mr->dm.length,
+ icm_mr->dm.addr,
+ MLX5_MKC_ACCESS_MODE_SW_ICM,
+ &icm_mr->mkey);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
+ goto free_dm;
+ }
+
+ icm_mr->icm_start_addr = icm_mr->dm.addr;
+
+ align_diff = icm_mr->icm_start_addr % align_base;
+ if (align_diff)
+ icm_mr->used_length = align_base - align_diff;
+
+ list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
+
+ return icm_mr;
+
+free_dm:
+ mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
+ icm_mr->dm.addr, icm_mr->dm.obj_id);
+free_icm_mr:
+ kvfree(icm_mr);
+ return NULL;
+}
+
+static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
+{
+ struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
+ struct mlx5dr_icm_dm *dm = &icm_mr->dm;
+
+ list_del(&icm_mr->mr_list);
+ mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
+ dm->addr, dm->obj_id);
+ kvfree(icm_mr);
+}
+
+static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
+ chunk->ste_arr = kvzalloc(bucket->num_of_entries *
+ sizeof(chunk->ste_arr[0]), GFP_KERNEL);
+ if (!chunk->ste_arr)
+ return -ENOMEM;
+
+ chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
+ DR_STE_SIZE_REDUCED, GFP_KERNEL);
+ if (!chunk->hw_ste_arr)
+ goto out_free_ste_arr;
+
+ chunk->miss_list = kvmalloc(bucket->num_of_entries *
+ sizeof(chunk->miss_list[0]), GFP_KERNEL);
+ if (!chunk->miss_list)
+ goto out_free_hw_ste_arr;
+
+ return 0;
+
+out_free_hw_ste_arr:
+ kvfree(chunk->hw_ste_arr);
+out_free_ste_arr:
+ kvfree(chunk->ste_arr);
+ return -ENOMEM;
+}
+
+static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
+{
+ size_t mr_free_size, mr_req_size, mr_row_size;
+ struct mlx5dr_icm_pool *pool = bucket->pool;
+ struct mlx5dr_icm_mr *icm_mr = NULL;
+ struct mlx5dr_icm_chunk *chunk;
+ enum mlx5_sw_icm_type dm_type;
+ size_t align_base;
+ int i, err = 0;
+
+ mr_req_size = bucket->num_of_entries * bucket->entry_size;
+ mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type);
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ dm_type = MLX5_SW_ICM_TYPE_STEERING;
+ /* Align base is the biggest chunk size / row size */
+ align_base = mr_row_size;
+ } else {
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ /* Align base is 64B */
+ align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
+ }
+
+ mutex_lock(&pool->mr_mutex);
+ if (!list_empty(&pool->icm_mr_list)) {
+ icm_mr = list_last_entry(&pool->icm_mr_list,
+ struct mlx5dr_icm_mr, mr_list);
+
+ if (icm_mr)
+ mr_free_size = icm_mr->dm.length - icm_mr->used_length;
+ }
+
+ if (!icm_mr || mr_free_size < mr_row_size) {
+ icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
+ if (!icm_mr) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ /* Create memory aligned chunks */
+ for (i = 0; i < mr_row_size / mr_req_size; i++) {
+ chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ chunk->bucket = bucket;
+ chunk->rkey = icm_mr->mkey.key;
+ /* mr start addr is zero based */
+ chunk->mr_addr = icm_mr->used_length;
+ chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
+ icm_mr->used_length += mr_req_size;
+ chunk->num_of_entries = bucket->num_of_entries;
+ chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ err = dr_icm_chunk_ste_init(chunk);
+ if (err)
+ goto out_free_chunk;
+ }
+
+ INIT_LIST_HEAD(&chunk->chunk_list);
+ list_add(&chunk->chunk_list, &bucket->free_list);
+ bucket->free_list_count++;
+ bucket->total_chunks++;
+ }
+ mutex_unlock(&pool->mr_mutex);
+ return 0;
+
+out_free_chunk:
+ kvfree(chunk);
+out_err:
+ mutex_unlock(&pool->mr_mutex);
+ return err;
+}
+
+static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
+{
+ kvfree(chunk->miss_list);
+ kvfree(chunk->hw_ste_arr);
+ kvfree(chunk->ste_arr);
+}
+
+static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
+ list_del(&chunk->chunk_list);
+ bucket->total_chunks--;
+
+ if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_cleanup(chunk);
+
+ kvfree(chunk);
+}
+
+static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *bucket,
+ enum mlx5dr_icm_chunk_size chunk_size)
+{
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ bucket->entry_size = DR_STE_SIZE;
+ else
+ bucket->entry_size = DR_MODIFY_ACTION_SIZE;
+
+ bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+ bucket->pool = pool;
+ mutex_init(&bucket->mutex);
+ INIT_LIST_HEAD(&bucket->free_list);
+ INIT_LIST_HEAD(&bucket->used_list);
+ INIT_LIST_HEAD(&bucket->hot_list);
+ INIT_LIST_HEAD(&bucket->sync_list);
+}
+
+static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
+{
+ struct mlx5dr_icm_chunk *chunk, *next;
+
+ mutex_destroy(&bucket->mutex);
+ list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
+ list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
+
+ list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
+ dr_icm_chunk_destroy(chunk);
+
+ WARN_ON(bucket->total_chunks != 0);
+
+ /* Cleanup of unreturned chunks */
+ list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
+ dr_icm_chunk_destroy(chunk);
+}
+
+static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
+{
+ u64 hot_size = 0;
+ int chunk_order;
+
+ for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
+ hot_size += pool->buckets[chunk_order].hot_list_count *
+ mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
+
+ return hot_size;
+}
+
+static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *bucket)
+{
+ u64 bytes_for_sync;
+
+ bytes_for_sync = dr_icm_hot_mem_size(pool);
+ if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
+ return false;
+
+ return true;
+}
+
+static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
+ bucket->sync_list_count += bucket->hot_list_count;
+ bucket->hot_list_count = 0;
+}
+
+static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
+ bucket->free_list_count += bucket->sync_list_count;
+ bucket->sync_list_count = 0;
+}
+
+static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
+{
+ list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
+ bucket->hot_list_count += bucket->sync_list_count;
+ bucket->sync_list_count = 0;
+}
+
+static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_start(bucket);
+ continue;
+ }
+
+		/* The mutex is unlocked at the end of that process, after
+		 * sync_ste has been executed in dr_icm_chill_buckets_end().
+		 */
+ if (mutex_trylock(&bucket->mutex)) {
+ dr_icm_chill_bucket_start(bucket);
+ buckets[i] = true;
+ }
+ }
+}
+
+static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_end(bucket);
+ continue;
+ }
+
+ if (!buckets[i])
+ continue;
+
+ dr_icm_chill_bucket_end(bucket);
+ mutex_unlock(&bucket->mutex);
+ }
+}
+
+static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
+ struct mlx5dr_icm_bucket *cb,
+ bool buckets[DR_CHUNK_SIZE_MAX])
+{
+ struct mlx5dr_icm_bucket *bucket;
+ int i;
+
+ for (i = 0; i < pool->num_of_buckets; i++) {
+ bucket = &pool->buckets[i];
+ if (bucket == cb) {
+ dr_icm_chill_bucket_abort(bucket);
+ continue;
+ }
+
+ if (!buckets[i])
+ continue;
+
+ dr_icm_chill_bucket_abort(bucket);
+ mutex_unlock(&bucket->mutex);
+ }
+}
+
+/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
+ * also memory used for HW STE management for optimizations.
+ */
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size)
+{
+ struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
+ bool buckets[DR_CHUNK_SIZE_MAX] = {};
+ struct mlx5dr_icm_bucket *bucket;
+ int err;
+
+ if (chunk_size > pool->max_log_chunk_sz)
+ return NULL;
+
+ bucket = &pool->buckets[chunk_size];
+
+ mutex_lock(&bucket->mutex);
+
+ /* Take chunk from pool if available, otherwise allocate new chunks */
+ if (list_empty(&bucket->free_list)) {
+ if (dr_icm_reuse_hot_entries(pool, bucket)) {
+ dr_icm_chill_buckets_start(pool, bucket, buckets);
+ err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
+ if (err) {
+ dr_icm_chill_buckets_abort(pool, bucket, buckets);
+ mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+ chunk = NULL;
+ goto out;
+ }
+ dr_icm_chill_buckets_end(pool, bucket, buckets);
+ } else {
+ dr_icm_chunks_create(bucket);
+ }
+ }
+
+ if (!list_empty(&bucket->free_list)) {
+ chunk = list_last_entry(&bucket->free_list,
+ struct mlx5dr_icm_chunk,
+ chunk_list);
+ if (chunk) {
+ list_del_init(&chunk->chunk_list);
+ list_add_tail(&chunk->chunk_list, &bucket->used_list);
+ bucket->free_list_count--;
+ bucket->used_list_count++;
+ }
+ }
+out:
+ mutex_unlock(&bucket->mutex);
+ return chunk;
+}
+
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
+{
+ struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+
+ if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
+ memset(chunk->ste_arr, 0,
+ bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
+ memset(chunk->hw_ste_arr, 0,
+ bucket->num_of_entries * DR_STE_SIZE_REDUCED);
+ }
+
+ mutex_lock(&bucket->mutex);
+ list_del_init(&chunk->chunk_list);
+ list_add_tail(&chunk->chunk_list, &bucket->hot_list);
+ bucket->hot_list_count++;
+ bucket->used_list_count--;
+ mutex_unlock(&bucket->mutex);
+}
+
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+ enum mlx5dr_icm_type icm_type)
+{
+ enum mlx5dr_icm_chunk_size max_log_chunk_sz;
+ struct mlx5dr_icm_pool *pool;
+ int i;
+
+ if (icm_type == DR_ICM_TYPE_STE)
+ max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+ else
+ max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->buckets = kcalloc(max_log_chunk_sz + 1,
+ sizeof(pool->buckets[0]),
+ GFP_KERNEL);
+ if (!pool->buckets)
+ goto free_pool;
+
+ pool->dmn = dmn;
+ pool->icm_type = icm_type;
+ pool->max_log_chunk_sz = max_log_chunk_sz;
+ pool->num_of_buckets = max_log_chunk_sz + 1;
+ INIT_LIST_HEAD(&pool->icm_mr_list);
+
+ for (i = 0; i < pool->num_of_buckets; i++)
+ dr_icm_bucket_init(pool, &pool->buckets[i], i);
+
+ mutex_init(&pool->mr_mutex);
+
+ return pool;
+
+free_pool:
+ kvfree(pool);
+ return NULL;
+}
+
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
+{
+ struct mlx5dr_icm_mr *icm_mr, *next;
+ int i;
+
+ mutex_destroy(&pool->mr_mutex);
+
+ list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
+ dr_icm_pool_mr_destroy(icm_mr);
+
+ for (i = 0; i < pool->num_of_buckets; i++)
+ dr_icm_bucket_cleanup(&pool->buckets[i]);
+
+ kfree(pool->buckets);
+ kvfree(pool);
+}
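
A round-trip sketch of the pool API above (assumptions: an existing domain, error handling trimmed): a freed chunk is parked on the hot list rather than reused immediately, and only migrates back to the free list after a later allocation crosses DR_ICM_SYNC_THRESHOLD and triggers a steering sync:

	static void example_icm_round_trip(struct mlx5dr_domain *dmn)
	{
		struct mlx5dr_icm_pool *pool;
		struct mlx5dr_icm_chunk *chunk;

		pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
		if (!pool)
			return;

		chunk = mlx5dr_icm_alloc_chunk(pool, DR_CHUNK_SIZE_4K);
		if (chunk) {
			/* ... write STEs via chunk->mr_addr / chunk->icm_addr ... */
			mlx5dr_icm_free_chunk(chunk); /* parked on the hot list */
		}

		mlx5dr_icm_pool_destroy(pool);
	}
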
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
new file mode 100644
index 000000000000..01008cd66f75
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->smac_47_16 || spec->smac_15_0);
+}
+
+static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dmac_47_16 || spec->dmac_15_0);
+}
+
+static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
+ spec->src_ip_63_32 || spec->src_ip_31_0);
+}
+
+static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
+ spec->dst_ip_63_32 || spec->dst_ip_31_0);
+}
+
+static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
+ spec->ip_ecn || spec->ip_dscp);
+}
+
+static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->tcp_sport || spec->tcp_dport ||
+ spec->udp_sport || spec->udp_dport);
+}
+
+static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
+{
+ return (spec->dst_ip_31_0 || spec->src_ip_31_0);
+}
+
+static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
+{
+ return (dr_mask_is_l3_base_set(spec) ||
+ dr_mask_is_tcp_udp_base_set(spec) ||
+ dr_mask_is_ipv4_set(spec));
+}
+
+static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
+{
+ return misc->vxlan_vni;
+}
+
+static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
+{
+ return spec->ttl_hoplimit;
+}
+
+#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) ((_spec).first_vid || \
+ (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
+ (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
+ (_spec).ethertype || (_spec).ip_version || \
+ (_misc)._inner_outer##_second_vid || \
+ (_misc)._inner_outer##_second_cfi || \
+ (_misc)._inner_outer##_second_prio || \
+ (_misc)._inner_outer##_second_cvlan_tag || \
+ (_misc)._inner_outer##_second_svlan_tag)
+
+#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
+ dr_mask_is_l3_base_set(&(_spec)) || \
+ dr_mask_is_tcp_udp_base_set(&(_spec)) || \
+ dr_mask_is_ttl_set(&(_spec)) || \
+ (_misc)._inner_outer##_ipv6_flow_label)
+
+#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
+ (_misc3)._inner_outer##_tcp_seq_num || \
+ (_misc3)._inner_outer##_tcp_ack_num)
+
+#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
+ (_misc2)._inner_outer##_first_mpls_label || \
+ (_misc2)._inner_outer##_first_mpls_exp || \
+ (_misc2)._inner_outer##_first_mpls_s_bos || \
+ (_misc2)._inner_outer##_first_mpls_ttl)
+
+static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
+{
+ return (misc->gre_key_h || misc->gre_key_l ||
+ misc->gre_protocol || misc->gre_c_present ||
+ misc->gre_k_present || misc->gre_s_present);
+}
+
+#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \
+ (_misc2).outer_first_mpls_over_##gre_udp##_label || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_exp || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
+ (_misc2).outer_first_mpls_over_##gre_udp##_ttl)
+
+#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \
+ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
+ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
+
+static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return (misc3->outer_vxlan_gpe_vni ||
+ misc3->outer_vxlan_gpe_next_protocol ||
+ misc3->outer_vxlan_gpe_flags);
+}
+
+static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return (misc3->icmpv6_type || misc3->icmpv6_code ||
+ misc3->icmpv6_header_data);
+}
+
+static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return misc2->metadata_reg_a;
+}
+
+static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
+ misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
+}
+
+static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
+{
+ return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
+ misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
+}
+
+static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
+{
+ return (misc->source_sqn || misc->source_port);
+}
+
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
+{
+ return dmn->info.caps.flex_protocols &
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6)
+{
+ if (ipv6) {
+ nic_matcher->ste_builder = nic_matcher->ste_builder6;
+ nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
+ } else {
+ nic_matcher->ste_builder = nic_matcher->ste_builder4;
+ nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
+ }
+
+ if (!nic_matcher->num_of_builders) {
+ mlx5dr_dbg(matcher->tbl->dmn,
+ "Rule not supported on this matcher due to IP related fields\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_match_param mask = {};
+ struct mlx5dr_match_misc3 *misc3;
+ struct mlx5dr_ste_build *sb;
+ u8 *num_of_builders;
+ bool inner, rx;
+ int idx = 0;
+ int ret, i;
+
+ if (ipv6) {
+ sb = nic_matcher->ste_builder6;
+ num_of_builders = &nic_matcher->num_of_builders6;
+ } else {
+ sb = nic_matcher->ste_builder4;
+ num_of_builders = &nic_matcher->num_of_builders4;
+ }
+
+ rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+
+ /* Create a temporary mask to track and clear used mask fields */
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
+ mask.outer = matcher->mask.outer;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
+ mask.misc = matcher->mask.misc;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
+ mask.inner = matcher->mask.inner;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
+ mask.misc2 = matcher->mask.misc2;
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
+ mask.misc3 = matcher->mask.misc3;
+
+ ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+ &matcher->mask, NULL);
+ if (ret)
+ return ret;
+
+ /* Outer */
+ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
+ DR_MATCHER_CRITERIA_MISC |
+ DR_MATCHER_CRITERIA_MISC2 |
+ DR_MATCHER_CRITERIA_MISC3)) {
+ inner = false;
+
+ if (dr_mask_is_wqe_metadata_set(&mask.misc2))
+ mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
+ mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
+ mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
+ (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
+ ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
+ &dmn->info.caps,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.outer) &&
+ dr_mask_is_dmac_set(&mask.outer)) {
+ ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.outer))
+ mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
+ mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+
+ if (ipv6) {
+ if (dr_mask_is_dst_addr_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_src_addr_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
+ mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
+ } else {
+ if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_ttl_set(&mask.outer))
+ mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
+ inner, rx);
+ }
+
+ if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
+ dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
+ mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
+ mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
+ mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
+ mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
+ inner, rx);
+
+ misc3 = &mask.misc3;
+ if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) &&
+ mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
+ (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
+ mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
+ ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ if (ret)
+ return ret;
+ }
+ if (dr_mask_is_gre_set(&mask.misc))
+ mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx);
+ }
+
+ /* Inner */
+ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
+ DR_MATCHER_CRITERIA_MISC |
+ DR_MATCHER_CRITERIA_MISC2 |
+ DR_MATCHER_CRITERIA_MISC3)) {
+ inner = true;
+
+ if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
+ mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+
+ if (dr_mask_is_smac_set(&mask.inner) &&
+ dr_mask_is_dmac_set(&mask.inner)) {
+ ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ &mask, inner, rx);
+ if (ret)
+ return ret;
+ }
+
+ if (dr_mask_is_smac_set(&mask.inner))
+ mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
+ mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+
+ if (ipv6) {
+ if (dr_mask_is_dst_addr_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_src_addr_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
+ inner, rx);
+
+ if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
+ mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
+ } else {
+ if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
+ inner, rx);
+
+ if (dr_mask_is_ttl_set(&mask.inner))
+ mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
+ inner, rx);
+ }
+
+ if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
+ mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
+ mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+
+ if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
+ mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx);
+ }
+ /* Empty matcher, takes all */
+ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
+ mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
+
+ if (idx == 0) {
+ mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+ return -EINVAL;
+ }
+
+ /* Check that all mask fields were consumed */
+ for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
+ if (((u8 *)&mask)[i] != 0) {
+ mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ *num_of_builders = idx;
+
+ return 0;
+}
+
+static int dr_matcher_connect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+ struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *prev_htbl;
+ int ret;
+
+ /* Connect end anchor hash table to next_htbl or to the default address */
+ if (next_nic_matcher) {
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = next_nic_matcher->s_htbl;
+ } else {
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+ }
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ curr_nic_matcher->e_anchor,
+ &info, info.type == CONNECT_HIT);
+ if (ret)
+ return ret;
+
+ /* Connect start hash table to end anchor */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ curr_nic_matcher->s_htbl,
+ &info, false);
+ if (ret)
+ return ret;
+
+ /* Connect previous hash table to matcher start hash table */
+ if (prev_nic_matcher)
+ prev_htbl = prev_nic_matcher->e_anchor;
+ else
+ prev_htbl = nic_tbl->s_anchor;
+
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = curr_nic_matcher->s_htbl;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
+ &info, true);
+ if (ret)
+ return ret;
+
+ /* Update the pointing ste and next hash table */
+ curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
+ prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+
+ if (next_nic_matcher) {
+ next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
+ curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ }
+
+ return 0;
+}
+
+static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ bool first = true;
+ int ret;
+
+ next_matcher = NULL;
+ if (!list_empty(&tbl->matcher_list))
+ list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
+ if (tmp_matcher->prio >= matcher->prio) {
+ next_matcher = tmp_matcher;
+ break;
+ }
+ first = false;
+ }
+
+ prev_matcher = NULL;
+ if (next_matcher && !first)
+ prev_matcher = list_entry(next_matcher->matcher_list.prev,
+ struct mlx5dr_matcher,
+ matcher_list);
+ else if (!first)
+ prev_matcher = list_entry(tbl->matcher_list.prev,
+ struct mlx5dr_matcher,
+ matcher_list);
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ ret = dr_matcher_connect(dmn, &matcher->rx,
+ next_matcher ? &next_matcher->rx : NULL,
+ prev_matcher ? &prev_matcher->rx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ ret = dr_matcher_connect(dmn, &matcher->tx,
+ next_matcher ? &next_matcher->tx : NULL,
+ prev_matcher ? &prev_matcher->tx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (prev_matcher)
+ list_add(&matcher->matcher_list, &prev_matcher->matcher_list);
+ else if (next_matcher)
+ list_add_tail(&matcher->matcher_list,
+ &next_matcher->matcher_list);
+ else
+ list_add(&matcher->matcher_list, &tbl->matcher_list);
+
+ return 0;
+}
+
+static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ mlx5dr_htbl_put(nic_matcher->s_htbl);
+ mlx5dr_htbl_put(nic_matcher->e_anchor);
+}
+
+static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
+{
+ dr_matcher_uninit_nic(&matcher->rx);
+ dr_matcher_uninit_nic(&matcher->tx);
+}
+
+static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_matcher_uninit_nic(&matcher->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_matcher_uninit_nic(&matcher->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_matcher_uninit_fdb(matcher);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret, ret_v4, ret_v6;
+
+ ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
+ ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+
+ if (ret_v4 && ret_v6) {
+ mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+ return -EINVAL;
+ }
+
+ if (!ret_v4)
+ nic_matcher->ste_builder = nic_matcher->ste_builder4;
+ else
+ nic_matcher->ste_builder = nic_matcher->ste_builder6;
+
+ nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!nic_matcher->e_anchor)
+ return -ENOMEM;
+
+ nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ nic_matcher->ste_builder[0].lu_type,
+ nic_matcher->ste_builder[0].byte_mask);
+ if (!nic_matcher->s_htbl) {
+ ret = -ENOMEM;
+ goto free_e_htbl;
+ }
+
+ /* make sure the tables exist while empty */
+ mlx5dr_htbl_get(nic_matcher->s_htbl);
+ mlx5dr_htbl_get(nic_matcher->e_anchor);
+
+ return 0;
+
+free_e_htbl:
+ mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
+ return ret;
+}
+
+static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
+{
+ int ret;
+
+ ret = dr_matcher_init_nic(matcher, &matcher->rx);
+ if (ret)
+ return ret;
+
+ ret = dr_matcher_init_nic(matcher, &matcher->tx);
+ if (ret)
+ goto uninit_nic_rx;
+
+ return 0;
+
+uninit_nic_rx:
+ dr_matcher_uninit_nic(&matcher->rx);
+ return ret;
+}
+
+static int dr_matcher_init(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret;
+
+ if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
+ mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+ return -EINVAL;
+ }
+
+ if (mask) {
+ if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
+ mlx5dr_info(dmn, "Invalid match size attribute\n");
+ return -EINVAL;
+ }
+ mlx5dr_ste_copy_param(matcher->match_criteria,
+ &matcher->mask, mask);
+ }
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ matcher->rx.nic_tbl = &tbl->rx;
+ ret = dr_matcher_init_nic(matcher, &matcher->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ matcher->tx.nic_tbl = &tbl->tx;
+ ret = dr_matcher_init_nic(matcher, &matcher->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ matcher->rx.nic_tbl = &tbl->rx;
+ matcher->tx.nic_tbl = &tbl->tx;
+ ret = dr_matcher_init_fdb(matcher);
+ break;
+ default:
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *tbl,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_matcher *matcher;
+ int ret;
+
+ refcount_inc(&tbl->refcount);
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ goto dec_ref;
+
+ matcher->tbl = tbl;
+ matcher->prio = priority;
+ matcher->match_criteria = match_criteria_enable;
+ refcount_set(&matcher->refcount, 1);
+ INIT_LIST_HEAD(&matcher->matcher_list);
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ ret = dr_matcher_init(matcher, mask);
+ if (ret)
+ goto free_matcher;
+
+ ret = dr_matcher_add_to_tbl(matcher);
+ if (ret)
+ goto matcher_uninit;
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ return matcher;
+
+matcher_uninit:
+ dr_matcher_uninit(matcher);
+free_matcher:
+ mutex_unlock(&tbl->dmn->mutex);
+ kfree(matcher);
+dec_ref:
+ refcount_dec(&tbl->refcount);
+ return NULL;
+}
+
+static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *prev_anchor;
+
+ if (prev_nic_matcher)
+ prev_anchor = prev_nic_matcher->e_anchor;
+ else
+ prev_anchor = nic_tbl->s_anchor;
+
+ /* Connect previous anchor hash table to next matcher or to the default address */
+ if (next_nic_matcher) {
+ info.type = CONNECT_HIT;
+ info.hit_next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
+ prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ } else {
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+ prev_anchor->ste_arr[0].next_htbl = NULL;
+ }
+
+ return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
+ &info, true);
+}
+
+static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher *prev_matcher, *next_matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret = 0;
+
+ if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
+ next_matcher = NULL;
+ else
+ next_matcher = list_next_entry(matcher, matcher_list);
+
+ if (matcher->matcher_list.prev == &tbl->matcher_list)
+ prev_matcher = NULL;
+ else
+ prev_matcher = list_prev_entry(matcher, matcher_list);
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ ret = dr_matcher_disconnect(dmn, &tbl->rx,
+ next_matcher ? &next_matcher->rx : NULL,
+ prev_matcher ? &prev_matcher->rx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+ dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ ret = dr_matcher_disconnect(dmn, &tbl->tx,
+ next_matcher ? &next_matcher->tx : NULL,
+ prev_matcher ? &prev_matcher->tx : NULL);
+ if (ret)
+ return ret;
+ }
+
+ list_del(&matcher->matcher_list);
+
+ return 0;
+}
+
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+
+ if (refcount_read(&matcher->refcount) > 1)
+ return -EBUSY;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ dr_matcher_remove_from_tbl(matcher);
+ dr_matcher_uninit(matcher);
+ refcount_dec(&matcher->tbl->refcount);
+
+ mutex_unlock(&tbl->dmn->mutex);
+ kfree(matcher);
+
+ return 0;
+}
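As a rough usage sketch of the matcher API above (a minimal illustration only, assuming struct mlx5dr_match_parameters carries a match_sz/match_buf pair holding the mask in device-spec format, and that the criteria bitmask follows the layout checked in dr_matcher_init()):

/* Minimal sketch of a matcher-API caller. example_matcher_create() and its
 * parameters are hypothetical; only mlx5dr_matcher_create() comes from the
 * code above. Assumes "steering/mlx5dr.h" is included and that
 * mlx5dr_match_parameters exposes { match_sz, match_buf }.
 */
static struct mlx5dr_matcher *
example_matcher_create(struct mlx5dr_table *tbl, u16 prio, u8 criteria,
		       u64 *mask_buf, size_t mask_sz)
{
	struct mlx5dr_match_parameters mask = {
		.match_sz  = mask_sz,	/* must not exceed sizeof(struct mlx5dr_match_param) */
		.match_buf = mask_buf,	/* mask in device-spec format */
	};

	/* Returns NULL if no STE builder can consume the given mask */
	return mlx5dr_matcher_create(tbl, prio, criteria, &mask);
}

The matching mlx5dr_matcher_destroy() call returns -EBUSY while rules still hold a reference on the matcher, per the refcount check above.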
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
new file mode 100644
index 000000000000..3bc3f66b8fa8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
+
+struct mlx5dr_rule_action_member {
+ struct mlx5dr_action *action;
+ struct list_head list;
+};
+
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info_last;
+ struct mlx5dr_ste *last_ste;
+
+ /* The new entry will be inserted after the last */
+ last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
+ WARN_ON(!last_ste);
+
+ ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
+ if (!ste_info_last)
+ return -ENOMEM;
+
+ mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_get_icm_addr(new_last_ste));
+ list_add_tail(&new_last_ste->miss_list_node, miss_list);
+
+ mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
+ 0, last_ste->hw_ste,
+ ste_info_last, send_list, true);
+
+ return 0;
+}
+
+static struct mlx5dr_ste *
+dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_htbl *new_htbl;
+ struct mlx5dr_ste *ste;
+
+ /* Create new table for miss entry */
+ new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!new_htbl) {
+ mlx5dr_dbg(dmn, "Failed allocating collision table\n");
+ return NULL;
+ }
+
+ /* One and only entry, never grows */
+ ste = new_htbl->ste_arr;
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_htbl_get(new_htbl);
+
+ return ste;
+}
+
+static struct mlx5dr_ste *
+dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste,
+ struct mlx5dr_ste *orig_ste)
+{
+ struct mlx5dr_ste *ste;
+
+ ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
+ if (!ste) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
+ return NULL;
+ }
+
+ ste->ste_chain_location = orig_ste->ste_chain_location;
+
+ /* In collision entry, all members share the same miss_list_head */
+ ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+
+ /* Next table */
+ if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
+ DR_CHUNK_SIZE_1)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ goto free_tbl;
+ }
+
+ return ste;
+
+free_tbl:
+ mlx5dr_ste_free(ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static int
+dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
+ struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ list_del(&ste_info->send_list);
+ ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
+ ste_info->size, ste_info->offset);
+ if (ret)
+ goto out;
+	/* Copy the data to the ste, reduced size only; the last 16B (the mask)
+	 * is already written to the hw.
+	 */
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+
+out:
+ kfree(ste_info);
+ return ret;
+}
+
+static int dr_rule_send_update_list(struct list_head *send_ste_list,
+ struct mlx5dr_domain *dmn,
+ bool is_reverse)
+{
+ struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
+ int ret;
+
+ if (is_reverse) {
+ list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
+ send_ste_list, send_list) {
+ ret = dr_rule_handle_one_ste_in_update_list(ste_info,
+ dmn);
+ if (ret)
+ return ret;
+ }
+ } else {
+ list_for_each_entry_safe(ste_info, tmp_ste_info,
+ send_ste_list, send_list) {
+ ret = dr_rule_handle_one_ste_in_update_list(ste_info,
+ dmn);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlx5dr_ste *
+dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
+{
+ struct mlx5dr_ste *ste;
+
+ if (list_empty(miss_list))
+ return NULL;
+
+ /* Check if hw_ste is present in the list */
+ list_for_each_entry(ste, miss_list, miss_list_node) {
+ if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+ return ste;
+ }
+
+ return NULL;
+}
+
+static struct mlx5dr_ste *
+dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct list_head *update_list,
+ struct mlx5dr_ste *col_ste,
+ u8 *hw_ste)
+{
+ struct mlx5dr_ste *new_ste;
+ int ret;
+
+ new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
+ if (!new_ste)
+ return NULL;
+
+ /* In collision entry, all members share the same miss_list_head */
+ new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+
+	/* Update the previous entry in the miss list */
+ ret = dr_rule_append_to_miss_list(new_ste,
+ mlx5dr_ste_get_miss_list(col_ste),
+ update_list);
+ if (ret) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+ goto err_exit;
+ }
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *cur_ste,
+ struct mlx5dr_ste *new_ste)
+{
+ new_ste->next_htbl = cur_ste->next_htbl;
+ new_ste->ste_chain_location = cur_ste->ste_chain_location;
+
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
+ new_ste->next_htbl->pointing_ste = new_ste;
+
+ /* We need to copy the refcount since this ste
+ * may have been traversed several times
+ */
+ refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+
+	/* Link the old STE's rule_mem list to the new ste */
+ mlx5dr_rule_update_rule_member(cur_ste, new_ste);
+ INIT_LIST_HEAD(&new_ste->rule_list);
+ list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
+}
+
+static struct mlx5dr_ste *
+dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *cur_ste,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+ bool use_update_list = false;
+ u8 hw_ste[DR_STE_SIZE] = {};
+ struct mlx5dr_ste *new_ste;
+ int new_idx;
+ u8 sb_idx;
+
+ /* Copy STE mask from the matcher */
+ sb_idx = cur_ste->ste_chain_location - 1;
+ mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
+
+ /* Copy STE control and tag */
+ memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+
+ new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
+ new_ste = &new_htbl->ste_arr[new_idx];
+
+ if (mlx5dr_ste_not_used_ste(new_ste)) {
+ mlx5dr_htbl_get(new_htbl);
+ list_add_tail(&new_ste->miss_list_node,
+ mlx5dr_ste_get_miss_list(new_ste));
+ } else {
+ new_ste = dr_rule_rehash_handle_collision(matcher,
+ nic_matcher,
+ update_list,
+ new_ste,
+ hw_ste);
+ if (!new_ste) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ new_idx);
+ return NULL;
+ }
+ new_htbl->ctrl.num_of_collisions++;
+ use_update_list = true;
+ }
+
+ memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+
+ new_htbl->ctrl.num_of_valid_entries++;
+
+ if (use_update_list) {
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ goto err_exit;
+
+ mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
+ hw_ste, ste_info,
+ update_list, true);
+ }
+
+ dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+ return NULL;
+}
+
+static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct list_head *cur_miss_list,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
+
+ if (list_empty(cur_miss_list))
+ return 0;
+
+ list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
+ new_ste = dr_rule_rehash_copy_ste(matcher,
+ nic_matcher,
+ cur_ste,
+ new_htbl,
+ update_list);
+ if (!new_ste)
+ goto err_insert;
+
+ list_del(&cur_ste->miss_list_node);
+ mlx5dr_htbl_put(cur_ste->htbl);
+ }
+ return 0;
+
+err_insert:
+ mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
+ WARN_ON(true);
+ return -EINVAL;
+}
+
+static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ struct mlx5dr_ste_htbl *new_htbl,
+ struct list_head *update_list)
+{
+ struct mlx5dr_ste *cur_ste;
+ int cur_entries;
+ int err = 0;
+ int i;
+
+ cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+
+ if (cur_entries < 1) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cur_entries; i++) {
+ cur_ste = &cur_htbl->ste_arr[i];
+ if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
+ continue;
+
+ err = dr_rule_rehash_copy_miss_list(matcher,
+ nic_matcher,
+ mlx5dr_ste_get_miss_list(cur_ste),
+ new_htbl,
+ update_list);
+ if (err)
+ goto clean_copy;
+ }
+
+clean_copy:
+ return err;
+}
+
+static struct mlx5dr_ste_htbl *
+dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 ste_location,
+ struct list_head *update_list,
+ enum mlx5dr_icm_chunk_size new_size)
+{
+ struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_ste_send_info *ste_info;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ u8 formatted_ste[DR_STE_SIZE] = {};
+ LIST_HEAD(rehash_table_send_list);
+ struct mlx5dr_ste *ste_to_update;
+ struct mlx5dr_ste_htbl *new_htbl;
+ int err;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ return NULL;
+
+ new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ new_size,
+ cur_htbl->lu_type,
+ cur_htbl->byte_mask);
+ if (!new_htbl) {
+ mlx5dr_err(dmn, "Failed to allocate new hash table\n");
+ goto free_ste_info;
+ }
+
+ /* Write new table to HW */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ nic_dmn,
+ new_htbl,
+ formatted_ste,
+ &info);
+
+ new_htbl->pointing_ste = cur_htbl->pointing_ste;
+ new_htbl->pointing_ste->next_htbl = new_htbl;
+ err = dr_rule_rehash_copy_htbl(matcher,
+ nic_matcher,
+ cur_htbl,
+ new_htbl,
+ &rehash_table_send_list);
+ if (err)
+ goto free_new_htbl;
+
+ if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
+ nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
+ mlx5dr_err(dmn, "Failed writing table to HW\n");
+ goto free_new_htbl;
+ }
+
+	/* Writing to the hw is done in regular order of rehash_table_send_list,
+	 * so that the original data is written before the miss address of
+	 * collision entries, if any exist.
+	 */
+ if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
+ mlx5dr_err(dmn, "Failed updating table to HW\n");
+ goto free_ste_list;
+ }
+
+ /* Connect previous hash table to current */
+ if (ste_location == 1) {
+		/* The previous table is an anchor; an anchor's size is always one STE */
+ struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
+
+ /* On matcher s_anchor we keep an extra refcount */
+ mlx5dr_htbl_get(new_htbl);
+ mlx5dr_htbl_put(cur_htbl);
+
+ nic_matcher->s_htbl = new_htbl;
+
+		/* It is safe to call dr_ste_set_hit_addr on the hw_ste here
+		 * (48B long), since it only touches the first 32B
+		 */
+ mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ new_htbl->chunk->icm_addr,
+ new_htbl->chunk->num_of_entries);
+
+ ste_to_update = &prev_htbl->ste_arr[0];
+ } else {
+ mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ new_htbl);
+ ste_to_update = cur_htbl->pointing_ste;
+ }
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
+ 0, ste_to_update->hw_ste, ste_info,
+ update_list, false);
+
+ return new_htbl;
+
+free_ste_list:
+ /* Clean all ste_info's from the new table */
+ list_for_each_entry_safe(del_ste_info, tmp_ste_info,
+ &rehash_table_send_list, send_list) {
+ list_del(&del_ste_info->send_list);
+ kfree(del_ste_info);
+ }
+
+free_new_htbl:
+ mlx5dr_ste_htbl_free(new_htbl);
+free_ste_info:
+ kfree(ste_info);
+ mlx5dr_info(dmn, "Failed creating rehash table\n");
+ return NULL;
+}
+
+static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 ste_location,
+ struct list_head *update_list)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+ enum mlx5dr_icm_chunk_size new_size;
+
+ new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+ new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
+
+ if (new_size == cur_htbl->chunk_size)
+		return NULL; /* Skip rehash, we are already at the max size */
+
+ return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
+ update_list, new_size);
+}
+
+static struct mlx5dr_ste *
+dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *hw_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+ struct mlx5dr_ste *new_ste;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ return NULL;
+
+ new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
+ if (!new_ste)
+ goto free_send_info;
+
+ if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ goto err_exit;
+ }
+
+ mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
+ ste_info, send_list, false);
+
+ ste->htbl->ctrl.num_of_collisions++;
+ ste->htbl->ctrl.num_of_valid_entries++;
+
+ return new_ste;
+
+err_exit:
+ mlx5dr_ste_free(new_ste, matcher, nic_matcher);
+free_send_info:
+ kfree(ste_info);
+ return NULL;
+}
+
+static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ struct mlx5dr_rule_action_member *tmp;
+
+ list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
+ list_del(&action_mem->list);
+ refcount_dec(&action_mem->action->refcount);
+ kvfree(action_mem);
+ }
+}
+
+static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ int i;
+
+ for (i = 0; i < num_actions; i++) {
+ action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
+ if (!action_mem)
+ goto free_action_members;
+
+ action_mem->action = actions[i];
+ INIT_LIST_HEAD(&action_mem->list);
+ list_add_tail(&action_mem->list, &rule->rule_actions_list);
+ refcount_inc(&action_mem->action->refcount);
+ }
+
+ return 0;
+
+free_action_members:
+ dr_rule_remove_action_members(rule);
+ return -ENOMEM;
+}
+
+/* When an ste pointer is no longer valid, e.g. when the ste is moved to be
+ * the first in the miss_list or into the origin table,
+ * all rule members attached to this ste should update their ste member
+ * to the new pointer.
+ */
+void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste *new_ste)
+{
+ struct mlx5dr_rule_member *rule_mem;
+
+ if (!list_empty(&ste->rule_list))
+ list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
+ rule_mem->ste = new_ste;
+}
+
+static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule)
+{
+ struct mlx5dr_rule_member *rule_mem;
+ struct mlx5dr_rule_member *tmp_mem;
+
+ if (list_empty(&nic_rule->rule_members_list))
+ return;
+ list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
+ list_del(&rule_mem->list);
+ list_del(&rule_mem->use_ste_list);
+ mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
+ kvfree(rule_mem);
+ }
+}
+
+static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+
+ if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+ return false;
+
+ if (!ctrl->may_grow)
+ return false;
+
+ if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
+ (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
+ return true;
+
+ return false;
+}
+
+static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste *ste)
+{
+ struct mlx5dr_rule_member *rule_mem;
+
+ rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
+ if (!rule_mem)
+ return -ENOMEM;
+
+ rule_mem->ste = ste;
+ list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
+
+ list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
+
+ return 0;
+}
+
+static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste *last_ste,
+ u8 *hw_ste_arr,
+ u32 new_hw_ste_arr_sz)
+{
+ struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
+ struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
+ u8 num_of_builders = nic_matcher->num_of_builders;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ u8 *curr_hw_ste, *prev_hw_ste;
+ struct mlx5dr_ste *action_ste;
+ int i, k, ret;
+
+	/* Two cases:
+	 * 1. num_of_builders is equal to new_hw_ste_arr_sz: the action fits in
+	 *    the existing ste.
+	 * 2. num_of_builders is less than new_hw_ste_arr_sz: a new ste was
+	 *    added to support the action.
+	 */
+ if (num_of_builders == new_hw_ste_arr_sz)
+ return 0;
+
+ for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
+ curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
+ prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
+ action_ste = dr_rule_create_collision_htbl(matcher,
+ nic_matcher,
+ curr_hw_ste);
+ if (!action_ste)
+ return -ENOMEM;
+
+ mlx5dr_ste_get(action_ste);
+
+		/* When freeing an ste we go over its miss list, so add this ste to the list */
+ list_add_tail(&action_ste->miss_list_node,
+ mlx5dr_ste_get_miss_list(action_ste));
+
+ ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
+ GFP_KERNEL);
+ if (!ste_info_arr[k])
+ goto err_exit;
+
+ /* Point current ste to the new action */
+ mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ ret = dr_rule_add_member(nic_rule, action_ste);
+ if (ret) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ goto free_ste_info;
+ }
+ mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
+ curr_hw_ste,
+ ste_info_arr[k],
+ send_ste_list, false);
+ }
+
+ return 0;
+
+free_ste_info:
+ kfree(ste_info_arr[k]);
+err_exit:
+ mlx5dr_ste_put(action_ste, matcher, nic_matcher);
+ return -ENOMEM;
+}
+
+static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ struct mlx5dr_ste *ste,
+ u8 ste_location,
+ u8 *hw_ste,
+ struct list_head *miss_list,
+ struct list_head *send_list)
+{
+ struct mlx5dr_ste_send_info *ste_info;
+
+	/* Take a ref on the table, only the first time this ste is used */
+ mlx5dr_htbl_get(cur_htbl);
+
+ /* new entry -> new branch */
+ list_add_tail(&ste->miss_list_node, miss_list);
+
+ mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+
+ ste->ste_chain_location = ste_location;
+
+ ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ if (!ste_info)
+ goto clean_ste_setting;
+
+ if (mlx5dr_ste_create_next_htbl(matcher,
+ nic_matcher,
+ ste,
+ hw_ste,
+ DR_CHUNK_SIZE_1)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ goto clean_ste_info;
+ }
+
+ cur_htbl->ctrl.num_of_valid_entries++;
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
+ ste_info, send_list, false);
+
+ return 0;
+
+clean_ste_info:
+ kfree(ste_info);
+clean_ste_setting:
+ list_del_init(&ste->miss_list_node);
+ mlx5dr_htbl_put(cur_htbl);
+
+ return -ENOMEM;
+}
+
+static struct mlx5dr_ste *
+dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *cur_htbl,
+ u8 *hw_ste,
+ u8 ste_location,
+ struct mlx5dr_ste_htbl **put_htbl)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ struct mlx5dr_ste_htbl *new_htbl;
+ struct mlx5dr_ste *matched_ste;
+ struct list_head *miss_list;
+ bool skip_rehash = false;
+ struct mlx5dr_ste *ste;
+ int index;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+again:
+ index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
+ miss_list = &cur_htbl->chunk->miss_list[index];
+ ste = &cur_htbl->ste_arr[index];
+
+ if (mlx5dr_ste_not_used_ste(ste)) {
+ if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
+ ste, ste_location,
+ hw_ste, miss_list,
+ send_ste_list))
+ return NULL;
+ } else {
+ /* Hash table index in use, check if this ste is in the miss list */
+ matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
+ if (matched_ste) {
+			/* If this is the last STE in the chain and it has the
+			 * same tag, all the previous stes are the same as well;
+			 * in that case this rule is a duplicate.
+			 */
+ if (mlx5dr_ste_is_last_in_rule(nic_matcher,
+ matched_ste->ste_chain_location)) {
+ mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
+ return NULL;
+ }
+ return matched_ste;
+ }
+
+ if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
+			/* Hash table index in use, try to resize the hash table */
+ skip_rehash = true;
+
+ /* Hold the table till we update.
+ * Release in dr_rule_create_rule()
+ */
+ *put_htbl = cur_htbl;
+ mlx5dr_htbl_get(cur_htbl);
+
+ new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
+ ste_location, send_ste_list);
+ if (!new_htbl) {
+ mlx5dr_htbl_put(cur_htbl);
+ mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
+ cur_htbl->chunk_size);
+ } else {
+ cur_htbl = new_htbl;
+ }
+ goto again;
+ } else {
+ /* Hash table index in use, add another collision (miss) */
+ ste = dr_rule_handle_collision(matcher,
+ nic_matcher,
+ ste,
+ hw_ste,
+ miss_list,
+ send_ste_list);
+ if (!ste) {
+ mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
+ index);
+ return NULL;
+ }
+ }
+ }
+ return ste;
+}
+
+static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
+ u32 s_idx, u32 e_idx)
+{
+ u32 i;
+
+ for (i = s_idx; i < e_idx; i++) {
+ if (value[i] & ~mask[i]) {
+ pr_info("Rule parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ struct mlx5dr_match_param *param)
+{
+ u8 match_criteria = matcher->match_criteria;
+ size_t value_size = value->match_sz;
+ u8 *mask_p = (u8 *)&matcher->mask;
+ u8 *param_p = (u8 *)param;
+ u32 s_idx, e_idx;
+
+ if (!value_size ||
+ (value_size > sizeof(struct mlx5dr_match_param) ||
+ (value_size % sizeof(u32)))) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+ return false;
+ }
+
+ mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+ s_idx = offsetof(struct mlx5dr_match_param, outer);
+ e_idx = min(s_idx + sizeof(param->outer), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc);
+ e_idx = min(s_idx + sizeof(param->misc), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+ s_idx = offsetof(struct mlx5dr_match_param, inner);
+ e_idx = min(s_idx + sizeof(param->inner), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc2);
+ e_idx = min(s_idx + sizeof(param->misc2), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc3);
+ e_idx = min(s_idx + sizeof(param->misc3), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule)
+{
+ dr_rule_clean_rule_members(rule, nic_rule);
+ return 0;
+}
+
+static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
+{
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ dr_rule_destroy_rule_nic(rule, &rule->tx);
+ return 0;
+}
+
+static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_rule_destroy_rule_nic(rule, &rule->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_rule_destroy_rule_fdb(rule);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dr_rule_remove_action_members(rule);
+ kfree(rule);
+ return 0;
+}
+
+static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+{
+ return (param->outer.ip_version == 6 ||
+ param->inner.ip_version == 6 ||
+ param->outer.ethertype == ETH_P_IPV6 ||
+ param->inner.ethertype == ETH_P_IPV6);
+}
+
+static bool dr_rule_skip(enum mlx5dr_domain_type domain,
+ enum mlx5dr_ste_entry_type ste_type,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value)
+{
+ if (domain != MLX5DR_DOMAIN_TYPE_FDB)
+ return false;
+
+ if (mask->misc.source_port) {
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ if (value->misc.source_port != WIRE_PORT)
+ return true;
+
+ if (ste_type == MLX5DR_STE_TYPE_TX)
+ if (value->misc.source_port == WIRE_PORT)
+ return true;
+ }
+
+ /* Metadata C can be used to describe the source vport */
+ if (mask->misc2.metadata_reg_c_0) {
+ if (ste_type == MLX5DR_STE_TYPE_RX)
+ if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
+ return true;
+
+ if (ste_type == MLX5DR_STE_TYPE_TX)
+ if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
+ return true;
+ }
+ return false;
+}
+
+static int
+dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_match_param *param,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ struct mlx5dr_ste_htbl *htbl = NULL;
+ struct mlx5dr_ste_htbl *cur_htbl;
+ struct mlx5dr_ste *ste = NULL;
+ LIST_HEAD(send_ste_list);
+ u8 *hw_ste_arr = NULL;
+ u32 new_hw_ste_arr_sz;
+ int ret, i;
+
+ nic_matcher = nic_rule->nic_matcher;
+ nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+ INIT_LIST_HEAD(&nic_rule->rule_members_list);
+
+ if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
+ return 0;
+
+ ret = mlx5dr_matcher_select_builders(matcher,
+ nic_matcher,
+ dr_rule_is_ipv6(param));
+ if (ret)
+ goto out_err;
+
+ hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Set the tag values inside the ste array */
+ ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
+ if (ret)
+ goto free_hw_ste;
+
+ /* Set the actions values/addresses inside the ste array */
+ ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
+ num_actions, hw_ste_arr,
+ &new_hw_ste_arr_sz);
+ if (ret)
+ goto free_hw_ste;
+
+ cur_htbl = nic_matcher->s_htbl;
+
+	/* Go over the array of STEs, and build dr_ste accordingly.
+	 * The loop covers only the builders, which are less than or equal to
+	 * the number of stes, since actions may live in additional stes.
+	 */
+ for (i = 0; i < nic_matcher->num_of_builders; i++) {
+ /* Calculate CRC and keep new ste entry */
+ u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
+
+ ste = dr_rule_handle_ste_branch(rule,
+ nic_rule,
+ &send_ste_list,
+ cur_htbl,
+ cur_hw_ste_ent,
+ i + 1,
+ &htbl);
+ if (!ste) {
+ mlx5dr_err(dmn, "Failed creating next branch\n");
+ ret = -ENOENT;
+ goto free_rule;
+ }
+
+ cur_htbl = ste->next_htbl;
+
+ /* Keep all STEs in the rule struct */
+ ret = dr_rule_add_member(nic_rule, ste);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
+ goto free_ste;
+ }
+
+ mlx5dr_ste_get(ste);
+ }
+
+ /* Connect actions */
+ ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
+ ste, hw_ste_arr, new_hw_ste_arr_sz);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed apply actions\n");
+ goto free_rule;
+ }
+ ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed sending ste!\n");
+ goto free_rule;
+ }
+
+ if (htbl)
+ mlx5dr_htbl_put(htbl);
+
+ return 0;
+
+free_ste:
+ mlx5dr_ste_put(ste, matcher, nic_matcher);
+free_rule:
+ dr_rule_clean_rule_members(rule, nic_rule);
+ /* Clean all ste_info's */
+ list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
+ list_del(&ste_info->send_list);
+ kfree(ste_info);
+ }
+free_hw_ste:
+ kfree(hw_ste_arr);
+out_err:
+ return ret;
+}
+
+static int
+dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
+ struct mlx5dr_match_param *param,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_match_param copy_param = {};
+ int ret;
+
+	/* Copy match_param since it will be consumed during the first
+	 * nic_rule insertion.
+	 */
+ memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
+
+ ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
+ num_actions, actions);
+ if (ret)
+ return ret;
+
+ ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
+ num_actions, actions);
+ if (ret)
+ goto destroy_rule_nic_rx;
+
+ return 0;
+
+destroy_rule_nic_rx:
+ dr_rule_destroy_rule_nic(rule, &rule->rx);
+ return ret;
+}
+
+static struct mlx5dr_rule *
+dr_rule_create_rule(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_match_param param = {};
+ struct mlx5dr_rule *rule;
+ int ret;
+
+ if (!dr_rule_verify(matcher, value, &param))
+ return NULL;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->matcher = matcher;
+ INIT_LIST_HEAD(&rule->rule_actions_list);
+
+ ret = dr_rule_add_action_members(rule, num_actions, actions);
+ if (ret)
+ goto free_rule;
+
+ switch (dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ rule->rx.nic_matcher = &matcher->rx;
+ ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
+ num_actions, actions);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ rule->tx.nic_matcher = &matcher->tx;
+ ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
+ num_actions, actions);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ rule->rx.nic_matcher = &matcher->rx;
+ rule->tx.nic_matcher = &matcher->tx;
+ ret = dr_rule_create_rule_fdb(rule, &param,
+ num_actions, actions);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto remove_action_members;
+
+ return rule;
+
+remove_action_members:
+ dr_rule_remove_action_members(rule);
+free_rule:
+ kfree(rule);
+ mlx5dr_info(dmn, "Failed creating rule\n");
+ return NULL;
+}
+
+struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[])
+{
+ struct mlx5dr_rule *rule;
+
+ mutex_lock(&matcher->tbl->dmn->mutex);
+ refcount_inc(&matcher->refcount);
+
+ rule = dr_rule_create_rule(matcher, value, num_actions, actions);
+ if (!rule)
+ refcount_dec(&matcher->refcount);
+
+ mutex_unlock(&matcher->tbl->dmn->mutex);
+
+ return rule;
+}
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_table *tbl = rule->matcher->tbl;
+ int ret;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ ret = dr_rule_destroy_rule(rule);
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ if (!ret)
+ refcount_dec(&matcher->refcount);
+ return ret;
+}
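A matching sketch for the rule API above, again assuming the match_sz/match_buf layout of struct mlx5dr_match_parameters and an action already built by the dr_action code; mlx5dr_rule_create() takes the domain mutex and a matcher reference internally, and a NULL return indicates, for example, a value bit set outside the matcher mask or a duplicate rule:

/* Minimal sketch of a rule-API caller. example_rule_insert() and 'fwd_action'
 * are hypothetical (fwd_action is assumed to be a pre-built dr_action, e.g.
 * forward-to-table); value_buf holds the match value in device-spec format,
 * fully covered by the matcher mask. Assumes "steering/mlx5dr.h" is included.
 */
static struct mlx5dr_rule *
example_rule_insert(struct mlx5dr_matcher *matcher,
		    u64 *value_buf, size_t value_sz,
		    struct mlx5dr_action *fwd_action)
{
	struct mlx5dr_action *actions[] = { fwd_action };
	struct mlx5dr_match_parameters value = {
		.match_sz  = value_sz,
		.match_buf = value_buf,
	};

	/* NULL on failure; on success the rule holds a reference on the
	 * matcher and on each action until mlx5dr_rule_destroy().
	 */
	return mlx5dr_rule_create(matcher, &value, ARRAY_SIZE(actions), actions);
}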
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
new file mode 100644
index 000000000000..ef0dea44f3b3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+#define QUEUE_SIZE 128
+#define SIGNAL_PER_DIV_QUEUE 16
+#define TH_NUMS_TO_DRAIN 2
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct dr_data_seg {
+ u64 addr;
+ u32 length;
+ u32 lkey;
+ unsigned int send_flags;
+};
+
+struct postsend_info {
+ struct dr_data_seg write;
+ struct dr_data_seg read;
+ u64 remote_addr;
+ u32 rkey;
+};
+
+struct dr_qp_rtr_attr {
+ struct mlx5dr_cmd_gid_attr dgid_attr;
+ enum ib_mtu mtu;
+ u32 qp_num;
+ u16 port_num;
+ u8 min_rnr_timer;
+ u8 sgid_index;
+ u16 udp_src_port;
+};
+
+struct dr_qp_rts_attr {
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+};
+
+struct dr_qp_init_attr {
+ u32 cqn;
+ u32 pdn;
+ u32 max_send_wr;
+ struct mlx5_uars_page *uar;
+};
+
+static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
+{
+ unsigned int idx;
+ u8 opcode;
+
+ opcode = get_cqe_opcode(cqe64);
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ idx = be16_to_cpu(cqe64->wqe_counter) &
+ (dr_cq->qp->sq.wqe_cnt - 1);
+ dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+ } else if (opcode == MLX5_CQE_RESP_ERR) {
+ ++dr_cq->qp->sq.cc;
+ } else {
+ idx = be16_to_cpu(cqe64->wqe_counter) &
+ (dr_cq->qp->sq.wqe_cnt - 1);
+ dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+
+ return CQ_OK;
+ }
+
+ return CQ_POLL_ERR;
+}
+
+static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
+ if (!cqe64)
+ return CQ_EMPTY;
+
+ mlx5_cqwq_pop(&dr_cq->wq);
+ err = dr_parse_cqe(dr_cq, cqe64);
+ mlx5_cqwq_update_db_record(&dr_cq->wq);
+
+ return err;
+}
+
+static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
+{
+ int npolled;
+ int err = 0;
+
+ for (npolled = 0; npolled < ne; ++npolled) {
+ err = dr_cq_poll_one(dr_cq);
+ if (err != CQ_OK)
+ break;
+ }
+
+ return err == CQ_POLL_ERR ? err : npolled;
+}
+
+static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
+{
+ pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
+}
+
+static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct dr_qp_init_attr *attr)
+{
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+ struct mlx5_wq_param wqp;
+ struct mlx5dr_qp *dr_qp;
+ int inlen;
+ void *qpc;
+ void *in;
+ int err;
+
+ dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
+ if (!dr_qp)
+ return NULL;
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ dr_qp->rq.pc = 0;
+ dr_qp->rq.cc = 0;
+ dr_qp->rq.wqe_cnt = 4;
+ dr_qp->sq.pc = 0;
+ dr_qp->sq.cc = 0;
+ dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
+
+ MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+ MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+ err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
+ &dr_qp->wq_ctrl);
+ if (err) {
+ mlx5_core_info(mdev, "Can't create QP WQ\n");
+ goto err_wq;
+ }
+
+ dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
+ sizeof(dr_qp->sq.wqe_head[0]),
+ GFP_KERNEL);
+
+ if (!dr_qp->sq.wqe_head) {
+ mlx5_core_warn(mdev, "Can't allocate wqe head\n");
+ goto err_wqe_head;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ dr_qp->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, attr->pdn);
+ MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+ MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+ MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+ mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ in, pas));
+
+ err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+ kfree(in);
+
+ if (err) {
+ mlx5_core_warn(mdev, " Can't create QP\n");
+ goto err_in;
+ }
+ dr_qp->mqp.event = dr_qp_event;
+ dr_qp->uar = attr->uar;
+
+ return dr_qp;
+
+err_in:
+ kfree(dr_qp->sq.wqe_head);
+err_wqe_head:
+ mlx5_wq_destroy(&dr_qp->wq_ctrl);
+err_wq:
+ kfree(dr_qp);
+ return NULL;
+}
+
+static void dr_destroy_qp(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp)
+{
+ mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+ kfree(dr_qp->sq.wqe_head);
+ mlx5_wq_destroy(&dr_qp->wq_ctrl);
+ kfree(dr_qp);
+}
+
+static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
+{
+ dma_wmb();
+ *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+
+	/* After the wmb() the hw is aware of the new work */
+ wmb();
+
+ mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
+}
+
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+ u32 rkey, struct dr_data_seg *data_seg,
+ u32 opcode, int nreq)
+{
+ struct mlx5_wqe_raddr_seg *wq_raddr;
+ struct mlx5_wqe_ctrl_seg *wq_ctrl;
+ struct mlx5_wqe_data_seg *wq_dseg;
+ unsigned int size;
+ unsigned int idx;
+
+ size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
+ sizeof(*wq_raddr) / 16;
+
+ idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+ wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+ wq_ctrl->imm = 0;
+ wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
+ MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
+ opcode);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+ wq_raddr = (void *)(wq_ctrl + 1);
+ wq_raddr->raddr = cpu_to_be64(remote_addr);
+ wq_raddr->rkey = cpu_to_be32(rkey);
+ wq_raddr->reserved = 0;
+
+ wq_dseg = (void *)(wq_raddr + 1);
+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+ dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
+
+ if (nreq)
+ dr_cmd_notify_hw(dr_qp, wq_ctrl);
+}
+
+static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
+{
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+}
+
+/**
+ * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent
+ * with the send_list
+ *
+ * @ste: The STE to which this data belongs
+ * @size: Size of the data to write
+ * @offset: Offset of the data from the start of the hw_ste entry
+ * @data: The data to write
+ * @ste_info: ste_info entry to be appended to the send_list
+ * @send_list: The list to append the ste_info to
+ * @copy_data: If true, the data is copied and kept, since it is not
+ * backed up anywhere else (e.g. during re-hash).
+ * If false, the data may still be updated after it
+ * was added to the list.
+ */
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+ u16 offset, u8 *data,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_list,
+ bool copy_data)
+{
+ ste_info->size = size;
+ ste_info->ste = ste;
+ ste_info->offset = offset;
+
+ if (copy_data) {
+ memcpy(ste_info->data_cont, data, size);
+ ste_info->data = ste_info->data_cont;
+ } else {
+ ste_info->data = data;
+ }
+
+ list_add_tail(&ste_info->send_list, send_list);
+}
+
+/* The function tries to consume one wc each time, unless the queue is full.
+ * In that case, which means that the hw is a full queue length behind the sw,
+ * the function will drain the cq until it is empty.
+ */
+static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring)
+{
+ bool is_drain = false;
+ int ne;
+
+ if (send_ring->pending_wqe < send_ring->signal_th)
+ return 0;
+
+	/* Queue is full, start draining it */
+ if (send_ring->pending_wqe >=
+ dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
+ is_drain = true;
+
+ do {
+ ne = dr_poll_cq(send_ring->cq, 1);
+ if (ne < 0)
+ return ne;
+ else if (ne == 1)
+ send_ring->pending_wqe -= send_ring->signal_th;
+ } while (is_drain && send_ring->pending_wqe);
+
+ return 0;
+}
+
+static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
+ struct postsend_info *send_info)
+{
+ send_ring->pending_wqe++;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+
+ send_ring->pending_wqe++;
+ send_info->read.length = send_info->write.length;
+ /* Read into the same write area */
+ send_info->read.addr = (uintptr_t)send_info->write.addr;
+ send_info->read.lkey = send_ring->mr->mkey.key;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->read.send_flags = IB_SEND_SIGNALED;
+ else
+ send_info->read.send_flags = 0;
+}
+
+static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
+ struct postsend_info *send_info)
+{
+ struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+ u32 buff_offset;
+ int ret;
+
+ ret = dr_handle_pending_wc(dmn, send_ring);
+ if (ret)
+ return ret;
+
+ if (send_info->write.length > dmn->info.max_inline_size) {
+ buff_offset = (send_ring->tx_head &
+ (dmn->send_ring->signal_th - 1)) *
+ send_ring->max_post_send_size;
+ /* Copy to ring mr */
+ memcpy(send_ring->buf + buff_offset,
+ (void *)(uintptr_t)send_info->write.addr,
+ send_info->write.length);
+ send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
+ send_info->write.lkey = send_ring->mr->mkey.key;
+ }
+
+ send_ring->tx_head++;
+ dr_fill_data_segs(send_ring, send_info);
+ dr_post_send(send_ring->qp, send_info);
+
+ return 0;
+}
+
+static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 **data,
+ u32 *byte_size,
+ int *iterations,
+ int *num_stes)
+{
+ int alloc_size;
+
+ if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
+ *iterations = htbl->chunk->byte_size /
+ dmn->send_ring->max_post_send_size;
+ *byte_size = dmn->send_ring->max_post_send_size;
+ alloc_size = *byte_size;
+ *num_stes = *byte_size / DR_STE_SIZE;
+ } else {
+ *iterations = 1;
+ *num_stes = htbl->chunk->num_of_entries;
+ alloc_size = *num_stes * DR_STE_SIZE;
+ }
+
+ *data = kzalloc(alloc_size, GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * mlx5dr_send_postsend_ste: Write size bytes at the given offset into the HW ICM
+ *
+ * @dmn: Domain
+ * @ste: The ste struct that contains the data (at
+ * least part of it)
+ * @data: The data to write
+ * @size: Number of bytes to write
+ * @offset: Offset from the start of the ICM-mapped ste data at which
+ * to start writing; allows writing only part of the
+ * buffer.
+ *
+ * Return: 0 on success.
+ */
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
+ u8 *data, u16 size, u16 offset)
+{
+ struct postsend_info send_info = {};
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
+ send_info.rkey = ste->htbl->chunk->rkey;
+
+ return dr_postsend_icm_data(dmn, &send_info);
+}
+
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste, u8 *mask)
+{
+ u32 byte_size = htbl->chunk->byte_size;
+ int num_stes_per_iter;
+ int iterations;
+ u8 *data;
+ int ret;
+ int i;
+ int j;
+
+ ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+ &iterations, &num_stes_per_iter);
+ if (ret)
+ return ret;
+
+	/* Send the data 'iterations' times */
+ for (i = 0; i < iterations; i++) {
+ u32 ste_index = i * (byte_size / DR_STE_SIZE);
+ struct postsend_info send_info = {};
+
+		/* Copy all the STEs into the data buffer,
+		 * the bit_mask needs to be added as well
+		 */
+ for (j = 0; j < num_stes_per_iter; j++) {
+ u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
+ u32 ste_off = j * DR_STE_SIZE;
+
+ if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
+ memcpy(data + ste_off,
+ formatted_ste, DR_STE_SIZE);
+ } else {
+ /* Copy data */
+ memcpy(data + ste_off,
+ htbl->ste_arr[ste_index + j].hw_ste,
+ DR_STE_SIZE_REDUCED);
+ /* Copy bit_mask */
+ memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
+ mask, DR_STE_SIZE_MASK);
+ }
+ }
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr =
+ mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+ send_info.rkey = htbl->chunk->rkey;
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ goto out_free;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+
+/* Initialize htbl with default STEs */
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *ste_init_data,
+ bool update_hw_ste)
+{
+ u32 byte_size = htbl->chunk->byte_size;
+ int iterations;
+ int num_stes;
+ u8 *data;
+ int ret;
+ int i;
+
+ ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+ &iterations, &num_stes);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_stes; i++) {
+ u8 *copy_dst;
+
+		/* Copy the same STE into the data buffer */
+ copy_dst = data + i * DR_STE_SIZE;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+
+ if (update_hw_ste) {
+			/* Copy the reduced STE into the hash table's hw_ste_arr */
+ copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
+ }
+ }
+
+	/* Send the data in 'iterations' chunks */
+ for (i = 0; i < iterations; i++) {
+		u32 ste_index = i * (byte_size / DR_STE_SIZE);
+ struct postsend_info send_info = {};
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr =
+ mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+ send_info.rkey = htbl->chunk->rkey;
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ goto out_free;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action)
+{
+ struct postsend_info send_info = {};
+ int ret;
+
+ send_info.write.addr = (uintptr_t)action->rewrite.data;
+ send_info.write.length = action->rewrite.chunk->byte_size;
+ send_info.write.lkey = 0;
+ send_info.remote_addr = action->rewrite.chunk->mr_addr;
+ send_info.rkey = action->rewrite.chunk->rkey;
+
+ mutex_lock(&dmn->mutex);
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ mutex_unlock(&dmn->mutex);
+
+ return ret;
+}
+
+static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ int port)
+{
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
+static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ struct dr_qp_rts_attr *attr)
+{
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
+
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
+static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
+ struct mlx5dr_qp *dr_qp,
+ struct dr_qp_rtr_attr *attr)
+{
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
+
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
+
+ MLX5_SET(qpc, qpc, mtu, attr->mtu);
+ MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
+ attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
+ attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
+ MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
+ attr->sgid_index);
+
+ if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
+ MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
+ attr->udp_src_port);
+
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
+ &dr_qp->mqp);
+}
+
+static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
+ struct dr_qp_rts_attr rts_attr = {};
+ struct dr_qp_rtr_attr rtr_attr = {};
+ enum ib_mtu mtu = IB_MTU_1024;
+ u16 gid_index = 0;
+ int port = 1;
+ int ret;
+
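+	/* This is a loopback RC QP (the remote QPN below is its own QPN);
+	 * walk it through the standard RST -> INIT -> RTR -> RTS state
+	 * machine so it can post RDMA writes/reads to the device ICM.
+	 */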
+ /* Init */
+ ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
+ if (ret)
+ return ret;
+
+ /* RTR */
+ ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
+ if (ret)
+ return ret;
+
+ rtr_attr.mtu = mtu;
+ rtr_attr.qp_num = dr_qp->mqp.qpn;
+ rtr_attr.min_rnr_timer = 12;
+ rtr_attr.port_num = port;
+ rtr_attr.sgid_index = gid_index;
+ rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
+
+ ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
+ if (ret)
+ return ret;
+
+ /* RTS */
+ rts_attr.timeout = 14;
+ rts_attr.retry_cnt = 7;
+ rts_attr.rnr_retry = 7;
+
+ ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void dr_cq_event(struct mlx5_core_cq *mcq,
+ enum mlx5_event event)
+{
+ pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
+}
+
+static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_uars_page *uar,
+ size_t ncqe)
+{
+ u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_wq_param wqp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5dr_cq *cq;
+ int inlen, err, eqn;
+ unsigned int irqn;
+ void *cqc, *in;
+ __be64 *pas;
+ u32 i;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return NULL;
+
+ ncqe = roundup_pow_of_two(ncqe);
+ MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ goto out;
+
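+	/* Mark all CQEs as invalid and HW-owned so stale entries are not
+	 * mistaken for completions before the hardware writes them.
+	 */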
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ goto err_cqwq;
+
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ if (err) {
+ kvfree(in);
+ goto err_cqwq;
+ }
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
+
+ cq->mcq.event = dr_cq_event;
+
+ err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+ kvfree(in);
+
+ if (err)
+ goto err_cqwq;
+
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+ cq->mcq.vector = 0;
+ cq->mcq.irqn = irqn;
+ cq->mcq.uar = uar;
+
+ return cq;
+
+err_cqwq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+out:
+ kfree(cq);
+ return NULL;
+}
+
+static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
+{
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+ kfree(cq);
+}
+
+static int
+dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey)
+{
+ u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+ void *mkc;
+
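+	/* Create a physical-address (PA) mkey with full local/remote
+	 * read/write access spanning the whole address space (length64),
+	 * which lets DMA addresses be used directly with this key.
+	 */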
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
+}
+
+static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
+ u32 pdn, void *buf, size_t size)
+{
+ struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ if (!mr)
+ return NULL;
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, buf, size,
+ DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_warn(mdev, "Can't dma buf\n");
+ kfree(mr);
+ return NULL;
+ }
+
+ err = dr_create_mkey(mdev, pdn, &mr->mkey);
+ if (err) {
+ mlx5_core_warn(mdev, "Can't create mkey\n");
+ dma_unmap_single(dma_device, dma_addr, size,
+ DMA_BIDIRECTIONAL);
+ kfree(mr);
+ return NULL;
+ }
+
+ mr->dma_addr = dma_addr;
+ mr->size = size;
+ mr->addr = buf;
+
+ return mr;
+}
+
+static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
+{
+ mlx5_core_destroy_mkey(mdev, &mr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+ DMA_BIDIRECTIONAL);
+ kfree(mr);
+}
+
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
+{
+ struct dr_qp_init_attr init_attr = {};
+ int cq_size;
+ int size;
+ int ret;
+
+ dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
+ if (!dmn->send_ring)
+ return -ENOMEM;
+
+ cq_size = QUEUE_SIZE + 1;
+ dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
+ if (!dmn->send_ring->cq) {
+ ret = -ENOMEM;
+ goto free_send_ring;
+ }
+
+ init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
+ init_attr.pdn = dmn->pdn;
+ init_attr.uar = dmn->uar;
+ init_attr.max_send_wr = QUEUE_SIZE;
+
+ dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
+ if (!dmn->send_ring->qp) {
+ ret = -ENOMEM;
+ goto clean_cq;
+ }
+
+ dmn->send_ring->cq->qp = dmn->send_ring->qp;
+
+ dmn->info.max_send_wr = QUEUE_SIZE;
+ dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
+ DR_STE_SIZE);
+
+ dmn->send_ring->signal_th = dmn->info.max_send_wr /
+ SIGNAL_PER_DIV_QUEUE;
+
+ /* Prepare qp to be used */
+ ret = dr_prepare_qp_to_rts(dmn);
+ if (ret)
+ goto clean_qp;
+
+ dmn->send_ring->max_post_send_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
+ DR_ICM_TYPE_STE);
+
+ /* Allocating the max size as a buffer for writing */
+ size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
+ dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
+ if (!dmn->send_ring->buf) {
+ ret = -ENOMEM;
+ goto clean_qp;
+ }
+
+ memset(dmn->send_ring->buf, 0, size);
+ dmn->send_ring->buf_size = size;
+
+ dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
+ dmn->pdn, dmn->send_ring->buf, size);
+ if (!dmn->send_ring->mr) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+
+ dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
+ dmn->pdn, dmn->send_ring->sync_buff,
+ MIN_READ_SYNC);
+ if (!dmn->send_ring->sync_mr) {
+ ret = -ENOMEM;
+ goto clean_mr;
+ }
+
+ return 0;
+
+clean_mr:
+ dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
+free_mem:
+ kfree(dmn->send_ring->buf);
+clean_qp:
+ dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
+clean_cq:
+ dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
+free_send_ring:
+ kfree(dmn->send_ring);
+
+ return ret;
+}
+
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring)
+{
+ dr_destroy_qp(dmn->mdev, send_ring->qp);
+ dr_destroy_cq(dmn->mdev, send_ring->cq);
+ dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
+ dr_dereg_mr(dmn->mdev, send_ring->mr);
+ kfree(send_ring->buf);
+ kfree(send_ring);
+}
+
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+ struct postsend_info send_info = {};
+ u8 data[DR_STE_SIZE];
+ int num_of_sends_req;
+ int ret;
+ int i;
+
+	/* Sending this number of requests guarantees that the queue is drained */
+ num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
+
+ /* Send fake requests forcing the last to be signaled */
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = DR_STE_SIZE;
+ send_info.write.lkey = 0;
+ /* Using the sync_mr in order to write/read */
+ send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
+ send_info.rkey = send_ring->sync_mr->mkey.key;
+
+ for (i = 0; i < num_of_sends_req; i++) {
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ return ret;
+ }
+
+ ret = dr_handle_pending_wc(dmn, send_ring);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
new file mode 100644
index 000000000000..6b0af64536d8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -0,0 +1,2308 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include "dr_types.h"
+
+#define DR_STE_CRC_POLY 0xEDB88320L
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+/* Write a given value into an STE field when the spec field is set, and mark
+ * the spec field as consumed
+ */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
+
+/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as consumed */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set bit_mask->bm_fname to -1 and mark spec->s_fname as consumed */
+#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
+
+/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as consumed */
+#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
+
+#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
+ (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
+ MLX5DR_STE_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_tunl_action {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+struct dr_hw_ste_format {
+ u8 ctrl[DR_STE_SIZE_CTRL];
+ u8 tag[DR_STE_SIZE_TAG];
+ u8 mask[DR_STE_SIZE_MASK];
+};
+
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 masked[DR_STE_SIZE_TAG] = {};
+ u32 crc32, index;
+ u16 bit;
+ int i;
+
+	/* Don't calculate CRC if the result is predictable */
+ if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+ return 0;
+
+ /* Mask tag using byte mask, bit per byte */
+ bit = 1 << (DR_STE_SIZE_TAG - 1);
+ for (i = 0; i < DR_STE_SIZE_TAG; i++) {
+ if (htbl->byte_mask & bit)
+ masked[i] = hw_ste->tag[i];
+
+ bit = bit >> 1;
+ }
+
+ crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+ index = crc32 & (htbl->chunk->num_of_entries - 1);
+
+ return index;
+}
+
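+/* Convert the per-byte STE bit_mask into a 16-bit byte_mask in which a bit is
+ * set only for bytes that are fully masked (0xff); the byte_mask selects which
+ * tag bytes take part in the hash.
+ */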
+static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+{
+ u16 byte_mask = 0;
+ int i;
+
+ for (i = 0; i < DR_STE_SIZE_MASK; i++) {
+ byte_mask = byte_mask << 1;
+ if (bit_mask[i] == 0xff)
+ byte_mask |= 1;
+ }
+ return byte_mask;
+}
+
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+ memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
+}
+
+void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ /* This can be used for both rx_steering_mult and for sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+	/* Due to a HW limitation we need to set this bit, otherwise reformat +
+	 * push vlan will not work.
+	 */
+ if (go_back)
+ mlx5dr_ste_set_go_back_bit(hw_ste_p);
+}
+
+void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+	/* The hardware expects the size here in 2-byte words */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
+void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
+ u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+	/* Set GVMI once, it is the same for RX/TX.
+	 * Bits 63_48 of the next table base / miss address encode the next GVMI.
+	 */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
+{
+ memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
+ memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
+}
+
+static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
+{
+ hw_ste->tag[0] = 0xdc;
+ hw_ste->mask[0] = 0;
+}
+
+u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+{
+ u64 index =
+ (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
+ MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
+
+ return index << 6;
+}
+
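+/* The hit address is kept in 32-byte granularity and combined with the hash
+ * table size before being split across the two next_table_base fields.
+ */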
+void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+}
+
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+}
+
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+}
+
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
+{
+ u32 index = ste - ste->htbl->ste_arr;
+
+ return &ste->htbl->miss_list[index];
+}
+
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_htbl *next_htbl)
+{
+ struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+ u8 *hw_ste = ste->hw_ste;
+
+ MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
+ MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
+ mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+
+ dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 ste_location)
+{
+ return ste_location == nic_matcher->num_of_builders;
+}
+
+/* Replace relevant fields, except for:
+ * htbl - keep the original htbl
+ * miss_list + list - the src was already taken from the list.
+ * icm_addr/mr_addr - depend on the hosting table.
+ *
+ * Before:
+ * | a | -> | b | -> | c | ->
+ *
+ * After:
+ * | a | -> | c | ->
+ * while the data that was in b is copied into a.
+ */
+static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
+{
+ memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+ dst->next_htbl = src->next_htbl;
+ if (dst->next_htbl)
+ dst->next_htbl->pointing_ste = dst;
+
+ refcount_set(&dst->refcount, refcount_read(&src->refcount));
+
+ INIT_LIST_HEAD(&dst->rule_list);
+ list_splice_tail_init(&src->rule_list, &dst->rule_list);
+}
+
+/* Free ste which is the head and the only one in miss_list */
+static void
+dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste_send_info *ste_info_head,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+{
+ u8 tmp_data_ste[DR_STE_SIZE] = {};
+ struct mlx5dr_ste tmp_ste = {};
+ u64 miss_addr;
+
+ tmp_ste.hw_ste = tmp_data_ste;
+
+	/* Use a temp ste because dr_ste_always_miss_addr
+	 * touches the bit_mask area, which doesn't exist in ste->hw_ste.
+	 */
+ memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+
+ list_del_init(&ste->miss_list_node);
+
+ /* Write full STE size in order to have "always_miss" */
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+ 0, tmp_data_ste,
+ ste_info_head,
+ send_ste_list,
+ true /* Copy data */);
+
+ stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free ste which is the head but NOT the only one in miss_list:
+ * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
+ */
+static void
+dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
+ struct mlx5dr_ste_send_info *ste_info_head,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+
+{
+ struct mlx5dr_ste_htbl *next_miss_htbl;
+
+ next_miss_htbl = next_ste->htbl;
+
+ /* Remove from the miss_list the next_ste before copy */
+ list_del_init(&next_ste->miss_list_node);
+
+	/* Notify all rule members that use next_ste about the replacement */
+ mlx5dr_rule_update_rule_member(next_ste, ste);
+
+ /* Move data from next into ste */
+ dr_ste_replace(ste, next_ste);
+
+	/* Put the htbl that contained the next_ste.
+	 * The original htbl keeps the same number of entries.
+	 */
+ mlx5dr_htbl_put(next_miss_htbl);
+
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
+ 0, ste->hw_ste,
+ ste_info_head,
+ send_ste_list,
+ true /* Copy data */);
+
+ stats_tbl->ctrl.num_of_collisions--;
+ stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free ste that is located in the middle of the miss list:
+ * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
+ */
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_ste_list,
+ struct mlx5dr_ste_htbl *stats_tbl)
+{
+ struct mlx5dr_ste *prev_ste;
+ u64 miss_addr;
+
+ prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste,
+ miss_list_node);
+ if (!prev_ste) {
+ WARN_ON(true);
+ return;
+ }
+
+ miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
+ mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+
+ mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
+ prev_ste->hw_ste, ste_info,
+ send_ste_list, true /* Copy data*/);
+
+ list_del_init(&ste->miss_list_node);
+
+ stats_tbl->ctrl.num_of_valid_entries--;
+ stats_tbl->ctrl.num_of_collisions--;
+}
+
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_send_info ste_info_head;
+ struct mlx5dr_ste *next_ste, *first_ste;
+ bool put_on_origin_table = true;
+ struct mlx5dr_ste_htbl *stats_tbl;
+ LIST_HEAD(send_ste_list);
+
+ first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next,
+ struct mlx5dr_ste, miss_list_node);
+ stats_tbl = first_ste->htbl;
+
+ /* Two options:
+ * 1. ste is head:
+ * a. head ste is the only ste in the miss list
+ * b. head ste is not the only ste in the miss-list
+ * 2. ste is not head
+ */
+ if (first_ste == ste) { /* Ste is the head */
+ struct mlx5dr_ste *last_ste;
+
+ last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
+ struct mlx5dr_ste, miss_list_node);
+ if (last_ste == first_ste)
+ next_ste = NULL;
+ else
+ next_ste = list_entry(ste->miss_list_node.next,
+ struct mlx5dr_ste, miss_list_node);
+
+ if (!next_ste) {
+ /* One and only entry in the list */
+ dr_ste_remove_head_ste(ste, nic_matcher,
+ &ste_info_head,
+ &send_ste_list,
+ stats_tbl);
+ } else {
+ /* First but not only entry in the list */
+ dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
+ &send_ste_list, stats_tbl);
+ put_on_origin_table = false;
+ }
+ } else { /* Ste in the middle of the list */
+ dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ }
+
+ /* Update HW */
+ list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
+ &send_ste_list, send_list) {
+ list_del(&cur_ste_info->send_list);
+ mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
+ cur_ste_info->data, cur_ste_info->size,
+ cur_ste_info->offset);
+ }
+
+ if (put_on_origin_table)
+ mlx5dr_htbl_put(ste->htbl);
+}
+
+bool mlx5dr_ste_equal_tag(void *src, void *dst)
+{
+ struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
+ struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
+
+ return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
+}
+
+void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl)
+{
+ struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+
+ mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+}
+
+void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ /* Miss address for TX and RX STEs located in the same offsets */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
+{
+ u8 *hw_ste = ste->hw_ste;
+
+ MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+/* The assumption here is that ste->hw_ste is not updated while the STE is
+ * unused, so it remains all zeros and checking next_lu_type is sufficient.
+ */
+bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
+
+ if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
+ MLX5DR_STE_LU_TYPE_NOP)
+ return true;
+
+ return false;
+}
+
+bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
+{
+ return !refcount_read(&ste->refcount);
+}
+
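+/* Count the set bits in byte_mask (each loop iteration clears the lowest
+ * set bit).
+ */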
+static u16 get_bits_per_mask(u16 byte_mask)
+{
+ u16 bits = 0;
+
+ while (byte_mask) {
+ byte_mask = byte_mask & (byte_mask - 1);
+ bits++;
+ }
+
+ return bits;
+}
+
+/* Init one ste as a pattern for ste data array */
+void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste,
+ struct mlx5dr_htbl_connect_info *connect_info)
+{
+ struct mlx5dr_ste ste = {};
+
+ mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste.hw_ste = formatted_ste;
+
+ if (connect_info->type == CONNECT_HIT)
+ dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ else
+ mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+}
+
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_htbl_connect_info *connect_info,
+ bool update_hw_ste)
+{
+ u8 formatted_ste[DR_STE_SIZE] = {};
+
+ mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ nic_dmn,
+ htbl,
+ formatted_ste,
+ connect_info);
+
+ return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
+}
+
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *cur_hw_ste,
+ enum mlx5dr_icm_chunk_size log_table_size)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *next_htbl;
+
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
+ u32 bits_in_mask;
+ u8 next_lu_type;
+ u16 byte_mask;
+
+ next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
+ byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+
+		/* Don't allocate a table larger than required:
+		 * the table size is bounded by the byte_mask, so there is
+		 * no need to allocate more than that.
+		 */
+ bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
+ log_table_size = min(log_table_size, bits_in_mask);
+
+ next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ log_table_size,
+ next_lu_type,
+ byte_mask);
+ if (!next_htbl) {
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
+ return -ENOMEM;
+ }
+
+ /* Write new table to HW */
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
+ &info, false)) {
+ mlx5dr_info(dmn, "Failed writing table to HW\n");
+ goto free_table;
+ }
+
+ mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ ste->next_htbl = next_htbl;
+ next_htbl->pointing_ste = ste;
+ }
+
+ return 0;
+
+free_table:
+ mlx5dr_ste_htbl_free(next_htbl);
+ return -ENOENT;
+}
+
+static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
+{
+ struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+ int num_of_entries;
+
+ htbl->ctrl.may_grow = true;
+
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+ htbl->ctrl.may_grow = false;
+
+	/* Threshold is 50%; the extra one handles a table of size 1 */
+ num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+ ctrl->increase_threshold = (num_of_entries + 1) / 2;
+}
+
+struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ u8 lu_type, u16 byte_mask)
+{
+ struct mlx5dr_icm_chunk *chunk;
+ struct mlx5dr_ste_htbl *htbl;
+ int i;
+
+ htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ if (!htbl)
+ return NULL;
+
+ chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
+ if (!chunk)
+ goto out_free_htbl;
+
+ htbl->chunk = chunk;
+ htbl->lu_type = lu_type;
+ htbl->byte_mask = byte_mask;
+ htbl->ste_arr = chunk->ste_arr;
+ htbl->hw_ste_arr = chunk->hw_ste_arr;
+ htbl->miss_list = chunk->miss_list;
+ refcount_set(&htbl->refcount, 0);
+
+ for (i = 0; i < chunk->num_of_entries; i++) {
+ struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+
+ ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ ste->htbl = htbl;
+ refcount_set(&ste->refcount, 0);
+ INIT_LIST_HEAD(&ste->miss_list_node);
+ INIT_LIST_HEAD(&htbl->miss_list[i]);
+ INIT_LIST_HEAD(&ste->rule_list);
+ }
+
+ htbl->chunk_size = chunk_size;
+ dr_ste_set_ctrl(htbl);
+ return htbl;
+
+out_free_htbl:
+ kfree(htbl);
+ return NULL;
+}
+
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
+{
+ if (refcount_read(&htbl->refcount))
+ return -EBUSY;
+
+ mlx5dr_icm_free_chunk(htbl->chunk);
+ kfree(htbl);
+ return 0;
+}
+
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+ u8 match_criteria,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value)
+{
+ if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
+ mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_match_param *value,
+ u8 *ste_arr)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_build *sb;
+ int ret, i;
+
+ ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+ &matcher->mask, value);
+ if (ret)
+ return ret;
+
+ sb = nic_matcher->ste_builder;
+ for (i = 0; i < nic_matcher->num_of_builders; i++) {
+ mlx5dr_ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
+
+ mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
+
+ ret = sb->ste_build_tag_func(value, sb, ste_arr);
+ if (ret)
+ return ret;
+
+ /* Connect the STEs */
+ if (i < (nic_matcher->num_of_builders - 1)) {
+ /* Need the next builder for these fields,
+ * not relevant for the last ste in the chain.
+ */
+ sb++;
+ MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
+ MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ }
+ ste_arr += DR_STE_SIZE;
+ }
+ return 0;
+}
+
+static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
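+	/* The STE layout splits the source MAC at bit 32 (smac_47_32/smac_31_0)
+	 * rather than at bit 16 as in the FTE layout, so recombine the two
+	 * spec fields before setting the mask.
+	 */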
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+
+ if (mask->cvlan_tag || mask->svlan_tag) {
+ pr_info("Invalid c/svlan mask configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
+{
+ spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
+ spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
+ spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
+ spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
+ spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
+
+ spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
+
+ spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
+ spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
+ spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
+ spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
+ spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
+ spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
+
+ spec->outer_second_cvlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
+ spec->inner_second_cvlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
+ spec->outer_second_svlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
+ spec->inner_second_svlan_tag =
+ MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
+
+ spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
+
+ spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
+ spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
+
+ spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
+
+ spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
+ spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
+
+ spec->outer_ipv6_flow_label =
+ MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
+
+ spec->inner_ipv6_flow_label =
+ MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
+
+ spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
+ spec->geneve_protocol_type =
+ MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
+
+ spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
+}
+
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
+{
+ u32 raw_ip[4];
+
+ spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
+
+ spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
+ spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
+
+ spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
+
+ spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
+ spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
+ spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
+ spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
+
+ spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
+ spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
+ spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
+ spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
+ spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
+ spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
+ spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
+ spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
+ spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
+ spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
+
+ spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
+
+ spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
+ spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
+
+ memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip));
+
+ spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
+ spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
+ spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
+ spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
+
+ memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip));
+
+ spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
+ spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
+ spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
+ spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
+}
+
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
+{
+ spec->outer_first_mpls_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
+ spec->outer_first_mpls_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
+ spec->outer_first_mpls_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
+ spec->outer_first_mpls_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
+ spec->inner_first_mpls_label =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
+ spec->inner_first_mpls_exp =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
+ spec->inner_first_mpls_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
+ spec->inner_first_mpls_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
+ spec->outer_first_mpls_over_gre_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
+ spec->outer_first_mpls_over_gre_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
+ spec->outer_first_mpls_over_gre_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
+ spec->outer_first_mpls_over_gre_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
+ spec->outer_first_mpls_over_udp_label =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
+ spec->outer_first_mpls_over_udp_exp =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
+ spec->outer_first_mpls_over_udp_s_bos =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
+ spec->outer_first_mpls_over_udp_ttl =
+ MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
+ spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
+ spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
+ spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
+ spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
+ spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
+ spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
+ spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
+ spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
+ spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
+ spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
+}
+
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
+{
+ spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
+ spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
+ spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
+ spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
+ spec->outer_vxlan_gpe_vni =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
+ spec->outer_vxlan_gpe_next_protocol =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
+ spec->outer_vxlan_gpe_flags =
+ MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
+ spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
+ spec->icmpv6_header_data =
+ MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
+ spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
+ spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
+ spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
+ spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+}
+
+void mlx5dr_ste_copy_param(u8 match_criteria,
+ struct mlx5dr_match_param *set_param,
+ struct mlx5dr_match_parameters *mask)
+{
+ u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
+ u8 *data = (u8 *)mask->match_buf;
+ size_t param_location;
+ void *buff;
+
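+	/* When the supplied mask is shorter than the full match layout, copy
+	 * the partial chunk into the zeroed tail_param scratch buffer so the
+	 * per-section copy helpers never read past the caller's buffer.
+	 */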
+ if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+ if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
+ memcpy(tail_param, data, mask->match_sz);
+ buff = tail_param;
+ } else {
+ buff = mask->match_buf;
+ }
+ dr_ste_copy_mask_spec(buff, &set_param->outer);
+ }
+ param_location = sizeof(struct mlx5dr_match_spec);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc(buff, &set_param->misc);
+ }
+ param_location += sizeof(struct mlx5dr_match_misc);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_spec)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_spec(buff, &set_param->inner);
+ }
+ param_location += sizeof(struct mlx5dr_match_spec);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc2)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc2(buff, &set_param->misc2);
+ }
+
+ param_location += sizeof(struct mlx5dr_match_misc2);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc3)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc3(buff, &set_param->misc3);
+ }
+}
+
+static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ pr_info("Unsupported ip_version value\n");
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
+
+ return 0;
+}
+
+static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
+ DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
+ bool inner,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_address, mask, dst_ip_31_0);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_address, mask, src_ip_31_0);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_port, mask, tcp_dport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ destination_port, mask, udp_dport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_port, mask, tcp_sport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ source_port, mask, udp_sport);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ protocol, mask, ip_protocol);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ dscp, mask, ip_dscp);
+ DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+ ecn, mask, ip_ecn);
+
+ if (mask->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
+ mask->tcp_flags = 0;
+ }
+}
+
+static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ pr_info("Unsupported ip_version value\n");
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
+}
+
+static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
+}
+
+static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+ dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
+}
+
+static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
+}
+
+static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
+ DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
+
+ if (mask->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
+ mask->tcp_flags = 0;
+ }
+}
+
+static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+}
+
+static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ return 0;
+}
+
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
+{
+ sb->rx = rx;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
+ sb->byte_mask = 0;
+ sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
+}
+
+static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+
+ if (inner)
+ DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
+ else
+ DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
+}
+
+static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
+ else
+ DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
+}
+
+static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
+
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
+ DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+}
+
+static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+ dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_gre_tag;
+}
+
+static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_udp_ttl);
+ }
+}
+
+static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2_mask, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2_mask, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2_mask, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
+
+static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
+ bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
+ u32 icmp_header_data_mask;
+ u32 icmp_type_mask;
+ u32 icmp_code_mask;
+ int dw0_location;
+ int dw1_location;
+
+ if (is_ipv4_mask) {
+ icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
+ icmp_type_mask = misc_3_mask->icmpv4_type;
+ icmp_code_mask = misc_3_mask->icmpv4_code;
+ dw0_location = caps->flex_parser_id_icmp_dw0;
+ dw1_location = caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
+ icmp_type_mask = misc_3_mask->icmpv6_type;
+ icmp_code_mask = misc_3_mask->icmpv6_code;
+ dw0_location = caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ if (icmp_type_mask) {
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+ (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_type = 0;
+ else
+ misc_3_mask->icmpv6_type = 0;
+ }
+ if (icmp_code_mask) {
+ u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
+ flex_parser_4);
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+ cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_code = 0;
+ else
+ misc_3_mask->icmpv6_code = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ if (icmp_header_data_mask) {
+ MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
+ (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+ if (is_ipv4_mask)
+ misc_3_mask->icmpv4_header_data = 0;
+ else
+ misc_3_mask->icmpv6_header_data = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+ u32 icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u32 icmp_type;
+ u32 icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = misc_3->icmpv4_header_data;
+ icmp_type = misc_3->icmpv4_type;
+ icmp_code = misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = misc_3->icmpv6_header_data;
+ icmp_type = misc_3->icmpv6_type;
+ icmp_code = misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ if (icmp_type) {
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_type = 0;
+ else
+ misc_3->icmpv6_type = 0;
+ }
+
+ if (icmp_code) {
+ u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
+ flex_parser_4);
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_code = 0;
+ else
+ misc_3->icmpv6_code = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ if (icmp_header_data) {
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+ if (is_ipv4)
+ misc_3->icmpv4_header_data = 0;
+ else
+ misc_3->icmpv6_header_data = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->caps = caps;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
+
+ return 0;
+}
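
The ICMP builders above pack type, code and header data into flex parser dwords 4 and 5 at the bit offsets defined before them. A minimal worked example of that packing, assuming dw0 is reported as 4 (the only case the switch handles); the helper below is illustrative and not part of the patch:

/* Hypothetical helper: flex_parser_4 value for an ICMPv4 echo request
 * (type 8, code 0), using the offsets defined above.
 */
static u32 example_icmp_dw0(u8 icmp_type, u8 icmp_code)
{
	return ((u32)icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
	       ((u32)icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
}

/* example_icmp_dw0(8, 0) == 8 << 24 == 0x08000000 */
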
+
+static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(general_purpose, bit_mask,
+ general_purpose_lookup_field, misc_2_mask,
+ metadata_reg_a);
+}
+
+static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2_mask, metadata_reg_a);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
+}
+
+static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+ if (inner) {
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+ inner_tcp_seq_num);
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+ inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+ outer_tcp_seq_num);
+ DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+ outer_tcp_ack_num);
+ }
+}
+
+static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+}
+
+static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+ if (misc_3_mask->outer_vxlan_gpe_flags ||
+ misc_3_mask->outer_vxlan_gpe_next_protocol) {
+ MLX5_SET(ste_flex_parser_tnl, bit_mask,
+ flex_parser_tunneling_header_63_32,
+ (misc_3_mask->outer_vxlan_gpe_flags << 24) |
+ (misc_3_mask->outer_vxlan_gpe_next_protocol));
+ misc_3_mask->outer_vxlan_gpe_flags = 0;
+ misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
+ }
+
+ if (misc_3_mask->outer_vxlan_gpe_vni) {
+ MLX5_SET(ste_flex_parser_tnl, bit_mask,
+ flex_parser_tunneling_header_31_0,
+ misc_3_mask->outer_vxlan_gpe_vni << 8);
+ misc_3_mask->outer_vxlan_gpe_vni = 0;
+ }
+}
+
+static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 *tag = hw_ste->tag;
+
+ if (misc3->outer_vxlan_gpe_flags ||
+ misc3->outer_vxlan_gpe_next_protocol) {
+ MLX5_SET(ste_flex_parser_tnl, tag,
+ flex_parser_tunneling_header_63_32,
+ (misc3->outer_vxlan_gpe_flags << 24) |
+ (misc3->outer_vxlan_gpe_next_protocol));
+ misc3->outer_vxlan_gpe_flags = 0;
+ misc3->outer_vxlan_gpe_next_protocol = 0;
+ }
+
+ if (misc3->outer_vxlan_gpe_vni) {
+ MLX5_SET(ste_flex_parser_tnl, tag,
+ flex_parser_tunneling_header_31_0,
+ misc3->outer_vxlan_gpe_vni << 8);
+ misc3->outer_vxlan_gpe_vni = 0;
+ }
+
+ return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+}
+
+static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
+ misc_2_mask, metadata_reg_c_0);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
+ misc_2_mask, metadata_reg_c_1);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
+ misc_2_mask, metadata_reg_c_2);
+ DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
+ misc_2_mask, metadata_reg_c_3);
+}
+
+static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
+}
+
+static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
+ misc_2_mask, metadata_reg_c_4);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
+ misc_2_mask, metadata_reg_c_5);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
+ misc_2_mask, metadata_reg_c_6);
+ DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
+ misc_2_mask, metadata_reg_c_7);
+}
+
+static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+}
+
+static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ if (misc_mask->source_port != 0xffff)
+ return -EINVAL;
+
+ DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+
+ return 0;
+}
+
+static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
+ if (!vport_cap)
+ return -EINVAL;
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+
+ return 0;
+}
+
+int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ int ret;
+
+ ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
new file mode 100644
index 000000000000..e178d8d3dbc9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action)
+{
+ struct mlx5dr_matcher *last_matcher = NULL;
+ struct mlx5dr_htbl_connect_info info;
+ struct mlx5dr_ste_htbl *last_htbl;
+ int ret;
+
+ if (action && action->action_type != DR_ACTION_TYP_FT)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ if (!list_empty(&tbl->matcher_list))
+ last_matcher = list_last_entry(&tbl->matcher_list,
+ struct mlx5dr_matcher,
+ matcher_list);
+
+ if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
+ tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+ if (last_matcher)
+ last_htbl = last_matcher->rx.e_anchor;
+ else
+ last_htbl = tbl->rx.s_anchor;
+
+ tbl->rx.default_icm_addr = action ?
+ action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ tbl->rx.nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = tbl->rx.default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+ tbl->rx.nic_dmn,
+ last_htbl,
+ &info, true);
+ if (ret) {
+ mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
+ tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+ if (last_matcher)
+ last_htbl = last_matcher->tx.e_anchor;
+ else
+ last_htbl = tbl->tx.s_anchor;
+
+ tbl->tx.default_icm_addr = action ?
+ action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+ tbl->tx.nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = tbl->tx.default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+ tbl->tx.nic_dmn,
+ last_htbl, &info, true);
+ if (ret) {
+ mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Release old action */
+ if (tbl->miss_action)
+ refcount_dec(&tbl->miss_action->refcount);
+
+ /* Set new miss action */
+ tbl->miss_action = action;
+ if (tbl->miss_action)
+ refcount_inc(&action->refcount);
+
+out:
+ mutex_unlock(&tbl->dmn->mutex);
+ return ret;
+}
+
+static void dr_table_uninit_nic(struct mlx5dr_table_rx_tx *nic_tbl)
+{
+ mlx5dr_htbl_put(nic_tbl->s_anchor);
+}
+
+static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
+{
+ dr_table_uninit_nic(&tbl->rx);
+ dr_table_uninit_nic(&tbl->tx);
+}
+
+static void dr_table_uninit(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->mutex);
+
+ switch (tbl->dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ dr_table_uninit_nic(&tbl->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ dr_table_uninit_nic(&tbl->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ dr_table_uninit_fdb(tbl);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ mutex_unlock(&tbl->dmn->mutex);
+}
+
+static int dr_table_init_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl)
+{
+ struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+ struct mlx5dr_htbl_connect_info info;
+ int ret;
+
+ nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
+
+ nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+ DR_CHUNK_SIZE_1,
+ MLX5DR_STE_LU_TYPE_DONT_CARE,
+ 0);
+ if (!nic_tbl->s_anchor)
+ return -ENOMEM;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_dmn->default_icm_addr;
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+ nic_tbl->s_anchor,
+ &info, true);
+ if (ret)
+ goto free_s_anchor;
+
+ mlx5dr_htbl_get(nic_tbl->s_anchor);
+
+ return 0;
+
+free_s_anchor:
+ mlx5dr_ste_htbl_free(nic_tbl->s_anchor);
+ return ret;
+}
+
+static int dr_table_init_fdb(struct mlx5dr_table *tbl)
+{
+ int ret;
+
+ ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+ if (ret)
+ return ret;
+
+ ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+ if (ret)
+ goto destroy_rx;
+
+ return 0;
+
+destroy_rx:
+ dr_table_uninit_nic(&tbl->rx);
+ return ret;
+}
+
+static int dr_table_init(struct mlx5dr_table *tbl)
+{
+ int ret = 0;
+
+ INIT_LIST_HEAD(&tbl->matcher_list);
+
+ mutex_lock(&tbl->dmn->mutex);
+
+ switch (tbl->dmn->type) {
+ case MLX5DR_DOMAIN_TYPE_NIC_RX:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
+ tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+ ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_NIC_TX:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
+ tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+ ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+ break;
+ case MLX5DR_DOMAIN_TYPE_FDB:
+ tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+ tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+ ret = dr_table_init_fdb(tbl);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ mutex_unlock(&tbl->dmn->mutex);
+
+ return ret;
+}
+
+static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+ return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
+ tbl->table_id,
+ tbl->table_type);
+}
+
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+ u64 icm_addr_rx = 0;
+ u64 icm_addr_tx = 0;
+ int ret;
+
+ if (tbl->rx.s_anchor)
+ icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+
+ if (tbl->tx.s_anchor)
+ icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+
+ ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
+ tbl->table_type,
+ icm_addr_rx,
+ icm_addr_tx,
+ tbl->dmn->info.caps.max_ft_level - 1,
+ true, false, NULL,
+ &tbl->table_id);
+
+ return ret;
+}
+
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+{
+ struct mlx5dr_table *tbl;
+ int ret;
+
+ refcount_inc(&dmn->refcount);
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ goto dec_ref;
+
+ tbl->dmn = dmn;
+ tbl->level = level;
+ refcount_set(&tbl->refcount, 1);
+
+ ret = dr_table_init(tbl);
+ if (ret)
+ goto free_tbl;
+
+ ret = dr_table_create_sw_owned_tbl(tbl);
+ if (ret)
+ goto uninit_tbl;
+
+ return tbl;
+
+uninit_tbl:
+ dr_table_uninit(tbl);
+free_tbl:
+ kfree(tbl);
+dec_ref:
+ refcount_dec(&dmn->refcount);
+ return NULL;
+}
+
+int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
+{
+ int ret;
+
+ if (refcount_read(&tbl->refcount) > 1)
+ return -EBUSY;
+
+ ret = dr_table_destroy_sw_owned_tbl(tbl);
+ if (ret)
+ return ret;
+
+ dr_table_uninit(tbl);
+
+ if (tbl->miss_action)
+ refcount_dec(&tbl->miss_action->refcount);
+
+ refcount_dec(&tbl->dmn->refcount);
+ kfree(tbl);
+
+ return ret;
+}
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
+{
+ return tbl->table_id;
+}
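
Taken together, dr_table.c gives a small lifecycle: create a table, optionally chain its miss path to another table through an FT-type action, and destroy it once no matcher or rule still references it. A minimal usage sketch under those assumptions; the caller, the action and the helper name are placeholders, not part of the patch:

static int example_table_lifecycle(struct mlx5dr_domain *dmn,
				   struct mlx5dr_action *fwd_tbl_action)
{
	struct mlx5dr_table *tbl;
	int err;

	tbl = mlx5dr_table_create(dmn, 1);	/* level 1 table */
	if (!tbl)
		return -ENOMEM;

	/* must be a DR_ACTION_TYP_FT action; passing NULL restores the
	 * domain's default miss address
	 */
	err = mlx5dr_table_set_miss_action(tbl, fwd_tbl_action);
	if (err)
		goto destroy;

	pr_debug("created dr table id %u\n", mlx5dr_table_get_id(tbl));

destroy:
	mlx5dr_table_destroy(tbl);
	return err;
}
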
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
new file mode 100644
index 000000000000..a37ee6359be2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _DR_TYPES_
+#define _DR_TYPES_
+
+#include <linux/mlx5/driver.h>
+#include <linux/refcount.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+#include "mlx5_ifc_dr.h"
+#include "mlx5dr.h"
+
+#define DR_RULE_MAX_STES 17
+#define DR_ACTION_MAX_STES 5
+#define WIRE_PORT 0xFFFF
+#define DR_STE_SVLAN 0x1
+#define DR_STE_CVLAN 0x2
+
+#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
+#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
+#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+
+enum mlx5dr_icm_chunk_size {
+ DR_CHUNK_SIZE_1,
+ DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
+ DR_CHUNK_SIZE_2,
+ DR_CHUNK_SIZE_4,
+ DR_CHUNK_SIZE_8,
+ DR_CHUNK_SIZE_16,
+ DR_CHUNK_SIZE_32,
+ DR_CHUNK_SIZE_64,
+ DR_CHUNK_SIZE_128,
+ DR_CHUNK_SIZE_256,
+ DR_CHUNK_SIZE_512,
+ DR_CHUNK_SIZE_1K,
+ DR_CHUNK_SIZE_2K,
+ DR_CHUNK_SIZE_4K,
+ DR_CHUNK_SIZE_8K,
+ DR_CHUNK_SIZE_16K,
+ DR_CHUNK_SIZE_32K,
+ DR_CHUNK_SIZE_64K,
+ DR_CHUNK_SIZE_128K,
+ DR_CHUNK_SIZE_256K,
+ DR_CHUNK_SIZE_512K,
+ DR_CHUNK_SIZE_1024K,
+ DR_CHUNK_SIZE_2048K,
+ DR_CHUNK_SIZE_MAX,
+};
+
+enum mlx5dr_icm_type {
+ DR_ICM_TYPE_STE,
+ DR_ICM_TYPE_MODIFY_ACTION,
+};
+
+static inline enum mlx5dr_icm_chunk_size
+mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
+{
+ chunk += 2;
+ if (chunk < DR_CHUNK_SIZE_MAX)
+ return chunk;
+
+ return DR_CHUNK_SIZE_MAX;
+}
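
Each step of the chunk-size enum above doubles the number of entries in a chunk, so skipping one step means every "next higher" request quadruples the table. A couple of worked values, assuming the numeric suffixes denote entry counts as the names suggest:

/* mlx5dr_icm_next_higher_chunk(DR_CHUNK_SIZE_4)  == DR_CHUNK_SIZE_16
 * mlx5dr_icm_next_higher_chunk(DR_CHUNK_SIZE_1K) == DR_CHUNK_SIZE_4K
 * requests at DR_CHUNK_SIZE_1024K or above saturate to DR_CHUNK_SIZE_MAX
 */
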
+
+enum {
+ DR_STE_SIZE = 64,
+ DR_STE_SIZE_CTRL = 32,
+ DR_STE_SIZE_TAG = 16,
+ DR_STE_SIZE_MASK = 16,
+};
+
+enum {
+ DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
+};
+
+enum {
+ DR_MODIFY_ACTION_SIZE = 8,
+};
+
+enum mlx5dr_matcher_criteria {
+ DR_MATCHER_CRITERIA_EMPTY = 0,
+ DR_MATCHER_CRITERIA_OUTER = 1 << 0,
+ DR_MATCHER_CRITERIA_MISC = 1 << 1,
+ DR_MATCHER_CRITERIA_INNER = 1 << 2,
+ DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
+ DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
+ DR_MATCHER_CRITERIA_MAX = 1 << 5,
+};
+
+enum mlx5dr_action_type {
+ DR_ACTION_TYP_TNL_L2_TO_L2,
+ DR_ACTION_TYP_L2_TO_TNL_L2,
+ DR_ACTION_TYP_TNL_L3_TO_L2,
+ DR_ACTION_TYP_L2_TO_TNL_L3,
+ DR_ACTION_TYP_DROP,
+ DR_ACTION_TYP_QP,
+ DR_ACTION_TYP_FT,
+ DR_ACTION_TYP_CTR,
+ DR_ACTION_TYP_TAG,
+ DR_ACTION_TYP_MODIFY_HDR,
+ DR_ACTION_TYP_VPORT,
+ DR_ACTION_TYP_POP_VLAN,
+ DR_ACTION_TYP_PUSH_VLAN,
+ DR_ACTION_TYP_MAX,
+};
+
+struct mlx5dr_icm_pool;
+struct mlx5dr_icm_chunk;
+struct mlx5dr_icm_bucket;
+struct mlx5dr_ste_htbl;
+struct mlx5dr_match_param;
+struct mlx5dr_cmd_caps;
+struct mlx5dr_matcher_rx_tx;
+
+struct mlx5dr_ste {
+ u8 *hw_ste;
+ /* refcount: indicates the number of rules that are using this STE */
+ refcount_t refcount;
+
+ /* attached to the miss_list head at each htbl entry */
+ struct list_head miss_list_node;
+
+ /* each rule member that uses this STE is attached here */
+ struct list_head rule_list;
+
+ /* this STE is a member of htbl */
+ struct mlx5dr_ste_htbl *htbl;
+
+ struct mlx5dr_ste_htbl *next_htbl;
+
+ /* this STE is part of a rule; its location within the rule's STE chain */
+ u8 ste_chain_location;
+};
+
+struct mlx5dr_ste_htbl_ctrl {
+ /* total number of valid entries belonging to this hash table. This
+ * includes both the non-collision and collision entries
+ */
+ unsigned int num_of_valid_entries;
+
+ /* total number of collision entries attached to this table */
+ unsigned int num_of_collisions;
+ unsigned int increase_threshold;
+ u8 may_grow:1;
+};
+
+struct mlx5dr_ste_htbl {
+ u8 lu_type;
+ u16 byte_mask;
+ refcount_t refcount;
+ struct mlx5dr_icm_chunk *chunk;
+ struct mlx5dr_ste *ste_arr;
+ u8 *hw_ste_arr;
+
+ struct list_head *miss_list;
+
+ enum mlx5dr_icm_chunk_size chunk_size;
+ struct mlx5dr_ste *pointing_ste;
+
+ struct mlx5dr_ste_htbl_ctrl ctrl;
+};
+
+struct mlx5dr_ste_send_info {
+ struct mlx5dr_ste *ste;
+ struct list_head send_list;
+ u16 size;
+ u16 offset;
+ u8 data_cont[DR_STE_SIZE];
+ u8 *data;
+};
+
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+ u16 offset, u8 *data,
+ struct mlx5dr_ste_send_info *ste_info,
+ struct list_head *send_list,
+ bool copy_data);
+
+struct mlx5dr_ste_build {
+ u8 inner:1;
+ u8 rx:1;
+ struct mlx5dr_cmd_caps *caps;
+ u8 lu_type;
+ u16 byte_mask;
+ u8 bit_mask[DR_STE_SIZE_MASK];
+ int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p);
+};
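
Each mlx5dr_ste_build_* helper fills one of these descriptors with a bit mask, a lookup type and a tag callback that is later run per rule value. A conceptual sketch of how an array of prepared builders could be driven; the driver's real path is mlx5dr_ste_build_ste_arr() declared below, and the names here are illustrative only:

/* Conceptual sketch, not the driver's implementation: write one STE tag
 * per prepared builder into a chain of DR_STE_SIZE-byte entries.
 */
static int example_build_tags(struct mlx5dr_ste_build *builders,
			      int num_builders,
			      struct mlx5dr_match_param *value,
			      u8 *ste_arr)
{
	int i, ret;

	for (i = 0; i < num_builders; i++) {
		ret = builders[i].ste_build_tag_func(value, &builders[i],
						     ste_arr + i * DR_STE_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}
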
+
+struct mlx5dr_ste_htbl *
+mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ u8 lu_type, u16 byte_mask);
+
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
+
+static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
+{
+ if (refcount_dec_and_test(&htbl->refcount))
+ mlx5dr_ste_htbl_free(htbl);
+}
+
+static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
+{
+ refcount_inc(&htbl->refcount);
+}
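
These two inline helpers encode the ownership rule for STE hash tables: whoever stores a pointer to a table holds a reference, and the last put frees it through mlx5dr_ste_htbl_free(). A small sketch of that convention; the helpers below are illustrative, not actual call sites:

static void example_link_htbl(struct mlx5dr_ste_htbl **slot,
			      struct mlx5dr_ste_htbl *htbl)
{
	mlx5dr_htbl_get(htbl);		/* the new owner takes a reference */
	*slot = htbl;
}

static void example_unlink_htbl(struct mlx5dr_ste_htbl **slot)
{
	mlx5dr_htbl_put(*slot);		/* last owner's put frees the table */
	*slot = NULL;
}
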
+
+/* STE utils */
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
+void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
+void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+ struct mlx5dr_ste_htbl *next_htbl);
+void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
+u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
+void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
+void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
+bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 ste_location);
+void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
+void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
+void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3);
+void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
+void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
+void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
+void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
+ bool go_back);
+void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
+u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
+void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index);
+void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
+ struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ if (refcount_dec_and_test(&ste->refcount))
+ mlx5dr_ste_free(ste, matcher, nic_matcher);
+}
+
+/* initialized to 0; increased only when this STE appears in a new rule */
+static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
+{
+ refcount_inc(&ste->refcount);
+}
+
+void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
+bool mlx5dr_ste_equal_tag(void *src, void *dst);
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ u8 *cur_hw_ste,
+ enum mlx5dr_icm_chunk_size log_table_size);
+
+/* STE build functions */
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+ u8 match_criteria,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_match_param *value);
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_match_param *value,
+ u8 *ste_arr);
+int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
+
+/* Actions utils */
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_action *actions[],
+ u32 num_actions,
+ u8 *ste_arr,
+ u32 *new_hw_ste_arr_sz);
+
+struct mlx5dr_match_spec {
+ u32 smac_47_16; /* Source MAC address of incoming packet */
+ /* Incoming packet Ethertype - this is the Ethertype
+ * following the last VLAN tag of the packet
+ */
+ u32 ethertype:16;
+ u32 smac_15_0:16; /* Source MAC address of incoming packet */
+ u32 dmac_47_16; /* Destination MAC address of incoming packet */
+ /* VLAN ID of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_vid:12;
+ /* CFI bit of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_cfi:1;
+ /* Priority of first VLAN tag in the incoming packet.
+ * Valid only when cvlan_tag==1 or svlan_tag==1
+ */
+ u32 first_prio:3;
+ u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
+ /* TCP flags. Bit 0: FIN, Bit 1: SYN, Bit 2: RST, Bit 3: PSH, Bit 4: ACK,
+ * Bit 5: URG, Bit 6: ECE, Bit 7: CWR, Bit 8: NS
+ */
+ u32 tcp_flags:9;
+ u32 ip_version:4; /* IP version */
+ u32 frag:1; /* Packet is an IP fragment */
+ /* The first vlan in the packet is s-vlan (0x88a8).
+ * cvlan_tag and svlan_tag cannot be set together
+ */
+ u32 svlan_tag:1;
+ /* The first vlan in the packet is c-vlan (0x8100).
+ * cvlan_tag and svlan_tag cannot be set together
+ */
+ u32 cvlan_tag:1;
+ /* Explicit Congestion Notification derived from
+ * Traffic Class/TOS field of IPv6/v4
+ */
+ u32 ip_ecn:2;
+ /* Differentiated Services Code Point derived from
+ * Traffic Class/TOS field of IPv6/v4
+ */
+ u32 ip_dscp:6;
+ u32 ip_protocol:8; /* IP protocol */
+ /* TCP destination port.
+ * tcp and udp sport/dport are mutually exclusive
+ */
+ u32 tcp_dport:16;
+ /* TCP source port. tcp and udp sport/dport are mutually exclusive */
+ u32 tcp_sport:16;
+ u32 ttl_hoplimit:8;
+ u32 reserved:24;
+ /* UDP destination port. tcp and udp sport/dport are mutually exclusive */
+ u32 udp_dport:16;
+ /* UDP source port. tcp and udp sport/dport are mutually exclusive */
+ u32 udp_sport:16;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_127_96;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_95_64;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_63_32;
+ /* IPv6 source address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 src_ip_31_0;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_127_96;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_95_64;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_63_32;
+ /* IPv6 destination address of incoming packets
+ * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+ * This field should be qualified by an appropriate ethertype
+ */
+ u32 dst_ip_31_0;
+};
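
A hedged sketch of filling this structure for an outer IPv4/TCP 5-tuple, i.e. the fields consumed by the eth_l3_ipv4_5_tuple builder earlier in the patch. The helper name is made up, and the literal 4 stands in for the IP_VERSION_IPV4 value the tag code compares against:

/* Illustrative only: exact 5-tuple values for the outer header. A real
 * caller also sets the same fields to all-ones in the mask copy of this
 * structure before building the matcher.
 */
static void example_fill_ipv4_tcp_5_tuple(struct mlx5dr_match_param *value,
					  __be32 saddr, __be32 daddr,
					  __be16 sport, __be16 dport)
{
	struct mlx5dr_match_spec *spec = &value->outer;

	spec->ip_version = 4;			/* assumed IP_VERSION_IPV4 */
	spec->ip_protocol = 6;			/* TCP */
	spec->src_ip_31_0 = be32_to_cpu(saddr);	/* IPv4 uses bits 31:0 */
	spec->dst_ip_31_0 = be32_to_cpu(daddr);
	spec->tcp_sport = be16_to_cpu(sport);
	spec->tcp_dport = be16_to_cpu(dport);
}
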
+
+struct mlx5dr_match_misc {
+ u32 source_sqn:24; /* Source SQN */
+ u32 source_vhca_port:4;
+ /* used with GRE, sequence number exists when gre_s_present == 1 */
+ u32 gre_s_present:1;
+ /* used with GRE, key exists when gre_k_present == 1 */
+ u32 gre_k_present:1;
+ u32 reserved_auto1:1;
+ /* used with GRE, checksum exists when gre_c_present == 1 */
+ u32 gre_c_present:1;
+ /* Source port. 0xffff indicates the wire port */
+ u32 source_port:16;
+ u32 reserved_auto2:16;
+ /* VLAN ID of the second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
+ */
+ u32 inner_second_vid:12;
+ /* CFI bit of the second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
+ */
+ u32 inner_second_cfi:1;
+ /* Priority of the second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
+ */
+ u32 inner_second_prio:3;
+ /* VLAN ID of the second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
+ */
+ u32 outer_second_vid:12;
+ /* CFI bit of the second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
+ */
+ u32 outer_second_cfi:1;
+ /* Priority of the second VLAN tag in the outer header of the incoming packet.
+ * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
+ */
+ u32 outer_second_prio:3;
+ u32 gre_protocol:16; /* GRE Protocol (outer) */
+ u32 reserved_auto3:12;
+ /* The second vlan in the inner header of the packet is s-vlan (0x88a8).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 inner_second_svlan_tag:1;
+ /* The second vlan in the outer header of the packet is s-vlan (0x88a8).
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+ */
+ u32 outer_second_svlan_tag:1;
+ /* The second vlan in the inner header of the packet is c-vlan (0x8100).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 inner_second_cvlan_tag:1;
+ /* The second vlan in the outer header of the packet is c-vlan (0x8100).
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+ */
+ u32 outer_second_cvlan_tag:1;
+ u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+ u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
+ u32 reserved_auto4:8;
+ u32 vxlan_vni:24; /* VXLAN VNI (outer) */
+ u32 geneve_oam:1; /* GENEVE OAM field (outer) */
+ u32 reserved_auto5:7;
+ u32 geneve_vni:24; /* GENEVE VNI field (outer) */
+ u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
+ u32 reserved_auto6:12;
+ u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
+ u32 reserved_auto7:12;
+ u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+ u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
+ u32 reserved_auto8:10;
+ u32 bth_dst_qp:24; /* Destination QP in BTH header */
+ u32 reserved_auto9:8;
+ u8 reserved_auto10[20];
+};
+
+struct mlx5dr_match_misc2 {
+ u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
+ u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
+ u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
+ u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
+ u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
+ u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
+ u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
+ u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
+ u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
+ u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
+ u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
+ u32 metadata_reg_c_7; /* metadata_reg_c_7 */
+ u32 metadata_reg_c_6; /* metadata_reg_c_6 */
+ u32 metadata_reg_c_5; /* metadata_reg_c_5 */
+ u32 metadata_reg_c_4; /* metadata_reg_c_4 */
+ u32 metadata_reg_c_3; /* metadata_reg_c_3 */
+ u32 metadata_reg_c_2; /* metadata_reg_c_2 */
+ u32 metadata_reg_c_1; /* metadata_reg_c_1 */
+ u32 metadata_reg_c_0; /* metadata_reg_c_0 */
+ u32 metadata_reg_a; /* metadata_reg_a */
+ u32 metadata_reg_b; /* metadata_reg_b */
+ u8 reserved_auto2[8];
+};
+
+struct mlx5dr_match_misc3 {
+ u32 inner_tcp_seq_num;
+ u32 outer_tcp_seq_num;
+ u32 inner_tcp_ack_num;
+ u32 outer_tcp_ack_num;
+ u32 outer_vxlan_gpe_vni:24;
+ u32 reserved_auto1:8;
+ u32 reserved_auto2:16;
+ u32 outer_vxlan_gpe_flags:8;
+ u32 outer_vxlan_gpe_next_protocol:8;
+ u32 icmpv4_header_data;
+ u32 icmpv6_header_data;
+ u32 icmpv6_code:8;
+ u32 icmpv6_type:8;
+ u32 icmpv4_code:8;
+ u32 icmpv4_type:8;
+ u8 reserved_auto3[0x1c];
+};
+
+struct mlx5dr_match_param {
+ struct mlx5dr_match_spec outer;
+ struct mlx5dr_match_misc misc;
+ struct mlx5dr_match_spec inner;
+ struct mlx5dr_match_misc2 misc2;
+ struct mlx5dr_match_misc3 misc3;
+};
+
+#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
+ (_misc3)->icmpv4_code || \
+ (_misc3)->icmpv4_header_data)
+
+struct mlx5dr_esw_caps {
+ u64 drop_icm_address_rx;
+ u64 drop_icm_address_tx;
+ u64 uplink_icm_address_rx;
+ u64 uplink_icm_address_tx;
+ bool sw_owner;
+};
+
+struct mlx5dr_cmd_vport_cap {
+ u16 vport_gvmi;
+ u16 vhca_gvmi;
+ u64 icm_address_rx;
+ u64 icm_address_tx;
+ u32 num;
+};
+
+struct mlx5dr_cmd_caps {
+ u16 gvmi;
+ u64 nic_rx_drop_address;
+ u64 nic_tx_drop_address;
+ u64 nic_tx_allow_address;
+ u64 esw_rx_drop_address;
+ u64 esw_tx_drop_address;
+ u32 log_icm_size;
+ u64 hdr_modify_icm_addr;
+ u32 flex_protocols;
+ u8 flex_parser_id_icmp_dw0;
+ u8 flex_parser_id_icmp_dw1;
+ u8 flex_parser_id_icmpv6_dw0;
+ u8 flex_parser_id_icmpv6_dw1;
+ u8 max_ft_level;
+ u16 roce_min_src_udp;
+ u8 num_esw_ports;
+ bool eswitch_manager;
+ bool rx_sw_owner;
+ bool tx_sw_owner;
+ bool fdb_sw_owner;
+ u32 num_vports;
+ struct mlx5dr_esw_caps esw_caps;
+ struct mlx5dr_cmd_vport_cap *vports_caps;
+ bool prio_tag_required;
+};
+
+struct mlx5dr_domain_rx_tx {
+ u64 drop_icm_addr;
+ u64 default_icm_addr;
+ enum mlx5dr_ste_entry_type ste_type;
+};
+
+struct mlx5dr_domain_info {
+ bool supp_sw_steering;
+ u32 max_inline_size;
+ u32 max_send_wr;
+ u32 max_log_sw_icm_sz;
+ u32 max_log_action_icm_sz;
+ struct mlx5dr_domain_rx_tx rx;
+ struct mlx5dr_domain_rx_tx tx;
+ struct mlx5dr_cmd_caps caps;
+};
+
+struct mlx5dr_domain_cache {
+ struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
+};
+
+struct mlx5dr_domain {
+ struct mlx5dr_domain *peer_dmn;
+ struct mlx5_core_dev *mdev;
+ u32 pdn;
+ struct mlx5_uars_page *uar;
+ enum mlx5dr_domain_type type;
+ refcount_t refcount;
+ struct mutex mutex; /* protect domain */
+ struct mlx5dr_icm_pool *ste_icm_pool;
+ struct mlx5dr_icm_pool *action_icm_pool;
+ struct mlx5dr_send_ring *send_ring;
+ struct mlx5dr_domain_info info;
+ struct mlx5dr_domain_cache cache;
+};
+
+struct mlx5dr_table_rx_tx {
+ struct mlx5dr_ste_htbl *s_anchor;
+ struct mlx5dr_domain_rx_tx *nic_dmn;
+ u64 default_icm_addr;
+};
+
+struct mlx5dr_table {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_table_rx_tx rx;
+ struct mlx5dr_table_rx_tx tx;
+ u32 level;
+ u32 table_type;
+ u32 table_id;
+ struct list_head matcher_list;
+ struct mlx5dr_action *miss_action;
+ refcount_t refcount;
+};
+
+struct mlx5dr_matcher_rx_tx {
+ struct mlx5dr_ste_htbl *s_htbl;
+ struct mlx5dr_ste_htbl *e_anchor;
+ struct mlx5dr_ste_build *ste_builder;
+ struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
+ struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+ u8 num_of_builders;
+ u8 num_of_builders4;
+ u8 num_of_builders6;
+ u64 default_icm_addr;
+ struct mlx5dr_table_rx_tx *nic_tbl;
+};
+
+struct mlx5dr_matcher {
+ struct mlx5dr_table *tbl;
+ struct mlx5dr_matcher_rx_tx rx;
+ struct mlx5dr_matcher_rx_tx tx;
+ struct list_head matcher_list;
+ u16 prio;
+ struct mlx5dr_match_param mask;
+ u8 match_criteria;
+ refcount_t refcount;
+ struct mlx5dv_flow_matcher *dv_matcher;
+};
+
+struct mlx5dr_rule_member {
+ struct mlx5dr_ste *ste;
+ /* attached to mlx5dr_rule via this */
+ struct list_head list;
+ /* attached to mlx5dr_ste via this */
+ struct list_head use_ste_list;
+};
+
+struct mlx5dr_action {
+ enum mlx5dr_action_type action_type;
+ refcount_t refcount;
+ union {
+ struct {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_icm_chunk *chunk;
+ u8 *data;
+ u32 data_size;
+ u16 num_of_actions;
+ u32 index;
+ u8 allow_rx:1;
+ u8 allow_tx:1;
+ u8 modify_ttl:1;
+ } rewrite;
+ struct {
+ struct mlx5dr_domain *dmn;
+ u32 reformat_id;
+ u32 reformat_size;
+ } reformat;
+ struct {
+ u8 is_fw_tbl:1;
+ union {
+ struct mlx5dr_table *tbl;
+ struct {
+ struct mlx5_flow_table *ft;
+ u64 rx_icm_addr;
+ u64 tx_icm_addr;
+ struct mlx5_core_dev *mdev;
+ } fw_tbl;
+ };
+ } dest_tbl;
+ struct {
+ u32 ctr_id;
+ u32 offeset;
+ } ctr;
+ struct {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_cmd_vport_cap *caps;
+ u32 num;
+ } vport;
+ struct {
+ u32 vlan_hdr; /* tpid_pcp_dei_vid */
+ } push_vlan;
+ u32 flow_tag;
+ };
+};
+
+enum mlx5dr_connect_type {
+ CONNECT_HIT = 1,
+ CONNECT_MISS = 2,
+};
+
+struct mlx5dr_htbl_connect_info {
+ enum mlx5dr_connect_type type;
+ union {
+ struct mlx5dr_ste_htbl *hit_next_htbl;
+ u64 miss_icm_addr;
+ };
+};
+
+struct mlx5dr_rule_rx_tx {
+ struct list_head rule_members_list;
+ struct mlx5dr_matcher_rx_tx *nic_matcher;
+};
+
+struct mlx5dr_rule {
+ struct mlx5dr_matcher *matcher;
+ struct mlx5dr_rule_rx_tx rx;
+ struct mlx5dr_rule_rx_tx tx;
+ struct list_head rule_actions_list;
+};
+
+void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
+ struct mlx5dr_ste *ste);
+
+struct mlx5dr_icm_chunk {
+ struct mlx5dr_icm_bucket *bucket;
+ struct list_head chunk_list;
+ u32 rkey;
+ u32 num_of_entries;
+ u32 byte_size;
+ u64 icm_addr;
+ u64 mr_addr;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ u8 *hw_ste_arr;
+ struct list_head *miss_list;
+};
+
+static inline int
+mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+}
+
+static inline int
+mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+}
+
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ bool ipv6);
+
+static inline u32
+mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
+{
+ return 1 << chunk_size;
+}
+
+static inline int
+mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
+ enum mlx5dr_icm_type icm_type)
+{
+ int num_of_entries;
+ int entry_size;
+
+ if (icm_type == DR_ICM_TYPE_STE)
+ entry_size = DR_STE_SIZE;
+ else
+ entry_size = DR_MODIFY_ACTION_SIZE;
+
+ num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+
+ return entry_size * num_of_entries;
+}
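+
+/* Illustrative arithmetic for the two helpers above (a sketch; the
+ * values DR_STE_SIZE == 64 bytes and DR_MODIFY_ACTION_SIZE == 8 bytes
+ * are assumptions, their definitions are not part of this hunk):
+ * a chunk_size of 10 maps to 1 << 10 = 1024 entries, i.e.
+ * 1024 * 64 = 64KB of STE ICM or 1024 * 8 = 8KB of modify-header ICM.
+ */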
+
+static inline struct mlx5dr_cmd_vport_cap *
+mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
+{
+ if (!caps->vports_caps ||
+ (vport >= caps->num_vports && vport != WIRE_PORT))
+ return NULL;
+
+ if (vport == WIRE_PORT)
+ vport = caps->num_vports;
+
+ return &caps->vports_caps[vport];
+}
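+
+/* For example (assuming WIRE_PORT is 0xffff, as the source_port comment
+ * above implies): with num_vports == 4, vports 0..3 map to
+ * vports_caps[0..3], the wire port maps to the extra entry
+ * vports_caps[4], and any other vport number returns NULL.
+ */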
+
+struct mlx5dr_cmd_query_flow_table_details {
+ u8 status;
+ u8 level;
+ u64 sw_owner_icm_root_1;
+ u64 sw_owner_icm_root_0;
+};
+
+/* internal API functions */
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ struct mlx5dr_cmd_caps *caps);
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+ bool other_vport, u16 vport_number,
+ u64 *icm_address_rx,
+ u64 *icm_address_tx);
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
+ bool other_vport, u16 vport_number, u16 *gvmi);
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+ struct mlx5dr_esw_caps *caps);
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ u32 modify_header_id,
+ u32 vport_id);
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id);
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u8 num_of_actions,
+ u64 *actions,
+ u32 *modify_header_id);
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+ u32 modify_header_id);
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 *group_id);
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id);
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u64 icm_addr_rx,
+ u64 icm_addr_tx,
+ u8 level,
+ bool sw_owner,
+ bool term_tbl,
+ u64 *fdb_rx_icm_addr,
+ u32 *table_id);
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ u32 table_type);
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type,
+ u32 table_id,
+ struct mlx5dr_cmd_query_flow_table_details *output);
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+ enum mlx5_reformat_ctx_type rt,
+ size_t reformat_size,
+ void *reformat_data,
+ u32 *reformat_id);
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+struct mlx5dr_cmd_gid_attr {
+ u8 gid[16];
+ u8 mac[6];
+ u32 roce_ver;
+};
+
+struct mlx5dr_cmd_qp_create_attr {
+ u32 page_id;
+ u32 pdn;
+ u32 cqn;
+ u32 pm_state;
+ u32 service_type;
+ u32 buff_umem_id;
+ u32 db_umem_id;
+ u32 sq_wqe_cnt;
+ u32 rq_wqe_cnt;
+ u32 rq_wqe_shift;
+};
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+ u16 index, struct mlx5dr_cmd_gid_attr *attr);
+
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+ enum mlx5dr_icm_type icm_type);
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
+
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size);
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ struct mlx5dr_htbl_connect_info *connect_info,
+ bool update_hw_ste);
+void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+ struct mlx5dr_domain_rx_tx *nic_dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste,
+ struct mlx5dr_htbl_connect_info *connect_info);
+void mlx5dr_ste_copy_param(u8 match_criteria,
+ struct mlx5dr_match_param *set_param,
+ struct mlx5dr_match_parameters *mask);
+
+void mlx5dr_crc32_init_table(void);
+u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
+
+struct mlx5dr_qp {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_qp wq;
+ struct mlx5_uars_page *uar;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_qp mqp;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ unsigned int *wqe_head;
+ unsigned int wqe_cnt;
+ } sq;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ unsigned int wqe_cnt;
+ } rq;
+ int max_inline_data;
+};
+
+struct mlx5dr_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ struct mlx5dr_qp *qp;
+};
+
+struct mlx5dr_mr {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_core_mkey mkey;
+ dma_addr_t dma_addr;
+ void *addr;
+ size_t size;
+};
+
+#define MAX_SEND_CQE 64
+#define MIN_READ_SYNC 64
+
+struct mlx5dr_send_ring {
+ struct mlx5dr_cq *cq;
+ struct mlx5dr_qp *qp;
+ struct mlx5dr_mr *mr;
+ /* How many WQEs are waiting for completion */
+ u32 pending_wqe;
+ /* Signal a completion request per this threshold value */
+ u16 signal_th;
+ /* Each post_send size must be less than max_post_send_size */
+ u32 max_post_send_size;
+ /* manage the send queue */
+ u32 tx_head;
+ void *buf;
+ u32 buf_size;
+ struct ib_wc wc[MAX_SEND_CQE];
+ u8 sync_buff[MIN_READ_SYNC];
+ struct mlx5dr_mr *sync_mr;
+};
+
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring);
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste *ste,
+ u8 *data,
+ u16 size,
+ u16 offset);
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *formatted_ste, u8 *mask);
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_htbl *htbl,
+ u8 *ste_init_data,
+ bool update_hw_ste);
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *action);
+
+struct mlx5dr_fw_recalc_cs_ft {
+ u64 rx_icm_addr;
+ u32 table_id;
+ u32 group_id;
+ u32 modify_hdr_id;
+};
+
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
+int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u32 vport_num,
+ u64 *rx_icm_addr);
+#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
new file mode 100644
index 000000000000..3d587d0bdbbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "mlx5dr.h"
+#include "fs_dr.h"
+
+static bool mlx5_dr_is_fw_table(u32 flags)
+{
+ if (flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
+
+static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int set_miss_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5dr_action *old_miss_action;
+ struct mlx5dr_action *action = NULL;
+ struct mlx5dr_table *next_tbl;
+ int err;
+
+ next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
+ if (next_tbl) {
+ action = mlx5dr_action_create_dest_table(next_tbl);
+ if (!action)
+ return -EINVAL;
+ }
+ old_miss_action = ft->fs_dr_table.miss_action;
+ err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
+ if (err && action) {
+ err = mlx5dr_action_destroy(action);
+ if (err) {
+ action = NULL;
+ mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+ err);
+ }
+ }
+ ft->fs_dr_table.miss_action = action;
+ if (old_miss_action) {
+ err = mlx5dr_action_destroy(old_miss_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+ err);
+ }
+
+ return err;
+}
+
+static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5dr_table *tbl;
+ int err;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
+ log_size,
+ next_ft);
+
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
+ ft->level);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_dr_table.dr_table = tbl;
+ ft->id = mlx5dr_table_get_id(tbl);
+
+ if (next_ft) {
+ err = set_miss_action(ns, ft, next_ft);
+ if (err) {
+ mlx5dr_table_destroy(tbl);
+ ft->fs_dr_table.dr_table = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
+ int err;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
+ err);
+ return err;
+ }
+ if (action) {
+ err = mlx5dr_action_destroy(action);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ return set_miss_action(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5dr_matcher *matcher;
+ u16 priority = MLX5_GET(create_flow_group_in, in,
+ start_flow_index);
+ u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+ in,
+ match_criteria_enable);
+ struct mlx5dr_match_parameters mask;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
+ fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
+ in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
+ priority,
+ match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_dr_matcher.dr_matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
+}
+
+static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
+ dest_attr->vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID,
+ dest_attr->vport.vhca_id);
+}
+
+static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+
+ if (mlx5_dr_is_fw_table(dest_ft->flags))
+ return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
+ return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+}
+
+static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
+ struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+ u32 vlan_hdr;
+
+ vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+ return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
+}
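+
+/* Example of the vlan_hdr layout built above (illustrative values only):
+ * for ethtype 0x8100, prio 5, vid 100 the packed value is
+ * 0x8100 << 16 | 5 << 12 | 100 = 0x81005064, which is then handed to
+ * mlx5dr_action_create_push_vlan() in big-endian form via htonl().
+ */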
+
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *term_action = NULL;
+ struct mlx5dr_match_parameters params;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5dr_action **fs_dr_actions;
+ struct mlx5dr_action *tmp_action;
+ struct mlx5dr_action **actions;
+ bool delay_encap_set = false;
+ struct mlx5dr_rule *rule;
+ struct mlx5_flow_rule *dst;
+ int fs_dr_num_actions = 0;
+ int num_actions = 0;
+ size_t match_sz;
+ int err = 0;
+ int i;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+ actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
+ GFP_KERNEL);
+ if (!actions)
+ return -ENOMEM;
+
+ fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_dr_actions), GFP_KERNEL);
+ if (!fs_dr_actions) {
+ kfree(actions);
+ return -ENOMEM;
+ }
+
+ match_sz = sizeof(fte->val);
+
+ /* The order of the actions must be kept; only the following
+ * order is supported by SW steering:
+ * TX: push vlan -> modify header -> encap
+ * RX: decap -> pop vlan -> modify header
+ */
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ enum mlx5dr_action_reformat_type decap_type =
+ DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
+
+ tmp_action = mlx5dr_action_create_packet_reformat(domain,
+ decap_type, 0,
+ NULL);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ bool is_decap = fte->action.pkt_reformat->reformat_type ==
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+
+ if (is_decap)
+ actions[num_actions++] =
+ fte->action.pkt_reformat->action.dr_action;
+ else
+ delay_encap_set = true;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action =
+ mlx5dr_action_create_pop_vlan();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action =
+ mlx5dr_action_create_pop_vlan();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ actions[num_actions++] =
+ fte->action.modify_hdr->action.dr_action;
+
+ if (delay_encap_set)
+ actions[num_actions++] =
+ fte->action.pkt_reformat->action.dr_action;
+
+ /* The order of the actions below is not important */
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ tmp_action = mlx5dr_action_create_drop();
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ }
+
+ if (fte->flow_context.flow_tag) {
+ tmp_action =
+ mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ u32 id;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ id = dst->dest_attr.counter_id;
+
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ tmp_action = create_ft_action(dev, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ tmp_action = create_vport_action(domain, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_action = tmp_action;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ }
+ }
+
+ params.match_sz = match_sz;
+ params.match_buf = (u64 *)fte->val;
+
+ if (term_action)
+ actions[num_actions++] = term_action;
+
+ rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
+ &params,
+ num_actions,
+ actions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ kfree(actions);
+ fte->fs_dr_rule.dr_rule = rule;
+ fte->fs_dr_rule.num_actions = fs_dr_num_actions;
+ fte->fs_dr_rule.dr_actions = fs_dr_actions;
+
+ return 0;
+
+free_actions:
+ for (i = 0; i < fs_dr_num_actions; i++)
+ if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
+ mlx5dr_action_destroy(fs_dr_actions[i]);
+
+ mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
+ kfree(actions);
+ kfree(fs_dr_actions);
+ return err;
+}
+
+static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *action;
+ int dr_reformat;
+
+ switch (reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ reformat_type);
+ return -EOPNOTSUPP;
+ }
+
+ action = mlx5dr_action_create_packet_reformat(dr_domain,
+ dr_reformat,
+ size,
+ reformat_data);
+ if (!action) {
+ mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
+ return -EINVAL;
+ }
+
+ pkt_reformat->action.dr_action = action;
+
+ return 0;
+}
+
+static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ mlx5dr_action_destroy(pkt_reformat->action.dr_action);
+}
+
+static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+ struct mlx5dr_action *action;
+ size_t actions_sz;
+
+ actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
+ num_actions;
+ action = mlx5dr_action_create_modify_header(dr_domain, 0,
+ actions_sz,
+ modify_actions);
+ if (!action) {
+ mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
+ return -EINVAL;
+ }
+
+ modify_hdr->action.dr_action = action;
+
+ return 0;
+}
+
+static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ mlx5dr_action_destroy(modify_hdr->action.dr_action);
+}
+
+static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
+ int err;
+ int i;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5dr_rule_destroy(rule->dr_rule);
+ if (err)
+ return err;
+
+ for (i = 0; i < rule->num_actions; i++)
+ if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
+ mlx5dr_action_destroy(rule->dr_actions[i]);
+
+ kfree(rule->dr_actions);
+ return 0;
+}
+
+static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ struct mlx5dr_domain *peer_domain = NULL;
+
+ if (peer_ns)
+ peer_domain = peer_ns->fs_dr_domain.dr_domain;
+ mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
+ peer_domain);
+ return 0;
+}
+
+static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ ns->fs_dr_domain.dr_domain =
+ mlx5dr_domain_create(ns->dev,
+ MLX5DR_DOMAIN_TYPE_FDB);
+ if (!ns->fs_dr_domain.dr_domain) {
+ mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
+}
+
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5dr_is_supported(dev);
+}
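+
+/* How the flow steering core is expected to consume this entry point
+ * (a minimal sketch, not the actual fs_core code, which is outside this
+ * file; the mode check is an assumption):
+ *
+ *	if (mlx5_fs_dr_is_supported(dev) && <SW steering mode selected>)
+ *		cmds = mlx5_fs_cmd_get_dr_cmds();
+ *	else
+ *		cmds = mlx5_fs_cmd_get_fw_cmds();
+ */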
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
+ .create_flow_table = mlx5_cmd_dr_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_dr_modify_flow_table,
+ .create_flow_group = mlx5_cmd_dr_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
+ .create_fte = mlx5_cmd_dr_create_fte,
+ .update_fte = mlx5_cmd_dr_update_fte,
+ .delete_fte = mlx5_cmd_dr_delete_fte,
+ .update_root_ft = mlx5_cmd_dr_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .set_peer = mlx5_cmd_dr_set_peer,
+ .create_ns = mlx5_cmd_dr_create_ns,
+ .destroy_ns = mlx5_cmd_dr_destroy_ns,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+ return &mlx5_flow_cmds_dr;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
new file mode 100644
index 000000000000..1fb185d6ac7f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2019 Mellanox Technologies
+ */
+
+#ifndef _MLX5_FS_DR_
+#define _MLX5_FS_DR_
+
+#include "mlx5dr.h"
+
+struct mlx5_flow_root_namespace;
+struct fs_fte;
+
+struct mlx5_fs_dr_action {
+ struct mlx5dr_action *dr_action;
+};
+
+struct mlx5_fs_dr_ns {
+ struct mlx5_dr_ns *dr_ns;
+};
+
+struct mlx5_fs_dr_rule {
+ struct mlx5dr_rule *dr_rule;
+ /* Only actions created by fs_dr */
+ struct mlx5dr_action **dr_actions;
+ int num_actions;
+};
+
+struct mlx5_fs_dr_domain {
+ struct mlx5dr_domain *dr_domain;
+};
+
+struct mlx5_fs_dr_matcher {
+ struct mlx5dr_matcher *dr_matcher;
+};
+
+struct mlx5_fs_dr_table {
+ struct mlx5dr_table *dr_table;
+ struct mlx5dr_action *miss_action;
+};
+
+#ifdef CONFIG_MLX5_SW_STEERING
+
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
+
+#else
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+ return NULL;
+}
+
+static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+#endif /* CONFIG_MLX5_SW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
new file mode 100644
index 000000000000..596c927220d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef MLX5_IFC_DR_H
+#define MLX5_IFC_DR_H
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
+ MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
+ MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
+ MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
+ MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
+ MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
+ MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
+ MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
+ MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
+ MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
+ MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
+};
+
+enum {
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
+ MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
+};
+
+enum {
+ MLX5DR_STE_LU_TYPE_NOP = 0x00,
+ MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
+ MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
+ MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
+ MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
+ MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
+ MLX5DR_STE_LU_TYPE_GRE = 0x16,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
+};
+
+enum mlx5dr_ste_entry_type {
+ MLX5DR_STE_TYPE_TX = 1,
+ MLX5DR_STE_TYPE_RX = 2,
+ MLX5DR_STE_TYPE_MODIFY_PKT = 6,
+};
+
+struct mlx5_ifc_ste_general_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 reserved_at_60[0xa0];
+ u8 tag_value[0x60];
+ u8 bit_mask[0x60];
+};
+
+struct mlx5_ifc_ste_sx_transmit_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 sx_wire[0x1];
+ u8 sx_func_lb[0x1];
+ u8 sx_sniffer[0x1];
+ u8 sx_wire_enable[0x1];
+ u8 sx_func_lb_enable[0x1];
+ u8 sx_sniffer_enable[0x1];
+ u8 action_type[0x3];
+ u8 reserved_at_69[0x1];
+ u8 action_description[0x6];
+ u8 gvmi[0x10];
+
+ u8 encap_pointer_vlan_data[0x20];
+
+ u8 loopback_syndome_en[0x8];
+ u8 loopback_syndome[0x8];
+ u8 counter_trigger[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 go_back[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_rx_steering_mult_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 member_count[0x10];
+ u8 gvmi[0x10];
+
+ u8 qp_list_pointer[0x20];
+
+ u8 reserved_at_a0[0x1];
+ u8 tunneling_action[0x3];
+ u8 action_description[0x4];
+ u8 reserved_at_a8[0x8];
+ u8 counter_trigger_15_0[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x08];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 fail_on_error[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_modify_packet_bits {
+ u8 entry_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 entry_sub_type[0x8];
+ u8 byte_mask[0x10];
+
+ u8 next_table_base_63_48[0x10];
+ u8 next_lu_type[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 linear_hash_enable[0x1];
+ u8 reserved_at_5c[0x2];
+ u8 next_table_rank[0x2];
+
+ u8 number_of_re_write_actions[0x10];
+ u8 gvmi[0x10];
+
+ u8 header_re_write_actions_pointer[0x20];
+
+ u8 reserved_at_a0[0x1];
+ u8 tunneling_action[0x3];
+ u8 action_description[0x4];
+ u8 reserved_at_a8[0x8];
+ u8 counter_trigger_15_0[0x10];
+
+ u8 miss_address_63_48[0x10];
+ u8 counter_trigger_23_16[0x08];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 learning_point[0x1];
+ u8 fail_on_error[0x1];
+ u8 match_polarity[0x1];
+ u8 mask_mode[0x1];
+ u8 miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 qp_type[0x2];
+ u8 ethertype_filter[0x1];
+ u8 reserved_at_43[0x1];
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 reserved_at_48[0x4];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_52[0x2];
+ u8 first_vlan_id[0xc];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 reserved_at_68[0x4];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 qp_type[0x2];
+ u8 ethertype_filter[0x1];
+ u8 reserved_at_43[0x1];
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 reserved_at_48[0x4];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_52[0x2];
+ u8 first_vlan_id[0xc];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 reserved_at_68[0x4];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 smac_47_32[0x10];
+
+ u8 smac_31_0[0x20];
+
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 port[0x1];
+ u8 l3_type[0x2];
+ u8 reserved_at_66[0x6];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_bits {
+ u8 destination_address[0x20];
+
+ u8 source_address[0x20];
+
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+
+ u8 fragmented[0x1];
+ u8 first_fragment[0x1];
+ u8 reserved_at_62[0x2];
+ u8 reserved_at_64[0x1];
+ u8 ecn[0x2];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 dscp[0x6];
+ u8 reserved_at_76[0x2];
+ u8 protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_dst_bits {
+ u8 dst_ip_127_96[0x20];
+
+ u8 dst_ip_95_64[0x20];
+
+ u8 dst_ip_63_32[0x20];
+
+ u8 dst_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 l2_tunneling_network_id[0x20];
+
+ u8 ip_fragmented[0x1];
+ u8 tcp_syn[0x1];
+ u8 encp_type[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 reserved_at_6c[0x3];
+ u8 gre_key_flag[0x1];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_72[0x2];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_src_bits {
+ u8 src_ip_127_96[0x20];
+
+ u8 src_ip_95_64[0x20];
+
+ u8 src_ip_63_32[0x20];
+
+ u8 src_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_bits {
+ u8 version[0x4];
+ u8 ihl[0x4];
+ u8 reserved_at_8[0x8];
+ u8 total_length[0x10];
+
+ u8 identification[0x10];
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+
+ u8 time_to_live[0x8];
+ u8 reserved_at_48[0x8];
+ u8 checksum[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l4_bits {
+ u8 fragmented[0x1];
+ u8 first_fragment[0x1];
+ u8 reserved_at_2[0x6];
+ u8 protocol[0x8];
+ u8 dst_port[0x10];
+
+ u8 ipv6_version[0x4];
+ u8 reserved_at_24[0x1];
+ u8 ecn[0x2];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 src_port[0x10];
+
+ u8 ipv6_payload_length[0x10];
+ u8 ipv6_hop_limit[0x8];
+ u8 dscp[0x6];
+ u8 reserved_at_5e[0x2];
+
+ u8 tcp_data_offset[0x4];
+ u8 reserved_at_64[0x8];
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_bits {
+ u8 checksum[0x10];
+ u8 length[0x10];
+
+ u8 seq_num[0x20];
+
+ u8 ack_num[0x20];
+
+ u8 urgent_pointer[0x10];
+ u8 window_size[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_bits {
+ u8 mpls0_label[0x14];
+ u8 mpls0_exp[0x3];
+ u8 mpls0_s_bos[0x1];
+ u8 mpls0_ttl[0x8];
+
+ u8 mpls1_label[0x20];
+
+ u8 mpls2_label[0x20];
+
+ u8 reserved_at_60[0x16];
+ u8 mpls4_s_bit[0x1];
+ u8 mpls4_qualifier[0x1];
+ u8 mpls3_s_bit[0x1];
+ u8 mpls3_qualifier[0x1];
+ u8 mpls2_s_bit[0x1];
+ u8 mpls2_qualifier[0x1];
+ u8 mpls1_s_bit[0x1];
+ u8 mpls1_qualifier[0x1];
+ u8 mpls0_s_bit[0x1];
+ u8 mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_ste_register_0_bits {
+ u8 register_0_h[0x20];
+
+ u8 register_0_l[0x20];
+
+ u8 register_1_h[0x20];
+
+ u8 register_1_l[0x20];
+};
+
+struct mlx5_ifc_ste_register_1_bits {
+ u8 register_2_h[0x20];
+
+ u8 register_2_l[0x20];
+
+ u8 register_3_h[0x20];
+
+ u8 register_3_l[0x20];
+};
+
+struct mlx5_ifc_ste_gre_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_30[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 strict_src_route[0x1];
+ u8 recur[0x3];
+ u8 flags[0x5];
+ u8 version[0x3];
+ u8 gre_protocol[0x10];
+
+ u8 checksum[0x10];
+ u8 offset[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 seq_num[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_0_bits {
+ u8 parser_3_label[0x14];
+ u8 parser_3_exp[0x3];
+ u8 parser_3_s_bos[0x1];
+ u8 parser_3_ttl[0x8];
+
+ u8 flex_parser_2[0x20];
+
+ u8 flex_parser_1[0x20];
+
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_1_bits {
+ u8 flex_parser_7[0x20];
+
+ u8 flex_parser_6[0x20];
+
+ u8 flex_parser_5[0x20];
+
+ u8 flex_parser_4[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_bits {
+ u8 flex_parser_tunneling_header_63_32[0x20];
+
+ u8 flex_parser_tunneling_header_31_0[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_general_purpose_bits {
+ u8 general_purpose_lookup_field[0x20];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_bits {
+ u8 loopback_syndrome[0x8];
+ u8 reserved_at_8[0x8];
+ u8 source_gvmi[0x10];
+
+ u8 reserved_at_20[0x5];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 source_is_requestor[0x1];
+ u8 source_qp[0x18];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_l2_hdr_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 smac_47_32[0x10];
+
+ u8 smac_31_0[0x20];
+
+ u8 ethertype[0x10];
+ u8 vlan_type[0x10];
+
+ u8 vlan[0x10];
+ u8 reserved_at_90[0x10];
+};
+
+/* Both HW set and HW add share the same HW format with different opcodes */
+struct mlx5_ifc_dr_action_hw_set_bits {
+ u8 opcode[0x8];
+ u8 destination_field_code[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x3];
+ u8 destination_length[0x5];
+
+ u8 inline_data[0x20];
+};
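+
+/* A sketch of building one HW "set" action with the layout above
+ * (assumes the generic MLX5_SET() accessor; "hw_action" is a caller
+ * provided 8-byte buffer, and the exact length/shift encoding rules are
+ * an assumption to be checked against the device specification):
+ *
+ *	MLX5_SET(dr_action_hw_set, hw_action, opcode,
+ *		 MLX5DR_ACTION_MDFY_HW_OP_SET);
+ *	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ *		 MLX5DR_ACTION_MDFY_HW_FLD_METADATA);
+ *	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 0);
+ *	MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+ *	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ */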
+
+#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
new file mode 100644
index 000000000000..adda9cbfba45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _MLX5DR_H_
+#define _MLX5DR_H_
+
+struct mlx5dr_domain;
+struct mlx5dr_table;
+struct mlx5dr_matcher;
+struct mlx5dr_rule;
+struct mlx5dr_action;
+
+enum mlx5dr_domain_type {
+ MLX5DR_DOMAIN_TYPE_NIC_RX,
+ MLX5DR_DOMAIN_TYPE_NIC_TX,
+ MLX5DR_DOMAIN_TYPE_FDB,
+};
+
+enum mlx5dr_domain_sync_flags {
+ MLX5DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0,
+ MLX5DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1,
+};
+
+enum mlx5dr_action_reformat_type {
+ DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2,
+ DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
+ DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
+ DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
+};
+
+struct mlx5dr_match_parameters {
+ size_t match_sz;
+ u64 *match_buf; /* Device spec format */
+};
+
+#ifdef CONFIG_MLX5_SW_STEERING
+
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
+
+int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn);
+
+struct mlx5dr_table *
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
+
+int mlx5dr_table_destroy(struct mlx5dr_table *table);
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
+
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *table,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask);
+
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_rule *
+mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[]);
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id);
+
+struct mlx5dr_action *mlx5dr_action_create_drop(void);
+
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data);
+
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[]);
+
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
+
+struct mlx5dr_action *
+mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+
+int mlx5dr_action_destroy(struct mlx5dr_action *action);
+
+static inline bool
+mlx5dr_is_supported(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
+}
+
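+/* Typical call sequence for a consumer of this API (a hedged sketch,
+ * not taken from the driver itself; error handling is omitted and the
+ * "mask"/"value" buffers are assumed to already be in the device
+ * specification format expected by mlx5dr_match_parameters):
+ *
+ *	struct mlx5dr_domain *dmn;
+ *	struct mlx5dr_table *tbl;
+ *	struct mlx5dr_matcher *matcher;
+ *	struct mlx5dr_action *actions[1];
+ *	struct mlx5dr_rule *rule;
+ *
+ *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
+ *	tbl = mlx5dr_table_create(dmn, 0);
+ *	matcher = mlx5dr_matcher_create(tbl, 0, match_criteria_enable, &mask);
+ *	actions[0] = mlx5dr_action_create_drop();
+ *	rule = mlx5dr_rule_create(matcher, &value, 1, actions);
+ *
+ *	Teardown is the mirror image:
+ *	mlx5dr_rule_destroy(rule);
+ *	mlx5dr_action_destroy(actions[0]);
+ *	mlx5dr_matcher_destroy(matcher);
+ *	mlx5dr_table_destroy(tbl);
+ *	mlx5dr_domain_destroy(dmn);
+ */
+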
+#else /* CONFIG_MLX5_SW_STEERING */
+
+static inline struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
+
+static inline int
+mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; }
+
+static inline int
+mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; }
+
+static inline void
+mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+ struct mlx5dr_domain *peer_dmn) { }
+
+static inline struct mlx5dr_table *
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
+
+static inline int
+mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
+
+static inline u32
+mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
+
+static inline struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *table,
+ u16 priority,
+ u8 match_criteria_enable,
+ struct mlx5dr_match_parameters *mask) { return NULL; }
+
+static inline int
+mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; }
+
+static inline struct mlx5dr_rule *
+mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *value,
+ size_t num_actions,
+ struct mlx5dr_action *actions[]) { return NULL; }
+
+static inline int
+mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; }
+
+static inline int
+mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action) { return 0; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
+ struct mlx5_core_dev *mdev) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
+ u32 vport, u8 vhca_id_valid,
+ u16 vhca_id) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_drop(void) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_tag(u32 tag_value) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+ enum mlx5dr_action_reformat_type reformat_type,
+ size_t data_sz,
+ void *data) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
+ u32 flags,
+ size_t actions_sz,
+ __be64 actions[]) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_pop_vlan(void) { return NULL; }
+
+static inline struct mlx5dr_action *
+mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain,
+ __be32 vlan_hdr) { return NULL; }
+
+static inline int
+mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; }
+
+static inline bool
+mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; }
+
+#endif /* CONFIG_MLX5_SW_STEERING */
+
+#endif /* _MLX5DR_H_ */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c2f056b5766d..8dd081051a79 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1162,6 +1162,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
@@ -1225,6 +1228,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(e_switch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_eswitch_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 52a56d741f79..3e80f03a387f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -626,6 +626,11 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg;
};
+enum mlx5_sw_icm_type {
+ MLX5_SW_ICM_TYPE_STEERING,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+};
+
#define MLX5_MAX_RESERVED_GIDS 8
struct mlx5_rsvd_gids {
@@ -657,11 +662,15 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
+struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
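+/* e.g. with log_sw_icm_alloc_granularity == 12 (an assumed value, read
+ * from the device memory caps) the SW ICM block size is 1 << 12 = 4KB.
+ */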
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -695,6 +704,7 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_dm *dm;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -1078,6 +1088,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 38a70d16d8d5..98e667b176ef 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -60,7 +60,6 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
u16 vport_num, u32 sqn);
@@ -74,7 +73,14 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
u16 vport_num);
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
+
+static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return MLX5_ESWITCH_NONE;
+}
+
static inline enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 97ec6be62ac4..724d276ea133 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -84,6 +84,8 @@ enum {
FDB_SLOW_PATH,
};
+struct mlx5_pkt_reformat;
+struct mlx5_modify_hdr;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -121,7 +123,7 @@ struct mlx5_flow_destination {
struct {
u16 num;
u16 vhca_id;
- u32 reformat_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
};
@@ -195,8 +197,8 @@ enum {
struct mlx5_flow_act {
u32 action;
- u32 reformat_id;
- u32 modify_id;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
uintptr_t esp_id;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
@@ -205,8 +207,6 @@ struct mlx5_flow_act {
#define MLX5_DECLARE_FLOW_ACT(name) \
struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- .reformat_id = 0, \
- .modify_id = 0, \
.flags = 0, }
/* Single destination per rule.
@@ -236,19 +236,18 @@ u32 mlx5_fc_id(struct mlx5_fc *counter);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id);
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
- u32 modify_header_id);
-
-int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
- enum mlx5_flow_namespace_type namespace,
- u32 *packet_reformat_id);
+ struct mlx5_modify_hdr *modify_hdr);
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- u32 packet_reformat_id);
+ struct mlx5_pkt_reformat *reformat);
#endif
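With this change the flow-steering API hands back opaque objects (struct mlx5_modify_hdr, struct mlx5_pkt_reformat) that are stored in mlx5_flow_act and in the vport destination, rather than raw firmware IDs, so a software-steering backend can keep its own state behind them. A minimal usage sketch of the new packet-reformat API, with error handling trimmed and an illustrative caller name; the reformat type and action flag are existing mlx5 definitions:

	/* Sketch: allocate a reformat object and attach it to a flow_act; the
	 * matching release is mlx5_packet_reformat_dealloc(dev, reformat).
	 */
	static int my_add_encap(struct mlx5_core_dev *dev, void *hdr, size_t hdr_sz,
				struct mlx5_flow_act *flow_act)
	{
		struct mlx5_pkt_reformat *reformat;

		reformat = mlx5_packet_reformat_alloc(dev,
						      MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
						      hdr_sz, hdr,
						      MLX5_FLOW_NAMESPACE_FDB);
		if (IS_ERR(reformat))
			return PTR_ERR(reformat);

		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = reformat;
		return 0;
	}
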
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 66b60afd5e06..7d65c0578ac9 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -282,6 +282,7 @@ enum {
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
+ MLX5_CMD_OP_SYNC_STEERING = 0xb00,
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
@@ -485,7 +486,11 @@ union mlx5_ifc_gre_key_bits {
};
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_at_0[0x8];
+ u8 gre_c_present[0x1];
+ u8 reserved_auto1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
u8 source_eswitch_owner_vhca_id[0x10];
@@ -565,12 +570,38 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 metadata_reg_b[0x20];
+
+ u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
- u8 reserved_at_0[0x120];
+ u8 inner_tcp_seq_num[0x20];
+
+ u8 outer_tcp_seq_num[0x20];
+
+ u8 inner_tcp_ack_num[0x20];
+
+ u8 outer_tcp_ack_num[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_b0[0x10];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmpv6_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+
u8 geneve_tlv_option_0_data[0x20];
+
u8 reserved_at_140[0xc0];
};
@@ -666,7 +697,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x7200];
+ u8 reserved_at_e00[0x1200];
+
+ u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_allow_icm_address[0x40];
+
+ u8 reserved_at_20c0[0x5f40];
};
enum {
@@ -698,7 +737,17 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x7800];
+ u8 reserved_at_800[0x1000];
+
+ u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
+
+ u8 sw_steering_fdb_action_drop_icm_address_tx[0x40];
+
+ u8 sw_steering_uplink_icm_address_rx[0x40];
+
+ u8 sw_steering_uplink_icm_address_tx[0x40];
+
+ u8 reserved_at_1900[0x6700];
};
enum {
@@ -849,6 +898,25 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_sync_steering_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+};
+
+struct mlx5_ifc_sync_steering_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
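The new SYNC_STEERING command carries only opcode/uid on input and the usual status/syndrome on output, so issuing it reduces to a plain mlx5_cmd_exec() call built with the standard MLX5_ST_SZ_DW/MLX5_SET helpers. A minimal sketch (function name illustrative):

	/* Sketch: ask the device to synchronize against software-managed
	 * steering writes, using the MLX5_CMD_OP_SYNC_STEERING opcode added above.
	 */
	static int my_sync_steering(struct mlx5_core_dev *mdev)
	{
		u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
		u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};

		MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);

		return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	}
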
@@ -1042,6 +1110,12 @@ enum {
};
enum {
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
+ MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
+ MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+};
+
+enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
@@ -1414,7 +1488,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_6c0[0x4];
u8 flex_parser_id_geneve_tlv_option_0[0x4];
- u8 reserved_at_6c8[0x28];
+ u8 flex_parser_id_icmp_dw1[0x4];
+ u8 flex_parser_id_icmp_dw0[0x4];
+ u8 flex_parser_id_icmpv6_dw1[0x4];
+ u8 flex_parser_id_icmpv6_dw0[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
+
+ u8 reserved_at_6e0[0x10];
u8 sf_base_id[0x10];
u8 reserved_at_700[0x80];
@@ -2652,6 +2733,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
+ struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
u8 reserved_at_0[0x8000];
};
@@ -3255,7 +3337,11 @@ struct mlx5_ifc_esw_vport_context_bits {
u8 cvlan_pcp[0x3];
u8 cvlan_id[0xc];
- u8 reserved_at_60[0x7a0];
+ u8 reserved_at_60[0x720];
+
+ u8 sw_steering_vport_icm_address_rx[0x40];
+
+ u8 sw_steering_vport_icm_address_tx[0x40];
};
enum {
@@ -4941,23 +5027,98 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_query_flow_table_out_bits {
+struct mlx5_ifc_other_hca_cap_bits {
+ u8 roce[0x1];
+ u8 reserved_0[0x27f];
+};
+
+struct mlx5_ifc_query_other_hca_cap_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 reserved_0[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x80];
+ u8 reserved_1[0x40];
- u8 reserved_at_c0[0x8];
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_query_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_flow_table_context_bits {
+ u8 reformat_en[0x1];
+ u8 decap_en[0x1];
+ u8 sw_owner[0x1];
+ u8 termination_table[0x1];
+ u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_d0[0x8];
+ u8 reserved_at_10[0x8];
u8 log_size[0x8];
- u8 reserved_at_e0[0x120];
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0x60];
+
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x80];
+
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_query_flow_table_in_bits {
@@ -5227,7 +5388,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
-enum {
+enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
@@ -5323,7 +5484,16 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
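The metadata-register and TCP sequence/ack fields added to this enum become legal targets for modify-header actions. A minimal sketch of building one SET action on REG_C_1 and allocating it through the pointer-returning mlx5_modify_header_alloc() from the fs.h hunk above; the set_action_in layout, MLX5_ACTION_TYPE_SET and the namespace value are existing mlx5 definitions, and the function name is illustrative:

	/* Sketch: one modify-header action that writes "data" into metadata
	 * register C1; release with mlx5_modify_header_dealloc(dev, mh).
	 */
	static struct mlx5_modify_hdr *my_set_reg_c1(struct mlx5_core_dev *dev,
						     u32 data)
	{
		u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};

		MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
		MLX5_SET(set_action_in, action, data, data);

		return mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_FDB,
						1, action);
	}
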
@@ -7371,35 +7541,26 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 klm_pas_mtt[0][0x20];
};
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2,
+ MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3,
+ MLX5_FLOW_TABLE_TYPE_FDB = 0X4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6,
+};
+
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 table_id[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_flow_table_context_bits {
- u8 reformat_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_2[0x1];
- u8 termination_table[0x1];
- u8 table_miss_action[0x4];
- u8 level[0x8];
- u8 reserved_at_10[0x8];
- u8 log_size[0x8];
-
- u8 reserved_at_20[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_40[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_60[0xe0];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_flow_table_in_bits {