-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 336
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 94
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 300
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c | 151
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 13
-rw-r--r--  include/linux/mlx5/fs.h | 12
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 68
33 files changed, 1373 insertions(+), 195 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a22c32aabf11..cd4a1ab0ea78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -111,6 +111,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
+ steering/dr_definer.o \
steering/dr_dbg.o lib/smfs.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index c5bb79a4fa57..2732128e7a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -228,6 +228,17 @@ const char *parse_fs_hdrs(struct trace_seq *p,
return ret;
}
+static const char
+*fs_dest_range_field_to_str(enum mlx5_flow_dest_range_field field)
+{
+ switch (field) {
+ case MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN:
+ return "packet len";
+ default:
+ return "unknown dest range field";
+ }
+}
+
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id)
@@ -259,6 +270,11 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ trace_seq_printf(p, "field=%s min=%d max=%d\n",
+ fs_dest_range_field_to_str(dst->range.field),
+ dst->range.min, dst->range.max);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_NONE:
trace_seq_printf(p, "none\n");
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 898fe16a4384..512d43148922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -3,6 +3,7 @@
#include "act.h"
#include "en/tc_priv.h"
+#include "fs_core.h"
static bool police_act_validate_control(enum flow_action_id act_id,
struct netlink_ext_ack *extack)
@@ -71,6 +72,8 @@ fill_meter_params_from_act(const struct flow_action_entry *act,
params->mode = MLX5_RATE_LIMIT_PPS;
params->rate = act->police.rate_pkt_ps;
params->burst = act->police.burst_pkt;
+ } else if (act->police.mtu) {
+ params->mtu = act->police.mtu;
} else {
return -EOPNOTSUPP;
}
@@ -84,14 +87,25 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ enum mlx5_flow_namespace_type ns = mlx5e_get_flow_namespace(parse_state->flow);
+ struct mlx5e_flow_meter_params *params = &attr->meter_attr.params;
int err;
- err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ err = fill_meter_params_from_act(act, params);
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
- attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+ if (params->mtu) {
+ if (!(mlx5_fs_get_capabilities(priv->mdev, ns) &
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_MTU;
+ } else {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 4e5f4aa44724..9c1c24da9453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -241,7 +241,7 @@ mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
}
static struct mlx5e_flow_meter_handle *
-__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters, bool alloc_aso)
{
struct mlx5_core_dev *mdev = flow_meters->mdev;
struct mlx5e_flow_meter_aso_obj *meters_obj;
@@ -268,6 +268,9 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
}
meter->act_counter = counter;
+ if (!alloc_aso)
+ goto no_aso;
+
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
entry);
@@ -300,11 +303,12 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
}
bitmap_set(meters_obj->meters_map, pos, 1);
- meter->flow_meters = flow_meters;
meter->meters_obj = meters_obj;
meter->obj_id = meters_obj->base_id + pos / 2;
meter->idx = pos % 2;
+no_aso:
+ meter->flow_meters = flow_meters;
mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
@@ -332,6 +336,9 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
mlx5_fc_destroy(mdev, meter->act_counter);
mlx5_fc_destroy(mdev, meter->drop_counter);
+ if (meter->params.mtu)
+ goto out_no_aso;
+
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
bitmap_clear(meters_obj->meters_map, pos, 1);
@@ -345,6 +352,7 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
list_add(&meters_obj->entry, &flow_meters->partial_list);
}
+out_no_aso:
mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
kfree(meter);
@@ -409,12 +417,13 @@ mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
{
struct mlx5e_flow_meter_handle *meter;
- meter = __mlx5e_flow_meter_alloc(flow_meters);
+ meter = __mlx5e_flow_meter_alloc(flow_meters, !params->mtu);
if (IS_ERR(meter))
return meter;
hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
meter->params.index = params->index;
+ meter->params.mtu = params->mtu;
meter->refcnt++;
return meter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index f16abf33bb51..9b795cd106bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -20,6 +20,7 @@ struct mlx5e_flow_meter_params {
u32 index;
u64 rate;
u64 burst;
+ u32 mtu;
};
struct mlx5e_flow_meter_handle {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
index c38211097746..8d7d761482d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -8,7 +8,7 @@
#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
-struct mlx5e_post_meter_priv {
+struct mlx5e_post_meter_rate_table {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_flow_handle *green_rule;
@@ -17,16 +17,47 @@ struct mlx5e_post_meter_priv {
struct mlx5_flow_attr *red_attr;
};
+struct mlx5e_post_meter_mtu_table {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+};
+
+struct mlx5e_post_meter_mtu_tables {
+ struct mlx5e_post_meter_mtu_table green_table;
+ struct mlx5e_post_meter_mtu_table red_table;
+};
+
+struct mlx5e_post_meter_priv {
+ enum mlx5e_post_meter_type type;
+ union {
+ struct mlx5e_post_meter_rate_table rate_steering_table;
+ struct mlx5e_post_meter_mtu_tables mtu_tables;
+ };
+};
+
struct mlx5_flow_table *
mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
{
- return post_meter->ft;
+ return post_meter->rate_steering_table.ft;
}
-static int
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->mtu_tables.green_table.ft;
+}
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->mtu_tables.red_table.ft;
+}
+
+static struct mlx5_flow_table *
mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
- enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_meter_priv *post_meter)
+ enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
@@ -34,7 +65,7 @@ mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
if (!root_ns) {
mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
@@ -42,19 +73,14 @@ mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
ft_attr.max_fte = 2;
ft_attr.level = 1;
- post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
- if (IS_ERR(post_meter->ft)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
- return PTR_ERR(post_meter->ft);
- }
-
- return 0;
+ return mlx5_create_flow_table(root_ns, &ft_attr);
}
static int
-mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
- struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
{
+ struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *misc2, *match_criteria;
u32 *flow_group_in;
@@ -73,10 +99,10 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
- if (IS_ERR(post_meter->fg)) {
+ table->fg = mlx5_create_flow_group(table->ft, flow_group_in);
+ if (IS_ERR(table->fg)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
- err = PTR_ERR(post_meter->fg);
+ err = PTR_ERR(table->fg);
}
kvfree(flow_group_in);
@@ -100,7 +126,6 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
else
attr->counter = act_counter;
- attr->ft = post_meter->ft;
attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
attr->outer_match_level = MLX5_MATCH_NONE;
attr->chain = 0;
@@ -117,14 +142,15 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
}
static int
-mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
- struct mlx5e_post_meter_priv *post_meter,
- struct mlx5e_post_act *post_act,
- struct mlx5_fc *act_counter,
- struct mlx5_fc *drop_counter,
- struct mlx5_flow_attr *green_attr,
- struct mlx5_flow_attr *red_attr)
+mlx5e_post_meter_rate_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
{
+ struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -135,7 +161,7 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
-
+ red_attr->ft = post_meter->rate_steering_table.ft;
rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, red_attr,
act_counter, drop_counter);
if (IS_ERR(rule)) {
@@ -143,11 +169,12 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
err = PTR_ERR(rule);
goto err_red;
}
- post_meter->red_rule = rule;
- post_meter->red_attr = red_attr;
+ table->red_rule = rule;
+ table->red_attr = red_attr;
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ green_attr->ft = post_meter->rate_steering_table.ft;
rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, green_attr,
act_counter, drop_counter);
if (IS_ERR(rule)) {
@@ -155,43 +182,217 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
err = PTR_ERR(rule);
goto err_green;
}
- post_meter->green_rule = rule;
- post_meter->green_attr = green_attr;
+ table->green_rule = rule;
+ table->green_attr = green_attr;
kvfree(spec);
return 0;
err_green:
- mlx5_del_flow_rules(post_meter->red_rule);
+ mlx5_del_flow_rules(table->red_rule);
err_red:
kvfree(spec);
return err;
}
static void
-mlx5e_post_meter_rules_destroy(struct mlx5_eswitch *esw,
- struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_rules_destroy(struct mlx5_eswitch *esw,
+ struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_eswitch_del_offloaded_rule(esw, post_meter->red_rule, post_meter->red_attr);
- mlx5_eswitch_del_offloaded_rule(esw, post_meter->green_rule, post_meter->green_attr);
+ struct mlx5e_post_meter_rate_table *rate_table = &post_meter->rate_steering_table;
+
+ mlx5_eswitch_del_offloaded_rule(esw, rate_table->red_rule, rate_table->red_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, rate_table->green_rule, rate_table->green_attr);
}
static void
-mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_destroy_flow_group(post_meter->fg);
+ mlx5_destroy_flow_group(post_meter->rate_steering_table.fg);
}
static void
-mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_table_destroy(struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_destroy_flow_table(post_meter->ft);
+ mlx5_destroy_flow_table(post_meter->rate_steering_table.ft);
+}
+
+static void
+mlx5e_post_meter_mtu_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_del_flow_rules(mtu_tables->green_table.rule);
+ mlx5_del_flow_rules(mtu_tables->red_table.rule);
+}
+
+static void
+mlx5e_post_meter_mtu_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_destroy_flow_group(mtu_tables->green_table.fg);
+ mlx5_destroy_flow_group(mtu_tables->red_table.fg);
+}
+
+static void
+mlx5e_post_meter_mtu_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_destroy_flow_table(mtu_tables->green_table.ft);
+ mlx5_destroy_flow_table(mtu_tables->red_table.ft);
+}
+
+static int
+mlx5e_post_meter_rate_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ post_meter->type = MLX5E_POST_METER_RATE;
+
+ ft = mlx5e_post_meter_table_create(priv, ns_type);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ goto err_ft;
+ }
+
+ post_meter->rate_steering_table.ft = ft;
+
+ err = mlx5e_post_meter_rate_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rate_rules_create(priv, post_meter, post_act,
+ act_counter, drop_counter,
+ green_attr, red_attr);
+ if (err)
+ goto err_rules;
+
+ return 0;
+
+err_rules:
+ mlx5e_post_meter_rate_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_rate_table_destroy(post_meter);
+err_ft:
+ return err;
+}
+
+static int
+mlx5e_post_meter_create_mtu_table(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_mtu_table *table)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ table->ft = mlx5e_post_meter_table_create(priv, ns_type);
+ if (IS_ERR(table->ft)) {
+ err = PTR_ERR(table->ft);
+ goto err_ft;
+ }
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ fg = mlx5_create_flow_group(table->ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto err_miss_grp;
+ }
+ table->fg = fg;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_grp:
+ mlx5_destroy_flow_table(table->ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_mtu_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+ static struct mlx5_flow_spec zero_spec = {};
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ post_meter->type = MLX5E_POST_METER_MTU;
+
+ err = mlx5e_post_meter_create_mtu_table(priv, ns_type, &mtu_tables->green_table);
+ if (err)
+ goto err_green_ft;
+
+ green_attr->ft = mtu_tables->green_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, &zero_spec, green_attr,
+ act_counter, drop_counter);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter conform rule\n");
+ err = PTR_ERR(rule);
+ goto err_green_rule;
+ }
+ mtu_tables->green_table.rule = rule;
+ mtu_tables->green_table.attr = green_attr;
+
+ err = mlx5e_post_meter_create_mtu_table(priv, ns_type, &mtu_tables->red_table);
+ if (err)
+ goto err_red_ft;
+
+ red_attr->ft = mtu_tables->red_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, &zero_spec, red_attr,
+ act_counter, drop_counter);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter exceed rule\n");
+ err = PTR_ERR(rule);
+ goto err_red_rule;
+ }
+ mtu_tables->red_table.rule = rule;
+ mtu_tables->red_table.attr = red_attr;
+
+ return 0;
+
+err_red_rule:
+ mlx5_destroy_flow_table(mtu_tables->red_table.ft);
+err_red_ft:
+ mlx5_del_flow_rules(mtu_tables->green_table.rule);
+err_green_rule:
+ mlx5_destroy_flow_table(mtu_tables->green_table.ft);
+err_green_ft:
+ return err;
}
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
+ enum mlx5e_post_meter_type type,
struct mlx5_fc *act_counter,
struct mlx5_fc *drop_counter,
struct mlx5_flow_attr *branch_true,
@@ -204,36 +405,55 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv,
if (!post_meter)
return ERR_PTR(-ENOMEM);
- err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
- if (err)
- goto err_ft;
-
- err = mlx5e_post_meter_fg_create(priv, post_meter);
- if (err)
- goto err_fg;
+ switch (type) {
+ case MLX5E_POST_METER_MTU:
+ err = mlx5e_post_meter_mtu_create(priv, ns_type, post_act,
+ act_counter, drop_counter, post_meter,
+ branch_true, branch_false);
+ break;
+ case MLX5E_POST_METER_RATE:
+ err = mlx5e_post_meter_rate_create(priv, ns_type, post_act,
+ act_counter, drop_counter, post_meter,
+ branch_true, branch_false);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
- err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, act_counter,
- drop_counter, branch_true, branch_false);
if (err)
- goto err_rules;
+ goto err;
return post_meter;
-err_rules:
- mlx5e_post_meter_fg_destroy(post_meter);
-err_fg:
- mlx5e_post_meter_table_destroy(post_meter);
-err_ft:
+err:
kfree(post_meter);
return ERR_PTR(err);
}
+static void
+mlx5e_post_meter_rate_destroy(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rate_rules_destroy(esw, post_meter);
+ mlx5e_post_meter_rate_fg_destroy(post_meter);
+ mlx5e_post_meter_rate_table_destroy(post_meter);
+}
+
+static void
+mlx5e_post_meter_mtu_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_mtu_rules_destroy(post_meter);
+ mlx5e_post_meter_mtu_fg_destroy(post_meter);
+ mlx5e_post_meter_mtu_table_destroy(post_meter);
+}
+
void
mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)
{
- mlx5e_post_meter_rules_destroy(esw, post_meter);
- mlx5e_post_meter_fg_destroy(post_meter);
- mlx5e_post_meter_table_destroy(post_meter);
+ if (post_meter->type == MLX5E_POST_METER_RATE)
+ mlx5e_post_meter_rate_destroy(esw, post_meter);
+ else
+ mlx5e_post_meter_mtu_destroy(post_meter);
+
kfree(post_meter);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
index a4075d33fde2..e013b77186b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -14,18 +14,49 @@
struct mlx5e_post_meter_priv;
+enum mlx5e_post_meter_type {
+ MLX5E_POST_METER_RATE = 0,
+ MLX5E_POST_METER_MTU
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
struct mlx5_flow_table *
mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter);
+
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
+ enum mlx5e_post_meter_type type,
struct mlx5_fc *act_counter,
struct mlx5_fc *drop_counter,
struct mlx5_flow_attr *branch_true,
struct mlx5_flow_attr *branch_false);
+
void
mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter);
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return NULL;
+}
+
+static inline struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return NULL;
+}
+
+#endif
+
#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 623886462c10..75b9e1528fd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -85,18 +85,25 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
-struct vport_stats {
- u64 vport_rx_packets;
- u64 vport_tx_packets;
- u64 vport_rx_bytes;
- u64 vport_tx_bytes;
-};
-
static const struct counter_desc vport_rep_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_unicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_unicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_multicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_multicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_multicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_multicast_bytes) },
};
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
@@ -161,33 +168,80 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
+ struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct rtnl_link_stats64 *vport_stats;
- struct ifla_vf_stats vf_stats;
+ u32 *out;
int err;
- err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
if (err) {
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
return;
}
- vport_stats = &priv->stats.vf_vport;
+ #define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
/* flip tx/rx as we are reporting the counters for the switch vport */
- vport_stats->rx_packets = vf_stats.tx_packets;
- vport_stats->rx_bytes = vf_stats.tx_bytes;
- vport_stats->tx_packets = vf_stats.rx_packets;
- vport_stats->tx_bytes = vf_stats.rx_bytes;
+ rep_stats->vport_rx_packets =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+
+ rep_stats->vport_tx_packets =
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
+ MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+ rep_stats->vport_rx_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ rep_stats->vport_tx_bytes =
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
+ MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
+ MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+
+ rep_stats->rx_vport_rdma_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
+ rep_stats->tx_vport_rdma_unicast_packets =
+ MLX5_GET_CTR(out, received_ib_unicast.packets);
+ rep_stats->rx_vport_rdma_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
+ rep_stats->tx_vport_rdma_unicast_bytes =
+ MLX5_GET_CTR(out, received_ib_unicast.octets);
+ rep_stats->rx_vport_rdma_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
+ rep_stats->tx_vport_rdma_multicast_packets =
+ MLX5_GET_CTR(out, received_ib_multicast.packets);
+ rep_stats->rx_vport_rdma_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
+ rep_stats->tx_vport_rdma_multicast_bytes =
+ MLX5_GET_CTR(out, received_ib_multicast.octets);
+
+ kvfree(out);
}
static void mlx5e_rep_get_strings(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index cbc831ca646b..37df58ba958c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -463,6 +463,21 @@ struct mlx5e_ptp_cq_stats {
u64 resync_event;
};
+struct mlx5e_rep_stats {
+ u64 vport_rx_packets;
+ u64 vport_tx_packets;
+ u64 vport_rx_bytes;
+ u64 vport_tx_bytes;
+ u64 rx_vport_rdma_unicast_packets;
+ u64 tx_vport_rdma_unicast_packets;
+ u64 rx_vport_rdma_unicast_bytes;
+ u64 tx_vport_rdma_unicast_bytes;
+ u64 rx_vport_rdma_multicast_packets;
+ u64 tx_vport_rdma_multicast_packets;
+ u64 rx_vport_rdma_multicast_bytes;
+ u64 tx_vport_rdma_multicast_bytes;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -471,6 +486,7 @@ struct mlx5e_stats {
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
+ struct mlx5e_rep_stats rep_stats;
};
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 227fa6ef9e41..9af2aa2922f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -402,8 +402,9 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
- return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
- (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+ return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
+ attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
@@ -414,6 +415,7 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_flow_meter_handle *meter;
+ enum mlx5e_post_meter_type type;
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
@@ -422,7 +424,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
}
ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
+ type,
meter->act_counter, meter->drop_counter,
attr->branch_true, attr->branch_false);
if (IS_ERR(post_meter)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index f2677d9ca0b4..50af70ef22f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -114,6 +114,7 @@ enum {
MLX5_ATTR_FLAG_ACCEPT = BIT(5),
MLX5_ATTR_FLAG_CT = BIT(6),
MLX5_ATTR_FLAG_TERMINATING = BIT(7),
+ MLX5_ATTR_FLAG_MTU = BIT(8),
};
/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
index 2db13c71e88c..3d0bbcca1cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -12,10 +12,11 @@ enum vnic_diag_counter {
MLX5_VNIC_DIAG_CQ_OVERRUN,
MLX5_VNIC_DIAG_INVALID_COMMAND,
MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
+ MLX5_VNIC_DIAG_RX_STEERING_DISCARD,
};
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
- u32 *val)
+ u64 *val)
{
u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
@@ -57,6 +58,10 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
break;
+ case MLX5_VNIC_DIAG_RX_STEERING_DISCARD:
+ *val = MLX5_GET64(vnic_diagnostic_statistics, vnic_diag_out,
+ nic_receive_steering_discard);
+ break;
}
return 0;
@@ -65,14 +70,14 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
enum vnic_diag_counter type)
{
- u32 val = 0;
+ u64 val = 0;
int ret;
ret = mlx5_esw_query_vnic_diag(vport, type, &val);
if (ret)
return ret;
- seq_printf(file, "%d\n", val);
+ seq_printf(file, "%llu\n", val);
return 0;
}
@@ -112,6 +117,11 @@ static int quota_exceeded_command_show(struct seq_file *file, void *priv)
return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
}
+static int rx_steering_discard_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_RX_STEERING_DISCARD);
+}
+
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
@@ -119,6 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+DEFINE_SHOW_ATTRIBUTE(rx_steering_discard);
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -179,4 +190,9 @@ void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool
if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
&quota_exceeded_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, nic_receive_steering_discard))
+ debugfs_create_file("rx_steering_discard", 0444, vnic_diag, vport,
+ &rx_steering_discard_fops);
+
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1987a9d9d40c..e455b215c708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -50,6 +50,7 @@
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
+#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -202,6 +203,21 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
}
static int
+esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
+ struct mlx5e_meter_attr *meter,
+ int i)
+{
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
+ dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
+ dest[i].range.min = 0;
+ dest[i].range.max = meter->params.mtu;
+ dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
+ dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
+
+ return 0;
+}
+
+static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
u32 sampler_id,
@@ -491,6 +507,9 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
+ } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
+ err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
+ (*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d53190f22871..4dcd26b86662 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -448,7 +448,8 @@ static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
- type == MLX5_FLOW_DESTINATION_TYPE_TIR;
+ type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
+ type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
}
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -1578,7 +1579,13 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
d1->ft_num == d2->ft_num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
- d1->sampler_id == d2->sampler_id))
+ d1->sampler_id == d2->sampler_id) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
+ d1->range.field == d2->range.field &&
+ d1->range.hit_ft == d2->range.hit_ft &&
+ d1->range.miss_ft == d2->range.miss_ft &&
+ d1->range.min == d2->range.min &&
+ d1->range.max == d2->range.max))
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 3af50fd04d28..f137a0611b77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -123,6 +123,7 @@ enum mlx5_flow_steering_mode {
enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
};
struct mlx5_flow_steering {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 0259a149a64c..d9fcb9ed726f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -118,13 +118,41 @@ struct mlx5_fib_event_work {
};
};
+static struct net_device*
+mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
+ struct fib_info *fi,
+ struct net_device *current_dev)
+{
+ struct net_device *fib_dev;
+ int i, ldev_idx, nhs;
+
+ nhs = fib_info_num_path(fi);
+ i = 0;
+ if (current_dev) {
+ for (; i < nhs; i++) {
+ fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
+ if (fib_dev == current_dev) {
+ i++;
+ break;
+ }
+ }
+ }
+ for (; i < nhs; i++) {
+ fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
+ ldev_idx = mlx5_lag_dev_get_netdev_idx(ldev, fib_dev);
+ if (ldev_idx >= 0)
+ return ldev->pf[ldev_idx].netdev;
+ }
+
+ return NULL;
+}
+
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
- struct fib_nh *fib_nh0, *fib_nh1;
- unsigned int nhs;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -140,16 +168,25 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
fi->fib_priority >= mp->fib.priority)
return;
+ nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
+ nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);
+
/* Handle add/replace event */
- nhs = fib_info_num_path(fi);
- if (nhs == 1) {
- if (__mlx5_lag_is_active(ldev)) {
- struct fib_nh *nh = fib_info_nh(fi, 0);
- struct net_device *nh_dev = nh->fib_nh_dev;
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
+ if (!nh_dev0) {
+ if (mp->fib.dst == fen_info->dst && mp->fib.dst_len == fen_info->dst_len)
+ mp->fib.mfi = NULL;
+ return;
+ }
- if (i < 0)
- return;
+ if (nh_dev0 == nh_dev1) {
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Multipath offload doesn't support routes with multiple nexthops of the same device");
+ return;
+ }
+
+ if (!nh_dev1) {
+ if (__mlx5_lag_is_active(ldev)) {
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
i++;
mlx5_lag_set_port_affinity(ldev, i);
@@ -159,21 +196,6 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
return;
}
- if (nhs != 2)
- return;
-
- /* Verify next hops are ports of the same hca */
- fib_nh0 = fib_info_nh(fi, 0);
- fib_nh1 = fib_info_nh(fi, 1);
- if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
- !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
- "Multipath offload require two ports of the same HCA\n");
- return;
- }
-
/* First time we see multipath route */
if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
struct lag_tracker tracker;
@@ -268,7 +290,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct mlx5_fib_event_work *fib_work;
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- struct net_device *fib_dev;
struct fib_info *fi;
if (info->family != AF_INET)
@@ -285,11 +306,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fi = fen_info->fi;
if (fi->nh)
return NOTIFY_DONE;
- fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
- fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
- return NOTIFY_DONE;
- }
+
fib_work = mlx5_lag_init_fib_work(ldev, event);
if (!fib_work)
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b1dfad274a39..ee104cf04392 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -44,6 +44,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
+ [DR_ACTION_TYP_RANGE] = "DR_ACTION_TYP_RANGE",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -61,6 +62,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
@@ -79,6 +81,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
@@ -94,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
@@ -103,6 +107,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -116,6 +121,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
@@ -129,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -141,6 +148,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
@@ -159,6 +167,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
@@ -169,6 +178,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -183,6 +193,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
@@ -190,6 +201,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
@@ -197,6 +209,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -207,6 +220,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
@@ -220,6 +234,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
@@ -231,6 +246,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -250,6 +266,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -259,6 +276,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
@@ -276,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -291,6 +310,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
@@ -299,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -311,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
@@ -324,6 +346,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -337,6 +360,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
@@ -354,6 +378,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ASO] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
@@ -365,6 +390,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -380,6 +406,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
@@ -388,6 +415,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -396,6 +424,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -407,6 +436,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
@@ -421,6 +451,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -433,6 +464,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -452,6 +484,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
@@ -634,6 +667,83 @@ static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
actions[i]->action_type);
}
+static int dr_action_get_dest_fw_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ struct mlx5dr_cmd_query_flow_table_details output;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret;
+
+ if (!dest_tbl->fw_tbl.rx_icm_addr) {
+ ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+ dest_tbl->fw_tbl.type,
+ dest_tbl->fw_tbl.id,
+ &output);
+ if (ret) {
+ mlx5dr_err(dmn,
+ "Failed mlx5_cmd_query_flow_table ret: %d\n",
+ ret);
+ return ret;
+ }
+
+ dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1;
+ dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0;
+ }
+
+ *final_icm_addr = is_rx_rule ? dest_tbl->fw_tbl.rx_icm_addr :
+ dest_tbl->fw_tbl.tx_icm_addr;
+ return 0;
+}
+
+static int dr_action_get_dest_sw_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_icm_chunk *chunk;
+
+ if (dest_tbl->tbl->dmn != dmn) {
+ mlx5dr_err(dmn,
+ "Destination table belongs to a different domain\n");
+ return -EINVAL;
+ }
+
+ if (dest_tbl->tbl->level <= matcher->tbl->level) {
+ mlx5_core_dbg_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
+ mlx5dr_dbg(dmn,
+ "Connecting table at level %d to a destination table at level %d\n",
+ matcher->tbl->level,
+ dest_tbl->tbl->level);
+ }
+
+ chunk = is_rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+ dest_tbl->tbl->tx.s_anchor->chunk;
+
+ *final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+ return 0;
+}
+
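+ /* Resolve the destination table ICM address for FW- and SW-owned tables */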
+static int dr_action_get_dest_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ if (dest_tbl->is_fw_tbl)
+ return dr_action_get_dest_fw_tbl_addr(matcher,
+ dest_tbl,
+ is_rx_rule,
+ final_icm_addr);
+
+ return dr_action_get_dest_sw_tbl_addr(matcher,
+ dest_tbl,
+ is_rx_rule,
+ final_icm_addr);
+}
+
#define WITH_VLAN_NUM_HW_ACTIONS 6
int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
@@ -661,8 +771,6 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type);
for (i = 0; i < num_actions; i++) {
- struct mlx5dr_action_dest_tbl *dest_tbl;
- struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -676,50 +784,27 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_FT:
dest_action = action;
- dest_tbl = action->dest_tbl;
- if (!dest_tbl->is_fw_tbl) {
- if (dest_tbl->tbl->dmn != dmn) {
- mlx5dr_err(dmn,
- "Destination table belongs to a different domain\n");
- return -EINVAL;
- }
- if (dest_tbl->tbl->level <= matcher->tbl->level) {
- mlx5_core_dbg_once(dmn->mdev,
- "Connecting table to a lower/same level destination table\n");
- mlx5dr_dbg(dmn,
- "Connecting table at level %d to a destination table at level %d\n",
- matcher->tbl->level,
- dest_tbl->tbl->level);
- }
- chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
- dest_tbl->tbl->tx.s_anchor->chunk;
- attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
- } else {
- struct mlx5dr_cmd_query_flow_table_details output;
- int ret;
-
- /* get the relevant addresses */
- if (!action->dest_tbl->fw_tbl.rx_icm_addr) {
- ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
- dest_tbl->fw_tbl.type,
- dest_tbl->fw_tbl.id,
- &output);
- if (!ret) {
- dest_tbl->fw_tbl.tx_icm_addr =
- output.sw_owner_icm_root_1;
- dest_tbl->fw_tbl.rx_icm_addr =
- output.sw_owner_icm_root_0;
- } else {
- mlx5dr_err(dmn,
- "Failed mlx5_cmd_query_flow_table ret: %d\n",
- ret);
- return ret;
- }
- }
- attr.final_icm_addr = rx_rule ?
- dest_tbl->fw_tbl.rx_icm_addr :
- dest_tbl->fw_tbl.tx_icm_addr;
- }
+ ret = dr_action_get_dest_tbl_addr(matcher, action->dest_tbl,
+ rx_rule, &attr.final_icm_addr);
+ if (ret)
+ return ret;
+ break;
+ case DR_ACTION_TYP_RANGE:
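+ /* The hit table address becomes the STE hit address and the
+ * miss table address becomes the range miss address.
+ */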
+ ret = dr_action_get_dest_tbl_addr(matcher,
+ action->range->hit_tbl_action->dest_tbl,
+ rx_rule, &attr.final_icm_addr);
+ if (ret)
+ return ret;
+
+ ret = dr_action_get_dest_tbl_addr(matcher,
+ action->range->miss_tbl_action->dest_tbl,
+ rx_rule, &attr.range.miss_icm_addr);
+ if (ret)
+ return ret;
+
+ attr.range.definer_id = action->range->definer_id;
+ attr.range.min = action->range->min;
+ attr.range.max = action->range->max;
break;
case DR_ACTION_TYP_QP:
mlx5dr_info(dmn, "Domain doesn't support QP\n");
@@ -866,6 +951,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
[DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
+ [DR_ACTION_TYP_RANGE] = sizeof(struct mlx5dr_action_range),
};
static struct mlx5dr_action *
@@ -933,6 +1019,123 @@ dec_ref:
return NULL;
}
+static void dr_action_range_definer_fill(u16 *format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask)
+{
+ int i;
+
+ *format_id = MLX5_IFC_DEFINER_FORMAT_ID_SELECT;
+
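+ /* Select the DW holding the outer packet length; all other selectors
+ * are unused, and the match mask enables only the upper 16 bits of
+ * match_dw_0.
+ */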
+ dw_selectors[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+
+ for (i = 1; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
+ dw_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
+ byte_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+ MLX5_SET(match_definer_match_mask, match_mask,
+ match_dw_0, 0xffffUL << 16);
+}
+
+static int dr_action_create_range_definer(struct mlx5dr_action *action)
+{
+ u8 match_mask[MLX5_FLD_SZ_BYTES(match_definer, match_mask)] = {};
+ u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {};
+ u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};
+ struct mlx5dr_domain *dmn = action->range->dmn;
+ u32 definer_id;
+ u16 format_id;
+ int ret;
+
+ dr_action_range_definer_fill(&format_id,
+ dw_selectors,
+ byte_selectors,
+ match_mask);
+
+ ret = mlx5dr_definer_get(dmn, format_id,
+ dw_selectors, byte_selectors,
+ match_mask, &definer_id);
+ if (ret)
+ return ret;
+
+ action->range->definer_id = definer_id;
+ return 0;
+}
+
+static void dr_action_destroy_range_definer(struct mlx5dr_action *action)
+{
+ mlx5dr_definer_put(action->range->dmn, action->range->definer_id);
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min,
+ u32 max)
+{
+ struct mlx5dr_action *action;
+ int ret;
+
+ if (!mlx5dr_supp_match_ranges(dmn->mdev)) {
+ mlx5dr_dbg(dmn, "SELECT definer support is needed for match range\n");
+ return NULL;
+ }
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5dr_err(dmn, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
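+ /* Create hit/miss destination actions: FW-owned tables get a FW table
+ * destination, SW steering tables get a regular table destination.
+ */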
+ action->range->hit_tbl_action =
+ mlx5dr_is_fw_table(hit_ft) ?
+ mlx5dr_action_create_dest_flow_fw_table(dmn, hit_ft) :
+ mlx5dr_action_create_dest_table(hit_ft->fs_dr_table.dr_table);
+
+ if (!action->range->hit_tbl_action)
+ goto free_action;
+
+ action->range->miss_tbl_action =
+ mlx5dr_is_fw_table(miss_ft) ?
+ mlx5dr_action_create_dest_flow_fw_table(dmn, miss_ft) :
+ mlx5dr_action_create_dest_table(miss_ft->fs_dr_table.dr_table);
+
+ if (!action->range->miss_tbl_action)
+ goto free_hit_tbl_action;
+
+ action->range->min = min;
+ action->range->max = max;
+ action->range->dmn = dmn;
+
+ ret = dr_action_create_range_definer(action);
+ if (ret)
+ goto free_miss_tbl_action;
+
+ /* No need to increase the domain refcount for this action;
+ * the hit/miss table actions take care of it internally.
+ */
+
+ return action;
+
+free_miss_tbl_action:
+ mlx5dr_action_destroy(action->range->miss_tbl_action);
+free_hit_tbl_action:
+ mlx5dr_action_destroy(action->range->hit_tbl_action);
+free_action:
+ kfree(action);
+
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
@@ -1980,6 +2183,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_ASO_FLOW_METER:
refcount_dec(&action->aso->dmn->refcount);
break;
+ case DR_ACTION_TYP_RANGE:
+ dr_action_destroy_range_definer(action);
+ mlx5dr_action_destroy(action->range->miss_tbl_action);
+ mlx5dr_action_destroy(action->range->hit_tbl_action);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index b4739eafc180..07b6a6dcb92f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -564,6 +564,83 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
+static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors)
+{
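+ /* Selectors are only programmed for the SELECT definer format */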
+ if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
+ return;
+
+ MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
+ MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
+ MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
+ MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
+ MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
+ MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
+ MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
+ MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
+ MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
+
+ MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
+ MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
+ MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
+ MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
+ MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
+ MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
+ MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
+ MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
+}
+
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+ u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ void *ptr;
+ int err;
+
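+ /* A definer is created as a general object of type MATCH_DEFINER */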
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ dr_cmd_set_definer_format(ptr, format_id,
+ dw_selectors, byte_selectors);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+void
+mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7adcf0eec13b..db81d881d38e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -49,7 +49,8 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421,
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
@@ -107,6 +108,8 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ u64 hit_tbl_ptr, miss_tbl_ptr;
+ u32 hit_tbl_id, miss_tbl_id;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
@@ -198,6 +201,30 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
action->sampler->rx_icm_addr,
action->sampler->tx_icm_addr);
break;
+ case DR_ACTION_TYP_RANGE:
+ if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
+ hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
+ hit_tbl_ptr = 0;
+ } else {
+ hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
+ hit_tbl_ptr =
+ DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
+ }
+
+ if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
+ miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
+ miss_tbl_ptr = 0;
+ } else {
+ miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
+ miss_tbl_ptr =
+ DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
+ }
+
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
+ hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
+ action->range->definer_id);
+ break;
default:
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
new file mode 100644
index 000000000000..d5ea97751945
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+struct dr_definer_object {
+ u32 id;
+ u16 format_id;
+ u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
+ u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
+ u8 match_mask[DR_STE_SIZE_MATCH_TAG];
+ refcount_t refcount;
+};
+
+static bool dr_definer_compare(struct dr_definer_object *definer,
+ u16 format_id, u8 *dw_selectors,
+ u8 *byte_selectors, u8 *match_mask)
+{
+ int i;
+
+ if (definer->format_id != format_id)
+ return false;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
+ if (definer->dw_selectors[i] != dw_selectors[i])
+ return false;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
+ if (definer->byte_selectors[i] != byte_selectors[i])
+ return false;
+
+ if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
+ return false;
+
+ return true;
+}
+
+static struct dr_definer_object *
+dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+ struct dr_definer_object *definer_obj;
+ unsigned long id;
+
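+ /* Walk the domain's definer xarray looking for an identical definer */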
+ xa_for_each(&dmn->definers_xa, id, definer_obj) {
+ if (dr_definer_compare(definer_obj, format_id,
+ dw_selectors, byte_selectors,
+ match_mask))
+ return definer_obj;
+ }
+
+ return NULL;
+}
+
+static struct dr_definer_object *
+dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+ struct dr_definer_object *definer_obj;
+ int ret = 0;
+
+ definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
+ if (!definer_obj)
+ return NULL;
+
+ ret = mlx5dr_cmd_create_definer(dmn->mdev,
+ format_id,
+ dw_selectors,
+ byte_selectors,
+ match_mask,
+ &definer_obj->id);
+ if (ret)
+ goto err_free_definer_obj;
+
+ /* A definer ID can be up to 32 bits, but the STE format
+ * supports only definers with 8-bit IDs.
+ */
+ if (definer_obj->id > 0xff) {
+ mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
+ goto err_destroy_definer;
+ }
+
+ definer_obj->format_id = format_id;
+ memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
+ memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
+ memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
+
+ refcount_set(&definer_obj->refcount, 1);
+
+ ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
+ goto err_destroy_definer;
+ }
+
+ return definer_obj;
+
+err_destroy_definer:
+ mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+err_free_definer_obj:
+ kfree(definer_obj);
+
+ return NULL;
+}
+
+static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
+ struct dr_definer_object *definer_obj)
+{
+ mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+ xa_erase(&dmn->definers_xa, definer_obj->id);
+ kfree(definer_obj);
+}
+
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id)
+{
+ struct dr_definer_object *definer_obj;
+ int ret = 0;
+
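+ /* Reuse an existing matching definer if possible, otherwise create one */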
+ definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
+ byte_selectors, match_mask);
+ if (!definer_obj) {
+ definer_obj = dr_definer_create_obj(dmn, format_id,
+ dw_selectors, byte_selectors,
+ match_mask);
+ if (!definer_obj)
+ return -ENOMEM;
+ } else {
+ refcount_inc(&definer_obj->refcount);
+ }
+
+ *definer_id = definer_obj->id;
+
+ return ret;
+}
+
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
+{
+ struct dr_definer_object *definer_obj;
+
+ definer_obj = xa_load(&dmn->definers_xa, definer_id);
+ if (!definer_obj) {
+ mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
+ return;
+ }
+
+ if (refcount_dec_and_test(&definer_obj->refcount))
+ dr_definer_destroy_obj(dmn, definer_obj);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 9a9836218c8e..5b8bb2ca31e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -425,10 +425,11 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
refcount_set(&dmn->refcount, 1);
mutex_init(&dmn->info.rx.mutex);
mutex_init(&dmn->info.tx.mutex);
+ xa_init(&dmn->definers_xa);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
- goto free_domain;
+ goto def_xa_destroy;
}
dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
@@ -453,7 +454,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
uninit_caps:
dr_domain_caps_uninit(dmn);
-free_domain:
+def_xa_destroy:
+ xa_destroy(&dmn->definers_xa);
kfree(dmn);
return NULL;
}
@@ -493,6 +495,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
+ xa_destroy(&dmn->definers_xa);
mutex_destroy(&dmn->info.tx.mutex);
mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 7879991048ce..74cbe53ee9db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -35,16 +35,28 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
return 0;
}
+static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste)
+{
+ struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
+ u64 icm_addr;
+
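+ /* STE types whose miss address was already set by the action
+ * (e.g. match ranges) keep it; otherwise point the miss address
+ * at the matcher's end anchor.
+ */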
+ if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
+ return;
+
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+}
+
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
- u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -58,8 +70,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->chunk->ste_arr;
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
- mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -241,7 +252,6 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
- u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -250,9 +260,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->chunk->ste_arr[new_idx];
@@ -773,7 +782,6 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
- u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -781,8 +789,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
ste->ste_chain_location = ste_location;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 9e19a8dc9022..1e15f605df6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -90,6 +90,16 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p)
+{
+ if (!ste_ctx->is_miss_addr_set)
+ return false;
+
+ /* check if miss address is already set for this type of STE */
+ return ste_ctx->is_miss_addr_set(hw_ste_p);
+}
+
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u64 miss_addr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 17513baff9b0..7075142bcfb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -151,6 +151,7 @@ struct mlx5dr_ste_ctx {
bool is_rx, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ bool (*is_miss_addr_set)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
u64 (*get_miss_addr)(u8 *hw_ste_p);
void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index ee677a5c76be..084145f18084 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -13,6 +13,7 @@ enum dr_ste_v1_entry_format {
DR_STE_V1_TYPE_BWC_BYTE = 0x0,
DR_STE_V1_TYPE_BWC_DW = 0x1,
DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
};
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
@@ -267,6 +268,16 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
+{
+ u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
+
+ /* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
+ * are part of the action, so they are both set as part of STE init
+ */
+ return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
+}
+
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -520,6 +531,27 @@ static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
init_color);
}
+static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
+ u32 min, u32 max)
+{
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);
+
+ /* When the STE is sent, its mask and tag are swapped in
+ * dr_ste_v1_prepare_for_postsend(). This, however, is a match range STE,
+ * which has no mask and shouldn't have its mask/tag swapped.
+ * We're using the common utility functions to send this STE, so we need
+ * to account for that swapping - place the values in the corresponding
+ * locations so they end up right after the flip when writing to ICM.
+ *
+ * min/max_value_2 corresponds to match_dw_0 in this STE's definer.
+ * To survive the mask/tag swap, the min/max_2 values are written to
+ * the min/max_0 fields.
+ *
+ * Packet length is 2 bytes, stored in the upper 16 bits of the DW.
+ */
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -535,6 +567,14 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
+static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
+ u32 *added_stes,
+ u16 gvmi)
+{
+ dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
+ dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
+}
+
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
@@ -670,6 +710,20 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_RANGE]) {
+ /* match ranges require a new STE of their own type */
+ dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+ dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+ /* we do not support setting any action on the match ranges STE */
+ action_sz = 0;
+
+ dr_ste_v1_set_match_range_pkt_len(last_ste,
+ attr->range.definer_id,
+ attr->range.min,
+ attr->range.max);
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -858,6 +912,20 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_RANGE]) {
+ /* match ranges require a new STE of their own type */
+ dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+ dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+ /* we do not support setting any action on the match ranges STE */
+ action_sz = 0;
+
+ dr_ste_v1_set_match_range_pkt_len(last_ste,
+ attr->range.definer_id,
+ attr->range.min,
+ attr->range.max);
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -2144,6 +2212,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
index 8a1d49790c6e..b5c0f0f8392f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -7,6 +7,7 @@
#include "dr_types.h"
#include "dr_ste.h"
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
index c60fddd125d2..cf1a3c9a1cf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -202,6 +202,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 41a37b9ac98b..2b769dcbd453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -81,6 +81,7 @@ mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
enum {
DR_STE_SIZE = 64,
DR_STE_SIZE_CTRL = 32,
+ DR_STE_SIZE_MATCH_TAG = 32,
DR_STE_SIZE_TAG = 16,
DR_STE_SIZE_MASK = 16,
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
@@ -128,6 +129,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_ASO_FLOW_METER,
+ DR_ACTION_TYP_RANGE,
DR_ACTION_TYP_MAX,
};
@@ -237,6 +239,7 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -281,6 +284,13 @@ struct mlx5dr_ste_actions_attr {
u8 dest_reg_id;
u8 init_color;
} aso_flow_meter;
+
+ struct {
+ u64 miss_icm_addr;
+ u32 definer_id;
+ u32 min;
+ u32 max;
+ } range;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -924,6 +934,7 @@ struct mlx5dr_domain {
struct mlx5dr_ste_ctx *ste_ctx;
struct list_head dbg_tbl_list;
struct mlx5dr_dbg_dump_info dump_info;
+ struct xarray definers_xa;
};
struct mlx5dr_table_rx_tx {
@@ -1026,6 +1037,15 @@ struct mlx5dr_action_dest_tbl {
};
};
+struct mlx5dr_action_range {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_action *hit_tbl_action;
+ struct mlx5dr_action *miss_tbl_action;
+ u32 definer_id;
+ u32 min;
+ u32 max;
+};
+
struct mlx5dr_action_ctr {
u32 ctr_id;
u32 offset;
@@ -1072,6 +1092,7 @@ struct mlx5dr_action {
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
struct mlx5dr_action_aso_flow_meter *aso;
+ struct mlx5dr_action_range *range;
};
};
@@ -1295,6 +1316,14 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id);
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+ u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask,
+ u32 *definer_id);
+void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
+ u32 definer_id);
struct mlx5dr_cmd_gid_attr {
u8 gid[16];
@@ -1483,4 +1512,18 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
+
+static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
+{
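+ /* A flow table without a SW steering table object is FW-owned */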
+ return !ft->fs_dr_table.dr_table;
+}
+
+static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
+{
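+ /* Match ranges need ConnectX-6 Dx steering format or newer and
+ * FW support for the SELECT match definer format.
+ */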
+ return (MLX5_CAP_GEN(dev, steering_format_version) >=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
+ (MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
+ (1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
+}
+
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 13b6d4721e17..984653756779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -7,10 +7,11 @@
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"
+#include "dr_types.h"
-static bool mlx5_dr_is_fw_table(u32 flags)
+static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
{
- if (flags & MLX5_FLOW_TABLE_TERMINATION)
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
@@ -69,7 +70,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -109,7 +110,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -134,7 +135,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -153,7 +154,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -178,7 +179,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -209,11 +210,22 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
- if (mlx5_dr_is_fw_table(dest_ft->flags))
+ if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
+static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ return mlx5dr_action_create_dest_match_range(domain,
+ dst->dest_attr.range.field,
+ dst->dest_attr.range.hit_ft,
+ dst->dest_attr.range.miss_ft,
+ dst->dest_attr.range.min,
+ dst->dest_attr.range.max);
+}
+
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
struct mlx5_fs_vlan *vlan)
{
@@ -260,7 +272,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -467,6 +479,15 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
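+ /* A range destination is handled like any other terminating destination */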
+ tmp_action = create_range_action(domain, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -702,7 +723,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -727,7 +748,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -780,11 +801,19 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
+ u32 steering_caps = 0;
+
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
return 0;
- return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+ steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
+ steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+
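+ /* Advertise match ranges only on devices that support them */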
+ if (mlx5dr_supp_match_ranges(ns->dev))
+ steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+
+ return steering_caps;
}
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
index 34c2bd17a8b4..790a17d6207f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -165,6 +165,41 @@ struct mlx5_ifc_ste_mask_and_match_v1_bits {
u8 action[0x60];
};
+struct mlx5_ifc_ste_match_ranges_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 action[0x60];
+
+ u8 max_value_0[0x20];
+ u8 min_value_0[0x20];
+ u8 max_value_1[0x20];
+ u8 min_value_1[0x20];
+ u8 max_value_2[0x20];
+ u8 min_value_2[0x20];
+ u8 max_value_3[0x20];
+ u8 min_value_3[0x20];
+};
+
struct mlx5_ifc_ste_eth_l2_src_v1_bits {
u8 reserved_at_0[0x1];
u8 sx_sniffer[0x1];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 84ed77763b21..9afd268a2573 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -140,8 +140,21 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
u8 init_color,
u8 meter_id);
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min,
+ u32 max);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index c7a91981cd5a..ba6958b49a8e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -50,6 +50,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_PORT,
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
};
enum {
@@ -143,6 +144,10 @@ enum {
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -156,6 +161,13 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
u32 sampler_id;
};
};
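Illustrative sketch (not part of the patch): a driver-internal caller could fill the new range destination roughly as below. The helper name and the 64..1500 byte bounds are hypothetical; hit_ft and miss_ft are assumed to be valid flow tables.

static void example_set_pkt_len_range_dest(struct mlx5_flow_destination *dest,
					   struct mlx5_flow_table *hit_ft,
					   struct mlx5_flow_table *miss_ft)
{
	/* Hypothetical helper showing how the new range members are filled */
	dest->type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest->range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest->range.hit_ft = hit_ft;	/* destination when the range matches */
	dest->range.miss_ft = miss_ft;	/* destination otherwise */
	dest->range.min = 64;		/* both bounds must fit in 16 bits */
	dest->range.max = 1500;
}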
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 2093131483c7..294cfe175c4b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6108,6 +6108,38 @@ struct mlx5_ifc_match_definer_format_32_bits {
u8 inner_dmac_15_0[0x10];
};
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
struct mlx5_ifc_match_definer_bits {
u8 modify_field_select[0x40];
@@ -6116,9 +6148,41 @@ struct mlx5_ifc_match_definer_bits {
u8 reserved_at_80[0x10];
u8 format_id[0x10];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x60];
- u8 match_mask[16][0x20];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {