mlx5: Use software enum in APIs instead of PRM

Users of the steering APIs shouldn't use PRM values directly.
Create a software enum to be used instead.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Sponsored by:	NVidia networking
MFC after:	1 week
Author: Mark Bloch <mbloch@nvidia.com>
Date: 2023-02-19 12:25:10 +00:00
Committed by: Konstantin Belousov
parent 45e2e55df6
commit 76ed99ed8a
8 changed files with 68 additions and 55 deletions
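The diff below replaces the raw PRM MLX5_FLOW_CONTEXT_ACTION_* values in the steering API with a new software enum, mlx5_rule_fwd_action, which mlx5_cmd_fs_set_fte() translates back into PRM action bits when building the firmware command. As a rough illustration of how a consumer now passes the software value, here is a minimal sketch; it is not part of the commit, and the helper name and match setup are invented for the example.

/*
 * Illustrative sketch only (not from the commit): add a forwarding rule
 * using the new software action value instead of a raw PRM bit.
 * Assumes the flow table and destination were created elsewhere.
 */
static struct mlx5_flow_rule *
example_add_ipv4_fwd_rule(struct mlx5_flow_table *ft,
    struct mlx5_flow_destination *dest)
{
	u32 mc[MLX5_ST_SZ_DW(fte_match_param)] = {};	/* match criteria (mask) */
	u32 mv[MLX5_ST_SZ_DW(fte_match_param)] = {};	/* match value */
	struct mlx5_flow_act flow_act = {};

	/* Match on the outer ethertype, e.g. IPv4 frames. */
	MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0xffff);
	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, ETHERTYPE_IP);

	/*
	 * Pass the software action; it is converted to
	 * MLX5_FLOW_CONTEXT_ACTION_FWD_DEST inside mlx5_cmd_fs_set_fte().
	 */
	return (mlx5_add_flow_rule(ft, MLX5_MATCH_OUTER_HEADERS, mc, mv,
	    MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, dest));
}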


@@ -43,6 +43,12 @@ enum {
MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};
+enum mlx5_rule_fwd_action {
+MLX5_FLOW_RULE_FWD_ACTION_ALLOW = 0x1,
+MLX5_FLOW_RULE_FWD_ACTION_DROP = 0x2,
+MLX5_FLOW_RULE_FWD_ACTION_DEST = 0x4,
+};
enum {
MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};
@@ -168,7 +174,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
-u32 action,
+u32 sw_action,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule **);


@@ -81,7 +81,7 @@ struct fs_fte {
struct list_head dests;
uint32_t index; /* index in ft */
struct mlx5_flow_act flow_act;
-u8 action; /* MLX5_FLOW_CONTEXT_ACTION */
+u32 sw_action; /* enum mlx5_rule_fwd_action */
enum fs_fte_status status;
};
@@ -290,7 +290,7 @@ int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
struct mlx5_flow_act *flow_act,
-unsigned short action, int dest_size,
+u32 sw_action, int dest_size,
struct list_head *dests); /* mlx5_flow_desination */
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,


@@ -253,7 +253,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
match_header,
match_c,
match_v,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(flow_rule)) {
printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -825,7 +825,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
-MLX5_FLOW_CONTEXT_ACTION_DROP,
+MLX5_FLOW_RULE_FWD_ACTION_DROP,
&flow_act, &dest);
if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
@@ -885,7 +885,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
-MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+MLX5_FLOW_RULE_FWD_ACTION_ALLOW,
&flow_act, &dest);
if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
@@ -902,7 +902,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0,
match_c,
match_v,
-MLX5_FLOW_CONTEXT_ACTION_DROP,
+MLX5_FLOW_RULE_FWD_ACTION_DROP,
&flow_act, &dest);
if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);


@@ -167,7 +167,7 @@ int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
struct mlx5_flow_act *flow_act,
-unsigned short action, int dest_size,
+u32 sw_action, int dest_size,
struct list_head *dests) /* mlx5_flow_desination */
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
@@ -183,10 +183,18 @@ int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
int atomic_mod_cap;
u32 prm_action = 0;
-if (action != MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)
+if (sw_action != MLX5_FLOW_RULE_FWD_ACTION_DEST)
dest_size = 0;
-prm_action = action;
+if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_ALLOW)
+prm_action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_DROP)
+prm_action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_DEST)
+prm_action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
dest_size * MLX5_ST_SZ_BYTES(dest_format_struct);


@@ -164,7 +164,7 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rule(ft->t, spec->match_criteria_enable,
spec->match_criteria,
spec->match_value,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act,
&dest);
out:
@@ -200,7 +200,7 @@ accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, int type)
priv->fts.vlan.t : fs_tcp->tables[type + 1].t;
rule = mlx5_add_flow_rule(fs_tcp->tables[type].t, 0, match_criteria, match_value,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR(rule))
return (PTR_ERR(rule));


@@ -302,7 +302,7 @@ static void fs_remove_node_parent_locked(struct fs_base *node)
kfree(node);
}
-static struct fs_fte *fs_alloc_fte(u8 action,
+static struct fs_fte *fs_alloc_fte(u32 sw_action,
struct mlx5_flow_act *flow_act,
u32 *match_value,
unsigned int index)
@@ -320,7 +320,7 @@ static struct fs_fte *fs_alloc_fte(u8 action,
fte->index = index;
INIT_LIST_HEAD(&fte->dests);
fte->flow_act = *flow_act;
-fte->action = action;
+fte->sw_action = sw_action;
return fte;
}
@@ -341,7 +341,7 @@ static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
if (fg->num_ftes == fg->max_ftes)
return ERR_PTR(-ENOSPC);
-fte = fs_alloc_fte(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+fte = fs_alloc_fte(MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, match_value, index);
if (IS_ERR(fte))
return fte;
@@ -447,7 +447,7 @@ static int fs_set_star_rule(struct mlx5_core_dev *dev,
src_ft->id, src_fte->index,
src_ft->star_rule.fg->id,
&src_fte->flow_act,
-src_fte->action,
+src_fte->sw_action,
src_fte->dests_size,
&src_fte->dests);
if (err)
@@ -1590,7 +1590,7 @@ static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
&fte->status,
fte->val, ft->type,
ft->id, fte->index, fg->id, &fte->flow_act,
-fte->action, fte->dests_size, &fte->dests);
+fte->sw_action, fte->dests_size, &fte->dests);
if (err)
goto free_dst;
@@ -1652,7 +1652,7 @@ static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
u32 *match_value,
-u8 action,
+u32 sw_action,
struct mlx5_flow_act *flow_act,
struct list_head **prev)
{
@@ -1660,7 +1660,7 @@ static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
int index = 0;
index = fs_get_free_fg_index(fg, prev);
-fte = fs_alloc_fte(action, flow_act, match_value, index);
+fte = fs_alloc_fte(sw_action, flow_act, match_value, index);
if (IS_ERR(fte))
return fte;
@@ -1710,7 +1710,7 @@ static void fs_del_dst(struct mlx5_flow_rule *dst)
err = mlx5_cmd_fs_set_fte(dev, ft->vport,
&fte->status, match_value, ft->type,
ft->id, fte->index, fg->id,
-&fte->flow_act, fte->action,
+&fte->flow_act, fte->sw_action,
fte->dests_size, &fte->dests);
if (err) {
mlx5_core_warn(dev, "%s can't delete dst %s\n",
@@ -1776,7 +1776,7 @@ static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
/* Add dst algorithm */
static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
u32 *match_value,
-u8 action,
+u32 sw_action,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest)
{
@@ -1791,7 +1791,7 @@ static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
/* TODO: Check of size against PRM max size */
mutex_lock(&fte->base.lock);
if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
-action == fte->action &&
+sw_action == fte->sw_action &&
!check_conflicting_actions(flow_act, &fte->flow_act)) {
dst = _fs_add_dst_fte(fte, fg, dest);
mutex_unlock(&fte->base.lock);
@@ -1808,7 +1808,7 @@ static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
goto unlock_fg;
}
-fte = fs_create_fte(fg, match_value, action, flow_act, &prev);
+fte = fs_create_fte(fg, match_value, sw_action, flow_act, &prev);
if (IS_ERR(fte)) {
dst = (void *)fte;
goto unlock_fg;
@@ -1836,7 +1836,7 @@ static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
-u8 action,
+u32 sw_action,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest)
{
@@ -1853,7 +1853,7 @@ static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
match_criteria)) {
mutex_unlock(&ft->base.lock);
-dst = fs_add_dst_fg(g, match_value, action, flow_act, dest);
+dst = fs_add_dst_fg(g, match_value, sw_action, flow_act, dest);
if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
goto unlock;
}
@@ -1866,7 +1866,7 @@ static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
}
dst = fs_add_dst_fg(g, match_value,
-action, flow_act, dest);
+sw_action, flow_act, dest);
if (IS_ERR(dst)) {
/* Remove assumes refcount > 0 and autogroup creates a group
* with a refcount = 0.
@@ -1886,7 +1886,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
-u32 action,
+u32 sw_action,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest)
{
@@ -1897,7 +1897,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
if (ns)
down_read(&ns->dests_rw_sem);
dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
-match_value, action, flow_act, dest);
+match_value, sw_action, flow_act, dest);
if (ns)
up_read(&ns->dests_rw_sem);


@@ -293,7 +293,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
rule_p = &ai->ft_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -308,7 +308,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -320,7 +320,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -335,7 +335,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -347,7 +347,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -361,7 +361,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -373,7 +373,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -387,7 +387,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -399,7 +399,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -413,7 +413,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -425,7 +425,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -505,7 +505,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -513,7 +513,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -524,7 +524,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -532,7 +532,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -542,7 +542,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -550,7 +550,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -560,7 +560,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -568,7 +568,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -578,7 +578,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -587,7 +587,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
ETHERTYPE_IPV6);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -597,7 +597,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -730,7 +730,7 @@ mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
}
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+MLX5_FLOW_RULE_FWD_ACTION_DEST,
&flow_act,
&dest);
@@ -1709,7 +1709,7 @@ mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@@ -1835,7 +1835,7 @@ mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.vxlan_catchall_ft_rule;
*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
-MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, &flow_act, &dest);
+MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);


@@ -2209,8 +2209,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
-action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
-MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+action = dst ? MLX5_FLOW_RULE_FWD_ACTION_DEST : 0;
handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
spec->match_criteria,
spec->match_value,