mlx5: Introduce new destination type TABLE_TYPE

This new destination type supports flow transition between different
table types, e.g. from NIC_RX to RDMA_RX or from RDMA_TX to NIC_TX.

In addition, add driver support for querying the capability for
this new destination type.

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Sponsored by:	NVidia networking
MFC after:	1 week
This commit is contained in:
Patrisious Haddad 2023-04-04 09:01:24 +03:00 committed by Konstantin Belousov
parent b94ef2a3bc
commit 7b959396ca
3 changed files with 100 additions and 2 deletions

View file

@@ -993,6 +993,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEC,
MLX5_CAP_TLS,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_GENERAL_2 = 0x20,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1031,6 +1032,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_2(mdev, cap) \
MLX5_GET(cmd_hca_cap_2, mdev->hca_caps_cur[MLX5_CAP_GENERAL_2], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

View file

@@ -559,6 +559,43 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
return err;
}
static int handle_hca_cap_2(struct mlx5_core_dev *dev)
{
void *set_ctx;
void *set_hca_cap;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
int err;
if (MLX5_CAP_GEN_MAX(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
return err;
} else {
return 0;
}
/* To be added if sw_vhca support was added */
/*if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
!(dev->priv.sw_vhca_id > 0))
return 0;*/
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
return -ENOMEM;
MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
MLX5_CAP_GENERAL_2 << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL_2],
MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
//MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
err = set_caps(dev, set_ctx, set_sz);
kfree(set_ctx);
return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
struct mlx5_reg_host_endianess he_in;
@@ -1139,6 +1176,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
err = handle_hca_cap_2(dev);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
goto reclaim_boot_pages;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
mlx5_core_err(dev, "failed to allocate init pages\n");

View file

@@ -302,6 +302,11 @@ enum {
MLX5_CMD_OP_GENERAL_END = 0xd00,
};
/*
 * Cross-type flow table transition capabilities, reported via the
 * flow_table_type_2_type field of HCA cap 2.  Each bit advertises one
 * supported table-type-to-table-type jump (per the constant names:
 * NIC_RX -> RDMA_RX and RDMA_TX -> NIC_TX).
 */
enum {
MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),	/* NIC_RX -> RDMA_RX allowed */
MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),	/* RDMA_TX -> NIC_TX allowed */
};
enum {
MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_QUERY_FW_INFO = 0x8007,
MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_CAPABILITY = 0x8400,
@@ -524,7 +529,9 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
u8 reserved_0[0x20];
u8 reserved_0[0x8];
u8 destination_table_type[0x8];
u8 reserved_at_1[0x10];
};
struct mlx5_ifc_ipv4_layout_bits {
@@ -1144,7 +1151,12 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_0[0x80];
u8 reserved_0[0x20];
u8 hca_cap_2[0x1];
u8 reserved_at_21[0x1f];
u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
@@ -1439,10 +1451,48 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_7c0[0x40];
};
/*
 * PRM layout of the extended HCA capability page (HCA cap 2), queried via
 * QUERY_HCA_CAP with the MLX5_CAP_GENERAL_2 op_mod.  Field widths are in
 * bits, following the mlx5_ifc bit-struct convention.
 *
 * NOTE(review): this listing appears to be a diff with +/- markers
 * stripped — reserved_at_0[0x80] overlaps the migratable/reserved_at_81
 * fields that follow it, and reserved_at_40 appears after offset 0xa0.
 * Offsets are only self-consistent from reserved_at_c0 (0xc0) onward;
 * verify the early fields against upstream mlx5_ifc.h.
 */
struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
u8 migratable[0x1];
u8 reserved_at_81[0x1f];
u8 reserved_at_40[0x40];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
u8 max_reformat_remove_size[0x8];
u8 max_reformat_remove_offset[0x8];
u8 reserved_at_c0[0x8];
u8 migration_multi_load[0x1];
u8 migration_tracking_state[0x1];
u8 reserved_at_ca[0x16];
u8 reserved_at_e0[0xc0];
/* Bitmask of supported cross-type table transitions (MLX5_FT_* bits). */
u8 flow_table_type_2_type[0x8];
u8 reserved_at_1a8[0x3];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
u8 reserved_at_1c0[0x60];
u8 reserved_at_220[0x1];
u8 sw_vhca_id_valid[0x1];
u8 sw_vhca_id[0xe];
u8 reserved_at_230[0x10];
u8 reserved_at_240[0xb];
u8 ts_cqe_metadata_size2wqe_counter[0x5];
u8 reserved_at_250[0x10];
/* Pad to 0x800 bits (256 bytes), the full capability page size. */
u8 reserved_at_260[0x5a0];
};
/*
 * Hardware encodings for the destination_type field of
 * mlx5_ifc_dest_format_struct_bits.  Values are fixed by the device
 * interface and must not be renumbered.
 */
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
/* Forward to a table of another type (e.g. NIC_RX -> RDMA_RX). */
MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
@@ -2937,6 +2987,7 @@ struct mlx5_ifc_hca_vport_context_bits {
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;