ice(4): Update to 1.38.16-k

New features
- Add sysctl "link_active_on_if_down" (defaults to 1 to match previous
behavior): set this to 0 to have the driver bring the physical link down when
the interface is brought administratively down
- Add sysctl "temp" to read chip temperature on E810 devices; this requires a
4.30 or newer NVM (see package sysutils/intel-nvmupdate-100g)
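
For illustration, a minimal userland sketch of exercising both new sysctls via
sysctlbyname(3). This is a sketch, not part of the commit: the dev.ice.0 OID
prefix assumes the adapter is unit 0, and the value types follow the handlers
registered in ice_add_device_sysctls() in the diff below ("temp" is CTLTYPE_S8,
"link_active_on_if_down" is CTLTYPE_U8).

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            int8_t temp;
            uint8_t keep_link = 0;
            size_t len = sizeof(temp);

            /* Read the chip temperature (signed 8-bit, degrees Celsius). */
            if (sysctlbyname("dev.ice.0.temp", &temp, &len, NULL, 0) == -1)
                    err(1, "dev.ice.0.temp");
            printf("E810 temperature: %d C\n", temp);

            /* 0 => bring the physical link down on administrative down. */
            if (sysctlbyname("dev.ice.0.link_active_on_if_down", NULL, NULL,
                &keep_link, sizeof(keep_link)) == -1)
                    err(1, "dev.ice.0.link_active_on_if_down");
            return (0);
    }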

Bug fixes and general changes
- (linked to irdma) Properly propagate a PF reset request from the irdma driver
- (linked to irdma) Properly notify irdma of an impending PF reset
- (linked to irdma) Move Protocol Engine error handling to irdma
- Print a log message when using a DDP package that doesn't support the
"TX balancing" mode
- Block LLDP agent configuration when DSCP QoS mode is enabled
- Fix a kernel panic when updating the NVM while the adapter is in the "TX balancing" mode
- Remove ice_sbq_cmd.h since it's unused
- Fix LLDP RX filter to still allow LLDP frames to be received by SW after a PF
reset in SW LLDP mode
- Add an ice_if_needs_restart handler to fix a bad interaction between VLAN
configuration and link down
- Issue PF reset during unload
- Fixes for the nvmupdate process
- Use pci_msix_table_bar() to get the MSI-X BAR index at runtime instead of hardcoding it

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	anzhu@netapp.com
MFC after:	3 days
Sponsored by:	Intel Corporation, NetApp
Differential Revision:	https://reviews.freebsd.org/D41655
Eric Joyner 2023-08-24 16:26:13 -07:00
parent ffafa6a4d1
commit 9c30461dd2
31 changed files with 1358 additions and 747 deletions

View file

@ -168,6 +168,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_LED 0x0061
#define ICE_AQC_CAPS_SDP 0x0062
#define ICE_AQC_CAPS_WR_CSR_PROT 0x0064
#define ICE_AQC_CAPS_SENSOR_READING 0x0067
#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
#define ICE_AQC_CAPS_SKU 0x0074
#define ICE_AQC_CAPS_PORT_MAP 0x0075
@ -180,7 +181,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define ICE_AQC_CAPS_DYN_FLATTENING 0x0090
#define ICE_AQC_CAPS_DYN_FLATTENING 0x008A
#define ICE_AQC_CAPS_OROM_RECOVERY_UPDATE 0x0090
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
u8 major_ver;
@ -793,12 +795,30 @@ struct ice_aqc_sw_rules {
__le32 addr_low;
};
/* Add switch rule response:
* Content of return buffer is same as the input buffer. The status field and
* LUT index are updated as part of the response
*/
struct ice_aqc_sw_rules_elem_hdr {
__le16 type; /* Switch rule type, one of T_... */
#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
__le16 status;
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structures describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
struct ice_sw_rule_lkup_rx_tx {
struct ice_aqc_sw_rules_elem_hdr hdr;
__le16 recipe_id;
#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
/* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@ -877,14 +897,17 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
u8 hdr[STRUCT_HACK_VAR_LEN];
u8 hdr_data[STRUCT_HACK_VAR_LEN];
};
#pragma pack(1)
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the action for Update/Get/Remove commands.
*/
struct ice_sw_rule_lg_act {
struct ice_aqc_sw_rules_elem_hdr hdr;
__le16 index; /* Index in large action table */
__le16 size;
/* Max number of large actions */
@ -939,50 +962,30 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
__le32 act[STRUCT_HACK_VAR_LEN]; /* array of size for actions */
};
#pragma pack()
#pragma pack(1)
/* Add/Update/Remove VSI list command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the VSI list for Update/Get/Remove commands.
*/
struct ice_sw_rule_vsi_list {
struct ice_aqc_sw_rules_elem_hdr hdr;
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
__le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */
};
#pragma pack()
#pragma pack(1)
/* Query VSI list command/response entry */
struct ice_sw_rule_vsi_list_query {
__le16 index;
ice_declare_bitmap(vsi_list, ICE_MAX_VSI);
u8 vsi_list[DIVIDE_AND_ROUND_UP(ICE_MAX_VSI, BITS_PER_BYTE)];
};
#pragma pack()
#pragma pack(1)
/* Add switch rule response:
* Content of return buffer is same as the input buffer. The status field and
* LUT index are updated as part of the response
*/
struct ice_aqc_sw_rules_elem {
__le16 type; /* Switch rule type, one of T_... */
#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
__le16 status;
union {
struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
struct ice_sw_rule_lg_act lg_act;
struct ice_sw_rule_vsi_list vsi_list;
struct ice_sw_rule_vsi_list_query vsi_list_query;
} pdata;
};
#pragma pack()
/* PFC Ignore (direct 0x0301)
* The command and response use the same descriptor structure
*/
@ -994,8 +997,8 @@ struct ice_aqc_pfc_ignore {
u8 reserved[14];
};
/* Set PFC Mode (direct 0x0303)
* Query PFC Mode (direct 0x0302)
/* Query PFC Mode (direct 0x0302)
* Set PFC Mode (direct 0x0303)
*/
struct ice_aqc_set_query_pfc_mode {
u8 pfc_mode;
@ -1098,9 +1101,9 @@ struct ice_aqc_txsched_elem {
u8 generic;
#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
@ -1673,6 +1676,32 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
/* Get sensor reading (direct 0x0632) */
struct ice_aqc_get_sensor_reading {
u8 sensor;
#define ICE_AQC_INT_TEMP_SENSOR 0x0
u8 format;
#define ICE_AQC_INT_TEMP_FORMAT 0x0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
/* Get sensor reading response (direct 0x0632) */
struct ice_aqc_get_sensor_reading_resp {
union {
u8 raw[8];
/* Output data for sensor 0x00, format 0x00 */
struct {
s8 temp;
u8 temp_warning_threshold;
u8 temp_critical_threshold;
u8 temp_fatal_threshold;
u8 reserved[4];
} s0f0;
} data;
};
/* DNL Get Status command (indirect 0x0680)
* Structure used for the response, the command uses the generic
* ice_aqc_generic struct to pass a buffer address to the FW.
@ -2476,6 +2505,19 @@ struct ice_aqc_get_set_rss_keys {
u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
};
enum ice_lut_type {
ICE_LUT_VSI = 0,
ICE_LUT_PF = 1,
ICE_LUT_GLOBAL = 2,
ICE_LUT_TYPE_MASK = 3
};
enum ice_lut_size {
ICE_LUT_VSI_SIZE = 64,
ICE_LUT_GLOBAL_SIZE = 512,
ICE_LUT_PF_SIZE = 2048,
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
@ -2484,21 +2526,13 @@ struct ice_aqc_get_set_rss_lut {
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
(0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
(ICE_LUT_TYPE_MASK << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
(0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
(ICE_LUT_TYPE_MASK << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
@ -2686,7 +2720,7 @@ struct ice_aqc_move_rdma_qset_buffer {
};
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C42 and 0x0C41) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@ -2989,6 +3023,8 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
struct ice_aqc_get_sensor_reading get_sensor_reading;
struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
struct ice_aqc_dnl_get_status get_status;
struct ice_aqc_dnl_run_command dnl_run;
struct ice_aqc_dnl_call_command dnl_call;
@ -3239,6 +3275,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_sensor_reading = 0x0632,
ice_aqc_opc_dnl_get_status = 0x0680,
ice_aqc_opc_dnl_run = 0x0681,
ice_aqc_opc_dnl_call = 0x0682,

View file

@ -343,6 +343,88 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
return ICE_SUCCESS;
}
/**
* ice_phy_maps_to_media
* @phy_type_low: PHY type low bits
* @phy_type_high: PHY type high bits
* @media_mask_low: media type PHY type low bitmask
* @media_mask_high: media type PHY type high bitmask
*
* Return true if PHY type [low|high] bits are only of media type PHY types
* [low|high] bitmask.
*/
static bool
ice_phy_maps_to_media(u64 phy_type_low, u64 phy_type_high,
u64 media_mask_low, u64 media_mask_high)
{
/* check if a PHY type exist for media type */
if (!(phy_type_low & media_mask_low ||
phy_type_high & media_mask_high))
return false;
/* check that PHY types are only of media type */
if (!(phy_type_low & ~media_mask_low) &&
!(phy_type_high & ~media_mask_high))
return true;
return false;
}
/**
* ice_set_media_type - Sets media type
* @pi: port information structure
*
* Set ice_port_info PHY media type based on PHY type. This should be called
* from Get PHY caps with media.
*/
static void ice_set_media_type(struct ice_port_info *pi)
{
enum ice_media_type *media_type;
u64 phy_type_high, phy_type_low;
phy_type_high = pi->phy.phy_type_high;
phy_type_low = pi->phy.phy_type_low;
media_type = &pi->phy.media_type;
/* if no media, then media type is NONE */
if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
*media_type = ICE_MEDIA_NONE;
/* else if PHY types are only BASE-T, then media type is BASET */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0))
*media_type = ICE_MEDIA_BASET;
/* else if any PHY type is BACKPLANE, then media type is BACKPLANE */
else if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M ||
phy_type_high & ICE_MEDIA_BP_PHY_TYPE_HIGH_M)
*media_type = ICE_MEDIA_BACKPLANE;
/* else if PHY types are only optical, or optical and C2M, then media
* type is FIBER
*/
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
ICE_MEDIA_OPT_PHY_TYPE_LOW_M, 0) ||
(phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M &&
phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M))
*media_type = ICE_MEDIA_FIBER;
/* else if PHY types are only DA, or DA and C2C, then media type DA */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
ICE_MEDIA_DAC_PHY_TYPE_LOW_M, 0) ||
(phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M &&
(phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
*media_type = ICE_MEDIA_DA;
/* else if PHY types are only C2M or only C2C, then media is AUI */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
ICE_MEDIA_C2M_PHY_TYPE_LOW_M,
ICE_MEDIA_C2M_PHY_TYPE_HIGH_M) ||
ice_phy_maps_to_media(phy_type_low, phy_type_high,
ICE_MEDIA_C2C_PHY_TYPE_LOW_M,
ICE_MEDIA_C2C_PHY_TYPE_HIGH_M))
*media_type = ICE_MEDIA_AUI;
else
*media_type = ICE_MEDIA_UNKNOWN;
}
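
As a concrete illustration of ice_phy_maps_to_media()'s contract (true only
when at least one mask bit is set and no PHY type bit falls outside the mask),
a sketch with a hypothetical mask value; the real ICE_MEDIA_*_PHY_TYPE_*_M
masks live in ice_adminq_cmd.h:

    /* Hypothetical mask for illustration only. */
    #define DEMO_MEDIA_MASK_LOW	0x0007ULL

    /* All set PHY bits fall inside the mask: maps to the media type. */
    ice_phy_maps_to_media(0x0003ULL, 0, DEMO_MEDIA_MASK_LOW, 0); /* true */
    /* Bit 3 lies outside the mask: mixed media, no clean mapping. */
    ice_phy_maps_to_media(0x000bULL, 0, DEMO_MEDIA_MASK_LOW, 0); /* false */
    /* No overlap with the mask at all. */
    ice_phy_maps_to_media(0x0100ULL, 0, DEMO_MEDIA_MASK_LOW, 0); /* false */
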
/**
* ice_aq_get_phy_caps - returns PHY capabilities
* @pi: port information structure
@ -434,6 +516,9 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
sizeof(pi->phy.link_info.module_type),
ICE_NONDMA_TO_NONDMA);
ice_set_media_type(pi);
ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
pi->phy.media_type);
}
return status;
@ -513,156 +598,6 @@ ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
return ICE_ERR_DOES_NOT_EXIST;
}
/**
* ice_is_media_cage_present
* @pi: port information structure
*
* Returns true if media cage is present, else false. If no cage, then
* media type is backplane or BASE-T.
*/
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
struct ice_aqc_get_link_topo *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.get_link_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
cmd->addr.topo_params.node_type_ctx =
(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
cmd->addr.topo_params.node_type_ctx |=
(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);
/* Node type cage can be used to determine if cage is present. If AQC
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
}
/**
* ice_get_media_type - Gets media type
* @pi: port information structure
*/
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
struct ice_link_status *hw_link_info;
if (!pi)
return ICE_MEDIA_UNKNOWN;
hw_link_info = &pi->phy.link_info;
if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
/* If more than one media type is selected, report unknown */
return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
/* 1G SGMII is a special case where some DA cable PHYs
* may show this as an option when it really shouldn't
* be since SGMII is meant to be between a MAC and a PHY
* in a backplane. Try to detect this case and handle it
*/
if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
(hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
return ICE_MEDIA_DA;
switch (hw_link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_1000BASE_SX:
case ICE_PHY_TYPE_LOW_1000BASE_LX:
case ICE_PHY_TYPE_LOW_10GBASE_SR:
case ICE_PHY_TYPE_LOW_10GBASE_LR:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
case ICE_PHY_TYPE_LOW_50GBASE_LR2:
case ICE_PHY_TYPE_LOW_50GBASE_SR:
case ICE_PHY_TYPE_LOW_50GBASE_FR:
case ICE_PHY_TYPE_LOW_50GBASE_LR:
case ICE_PHY_TYPE_LOW_100GBASE_SR4:
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
case ICE_PHY_TYPE_LOW_2500BASE_T:
case ICE_PHY_TYPE_LOW_5GBASE_T:
case ICE_PHY_TYPE_LOW_10GBASE_T:
case ICE_PHY_TYPE_LOW_25GBASE_T:
return ICE_MEDIA_BASET;
case ICE_PHY_TYPE_LOW_10G_SFI_DA:
case ICE_PHY_TYPE_LOW_25GBASE_CR:
case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
case ICE_PHY_TYPE_LOW_25GBASE_CR1:
case ICE_PHY_TYPE_LOW_40GBASE_CR4:
case ICE_PHY_TYPE_LOW_50GBASE_CR2:
case ICE_PHY_TYPE_LOW_50GBASE_CP:
case ICE_PHY_TYPE_LOW_100GBASE_CR4:
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40G_XLAUI:
case ICE_PHY_TYPE_LOW_50G_LAUI2:
case ICE_PHY_TYPE_LOW_50G_AUI2:
case ICE_PHY_TYPE_LOW_50G_AUI1:
case ICE_PHY_TYPE_LOW_100G_AUI4:
case ICE_PHY_TYPE_LOW_100G_CAUI4:
if (ice_is_media_cage_present(pi))
return ICE_MEDIA_AUI;
/* fall-through */
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_X:
case ICE_PHY_TYPE_LOW_5GBASE_KR:
case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_KR:
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
case ICE_PHY_TYPE_LOW_40GBASE_KR4:
case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
case ICE_PHY_TYPE_LOW_50GBASE_KR2:
case ICE_PHY_TYPE_LOW_100GBASE_KR4:
case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
return ICE_MEDIA_BACKPLANE;
}
} else {
switch (hw_link_info->phy_type_high) {
case ICE_PHY_TYPE_HIGH_100G_AUI2:
case ICE_PHY_TYPE_HIGH_100G_CAUI2:
if (ice_is_media_cage_present(pi))
return ICE_MEDIA_AUI;
/* fall-through */
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
return ICE_MEDIA_FIBER;
}
}
return ICE_MEDIA_UNKNOWN;
}
#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
/**
@ -681,7 +616,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_aqc_get_link_status_data link_data = { 0 };
struct ice_aqc_get_link_status *resp;
struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
@ -694,7 +628,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
li = &pi->phy.link_info;
hw_fc_info = &pi->fc;
@ -716,7 +649,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->link_speed = LE16_TO_CPU(link_data.link_speed);
li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
li->link_info = link_data.link_info;
li->link_cfg_err = link_data.link_cfg_err;
li->an_info = link_data.an_info;
@ -747,7 +679,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
(unsigned long long)li->phy_type_low);
ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)li->phy_type_high);
ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
@ -1066,7 +997,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
hw->port_info = (struct ice_port_info *)
if (!hw->port_info)
hw->port_info = (struct ice_port_info *)
ice_malloc(hw, sizeof(*hw->port_info));
if (!hw->port_info) {
status = ICE_ERR_NO_MEMORY;
@ -1205,7 +1137,7 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@ -1237,8 +1169,10 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
GLNVM_ULD_PE_DONE_M : 0);
reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
for (cnt = 0; cnt < reset_wait_cnt; cnt++) {
reg = rd32(hw, GLNVM_ULD) & uld_mask;
if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
@ -1247,7 +1181,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
ice_msec_delay(10, true);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
if (cnt == reset_wait_cnt) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
return ICE_ERR_RESET_FAILED;
@ -1265,7 +1199,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
*/
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
/* If at function entry a global reset was already in progress, i.e.
* state is not 'device active' or any of the reset done bits are not
@ -1290,8 +1224,10 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* timeout plus the PFR timeout which will account for a possible reset
* that is occurring during a download package operation.
*/
for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
ICE_PF_RESET_WAIT_COUNT; cnt++) {
reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
cfg_lock_timeout = ICE_GLOBAL_CFG_LOCK_TIMEOUT;
for (cnt = 0; cnt < cfg_lock_timeout + reset_wait_cnt; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@ -1299,7 +1235,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
ice_msec_delay(1, true);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
if (cnt == cfg_lock_timeout + reset_wait_cnt) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
@ -2452,6 +2388,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
caps->netlist_auth =
(number & ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix,
caps->netlist_auth);
break;
case ICE_AQC_CAPS_CEM:
caps->mgmt_cem = (number == 1);
@ -2534,6 +2475,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
caps->ext_topo_dev_img_prog_en[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
caps->ext_topo_dev_img_ver_schema[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_ver_high[%d] = %d\n",
prefix, index,
@ -2554,6 +2497,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: ext_topo_dev_img_prog_en[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_prog_en[index]);
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_ver_schema[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_ver_schema[index]);
break;
}
case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
@ -2564,6 +2511,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
prefix, caps->dyn_flattening_en);
break;
case ICE_AQC_CAPS_OROM_RECOVERY_UPDATE:
caps->orom_recovery_update = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
prefix, caps->orom_recovery_update);
break;
default:
/* Not one of the recognized common capabilities */
found = false;
@ -2782,6 +2734,26 @@ ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->nac_topo.id);
}
/**
* ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
* Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
* enabled sensors.
*/
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap)
{
dev_p->supported_sensors = LE32_TO_CPU(cap->number);
ice_debug(hw, ICE_DBG_INIT,
"dev caps: supported sensors (bitmap) = 0x%x\n",
dev_p->supported_sensors);
}
/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
@ -2827,6 +2799,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_NAC_TOPOLOGY:
ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@ -3776,8 +3751,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
break;
case ICE_FEC_DIS_AUTO:
/* Set No FEC and auto FEC */
if (!ice_fw_supports_fec_dis_auto(hw))
return ICE_ERR_NOT_SUPPORTED;
if (!ice_fw_supports_fec_dis_auto(hw)) {
status = ICE_ERR_NOT_SUPPORTED;
goto out;
}
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
/* fall-through */
case ICE_FEC_AUTO:
@ -3852,6 +3829,7 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_AQ_ERROR;
struct ice_aqc_restart_an *cmd;
struct ice_aq_desc desc;
@ -3866,7 +3844,16 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
else
cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
if (status)
return status;
if (ena_link)
pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK;
else
pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
return ICE_SUCCESS;
}
/**
@ -4062,6 +4049,51 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
return ICE_SUCCESS;
}
static u16 ice_lut_type_to_size(u16 lut_type)
{
switch (lut_type) {
case ICE_LUT_VSI:
return ICE_LUT_VSI_SIZE;
case ICE_LUT_GLOBAL:
return ICE_LUT_GLOBAL_SIZE;
case ICE_LUT_PF:
return ICE_LUT_PF_SIZE;
default:
return 0;
}
}
static u16 ice_lut_size_to_flag(u16 lut_size)
{
u16 f = 0;
switch (lut_size) {
case ICE_LUT_GLOBAL_SIZE:
f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG;
break;
case ICE_LUT_PF_SIZE:
f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG;
break;
default:
break;
}
return f << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S;
}
int ice_lut_size_to_type(int lut_size)
{
switch (lut_size) {
case ICE_LUT_VSI_SIZE:
return ICE_LUT_VSI;
case ICE_LUT_GLOBAL_SIZE:
return ICE_LUT_GLOBAL;
case ICE_LUT_PF_SIZE:
return ICE_LUT_PF;
default:
return -1;
}
}
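
Together these helpers replace the open-coded type and size switches in
__ice_aq_get_set_rss_lut() below; a brief sketch of how they compose:

    u16 size  = ice_lut_type_to_size(ICE_LUT_PF);  /* 2048 */
    int type  = ice_lut_size_to_type(size);        /* ICE_LUT_PF */
    /* Packs ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG (2) into the
     * size field (bits 3:2) of the Get/Set RSS LUT flags word.
     */
    u16 flags = ice_lut_size_to_flag(size);        /* 2 << 2 == 0x8 */
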
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@ -4073,7 +4105,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
enum ice_status status;
@ -4084,16 +4116,22 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
vsi_handle = params->vsi_handle;
lut = params->lut;
lut_type = params->lut_type;
lut_size = ice_lut_type_to_size(lut_type);
cmd_resp = &desc.params.get_set_rss_lut;
if (lut_type == ICE_LUT_GLOBAL)
glob_lut_idx = params->global_lut_id;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
lut_size = params->lut_size;
lut_type = params->lut_type;
glob_lut_idx = params->global_lut_id;
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
if (lut_size > params->lut_size)
return ICE_ERR_INVAL_SIZE;
cmd_resp = &desc.params.get_set_rss_lut;
if (set && lut_size != params->lut_size)
return ICE_ERR_PARAM;
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
@ -4107,61 +4145,15 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
ICE_AQC_GSET_RSS_LUT_VSI_VALID);
switch (lut_type) {
case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
}
flags = ice_lut_size_to_flag(lut_size) |
((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M) |
((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
if (!set)
goto ice_aq_get_set_rss_lut_send;
} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
if (!set)
goto ice_aq_get_set_rss_lut_send;
} else {
goto ice_aq_get_set_rss_lut_send;
}
/* LUT size is only valid for Global and PF table types */
switch (lut_size) {
case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
/* fall-through */
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
}
ice_aq_get_set_rss_lut_send:
cmd_resp->flags = CPU_TO_LE16(flags);
status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
ice_aq_get_set_rss_lut_exit:
params->lut_size = LE16_TO_CPU(desc.datalen);
return status;
}
@ -5155,7 +5147,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
/* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
if (!status)
status = ice_sched_replay_q_bw(pi, q_ctx);
@ -5390,7 +5382,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
&node);
&node, NULL);
if (status)
break;
qset_teid[i] = LE32_TO_CPU(node.node_teid);
@ -5456,6 +5448,42 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
return status;
}
/**
* ice_aq_get_sensor_reading
* @hw: pointer to the HW struct
* @sensor: sensor type
* @format: requested response format
* @data: pointer to data to be read from the sensor
* @cd: pointer to command details structure or NULL
*
* Get sensor reading (0x0632)
*/
enum ice_status
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sensor_reading *cmd;
struct ice_aq_desc desc;
enum ice_status status;
if (!data)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
cmd = &desc.params.get_sensor_reading;
cmd->sensor = sensor;
cmd->format = format;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
ice_memcpy(data, &desc.params.get_sensor_reading_resp,
sizeof(*data), ICE_NONDMA_TO_NONDMA);
return status;
}
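
A minimal caller sketch; sensor 0x00 with format 0x00 selects the s0f0
response layout defined in ice_adminq_cmd.h above:

    struct ice_aqc_get_sensor_reading_resp resp;
    enum ice_status status;

    status = ice_aq_get_sensor_reading(hw, ICE_AQC_INT_TEMP_SENSOR,
                                       ICE_AQC_INT_TEMP_FORMAT, &resp, NULL);
    if (!status)
            ice_debug(hw, ICE_DBG_INIT, "temp %d C (warn %u, crit %u)\n",
                      resp.data.s0f0.temp,
                      resp.data.s0f0.temp_warning_threshold,
                      resp.data.s0f0.temp_critical_threshold);
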
/**
* ice_is_main_vsi - checks whether the VSI is main VSI
* @hw: pointer to the HW struct
@ -6527,7 +6555,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* elements long because the link_speed returned by the firmware is a 16 bit
* value, but is indexed by [fls(speed) - 1]
*/
static const u32 ice_aq_to_link_speed[15] = {
static const u32 ice_aq_to_link_speed[] = {
ICE_LINK_SPEED_10MBPS, /* BIT(0) */
ICE_LINK_SPEED_100MBPS,
ICE_LINK_SPEED_1000MBPS,
@ -6539,10 +6567,6 @@ static const u32 ice_aq_to_link_speed[15] = {
ICE_LINK_SPEED_40000MBPS,
ICE_LINK_SPEED_50000MBPS,
ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@ -6553,6 +6577,9 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
return ICE_LINK_SPEED_UNKNOWN;
return ice_aq_to_link_speed[index];
}
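
An example of the fls()-based indexing the table comment describes, with the
new bounds check catching indices past the shortened table:

    /* BIT(10) in the AQ link_speed field is 100G, so fls() - 1 == 10. */
    u32 speed = ice_get_link_speed(10);  /* ICE_LINK_SPEED_100000MBPS */
    /* Out-of-range indices now safely report unknown. */
    u32 other = ice_get_link_speed(14);  /* ICE_LINK_SPEED_UNKNOWN */
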
@ -6569,6 +6596,7 @@ bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
ICE_FW_FEC_DIS_AUTO_MIN,
ICE_FW_FEC_DIS_AUTO_PATCH);
}
/**
* ice_is_fw_auto_drop_supported
* @hw: pointer to the hardware structure

View file

@ -128,6 +128,7 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index);
int ice_lut_size_to_type(int lut_size);
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
enum ice_status
@ -206,7 +207,6 @@ enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
enum ice_status
@ -296,6 +296,10 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
enum ice_status
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);

View file

@ -113,13 +113,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (!cq->sq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
sizeof(struct ice_sq_cd));
if (!cq->sq.cmd_buf) {
ice_free_dma_mem(hw, &cq->sq.desc_buf);
return ICE_ERR_NO_MEMORY;
}
return ICE_SUCCESS;
}
@ -188,7 +181,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq_buf_size > ICE_AQ_LG_BUF)
desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
/* This is in accordance with control queue design, there is no
* register for buffer size configuration
*/
desc->datalen = CPU_TO_LE16(bi->size);
@ -321,9 +314,6 @@ do { \
ice_free_dma_mem((hw), \
&(qi)->ring.r.ring##_bi[i]); \
} \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
ice_free(hw, (qi)->ring.cmd_buf); \
/* free DMA head */ \
ice_free(hw, (qi)->ring.dma_head); \
} while (0)
@ -391,11 +381,11 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
* ice_init_rq - initialize ARQ
* ice_init_rq - initialize receive side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
* The main initialization routine for the Admin Receive (Event) Queue.
* The main initialization routine for Receive side of a control queue.
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
@ -453,7 +443,7 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
* ice_shutdown_sq - shutdown the Control ATQ
* ice_shutdown_sq - shutdown the transmit side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@ -473,7 +463,7 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto shutdown_sq_out;
}
/* Stop firmware AdminQ processing */
/* Stop processing of the control queue */
wr32(hw, cq->sq.head, 0);
wr32(hw, cq->sq.tail, 0);
wr32(hw, cq->sq.len, 0);
@ -674,8 +664,9 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
*
* NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
bool unloading)
static void
ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
bool unloading)
{
struct ice_ctl_q_info *cq;
@ -824,7 +815,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
}
/**
* ice_clean_sq - cleans Admin send queue (ATQ)
* ice_clean_sq - cleans send side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@ -834,21 +825,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
struct ice_sq_cd *details;
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
@ -856,16 +843,40 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return ICE_CTL_Q_DESC_UNUSED(sq);
}
/**
* ice_ctl_q_str - Convert control queue type to string
* @qtype: the control queue type
*
* Returns: A string name for the given control queue type.
*/
static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
{
switch (qtype) {
case ICE_CTL_Q_UNKNOWN:
return "Unknown CQ";
case ICE_CTL_Q_ADMIN:
return "AQ";
case ICE_CTL_Q_MAILBOX:
return "MBXQ";
default:
return "Unrecognized CQ";
}
}
/**
* ice_debug_cq
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
* @response: true if this is the writeback response
*
* Dumps debug log about control command with descriptor contents.
*/
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
static void
ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void *desc, void *buf, u16 buf_len, bool response)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 datalen, flags;
@ -879,7 +890,8 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
datalen = LE16_TO_CPU(cq_desc->datalen);
flags = LE16_TO_CPU(cq_desc->flags);
ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
LE16_TO_CPU(cq_desc->opcode), flags, datalen,
LE16_TO_CPU(cq_desc->retval));
ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
@ -904,23 +916,23 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
}
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* ice_sq_done - check if the last send on a control queue has completed
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
* Returns: true if all the descriptors on the send side of a control queue
* are finished processing, false otherwise.
*/
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
/* AQ designers suggest use of head for better
/* control queue designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
/**
* ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
* ice_sq_send_cmd_nolock - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
@ -928,8 +940,9 @@ bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
* This is the main send command routine for a control queue. It prepares the
* command into a descriptor, bumps the send queue tail, waits for the command
* to complete, captures status and data for the command, etc.
*/
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@ -940,7 +953,6 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum ice_status status = ICE_SUCCESS;
struct ice_sq_cd *details;
u32 total_delay = 0;
u16 retval = 0;
u32 val = 0;
@ -983,12 +995,6 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
if (cd)
*details = *cd;
else
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
@ -1025,8 +1031,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@ -1074,13 +1079,12 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
ice_debug_cq(hw, (void *)desc, buf, buf_size);
ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
if (details->wb_desc)
ice_memcpy(details->wb_desc, desc_on_ring,
sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);
if (cd && cd->wb_desc)
ice_memcpy(cd->wb_desc, desc_on_ring,
sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);
/* update the error if time out occurred */
if (!cmd_completed) {
@ -1099,7 +1103,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/**
* ice_sq_send_cmd - send command to Control Queue (ATQ)
* ice_sq_send_cmd - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
@ -1107,8 +1111,9 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
* Main command for the transmit side of a control queue. It puts the command
* on the queue, bumps the tail, waits for processing of the command, captures
* command status and results, etc.
*/
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@ -1150,9 +1155,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'.
* Clean one element from the receive side of a control queue. On return 'e'
* contains contents of the message, and 'pending' contains the number of
* events left to process.
*/
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@ -1208,8 +1213,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
e->msg_len, ICE_DMA_TO_NONDMA);
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size

View file

@ -68,7 +68,6 @@ enum ice_ctl_q {
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
@ -98,8 +97,6 @@ struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;

View file

@ -1843,7 +1843,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
status = ice_sched_add_node(pi, 1, &elem);
status = ice_sched_add_node(pi, 1, &elem, NULL);
if (status)
break;
/* update the TC number */

View file

@ -468,6 +468,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
switch (mac_type) {
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K:
case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@ -488,6 +489,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_GENERIC_3K:
sign_type = SEGMENT_SIGN_TYPE_RSA3K;
break;
case ICE_MAC_GENERIC_3K_E825:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;

View file

@ -126,6 +126,7 @@ struct ice_pkg_hdr {
#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005
/* generic segment */
struct ice_generic_seg_hdr {

View file

@ -39,7 +39,7 @@
#define BIT(a) (1UL << (a))
#ifndef BIT_ULL
#define BIT_ULL(a) (1ULL << (a))
#endif /* BIT_ULL */
#endif /* !BIT_ULL */
#define BITS_PER_BYTE 8
@ -51,7 +51,7 @@
#ifndef MIN_T
#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
#endif
#endif /* !MIN_T */
#define IS_ASCII(_ch) ((_ch) < 0x80)

View file

@ -69,6 +69,8 @@ enum feat_list {
ICE_FEATURE_HAS_PBA,
ICE_FEATURE_DCB,
ICE_FEATURE_TX_BALANCE,
ICE_FEATURE_DUAL_NAC,
ICE_FEATURE_TEMP_SENSOR,
/* Must be last entry */
ICE_FEATURE_COUNT
};

View file

@ -727,15 +727,16 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
u8 i;
for (i = 0; i < params->prof->segs_cnt; i++) {
u64 match = params->prof->segs[i].match;
ice_declare_bitmap(match, ICE_FLOW_FIELD_IDX_MAX);
enum ice_flow_field j;
ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
ice_cp_bitmap(match, params->prof->segs[i].match,
ICE_FLOW_FIELD_IDX_MAX);
ice_for_each_set_bit(j, match, ICE_FLOW_FIELD_IDX_MAX) {
status = ice_flow_xtract_fld(hw, params, i, j);
if (status)
return status;
ice_clear_bit(j, (ice_bitmap_t *)&match);
ice_clear_bit(j, match);
}
}
@ -810,7 +811,10 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < segs_cnt; i++)
if (segs[i].hdrs != p->segs[i].hdrs ||
((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
segs[i].match != p->segs[i].match))
(ice_cmp_bitmap(segs[i].match,
p->segs[i].match,
ICE_FLOW_FIELD_IDX_MAX) ==
false)))
break;
/* A match is found if all segments are matched */
@ -1184,11 +1188,9 @@ ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
ice_set_bit(fld, seg->match);
if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
ice_set_bit(fld, seg->range);
seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
@ -1440,6 +1442,14 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
u64 seg_match = 0;
u16 i;
/* convert match bitmap to u64 for hash field comparison */
ice_for_each_set_bit(i, prof->segs[prof->segs_cnt - 1].match,
ICE_FLOW_FIELD_IDX_MAX) {
seg_match |= 1ULL << i;
}
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
@ -1448,7 +1458,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
hdr_type = ice_get_rss_hdr_type(prof);
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
ice_rss_cfg, l_entry)
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
if (r->hash.hash_flds == seg_match &&
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_clear_bit(vsi_handle, r->vsis);
@ -1473,11 +1483,18 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
u64 seg_match = 0;
u16 i;
ice_for_each_set_bit(i, prof->segs[prof->segs_cnt - 1].match,
ICE_FLOW_FIELD_IDX_MAX) {
seg_match |= 1ULL << i;
}
hdr_type = ice_get_rss_hdr_type(prof);
LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
ice_rss_cfg, l_entry)
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
if (r->hash.hash_flds == seg_match &&
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_set_bit(vsi_handle, r->vsis);
@ -1488,7 +1505,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return ICE_ERR_NO_MEMORY;
rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->hash.hash_flds = seg_match;
rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
rss_cfg->hash.hdr_type = hdr_type;
rss_cfg->hash.symm = prof->cfg.symm;

View file

@ -262,8 +262,10 @@ struct ice_flow_fld_info {
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
/* Bitmask indicating header fields to be matched */
ice_declare_bitmap(match, ICE_FLOW_FIELD_IDX_MAX);
/* Bitmask indicating header fields matched as ranges */
ice_declare_bitmap(range, ICE_FLOW_FIELD_IDX_MAX);
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
};

View file

@ -107,12 +107,12 @@ ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg)
"\t\nMax: 128"
#define ICE_SYSCTL_HELP_FWLOG_ARQ_ENA \
"\nControl whether to enable/disable reporing to admin Rx queue" \
"\nControl whether to enable/disable reporting to admin Rx queue" \
"\n0 - Enable firmware reporting via ARQ" \
"\n1 - Disable firmware reporting via ARQ"
#define ICE_SYSCTL_HELP_FWLOG_UART_ENA \
"\nControl whether to enable/disable reporing to UART" \
"\nControl whether to enable/disable reporting to UART" \
"\n0 - Enable firmware reporting via UART" \
"\n1 - Disable firmware reporting via UART"

View file

@ -502,3 +502,4 @@ ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
ice_info_fwlog(hw, 32, 1, (u8 *)buf, LE16_TO_CPU(desc->datalen));
}

View file

@ -189,7 +189,6 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
#define ICE_FXD_FLTR_QW1_FDID_PRI_ZERO 0x0ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL

View file

@ -126,7 +126,6 @@ static int ice_add_ethertype_to_list(struct ice_vsi *vsi,
struct ice_list_head *list,
u16 ethertype, u16 direction,
enum ice_sw_fwd_act_type action);
static void ice_add_rx_lldp_filter(struct ice_softc *sc);
static void ice_del_rx_lldp_filter(struct ice_softc *sc);
static u16 ice_aq_phy_types_to_link_speeds(u64 phy_type_low,
u64 phy_type_high);
@ -172,7 +171,6 @@ static void ice_sbuf_print_ets_cfg(struct sbuf *sbuf, const char *name,
struct ice_dcb_ets_cfg *ets);
static void ice_stop_pf_vsi(struct ice_softc *sc);
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt);
static void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
static int ice_config_pfc(struct ice_softc *sc, u8 new_mode);
void
ice_add_dscp2tc_map_sysctls(struct ice_softc *sc,
@ -241,6 +239,9 @@ static int ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_temperature(SYSCTL_HANDLER_ARGS);
/**
* ice_map_bar - Map PCIe BAR memory
@ -2076,7 +2077,7 @@ ice_process_link_event(struct ice_softc *sc,
if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) {
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status != ICE_SUCCESS)
if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@ -2111,9 +2112,6 @@ ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
case ice_aqc_opc_get_link_status:
ice_process_link_event(sc, event);
break;
case ice_mbx_opc_send_msg_to_pf:
/* TODO: handle IOV event */
break;
case ice_aqc_opc_fw_logs_event:
ice_handle_fw_log_event(sc, &event->desc, event->msg_buf);
break;
@ -3126,6 +3124,9 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
pi->phy.curr_user_speed_req = sysctl_speeds;
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !sc->link_up)
return 0;
/* Apply settings requested by user */
return ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS);
}
@ -3196,6 +3197,9 @@ ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS)
/* Cache user FEC mode for later link ups */
pi->phy.curr_user_fec_req = new_mode;
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !sc->link_up)
return 0;
/* Apply settings requested by user */
return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC);
}
@ -3364,6 +3368,9 @@ ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
return (ret);
}
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !sc->link_up)
return 0;
/* Apply settings requested by user */
return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC);
}
@ -3817,7 +3824,7 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
/* Block transition to FW LLDP if DSCP mode is enabled */
local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) ||
ice_dscp_is_mapped(local_dcbx_cfg)) {
device_printf(dev,
"Cannot enable FW-LLDP agent while DSCP QoS is active.\n");
@ -3863,7 +3870,18 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
}
}
ice_start_dcbx_agent(sc);
hw->port_info->qos_cfg.is_sw_lldp = false;
/* Init DCB needs to be done during enabling LLDP to properly
* propagate the configuration.
*/
status = ice_init_dcb(hw, true);
if (status) {
device_printf(dev,
"%s: ice_init_dcb failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
}
}
return (ret);
@ -4183,9 +4201,12 @@ ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS)
/* If LFC is active and PFC is going to be turned on, turn LFC off */
if (user_pfc != 0 && pi->phy.curr_user_fc_req != ICE_FC_NONE) {
pi->phy.curr_user_fc_req = ICE_FC_NONE;
ret = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC);
if (ret)
return (ret);
if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
sc->link_up) {
ret = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC);
if (ret)
return (ret);
}
}
return ice_config_pfc(sc, user_pfc);
@ -4282,6 +4303,70 @@ ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS)
return (0);
}
#define ICE_SYSCTL_HELP_SET_LINK_ACTIVE \
"\nKeep link active after setting interface down:" \
"\n\t0 - disable" \
"\n\t1 - enable"
/**
* ice_sysctl_set_link_active
* @oidp: sysctl oid structure
* @arg1: pointer to private data structure
* @arg2: unused
* @req: sysctl request pointer
*
* Set the link_active_on_if_down sysctl flag.
*/
static int
ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
bool mode;
int ret;
UNREFERENCED_PARAMETER(arg2);
mode = ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
ret = sysctl_handle_bool(oidp, &mode, 0, req);
if ((ret) || (req->newptr == NULL))
return (ret);
if (mode)
ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
else
ice_clear_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
return (0);
}
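
A brief usage sketch, not part of this diff: the new sysctl can be toggled from userland with sysctlbyname(3). The OID name assumes device unit 0; everything below is hypothetical illustration.

/* Hypothetical userland sketch: clear link_active_on_if_down so the
 * driver drops the physical link when the interface is brought
 * administratively down.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdint.h>

int
main(void)
{
        uint8_t off = 0;

        if (sysctlbyname("dev.ice.0.link_active_on_if_down", NULL, NULL,
            &off, sizeof(off)) == -1)
                err(1, "sysctlbyname");
        return (0);
}
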
/**
* ice_sysctl_debug_set_link
* @oidp: sysctl oid structure
* @arg1: pointer to private data structure
* @arg2: unused
* @req: sysctl request pointer
*
* Set link up/down in debug session.
*/
static int
ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
bool mode;
int ret;
UNREFERENCED_PARAMETER(arg2);
ret = sysctl_handle_bool(oidp, &mode, 0, req);
if ((ret) || (req->newptr == NULL))
return (ret);
ice_set_link(sc, mode != 0);
return (0);
}
/**
* ice_add_device_sysctls - add device specific dynamic sysctls
* @sc: device private structure
@ -4312,6 +4397,12 @@ ice_add_device_sysctls(struct ice_softc *sc)
OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
ice_sysctl_pba_number, "A", "Product Board Assembly Number");
}
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_TEMP_SENSOR)) {
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "temp", CTLTYPE_S8 | CTLFLAG_RD,
sc, 0, ice_sysctl_temperature, "CU",
"Device temperature in degrees Celcius (C)");
}
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD,
@ -4363,6 +4454,10 @@ ice_add_device_sysctls(struct ice_softc *sc)
sc, 0, ice_sysctl_allow_no_fec_mod_in_auto, "CU",
"Allow \"No FEC\" mode in FEC auto-negotiation");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "link_active_on_if_down", CTLTYPE_U8 | CTLFLAG_RWTUN,
sc, 0, ice_sysctl_set_link_active, "CU", ICE_SYSCTL_HELP_SET_LINK_ACTIVE);
ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list);
/* Differentiate software and hardware statistics, by keeping hw stats
@ -6398,6 +6493,13 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
* sysctl read call.
*/
if (input_buf[0] == '1') {
if (!sc->fw_debug_dump_cluster_mask) {
device_printf(dev,
"%s: Debug Dump failed because no cluster was specified with the \"clusters\" sysctl.\n",
__func__);
return (EINVAL);
}
ice_set_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);
return (0);
}
@ -6407,13 +6509,6 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
/* --- FW debug dump state is set --- */
if (!sc->fw_debug_dump_cluster_mask) {
device_printf(dev,
"%s: Debug Dump failed because no cluster was specified.\n",
__func__);
ret = EINVAL;
goto out;
}
/* Caller just wants the upper bound for size */
if (req->oldptr == NULL && req->newptr == NULL) {
@ -6441,7 +6536,6 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
sbuf_finish(sbuf);
sbuf_delete(sbuf);
out:
ice_clear_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);
return (ret);
}
@ -6505,6 +6599,10 @@ ice_add_debug_sysctls(struct ice_softc *sc)
ice_sysctl_dump_state_flags, "A",
"Driver State Flags");
SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "set_link",
ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RW, sc, 0,
ice_sysctl_debug_set_link, "CU", "Set link");
SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_type_low",
ICE_CTLFLAG_DEBUG | CTLTYPE_U64 | CTLFLAG_RW, sc, 0,
ice_sysctl_phy_type_low, "QU",
@ -6736,11 +6834,11 @@ ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_PF:
/* The PF VSI inherits RSS instance of the PF */
vsi->rss_table_size = cap->rss_table_size;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_VF:
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
vsi->rss_lut_type = ICE_LUT_VSI;
break;
default:
device_printf(sc->dev,
@ -7344,6 +7442,10 @@ ice_load_pkg_file(struct ice_softc *sc)
"Transmit balancing feature disabled\n");
ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_en);
return (status);
} else if (status == ICE_ERR_CFG) {
/* Status is ICE_ERR_CFG when DDP does not support transmit balancing */
device_printf(dev,
"DDP package does not support transmit balancing feature - please update to the latest DDP package and try again\n");
}
}
@ -8632,7 +8734,7 @@ ice_set_default_local_mib_settings(struct ice_softc *sc)
* Reconfigures the PF LAN VSI based on updated DCB configuration
* found in the local DCBX configuration of the hw struct's port_info.
*/
static void
void
ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
{
struct ice_aqc_port_ets_elem port_ets = { 0 };
@ -8943,7 +9045,7 @@ ice_cfg_pf_ethertype_filters(struct ice_softc *sc)
* VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to
* be forwarded to the stack.
*/
static void
void
ice_add_rx_lldp_filter(struct ice_softc *sc)
{
struct ice_list_head ethertype_list;
@ -9103,14 +9205,18 @@ ice_init_link_configuration(struct ice_softc *sc)
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
/* Apply default link settings */
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)) {
ice_set_link(sc, false);
ice_set_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
} else
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
} else {
/* Set the link down, and poll for media availability in the timer
* routine. This prevents the driver from receiving spurious
* link-related events.
*/
ice_set_state(&sc->state, ICE_STATE_NO_MEDIA);
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status != ICE_SUCCESS)
if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@ -9475,6 +9581,45 @@ ice_set_link_management_mode(struct ice_softc *sc)
sc->ldo_tlv = tlv;
}
/**
* ice_set_link -- Set the link up or down on the PHY
* @sc: device private structure
* @enabled: new link state to set
*
* Call this when the link state needs to be changed.
*/
void
ice_set_link(struct ice_softc *sc, bool enabled)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_status status;
if (ice_driver_is_detaching(sc))
return;
if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
return;
if (enabled)
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
else {
status = ice_aq_set_link_restart_an(hw->port_info, false, NULL);
if (status != ICE_SUCCESS) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: Link control not enabled in current device mode\n",
__func__);
else
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
} else
sc->link_up = false;
}
}
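
A minimal caller sketch for the new helper, assuming a hypothetical interface-down path; example_if_down() is illustration only, while ice_test_state(), ICE_STATE_LINK_ACTIVE_ON_DOWN, and ice_set_link() are from this change.

/* Hypothetical caller: force the PHY down on interface down only when
 * the user cleared link_active_on_if_down.
 */
static void
example_if_down(struct ice_softc *sc)
{
        if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN))
                ice_set_link(sc, false);
}
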
/**
* ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults
* @sc: device private structure
@ -10898,3 +11043,47 @@ ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS)
return (0);
}
/**
* ice_sysctl_temperature - Retrieve NIC temp via AQ command
* @oidp: sysctl oid structure
* @arg1: pointer to private data structure
* @arg2: unused
* @req: sysctl request pointer
*
* If ICE_DBG_DIAG is set in the debug.debug_mask sysctl, then this will print
* temperature threshold information in the kernel message log, too.
*/
static int
ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
struct ice_aqc_get_sensor_reading_resp resp;
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_status status;
UNREFERENCED_PARAMETER(oidp);
UNREFERENCED_PARAMETER(arg2);
if (ice_driver_is_detaching(sc))
return (ESHUTDOWN);
status = ice_aq_get_sensor_reading(hw, ICE_AQC_INT_TEMP_SENSOR,
ICE_AQC_INT_TEMP_FORMAT, &resp, NULL);
if (status != ICE_SUCCESS) {
device_printf(dev,
"Get Sensor Reading AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
return (EIO);
}
ice_debug(hw, ICE_DBG_DIAG, "%s: Warning Temp Threshold: %d\n", __func__,
resp.data.s0f0.temp_warning_threshold);
ice_debug(hw, ICE_DBG_DIAG, "%s: Critical Temp Threshold: %d\n", __func__,
resp.data.s0f0.temp_critical_threshold);
ice_debug(hw, ICE_DBG_DIAG, "%s: Fatal Temp Threshold: %d\n", __func__,
resp.data.s0f0.temp_fatal_threshold);
return sysctl_handle_8(oidp, &resp.data.s0f0.temp, 0, req);
}
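
For completeness, a hedged userland sketch reading the sysctl this handler backs; the OID assumes device unit 0, and the value is a signed byte (CTLTYPE_S8).

/* Hypothetical userland sketch: read the E810 chip temperature. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        int8_t temp;
        size_t len = sizeof(temp);

        if (sysctlbyname("dev.ice.0.temp", &temp, &len, NULL, 0) == -1)
                err(1, "sysctlbyname");
        printf("E810 temperature: %d C\n", (int)temp);
        return (0);
}
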


@ -368,17 +368,6 @@ enum ice_rx_dtype {
*/
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
/*
* The ice_(set|clear)_vsi_promisc() function expects a mask of promiscuous
* modes to operate on. This mask is the default one for the driver, where
* promiscuous is enabled/disabled for all types of non-VLAN-tagged/VLAN 0
* traffic.
*/
#define ICE_VSI_PROMISC_MASK (ICE_PROMISC_UCAST_TX | \
ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_MCAST_RX)
/*
* Only certain cluster IDs are valid for the FW debug dump functionality,
* so define a mask of those here.
@ -610,6 +599,8 @@ enum ice_state {
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
ICE_STATE_DO_FW_DEBUG_DUMP,
ICE_STATE_LINK_ACTIVE_ON_DOWN,
ICE_STATE_FIRST_INIT_LINK,
/* This entry must be last */
ICE_STATE_LAST,
};
@ -906,9 +897,12 @@ int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* dat
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_set_link(struct ice_softc *sc, bool enabled);
void ice_add_rx_lldp_filter(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
#endif /* _ICE_LIB_H_ */


@ -115,6 +115,7 @@ DEFINE_CLASS_0(ice_rdma_di, ice_rdma_di_class, ice_rdma_di_methods, sizeof(struc
*
* Implements IRDMA_DI_RESET, called by the RDMA client driver to request
* a reset of an ice driver device.
* @return 0 on success
*/
static int
ice_rdma_pf_reset(struct ice_rdma_peer *peer)
@ -136,6 +137,7 @@ ice_rdma_pf_reset(struct ice_rdma_peer *peer)
*
* Implements IRDMA_DI_MSIX_INIT, called by the RDMA client driver to
* initialize the MSI-X resources required for RDMA functionality.
* @return ENOSYS
*/
static int
ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
@ -156,6 +158,8 @@ ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
* registration or unregistration
* @peer: the RDMA peer client structure
* @res: resources to be registered or unregistered
* @return 0 on success, EINVAL on argument issues, ENOMEM on memory
* allocation failure, EXDEV on VSI device mismatch
*/
static int
ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_update *res)
@ -287,6 +291,7 @@ ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_
* when opening or closing rdma driver
* @peer: the RDMA peer client structure
* @enable: enable or disable the rdma filter
* @return 0 on success, EINVAL on wrong vsi
*/
static int
ice_rdma_update_vsi_filter(struct ice_rdma_peer *peer,
@ -410,6 +415,8 @@ ice_rdma_cp_qos_info(struct ice_hw *hw, struct ice_dcbx_cfg *dcbx_cfg,
*
* Verify that the client RDMA driver provided a version that is compatible
* with the driver interface.
* @return 0 on success, ENOTSUP when LAN-RDMA interface version doesn't match,
* EINVAL on kobject interface failure.
*/
static int
ice_rdma_check_version(struct ice_rdma_info *info)
@ -477,6 +484,9 @@ ice_rdma_check_version(struct ice_rdma_info *info)
* The RDMA client driver must provide the version number it expects, along
* with a pointer to a kobject class that extends the irdma_di_if class, and
* implements the irdma_if class interface.
* @return 0 on success, ECONNREFUSED when RDMA is turned off, EBUSY when irdma
* already registered, ENOTSUP when LAN-RDMA interface version doesn't match,
* EINVAL on kobject interface failure.
*/
int
ice_rdma_register(struct ice_rdma_info *info)
@ -539,6 +549,7 @@ ice_rdma_register(struct ice_rdma_info *info)
* Called by the RDMA client driver on unload. Used to de-initialize the RDMA
* client driver interface and shut down communication between the ice driver
* and the RDMA client driver.
* @return 0 on success, ENOENT when irdma driver wasn't registered
*/
int
ice_rdma_unregister(void)
@ -609,6 +620,8 @@ ice_rdma_exit(void)
* Notify the client RDMA driver of a new PF device.
*
* @pre must be called while holding the ice_rdma mutex.
* @return 0 on success and when RDMA feature is not available, EEXIST when
* irdma is already attached
*/
static int
ice_rdma_pf_attach_locked(struct ice_softc *sc)
@ -663,6 +676,7 @@ ice_rdma_pf_attach_locked(struct ice_softc *sc)
* @sc: the ice driver softc
*
* Called during PF attach to notify the RDMA client of a new PF.
* @return 0 on success, EEXIST if irdma was already attached
*/
int
ice_rdma_pf_attach(struct ice_softc *sc)
@ -737,6 +751,7 @@ ice_rdma_pf_detach(struct ice_softc *sc)
*
* Called by the ice driver when a PF has been initialized. Notifies the RDMA
* client that a PF is up and ready to operate.
* @return 0 on success, propagates IRDMA_OPEN return value
*/
int
ice_rdma_pf_init(struct ice_softc *sc)
@ -765,6 +780,7 @@ ice_rdma_pf_init(struct ice_softc *sc)
*
* Called by the ice driver when a PF is stopped. Notifies the RDMA client
* driver that the PF has stopped and is not ready to operate.
* @return 0 on success
*/
int
ice_rdma_pf_stop(struct ice_softc *sc)


@ -92,6 +92,15 @@
*
* If the version specified is not compatible, then the registration
* of the RDMA driver will fail.
*
* @var ice_rdma_info::major_version
* describe major changes in the interface
* @var ice_rdma_info::minor_version
* describe changes and fixes with backward compatibility
* @var ice_rdma_info::patch_version
* changes without impact on compatibility or features
* @var ice_rdma_info::rdma_class
* kobject class
*/
struct ice_rdma_info {
uint16_t major_version;
@ -113,6 +122,7 @@ DECLARE_CLASS(ice_rdma_di_class);
*
* Defines a mapping for MSI-X vectors being requested by the peer RDMA driver
* for a given PF.
*
*/
struct ice_rdma_msix_mapping {
uint8_t itr_indx;
@ -165,6 +175,25 @@ struct ice_qos_app_priority_table {
* @brief Holds all necessary data for RDMA to work with DCB
*
* Struct to hold QoS info
* @var ice_qos_params::tc_info
* traffic class information
* @var ice_qos_params::up2tc
* mapping from user priority to traffic class
* @var ice_qos_params::vsi_relative_bw
* bandwidth settings
* @var ice_qos_params::vsi_priority_type
* priority type
* @var ice_qos_params::num_apps
* app count
* @var ice_qos_params::pfc_mode
* PFC mode
* @var ice_qos_params::dscp_map
* dscp mapping
* @var ice_qos_params::apps
* apps
* @var ice_qos_params::num_tc
* number of traffic classes
*/
struct ice_qos_params {
struct ice_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
@ -187,6 +216,23 @@ struct ice_qos_params {
*
* Because the definition of this structure is shared between the two drivers,
* its ABI should be handled carefully.
*
* @var ice_rdma_peer::ifp
* pointer to ifnet structure
* @var ice_rdma_peer::dev
* device pointer
* @var ice_rdma_peer::pci_mem
* information about PCI
* @var ice_rdma_peer::initial_qos_info
* initial information on QoS
* @var ice_rdma_peer::msix
* info about msix vectors
* @var ice_rdma_peer::mtu
* initial mtu size
* @var ice_rdma_peer::pf_vsi_num
* id of vsi
* @var ice_rdma_peer::pf_id
* id of PF
*/
struct ice_rdma_peer {
/**
@ -221,12 +267,23 @@ enum ice_res_type {
/**
* @struct ice_rdma_qset_params
* @brief struct to hold per RDMA Qset info
*
* @var ice_rdma_qset_params::teid
* qset teid
* @var ice_rdma_qset_params::qs_handle
* qset from rdma driver
* @var ice_rdma_qset_params::vsi_id
* vsi index
* @var ice_rdma_qset_params::tc
* traffic class to which the qset should belong
* @var ice_rdma_qset_params::reserved
* for future use
*/
struct ice_rdma_qset_params {
uint32_t teid; /* qset TEID */
uint16_t qs_handle; /* RDMA driver provides this */
uint16_t vsi_id; /* VSI index */
uint8_t tc; /* TC branch the QSet should belong to */
uint32_t teid;
uint16_t qs_handle;
uint16_t vsi_id;
uint8_t tc;
uint8_t reserved[3];
};
@ -234,6 +291,15 @@ struct ice_rdma_qset_params {
/**
* @struct ice_rdma_qset_update
* @brief struct used to register and unregister qsets for RDMA driver
*
* @var ice_rdma_qset_update::res_type
* ALLOC or FREE
* @var ice_rdma_qset_update::cnt_req
* how many qsets are requested
* @var ice_rdma_qset_update::res_allocated
* how many qsets are allocated
* @var ice_rdma_qset_update::qsets
* rdma qset info
*/
struct ice_rdma_qset_update {
enum ice_res_type res_type;
@ -263,6 +329,9 @@ enum ice_rdma_event_type {
/**
* @struct ice_rdma_event
* @brief struct for event information to pass to RDMA driver
*
* @var ice_rdma_event::type
* event type
*/
struct ice_rdma_event {
enum ice_rdma_event_type type;
@ -292,6 +361,9 @@ struct ice_rdma_event {
/**
* @struct ice_rdma_request
* @brief struct with data for a request from the RDMA driver
*
* @var ice_rdma_request::type
* event type
*/
struct ice_rdma_request {
enum ice_rdma_event_type type;


@ -53,6 +53,14 @@ extern bool ice_enable_irdma;
* @brief RDMA peer list node
*
* Structure used to store peer entries for each PF in a linked list.
* @var ice_rdma_entry::attached
* check for irdma driver attached
* @var ice_rdma_entry::initiated
* check for irdma driver ready to use
* @var ice_rdma_entry::node
* list node of the RDMA entry
* @var ice_rdma_entry::peer
* pointer to peer
*/
struct ice_rdma_entry {
LIST_ENTRY(ice_rdma_entry) node;
@ -80,6 +88,15 @@ LIST_HEAD(ice_rdma_peers, ice_rdma_entry);
* Contains global state shared across all PFs by the device driver, such as
* the kobject class of the currently connected peer driver, and the linked
* list of peer entries for each PF.
*
* @var ice_rdma_state::registered
* check for irdma driver registered
* @var ice_rdma_state::peer_class
* kobject class for irdma driver
* @var ice_rdma_state::mtx
* mutex for protecting irdma operations
* @var ice_rdma_state::peers
* list of RDMA entries
*/
struct ice_rdma_state {
bool registered;


@ -1,120 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _ICE_SBQ_CMD_H_
#define _ICE_SBQ_CMD_H_
/* This header file defines the Sideband Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
/* Sideband Queue command structure and opcodes */
enum ice_sbq_opc {
/* Sideband Queue commands */
ice_sbq_opc_neigh_dev_req = 0x0C00,
ice_sbq_opc_neigh_dev_ev = 0x0C01
};
/* Sideband Queue descriptor. Indirect command
* and non posted
*/
struct ice_sbq_cmd_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
/* Opaque message data */
__le32 cookie_high;
__le32 cookie_low;
union {
__le16 cmd_len;
__le16 cmpl_len;
} param0;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
struct ice_sbq_evt_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
u8 data[24];
};
enum ice_sbq_msg_dev {
rmn_0 = 0x02,
rmn_1 = 0x03,
rmn_2 = 0x04,
cgu = 0x06
};
enum ice_sbq_msg_opcode {
ice_sbq_msg_rd = 0x00,
ice_sbq_msg_wr = 0x01
};
#define ICE_SBQ_MSG_FLAGS 0x40
#define ICE_SBQ_MSG_SBE_FBE 0x0F
struct ice_sbq_msg_req {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
u8 sbe_fbe;
u8 func_id;
__le16 msg_addr_low;
__le32 msg_addr_high;
__le32 data;
};
struct ice_sbq_msg_cmpl {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
__le32 data;
};
/* Internal struct */
struct ice_sbq_msg_input {
u8 dest_dev;
u8 opcode;
u16 msg_addr_low;
u32 msg_addr_high;
u32 data;
};
#endif /* _ICE_SBQ_CMD_H_ */


@ -170,12 +170,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
* @pi: port information structure
* @layer: Scheduler layer of the node
* @info: Scheduler element information from firmware
* @prealloc_node: preallocated ice_sched_node struct for SW DB
*
* This function inserts a scheduler node into the SW DB.
*/
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
@ -203,7 +205,11 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
if (status)
return status;
node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
if (prealloc_node)
node = prealloc_node;
else
node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
if (!node)
return ICE_ERR_NO_MEMORY;
if (hw->max_children[layer]) {
@ -970,13 +976,15 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
* @first_node_teid: if new nodes are added then return the TEID of first node
* @prealloc_nodes: preallocated nodes struct for software DB
*
* This function adds nodes to the HW as well as to the SW DB for a given layer
*/
static enum ice_status
enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid)
u16 *num_nodes_added, u32 *first_node_teid,
struct ice_sched_node **prealloc_nodes)
{
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
@ -1022,7 +1030,11 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*num_nodes_added = num_nodes;
/* add nodes to the SW DB */
for (i = 0; i < num_nodes; i++) {
status = ice_sched_add_node(pi, layer, &buf->generic[i]);
if (prealloc_nodes)
status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
else
status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
if (status != ICE_SUCCESS) {
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
@ -1101,7 +1113,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
num_nodes_added, first_node_teid);
num_nodes_added, first_node_teid, NULL);
}
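
The new prealloc_node argument lets callers that cannot tolerate allocation failure at add time hand in storage up front. A minimal sketch under that assumption; example_add_node_prealloc() is hypothetical.

/* Hypothetical sketch: add a scheduler node using caller-preallocated
 * storage so ice_sched_add_node() cannot fail with ICE_ERR_NO_MEMORY
 * on the node allocation itself.
 */
static enum ice_status
example_add_node_prealloc(struct ice_port_info *pi, u8 layer,
                          struct ice_aqc_txsched_elem_data *info)
{
        struct ice_sched_node *node;

        node = (struct ice_sched_node *)ice_malloc(pi->hw, sizeof(*node));
        if (!node)
                return ICE_ERR_NO_MEMORY;
        return ice_sched_add_node(pi, layer, info, node);
}
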
/**
@ -1126,11 +1138,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
enum ice_status status = ICE_SUCCESS;
u32 temp;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
layer, new_num_nodes,
@ -1361,7 +1373,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
ICE_AQC_ELEM_TYPE_ENTRY_POINT)
hw->sw_entry_point_layer = j;
status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
if (status)
goto err_init_port;
}
@ -1486,11 +1498,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3
switch (clk_src) {
case PSM_CLK_SRC_367_MHZ:
hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
@ -1504,11 +1511,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
case PSM_CLK_SRC_390_MHZ:
hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
break;
default:
ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
clk_src);
/* fall back to a safe default */
hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
/* A default case is not required, as clk_src is restricted
* to a 2-bit value by the GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask.
* The cases above cover all possible values of this variable.
*/
}
}
@ -2364,7 +2372,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
* This function removes the child from the old parent and adds it to a new
* parent
*/
static void
void
ice_sched_update_parent(struct ice_sched_node *new_parent,
struct ice_sched_node *node)
{
@ -2398,7 +2406,7 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function moves the child nodes to a given parent.
*/
static enum ice_status
enum ice_status
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
@ -4383,7 +4391,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
static enum ice_status
enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
@ -4419,6 +4427,58 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
* ice_sched_set_node_priority - set node's priority
* @pi: port information structure
* @node: tree node
* @priority: number 0-7 representing priority among siblings
*
* This function sets the priority of a node among its siblings.
*/
enum ice_status
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
buf = node->info;
data = &buf.data;
data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
data->generic |= ICE_AQC_ELEM_GENERIC_PRIO_M &
(priority << ICE_AQC_ELEM_GENERIC_PRIO_S);
return ice_sched_update_elem(pi->hw, node, &buf);
}
/**
* ice_sched_set_node_weight - set node's weight
* @pi: port information structure
* @node: tree node
* @weight: number 1-200 representing weight for WFQ
*
* This function sets the weight of the node for the WFQ algorithm.
*/
enum ice_status
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
buf = node->info;
data = &buf.data;
data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
ICE_AQC_ELEM_VALID_GENERIC;
data->cir_bw.bw_alloc = CPU_TO_LE16(weight);
data->eir_bw.bw_alloc = CPU_TO_LE16(weight);
data->generic |= ICE_AQC_ELEM_GENERIC_SP_M &
(0x0 << ICE_AQC_ELEM_GENERIC_SP_S);
return ice_sched_update_elem(pi->hw, node, &buf);
}
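
A short sketch combining the two newly exported helpers; the function name and the values 0 and 100 are hypothetical.

/* Hypothetical sketch: give a Tx scheduler node priority 0 among its
 * siblings and a WFQ weight of 100.
 */
static enum ice_status
example_cfg_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
        enum ice_status status;

        status = ice_sched_set_node_priority(pi, node, 0);
        if (status != ICE_SUCCESS)
                return status;
        return ice_sched_set_node_weight(pi, node, 100);
}
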
/**
* ice_sched_set_node_bw_lmt - set node's BW limit
* @pi: port information structure
@ -4432,7 +4492,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* NOTE: Caller provides the correct SRL node in case of shared profile
* settings.
*/
static enum ice_status
enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{


@ -34,6 +34,8 @@
#include "ice_common.h"
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_SCHED_5_LAYERS 5
#define ICE_SCHED_9_LAYERS 9
@ -65,6 +67,11 @@
#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3
struct rl_profile_params {
u32 bw; /* in Kbps */
u16 rl_multiplier;
@ -123,6 +130,32 @@ enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num);
enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid,
struct ice_sched_node **prealloc_nodes);
enum ice_status
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list);
enum ice_status
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority);
enum ice_status
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node,
u16 weight);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
@ -139,7 +172,11 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
/* Add a scheduling node into SW DB for given info */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node);
void
ice_sched_update_parent(struct ice_sched_node *new_parent,
struct ice_sched_node *node);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
@ -251,7 +288,6 @@ void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
enum ice_status ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */


@ -1042,6 +1042,10 @@ ice_state_to_str(enum ice_state state)
return "MULTIPLE_TCS";
case ICE_STATE_DO_FW_DEBUG_DUMP:
return "DO_FW_DEBUG_DUMP";
case ICE_STATE_LINK_ACTIVE_ON_DOWN:
return "LINK_ACTIVE_ON_DOWN";
case ICE_STATE_FIRST_INIT_LINK:
return "FIRST_INIT_LINK";
case ICE_STATE_LAST:
return NULL;
}


@ -42,7 +42,7 @@
#define ICE_PPP_IPV6_PROTO_ID 0x0057
#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
/* Dummy ethernet header needed in the ice_sw_rule_*
* struct to configure any switch filter rules.
* {DA (6 bytes), SA(6 bytes),
* Ether type (2 bytes for header without VLAN tag) OR
@ -1207,7 +1207,8 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*/
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
struct ice_sw_rule_lkup_rx_tx *s_rule,
enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
u16 vlan_tpid = ICE_ETH_P_8021Q;
@ -1219,15 +1220,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
s_rule->pdata.lkup_tx_rx.index =
CPU_TO_LE16(f_info->fltr_rule_id);
s_rule->pdata.lkup_tx_rx.hdr_len = 0;
s_rule->act = 0;
s_rule->index = CPU_TO_LE16(f_info->fltr_rule_id);
s_rule->hdr_len = 0;
return;
}
eth_hdr_sz = sizeof(dummy_eth_header);
eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
eth_hdr = s_rule->hdr_data;
/* initialize the ether header with a dummy header */
ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
@ -1312,14 +1312,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
}
s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
/* Recipe set depending on lookup type */
s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
s_rule->recipe_id = CPU_TO_LE16(f_info->lkup_type);
s_rule->src = CPU_TO_LE16(f_info->src);
s_rule->act = CPU_TO_LE32(act);
if (daddr)
ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
@ -1334,7 +1334,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
s_rule->hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
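
The refactor replaces the fixed-size ICE_SW_RULE_RX_TX_ETH_HDR_SIZE allocation with ice_struct_size() over the flexible hdr_data[] member. A minimal allocation sketch; example_alloc_lkup_rule() is hypothetical.

/* Hypothetical sketch: allocate a lookup rule sized for the dummy
 * Ethernet header using the flexible-array layout.
 */
static struct ice_sw_rule_lkup_rx_tx *
example_alloc_lkup_rule(struct ice_hw *hw)
{
        struct ice_sw_rule_lkup_rx_tx *rule;

        rule = (struct ice_sw_rule_lkup_rx_tx *)
                ice_malloc(hw, ice_struct_size(rule, hdr_data,
                                               DUMMY_ETH_HDR_LEN));
        return rule;
}
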
/**
@ -1351,7 +1351,8 @@ static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
struct ice_sw_rule_lkup_rx_tx *rx_tx;
struct ice_sw_rule_lg_act *lg_act;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
* 2. GENERIC VALUE action to hold the profile ID
@ -1372,18 +1373,19 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 1. Large Action
* 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
lg_act_size = (u16)ice_struct_size(lg_act, act, num_lg_acts);
rules_size = lg_act_size +
ice_struct_size(rx_tx, hdr_data, DUMMY_ETH_HDR_LEN);
lg_act = (struct ice_sw_rule_lg_act *)ice_malloc(hw, rules_size);
if (!lg_act)
return ICE_ERR_NO_MEMORY;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
rx_tx = (struct ice_sw_rule_lkup_rx_tx *)((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
lg_act->hdr.type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
lg_act->index = CPU_TO_LE16(l_id);
lg_act->size = CPU_TO_LE16(num_lg_acts);
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
@ -1395,13 +1397,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
lg_act->act[0] = CPU_TO_LE32(act);
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
lg_act->act[1] = CPU_TO_LE32(act);
act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
@ -1411,24 +1413,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
lg_act->act[2] = CPU_TO_LE32(act);
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
rx_tx->act = CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
rx_tx->pdata.lkup_tx_rx.index =
CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
rx_tx->index = CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
ice_aqc_opc_update_sw_rules, NULL);
@ -1452,8 +1452,8 @@ static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 counter_id, u16 l_id)
{
struct ice_aqc_sw_rules_elem *lg_act;
struct ice_aqc_sw_rules_elem *rx_tx;
struct ice_sw_rule_lkup_rx_tx *rx_tx;
struct ice_sw_rule_lg_act *lg_act;
enum ice_status status;
/* 2 actions will be added while adding a large action counter */
const int num_acts = 2;
@ -1471,18 +1471,20 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 1. Large Action
* 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
lg_act_size = (u16)ice_struct_size(lg_act, act, num_acts);
rules_size = lg_act_size +
ice_struct_size(rx_tx, hdr_data, DUMMY_ETH_HDR_LEN);
lg_act = (struct ice_sw_rule_lg_act *)ice_malloc(hw, rules_size);
if (!lg_act)
return ICE_ERR_NO_MEMORY;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
rx_tx = (struct ice_sw_rule_lkup_rx_tx *)((u8 *)lg_act +
lg_act_size);
/* Fill in the first switch rule i.e. large action */
lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
lg_act->hdr.type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
lg_act->index = CPU_TO_LE16(l_id);
lg_act->size = CPU_TO_LE16(num_acts);
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
@ -1495,13 +1497,13 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
lg_act->act[0] = CPU_TO_LE32(act);
/* Second action counter ID */
act = ICE_LG_ACT_STAT_COUNT;
act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
ICE_LG_ACT_STAT_COUNT_M;
lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
lg_act->act[1] = CPU_TO_LE32(act);
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
@ -1509,14 +1511,14 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act = ICE_SINGLE_ACT_PTR;
act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
rx_tx->act = CPU_TO_LE32(act);
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
f_rule_id = m_ent->fltr_info.fltr_rule_id;
rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
rx_tx->index = CPU_TO_LE16(f_rule_id);
status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
ice_aqc_opc_update_sw_rules, NULL);
@ -1578,7 +1580,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_sw_rule_vsi_list *s_rule;
enum ice_status status;
u16 s_rule_size;
u16 rule_type;
@ -1603,8 +1605,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
else
return ICE_ERR_PARAM;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
s_rule_size = (u16)ice_struct_size(s_rule, vsi, num_vsi);
s_rule = (struct ice_sw_rule_vsi_list *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_vsi; i++) {
@ -1613,13 +1615,13 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
s_rule->pdata.vsi_list.vsi[i] =
s_rule->vsi[i] =
CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
s_rule->type = CPU_TO_LE16(rule_type);
s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
s_rule->hdr.type = CPU_TO_LE16(rule_type);
s_rule->number_vsi = CPU_TO_LE16(num_vsi);
s_rule->index = CPU_TO_LE16(vsi_list_id);
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
@ -1668,11 +1670,12 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_sw_rule_lkup_rx_tx *s_rule;
enum ice_status status;
s_rule = (struct ice_aqc_sw_rules_elem *)
ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
DUMMY_ETH_HDR_LEN));
if (!s_rule)
return ICE_ERR_NO_MEMORY;
fm_entry = (struct ice_fltr_mgmt_list_entry *)
@ -1693,17 +1696,17 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
ice_aqc_opc_add_sw_rules);
status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
ice_aqc_opc_add_sw_rules, NULL);
status = ice_aq_sw_rules(hw, s_rule,
ice_struct_size(s_rule, hdr_data,
DUMMY_ETH_HDR_LEN),
1, ice_aqc_opc_add_sw_rules, NULL);
if (status) {
ice_free(hw, fm_entry);
goto ice_create_pkt_fwd_rule_exit;
}
f_entry->fltr_info.fltr_rule_id =
LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
fm_entry->fltr_info.fltr_rule_id =
LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
f_entry->fltr_info.fltr_rule_id = LE16_TO_CPU(s_rule->index);
fm_entry->fltr_info.fltr_rule_id = LE16_TO_CPU(s_rule->index);
/* The book keeping entries will get removed when base driver
* calls remove filter AQ command
@ -1726,21 +1729,24 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_sw_rule_lkup_rx_tx *s_rule;
enum ice_status status;
s_rule = (struct ice_aqc_sw_rules_elem *)
ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
DUMMY_ETH_HDR_LEN));
if (!s_rule)
return ICE_ERR_NO_MEMORY;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
s_rule->index = CPU_TO_LE16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
ice_aqc_opc_update_sw_rules, NULL);
status = ice_aq_sw_rules(hw, s_rule,
ice_struct_size(s_rule, hdr_data,
DUMMY_ETH_HDR_LEN),
1, ice_aqc_opc_update_sw_rules, NULL);
ice_free(hw, s_rule);
return status;
@ -1756,9 +1762,9 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = ICE_SUCCESS;
struct ice_switch_info *sw = NULL;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
struct ice_switch_info *sw;
sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
@ -2060,7 +2066,7 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle of the VSI to remove
* @fm_list: filter management entry for which the VSI list management needs to
* be done
* be done
*/
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
@ -2145,7 +2151,6 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_remove_rule_internal - Remove a filter rule of a given type
*
* @hw: pointer to the hardware structure
* @recp_list: recipe list for which the rule needs to removed
* @f_entry: rule entry containing filter information
@ -2204,10 +2209,10 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
if (remove_rule) {
/* Remove the lookup rule */
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_sw_rule_lkup_rx_tx *s_rule;
s_rule = (struct ice_aqc_sw_rules_elem *)
ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data, 0));
if (!s_rule) {
status = ICE_ERR_NO_MEMORY;
goto exit;
@ -2217,8 +2222,8 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
ice_aqc_opc_remove_sw_rules);
status = ice_aq_sw_rules(hw, s_rule,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
ice_aqc_opc_remove_sw_rules, NULL);
ice_struct_size(s_rule, hdr_data, 0),
1, ice_aqc_opc_remove_sw_rules, NULL);
/* Remove the bookkeeping entry from the list */
ice_free(hw, s_rule);
@ -2334,7 +2339,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct ice_switch_info *sw, u8 lport)
{
struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct LIST_HEAD_TYPE *rule_head;
u16 total_elem_left, s_rule_size;
@ -2395,8 +2400,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
}
/* Allocate switch rule buffer for the bulk update for unicast */
s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
s_rule = (struct ice_aqc_sw_rules_elem *)
s_rule_size = ice_struct_size(s_rule, hdr_data, DUMMY_ETH_HDR_LEN);
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_calloc(hw, num_unicast, s_rule_size);
if (!s_rule) {
status = ICE_ERR_NO_MEMORY;
@ -2412,7 +2417,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
ice_aqc_opc_add_sw_rules);
r_iter = (struct ice_aqc_sw_rules_elem *)
r_iter = (struct ice_sw_rule_lkup_rx_tx *)
((u8 *)r_iter + s_rule_size);
}
}
@ -2422,7 +2427,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
/* Call AQ switch rule in AQ_MAX chunk */
for (total_elem_left = num_unicast; total_elem_left > 0;
total_elem_left -= elem_sent) {
struct ice_aqc_sw_rules_elem *entry = r_iter;
struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
elem_sent = MIN_T(u8, total_elem_left,
(ICE_AQ_MAX_BUF_LEN / s_rule_size));
@ -2431,7 +2436,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
NULL);
if (status)
goto ice_add_mac_exit;
r_iter = (struct ice_aqc_sw_rules_elem *)
r_iter = (struct ice_sw_rule_lkup_rx_tx *)
((u8 *)r_iter + (elem_sent * s_rule_size));
}
@ -2445,7 +2450,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
f_info->fltr_rule_id =
LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
LE16_TO_CPU(r_iter->index);
f_info->fltr_act = ICE_FWD_TO_VSI;
/* Create an entry to track this MAC address */
fm_entry = (struct ice_fltr_mgmt_list_entry *)
@ -2461,7 +2466,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
*/
LIST_ADD(&fm_entry->list_entry, rule_head);
r_iter = (struct ice_aqc_sw_rules_elem *)
r_iter = (struct ice_sw_rule_lkup_rx_tx *)
((u8 *)r_iter + s_rule_size);
}
}
@ -2917,7 +2922,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction)
{
struct ice_fltr_list_entry f_list_entry;
struct ice_sw_recipe *recp_list;
struct ice_sw_recipe *recp_list = NULL;
struct ice_fltr_info f_info;
struct ice_hw *hw = pi->hw;
enum ice_status status;
@ -3244,34 +3249,38 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_determine_promisc_mask
* @fi: filter info to parse
* @promisc_mask: pointer to mask to be filled in
*
* Helper function to determine which ICE_PROMISC_ mask corresponds
* to a given filter info.
*/
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
ice_bitmap_t *promisc_mask)
{
u16 vid = fi->l_data.mac_vlan.vlan_id;
u8 *macaddr = fi->l_data.mac.mac_addr;
bool is_tx_fltr = false;
u8 promisc_mask = 0;
ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
if (fi->flag == ICE_FLTR_TX)
is_tx_fltr = true;
if (IS_BROADCAST_ETHER_ADDR(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
else if (IS_MULTICAST_ETHER_ADDR(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
else if (IS_UNICAST_ETHER_ADDR(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
if (vid)
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
if (IS_BROADCAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_BCAST_TX
: ICE_PROMISC_BCAST_RX, promisc_mask);
} else if (IS_MULTICAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_MCAST_TX
: ICE_PROMISC_MCAST_RX, promisc_mask);
} else if (IS_UNICAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_UCAST_TX
: ICE_PROMISC_UCAST_RX, promisc_mask);
}
return promisc_mask;
if (vid) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_VLAN_TX
: ICE_PROMISC_VLAN_RX, promisc_mask);
}
}
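
Callers now pass an ice_bitmap_t instead of a u8 mask. A minimal sketch of the new calling convention; example_enable_rx_promisc() is hypothetical.

/* Hypothetical sketch: enable unicast and multicast RX promiscuous
 * mode on a VSI with the bitmap-based API (VLAN ID 0).
 */
static enum ice_status
example_enable_rx_promisc(struct ice_hw *hw, u16 vsi_handle)
{
        ice_declare_bitmap(mask, ICE_PROMISC_MAX);

        ice_zero_bitmap(mask, ICE_PROMISC_MAX);
        ice_set_bit(ICE_PROMISC_UCAST_RX, mask);
        ice_set_bit(ICE_PROMISC_MCAST_RX, mask);
        return ice_set_vsi_promisc(hw, vsi_handle, mask, 0);
}
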
/**
@ -3284,10 +3293,11 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @lkup: switch rule filter lookup type
*/
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid, struct ice_switch_info *sw,
enum ice_sw_lkup_type lkup)
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid,
struct ice_switch_info *sw, enum ice_sw_lkup_type lkup)
{
ice_declare_bitmap(fltr_promisc_mask, ICE_PROMISC_MAX);
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
@ -3297,10 +3307,11 @@ _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
return ICE_ERR_PARAM;
*vid = 0;
*promisc_mask = 0;
rule_head = &sw->recp_list[lkup].filt_rules;
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
ice_acquire_lock(rule_lock);
LIST_FOR_EACH_ENTRY(itr, rule_head,
ice_fltr_mgmt_list_entry, list_entry) {
@ -3310,7 +3321,10 @@ _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
ice_determine_promisc_mask(&itr->fltr_info, fltr_promisc_mask);
ice_or_bitmap(promisc_mask, promisc_mask, fltr_promisc_mask,
ICE_PROMISC_MAX);
}
ice_release_lock(rule_lock);
@ -3325,9 +3339,12 @@ _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
* @vid: VLAN ID of promisc VLAN VSI
*/
enum ice_status
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid)
{
if (!vid || !promisc_mask || !hw)
return ICE_ERR_PARAM;
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info, ICE_SW_LKUP_PROMISC);
}
@ -3340,9 +3357,12 @@ ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
* @vid: VLAN ID of promisc VLAN VSI
*/
enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid)
{
if (!hw || !promisc_mask || !vid)
return ICE_ERR_PARAM;
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info,
ICE_SW_LKUP_PROMISC_VLAN);
@ -3376,14 +3396,17 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @promisc_mask: pointer to mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
* @sw: pointer to the switch info struct for which the function adds a rule
*/
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid, struct ice_switch_info *sw)
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid,
struct ice_switch_info *sw)
{
ice_declare_bitmap(compl_promisc_mask, ICE_PROMISC_MAX);
ice_declare_bitmap(fltr_promisc_mask, ICE_PROMISC_MAX);
struct ice_fltr_list_entry *fm_entry, *tmp;
struct LIST_HEAD_TYPE remove_list_head;
struct ice_fltr_mgmt_list_entry *itr;
@ -3395,7 +3418,8 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
if (ice_is_bit_set(promisc_mask, ICE_PROMISC_VLAN_RX) &&
ice_is_bit_set(promisc_mask, ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
else
recipe_id = ICE_SW_LKUP_PROMISC;
@ -3409,7 +3433,7 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
LIST_FOR_EACH_ENTRY(itr, rule_head,
ice_fltr_mgmt_list_entry, list_entry) {
struct ice_fltr_info *fltr_info;
u8 fltr_promisc_mask = 0;
ice_zero_bitmap(compl_promisc_mask, ICE_PROMISC_MAX);
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
@ -3419,10 +3443,12 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
vid != fltr_info->l_data.mac_vlan.vlan_id)
continue;
fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
ice_determine_promisc_mask(fltr_info, fltr_promisc_mask);
ice_andnot_bitmap(compl_promisc_mask, fltr_promisc_mask,
promisc_mask, ICE_PROMISC_MAX);
/* Skip if filter is not completely specified by given mask */
if (fltr_promisc_mask & ~promisc_mask)
if (ice_is_any_bit_set(compl_promisc_mask, ICE_PROMISC_MAX))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
@ -3451,13 +3477,16 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @promisc_mask: pointer to mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
u8 promisc_mask, u16 vid)
ice_bitmap_t *promisc_mask, u16 vid)
{
if (!hw || !promisc_mask)
return ICE_ERR_PARAM;
return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
}
@ -3466,16 +3495,18 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
* _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @promisc_mask: pointer to mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
* @lport: logical port number to configure promisc mode
* @sw: pointer to the switch info struct for which the function adds a rule
*/
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid, u8 lport, struct ice_switch_info *sw)
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid, u8 lport,
struct ice_switch_info *sw)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
ice_declare_bitmap(p_mask, ICE_PROMISC_MAX);
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
enum ice_status status = ICE_SUCCESS;
@ -3492,7 +3523,11 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
/* Do not modify original bitmap */
ice_cp_bitmap(p_mask, promisc_mask, ICE_PROMISC_MAX);
if (ice_is_bit_set(p_mask, ICE_PROMISC_VLAN_RX) &&
ice_is_bit_set(p_mask, ICE_PROMISC_VLAN_TX)) {
new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
new_fltr.l_data.mac_vlan.vlan_id = vid;
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@ -3506,44 +3541,43 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* individual type, and clear it out in the input mask as it
* is found.
*/
while (promisc_mask) {
while (ice_is_any_bit_set(p_mask, ICE_PROMISC_MAX)) {
struct ice_sw_recipe *recp_list;
u8 *mac_addr;
pkt_type = 0;
is_tx_fltr = false;
if (promisc_mask & ICE_PROMISC_UCAST_RX) {
promisc_mask &= ~ICE_PROMISC_UCAST_RX;
if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_RX,
p_mask)) {
pkt_type = UCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
promisc_mask &= ~ICE_PROMISC_UCAST_TX;
} else if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_TX,
p_mask)) {
pkt_type = UCAST_FLTR;
is_tx_fltr = true;
} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
promisc_mask &= ~ICE_PROMISC_MCAST_RX;
} else if (ice_test_and_clear_bit(ICE_PROMISC_MCAST_RX,
p_mask)) {
pkt_type = MCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
promisc_mask &= ~ICE_PROMISC_MCAST_TX;
} else if (ice_test_and_clear_bit(ICE_PROMISC_MCAST_TX,
p_mask)) {
pkt_type = MCAST_FLTR;
is_tx_fltr = true;
} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
promisc_mask &= ~ICE_PROMISC_BCAST_RX;
} else if (ice_test_and_clear_bit(ICE_PROMISC_BCAST_RX,
p_mask)) {
pkt_type = BCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
promisc_mask &= ~ICE_PROMISC_BCAST_TX;
} else if (ice_test_and_clear_bit(ICE_PROMISC_BCAST_TX,
p_mask)) {
pkt_type = BCAST_FLTR;
is_tx_fltr = true;
}
/* Check for VLAN promiscuous flag */
if (promisc_mask & ICE_PROMISC_VLAN_RX) {
promisc_mask &= ~ICE_PROMISC_VLAN_RX;
} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
promisc_mask &= ~ICE_PROMISC_VLAN_TX;
if (ice_is_bit_set(p_mask, ICE_PROMISC_VLAN_RX)) {
ice_clear_bit(ICE_PROMISC_VLAN_RX, p_mask);
} else if (ice_test_and_clear_bit(ICE_PROMISC_VLAN_TX,
p_mask)) {
is_tx_fltr = true;
}
/* Set filter DA based on packet type */
mac_addr = new_fltr.l_data.mac.mac_addr;
if (pkt_type == BCAST_FLTR) {
@@ -3587,13 +3621,16 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @promisc_mask: pointer to mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid)
{
if (!hw || !promisc_mask)
return ICE_ERR_PARAM;
return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
hw->port_info->lport,
hw->switch_info);
@@ -3603,7 +3640,7 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* _ice_set_vlan_vsi_promisc
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @promisc_mask: pointer to mask of promiscuous config bits
* @rm_vlan_promisc: Clear VLAN promiscuous mode on the VSI's VLANs
* @lport: logical port number to configure promisc mode
* @sw: pointer to the switch info struct used by this function to add the rule
@@ -3611,9 +3648,9 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc, u8 lport,
struct ice_switch_info *sw)
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc,
u8 lport, struct ice_switch_info *sw)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct LIST_HEAD_TYPE vsi_list_head;
@@ -3673,9 +3710,12 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc)
{
if (!hw || !promisc_mask)
return ICE_ERR_PARAM;
return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
rm_vlan_promisc, hw->port_info->lport,
hw->switch_info);
@@ -4249,7 +4289,7 @@ enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
struct ice_switch_info *sw;
struct ice_switch_info *sw = NULL;
enum ice_status status = ICE_SUCCESS;
u8 i;


@@ -51,18 +51,6 @@
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP 70
#define DUMMY_ETH_HDR_LEN 16
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
#define ICE_MAX_RES_TYPES 0x80
@@ -193,6 +181,40 @@ struct ice_adv_lkup_elem {
union ice_prot_hdr m_u; /* Mask of header values to match */
};
struct entry_vsi_fwd {
u16 vsi_list;
u8 list;
u8 valid;
};
struct entry_to_q {
u16 q_idx;
u8 q_region_sz;
u8 q_pri;
};
struct entry_prune {
u16 vsi_list;
u8 list;
u8 egr;
u8 ing;
u8 prune_t;
};
struct entry_mirror {
u16 mirror_vsi;
};
struct entry_generic_act {
u16 generic_value;
u8 offset;
u8 priority;
};
struct entry_statistics {
u8 counter_idx;
};
struct ice_sw_act_ctrl {
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
@@ -382,14 +404,16 @@ struct ice_adv_fltr_mgmt_list_entry {
};
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
ICE_PROMISC_MCAST_RX = 0x4,
ICE_PROMISC_MCAST_TX = 0x8,
ICE_PROMISC_BCAST_RX = 0x10,
ICE_PROMISC_BCAST_TX = 0x20,
ICE_PROMISC_VLAN_RX = 0x40,
ICE_PROMISC_VLAN_TX = 0x80,
ICE_PROMISC_UCAST_RX = 0,
ICE_PROMISC_UCAST_TX,
ICE_PROMISC_MCAST_RX,
ICE_PROMISC_MCAST_TX,
ICE_PROMISC_BCAST_RX,
ICE_PROMISC_BCAST_TX,
ICE_PROMISC_VLAN_RX,
ICE_PROMISC_VLAN_TX,
/* Max value */
ICE_PROMISC_MAX,
};
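Since the promiscuous flags are now bit positions rather than u8 mask values, callers operate on an ice_bitmap_t through the driver's bitmap helpers instead of OR'ing flag bytes. A minimal sketch of the new calling convention, using only helpers that appear elsewhere in this diff (hw and vsi_handle are assumed to be in scope):

/* Enable RX/TX unicast promiscuous mode via the bitmap-based API. */
ice_declare_bitmap(mask, ICE_PROMISC_MAX);
enum ice_status status;

ice_zero_bitmap(mask, ICE_PROMISC_MAX);
ice_set_bit(ICE_PROMISC_UCAST_RX, mask);
ice_set_bit(ICE_PROMISC_UCAST_TX, mask);
status = ice_set_vsi_promisc(hw, vsi_handle, mask, 0);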
struct ice_dummy_pkt_offsets {
@@ -405,7 +429,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
struct ice_sw_rule_lkup_rx_tx *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
const struct ice_dummy_pkt_offsets *offsets);
@@ -516,22 +540,22 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
bool *rule_exists);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid);
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid);
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc);
/* Get VSIs Promisc/defport settings */
enum ice_status
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid);
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid);
enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid);
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid);
enum ice_status ice_replay_all_fltr(struct ice_hw *hw);


@@ -226,11 +226,13 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_GENERIC,
ICE_MAC_GENERIC_3K,
ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
enum ice_media_type {
ICE_MEDIA_UNKNOWN = 0,
ICE_MEDIA_NONE = 0,
ICE_MEDIA_UNKNOWN,
ICE_MEDIA_FIBER,
ICE_MEDIA_BASET,
ICE_MEDIA_BACKPLANE,
@@ -238,6 +240,84 @@ enum ice_media_type {
ICE_MEDIA_AUI,
};
#define ICE_MEDIA_BASET_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_1000BASE_T | \
ICE_PHY_TYPE_LOW_2500BASE_T | \
ICE_PHY_TYPE_LOW_5GBASE_T | \
ICE_PHY_TYPE_LOW_10GBASE_T | \
ICE_PHY_TYPE_LOW_25GBASE_T)
#define ICE_MEDIA_C2M_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \
ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \
ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC)
#define ICE_MEDIA_C2M_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC)
#define ICE_MEDIA_OPT_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_SX | \
ICE_PHY_TYPE_LOW_1000BASE_LX | \
ICE_PHY_TYPE_LOW_10GBASE_SR | \
ICE_PHY_TYPE_LOW_10GBASE_LR | \
ICE_PHY_TYPE_LOW_25GBASE_SR | \
ICE_PHY_TYPE_LOW_25GBASE_LR | \
ICE_PHY_TYPE_LOW_40GBASE_SR4 | \
ICE_PHY_TYPE_LOW_40GBASE_LR4 | \
ICE_PHY_TYPE_LOW_50GBASE_SR2 | \
ICE_PHY_TYPE_LOW_50GBASE_LR2 | \
ICE_PHY_TYPE_LOW_50GBASE_SR | \
ICE_PHY_TYPE_LOW_50GBASE_LR | \
ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
ICE_PHY_TYPE_LOW_50GBASE_FR | \
ICE_PHY_TYPE_LOW_100GBASE_DR)
#define ICE_MEDIA_BP_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_KX | \
ICE_PHY_TYPE_LOW_2500BASE_KX | \
ICE_PHY_TYPE_LOW_5GBASE_KR | \
ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
ICE_PHY_TYPE_LOW_25GBASE_KR | \
ICE_PHY_TYPE_LOW_25GBASE_KR_S | \
ICE_PHY_TYPE_LOW_25GBASE_KR1 | \
ICE_PHY_TYPE_LOW_40GBASE_KR4 | \
ICE_PHY_TYPE_LOW_50GBASE_KR2 | \
ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \
ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4)
#define ICE_MEDIA_BP_PHY_TYPE_HIGH_M ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4
#define ICE_MEDIA_DAC_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_DA | \
ICE_PHY_TYPE_LOW_25GBASE_CR | \
ICE_PHY_TYPE_LOW_25GBASE_CR_S | \
ICE_PHY_TYPE_LOW_25GBASE_CR1 | \
ICE_PHY_TYPE_LOW_40GBASE_CR4 | \
ICE_PHY_TYPE_LOW_50GBASE_CR2 | \
ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
ICE_PHY_TYPE_LOW_50GBASE_CP | \
ICE_PHY_TYPE_LOW_100GBASE_CP2)
#define ICE_MEDIA_C2C_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100M_SGMII | \
ICE_PHY_TYPE_LOW_1G_SGMII | \
ICE_PHY_TYPE_LOW_2500BASE_X | \
ICE_PHY_TYPE_LOW_10G_SFI_C2C | \
ICE_PHY_TYPE_LOW_25G_AUI_C2C | \
ICE_PHY_TYPE_LOW_40G_XLAUI | \
ICE_PHY_TYPE_LOW_50G_LAUI2 | \
ICE_PHY_TYPE_LOW_50G_AUI2 | \
ICE_PHY_TYPE_LOW_50G_AUI1 | \
ICE_PHY_TYPE_LOW_100G_CAUI4 | \
ICE_PHY_TYPE_LOW_100G_AUI4)
#define ICE_MEDIA_C2C_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
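Each of the masks above groups the PHY type bits that share a physical medium, so classifying a reported phy_type_low value reduces to one test per medium. A hypothetical sketch of such a classification (not the driver's actual helper, which also consults phy_type_high and module information; ICE_MEDIA_DA is the enum's direct-attach entry, elided from the hunk context above):

/* Sketch: map a phy_type_low bit to a media type; first match wins. */
static enum ice_media_type
media_from_phy_type_low(u64 phy_type_low)
{
	if (phy_type_low & ICE_MEDIA_BASET_PHY_TYPE_LOW_M)
		return ICE_MEDIA_BASET;
	if (phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M)
		return ICE_MEDIA_FIBER;
	if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M)
		return ICE_MEDIA_BACKPLANE;
	if (phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M)
		return ICE_MEDIA_DA;
	return ICE_MEDIA_UNKNOWN;
}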
/* Software VSI types. */
enum ice_vsi_type {
ICE_VSI_PF = 0,
@@ -392,9 +472,11 @@ struct ice_hw_common_caps {
bool sec_rev_disabled;
bool update_disabled;
bool nvm_unified_update;
bool netlist_auth;
#define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
#define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1)
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
#define ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
/* PCIe reset avoidance */
bool pcie_reset_avoidance; /* false: not supported, true: supported */
/* Post update reset restriction */
@@ -412,8 +494,12 @@ struct ice_hw_common_caps {
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
bool ext_topo_dev_img_ver_schema[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA BIT(2)
bool tx_sched_topo_comp_mode_en;
bool dyn_flattening_en;
/* Support for OROM update in Recovery Mode */
bool orom_recovery_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -440,6 +526,9 @@ struct ice_hw_dev_caps {
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_funcs;
struct ice_nac_topology nac_topo;
/* bitmap of supported sensors */
u32 supported_sensors;
#define ICE_SENSOR_SUPPORT_E810_INT_TEMP BIT(0)
};
/* Information about MAC such as address, etc... */
@@ -859,6 +948,7 @@ struct ice_port_info {
struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
struct ice_qos_cfg qos_cfg;
u8 is_vf:1;
u8 is_custom_tx_enabled:1;
};
struct ice_switch_info {
@@ -909,14 +999,15 @@ struct ice_mbx_snap_buffer_data {
u16 max_num_msgs_mbx;
};
/* Structure to track messages sent by VFs on mailbox:
* 1. vf_cntr : a counter array of VFs to track the number of
* asynchronous messages sent by each VF
* 2. vfcntr_len : number of entries in VF counter array
/* Structure used to track a single VF's messages on the mailbox:
* 1. list_entry: linked list entry node
* 2. msg_count: the number of asynchronous messages sent by this VF
* 3. malicious: whether this VF has been detected as malicious before
*/
struct ice_mbx_vf_counter {
u32 *vf_cntr;
u32 vfcntr_len;
struct ice_mbx_vf_info {
struct LIST_ENTRY_TYPE list_entry;
u32 msg_count;
u8 malicious : 1;
};
/* Structure to hold data relevant to the captured static snapshot
@@ -924,7 +1015,7 @@ struct ice_mbx_vf_counter {
*/
struct ice_mbx_snapshot {
struct ice_mbx_snap_buffer_data mbx_buf;
struct ice_mbx_vf_counter mbx_vf;
struct LIST_HEAD_TYPE mbx_vf;
};
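The old fixed-size vf_cntr array is gone; the snapshot now carries a linked list of per-VF ice_mbx_vf_info entries. A hedged sketch of walking that list, assuming a struct ice_mbx_snapshot *snap in scope and the driver's existing LIST_FOR_EACH_ENTRY iterator from ice_osdep.h:

/* Sum asynchronous message counts across all tracked VFs. */
struct ice_mbx_vf_info *vf_info;
u32 total_msgs = 0;

LIST_FOR_EACH_ENTRY(vf_info, &snap->mbx_vf, ice_mbx_vf_info, list_entry)
	total_msgs += vf_info->msg_count;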
/* Structure to hold data to be used for capturing or updating a
@@ -975,6 +1066,8 @@ struct ice_hw {
u8 pf_id; /* device profile info */
enum ice_phy_model phy_model;
u8 phy_ports;
u8 max_phy_port;
u16 max_burst_size; /* driver sets this value */
@@ -1349,4 +1442,5 @@ struct ice_aq_get_set_rss_lut_params {
/* AQ API version for FW auto drop reports */
#define ICE_FW_API_AUTO_DROP_MAJ 1
#define ICE_FW_API_AUTO_DROP_MIN 4
#endif /* _ICE_TYPE_H_ */


@@ -83,7 +83,7 @@ static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ice_if_suspend(if_ctx_t ctx);
static int ice_if_resume(if_ctx_t ctx);
static bool ice_if_needs_restart(if_ctx_t, enum iflib_restart_event);
static bool ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
static int ice_msix_que(void *arg);
static int ice_msix_admin(void *arg);
@@ -110,6 +110,7 @@ static int ice_allocate_msix(struct ice_softc *sc);
static void ice_admin_timer(void *arg);
static void ice_transition_recovery_mode(struct ice_softc *sc);
static void ice_transition_safe_mode(struct ice_softc *sc);
static void ice_set_default_promisc_mask(ice_bitmap_t *promisc_mask);
/*
* Device Interface Declaration
@@ -342,6 +343,7 @@ ice_setup_scctx(struct ice_softc *sc)
{
if_softc_ctx_t scctx = sc->scctx;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
bool safe_mode, recovery_mode;
safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
@@ -393,7 +395,7 @@ ice_setup_scctx(struct ice_softc *sc)
scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
scctx->isc_msix_bar = PCIR_BAR(ICE_MSIX_BAR);
scctx->isc_msix_bar = pci_msix_table_bar(dev);
scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;
/*
@@ -512,6 +514,9 @@ ice_if_attach_pre(if_ctx_t ctx)
ice_init_device_features(sc);
/* Keep flag set by default */
ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
/* Notify firmware of the device driver version */
err = ice_send_version(sc);
if (err)
@@ -695,7 +700,8 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
if (sc->link_up) { /* link is up */
uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
ice_set_default_local_lldp_mib(sc);
if (!(hw->port_info->phy.link_info_old.link_info & ICE_AQ_LINK_UP))
ice_set_default_local_lldp_mib(sc);
iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
ice_rdma_link_change(sc, LINK_STATE_UP, baudrate);
@@ -709,7 +715,7 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
}
/* Update the supported media types */
if (update_media) {
if (update_media && !ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
status = ice_add_media_types(sc, sc->media);
if (status)
device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
@@ -731,6 +737,7 @@ ice_if_attach_post(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
enum ice_status status;
int err;
ASSERT_CTX_LOCKED(sc);
@@ -793,6 +800,15 @@ ice_if_attach_post(if_ctx_t ctx)
ice_cfg_pba_num(sc);
/* Set a default value for PFC mode on attach since the FW state is unknown
* before sysctl tunables are executed and it can't be queried. This fixes an
* issue when loading the driver with the FW LLDP agent enabled but the FW
* was previously in DSCP PFC mode.
*/
status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL);
if (status != ICE_SUCCESS)
device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status));
ice_add_device_sysctls(sc);
/* Get DCBX/LLDP state and start DCBX agent */
@@ -817,6 +833,10 @@ ice_if_attach_post(if_ctx_t ctx)
callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
mtx_unlock(&sc->admin_mtx);
if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
!ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
return 0;
@@ -895,6 +915,7 @@ ice_if_detach(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
struct ice_vsi *vsi = &sc->pf_vsi;
enum ice_status status;
int i;
ASSERT_CTX_LOCKED(sc);
@@ -953,6 +974,14 @@ ice_if_detach(if_ctx_t ctx)
if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
ice_deinit_hw(&sc->hw);
IFLIB_CTX_UNLOCK(sc);
status = ice_reset(&sc->hw, ICE_RESET_PFR);
IFLIB_CTX_LOCK(sc);
if (status) {
device_printf(sc->dev, "device PF reset failed, err %s\n",
ice_status_str(status));
}
ice_free_pci_mapping(sc);
return 0;
@@ -1732,6 +1761,25 @@ ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
return (0);
}
/**
* ice_set_default_promisc_mask - Set default config for promisc settings
* @promisc_mask: bitmask to setup
*
* The ice_(set|clear)_vsi_promisc() functions expect a mask of promiscuous
* modes to operate on. The mask used here is the driver's default: it
* enables/disables promiscuous mode for all types of non-VLAN-tagged/VLAN 0
* traffic.
*/
static void
ice_set_default_promisc_mask(ice_bitmap_t *promisc_mask)
{
ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask);
ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask);
ice_set_bit(ICE_PROMISC_MCAST_TX, promisc_mask);
ice_set_bit(ICE_PROMISC_MCAST_RX, promisc_mask);
}
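Note that the default mask deliberately leaves the VLAN bits clear, matching the "non-VLAN-tagged/VLAN 0 traffic" scope described above. As a hypothetical extension (not part of this commit), a caller that also wanted promiscuous mode on VLAN-tagged traffic could build on the default:

/* Hypothetical: widen the default mask to VLAN-tagged traffic too. */
ice_declare_bitmap(promisc_mask, ICE_PROMISC_MAX);

ice_set_default_promisc_mask(promisc_mask);
ice_set_bit(ICE_PROMISC_VLAN_RX, promisc_mask);
ice_set_bit(ICE_PROMISC_VLAN_TX, promisc_mask);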
/**
* ice_if_promisc_set - Set device promiscuous mode
* @ctx: iflib context structure
@@ -1750,17 +1798,20 @@ ice_if_promisc_set(if_ctx_t ctx, int flags)
enum ice_status status;
bool promisc_enable = flags & IFF_PROMISC;
bool multi_enable = flags & IFF_ALLMULTI;
ice_declare_bitmap(promisc_mask, ICE_PROMISC_MAX);
/* Do not support configuration when in recovery mode */
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
return (ENOSYS);
ice_set_default_promisc_mask(promisc_mask);
if (multi_enable)
return (EOPNOTSUPP);
if (promisc_enable) {
status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx,
ICE_VSI_PROMISC_MASK, 0);
promisc_mask, 0);
if (status && status != ICE_ERR_ALREADY_EXISTS) {
device_printf(dev,
"Failed to enable promiscuous mode for PF VSI, err %s aq_err %s\n",
@@ -1770,7 +1821,7 @@ ice_if_promisc_set(if_ctx_t ctx, int flags)
}
} else {
status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx,
ICE_VSI_PROMISC_MASK, 0);
promisc_mask, 0);
if (status) {
device_printf(dev,
"Failed to disable promiscuous mode for PF VSI, err %s aq_err %s\n",
@@ -1983,6 +2034,11 @@ ice_if_init(if_ctx_t ctx)
/* Configure promiscuous mode */
ice_if_promisc_set(ctx, if_getflags(sc->ifp));
if (!ice_testandclear_state(&sc->state, ICE_STATE_FIRST_INIT_LINK))
if (!sc->link_up && ((if_getflags(sc->ifp) & IFF_UP) ||
ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)))
ice_set_link(sc, true);
ice_rdma_pf_init(sc);
ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
@@ -2020,14 +2076,18 @@ ice_poll_for_media_avail(struct ice_softc *sc)
enum ice_status status;
/* Re-enable link and re-apply user link settings */
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
(if_getflags(sc->ifp) & IFF_UP)) {
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
/* Update the OS about changes in media capability */
status = ice_add_media_types(sc, sc->media);
if (status)
device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
/* Update the OS about changes in media capability */
status = ice_add_media_types(sc, sc->media);
if (status)
device_printf(sc->dev,
"Error adding device media types: %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
}
@@ -2311,7 +2371,7 @@ ice_prepare_for_reset(struct ice_softc *sc)
ice_clear_hw_tbls(hw);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
ice_sched_cleanup_all(hw);
ice_shutdown_all_ctrlq(hw, false);
}
@@ -2554,6 +2614,9 @@ ice_rebuild(struct ice_softc *sc)
goto err_deinit_pf_vsi;
}
if (hw->port_info->qos_cfg.is_sw_lldp)
ice_add_rx_lldp_filter(sc);
/* Refresh link status */
ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
sc->hw.port_info->phy.get_link_info = true;
@@ -2578,8 +2641,13 @@ ice_rebuild(struct ice_softc *sc)
* because the state of IFC_DO_RESET is cached within task_fn_admin in
* the iflib core, we also want re-run the admin task so that iflib
* resets immediately instead of waiting for the next interrupt.
* If the FW LLDP agent is enabled, DCB must be reconfigured instead so that
* all TC queues are properly reinitialized, not only TC 0;
* ice_do_dcb_reconfig() includes a call to ice_request_stack_reinit() as well.
*/
ice_request_stack_reinit(sc);
if (hw->port_info->qos_cfg.is_sw_lldp)
ice_request_stack_reinit(sc);
else
ice_do_dcb_reconfig(sc, false);
return;
@@ -2706,6 +2774,8 @@ ice_handle_pf_reset_request(struct ice_softc *sc)
static void
ice_init_device_features(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
/* Set capabilities that all devices support */
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
@@ -2720,22 +2790,22 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!sc->hw.func_caps.common_cap.rss_table_size)
if (!hw->func_caps.common_cap.rss_table_size)
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
if (!sc->hw.func_caps.common_cap.iwarp || !ice_enable_irdma)
if (!hw->func_caps.common_cap.iwarp || !ice_enable_irdma)
ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
if (!sc->hw.func_caps.common_cap.dcb)
if (!hw->func_caps.common_cap.dcb)
ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
/* Disable features due to firmware limitations... */
if (!ice_is_fw_health_report_supported(&sc->hw))
if (!ice_is_fw_health_report_supported(hw))
ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
if (!ice_fwlog_supported(&sc->hw))
if (!ice_fwlog_supported(hw))
ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
if (sc->hw.fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
else
ice_fwlog_unregister(&sc->hw);
ice_fwlog_unregister(hw);
}
/* Disable capabilities not supported by the OS */
@@ -2748,6 +2818,11 @@ ice_init_device_features(struct ice_softc *sc)
/* Disable features based on sysctl settings */
if (!ice_tx_balance_en)
ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
if (hw->dev_caps.supported_sensors & ICE_SENSOR_SUPPORT_E810_INT_TEMP) {
ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
}
}
/**
@@ -2898,6 +2973,10 @@ ice_if_stop(if_ctx_t ctx)
/* Disable the Tx and Rx queues */
ice_vsi_disable_tx(&sc->pf_vsi);
ice_control_all_rx_queues(&sc->pf_vsi, false);
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
!(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
ice_set_link(sc, false);
}
/**
@@ -3078,8 +3157,9 @@ ice_if_resume(if_ctx_t ctx)
return (0);
}
/* ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized
* @ctx: iflib context
/**
* ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized
* @ctx: iflib context pointer
* @event: event code to check
*
* Defaults to returning false for unknown events.


@@ -45,6 +45,7 @@ INTERFACE irdma_di;
* @peer: the RDMA peer structure
*
* Called by the RDMA client driver to request a reset of the ice device.
* @returns 0 on success
*/
METHOD int reset {
struct ice_rdma_peer *peer;
@@ -57,6 +58,7 @@ METHOD int reset {
*
* Called by the RDMA client driver to request initialization of the MSI-X
* resources used for RDMA functionality.
* @returns ENOSYS
*/
METHOD int msix_init {
struct ice_rdma_peer *peer;
@@ -68,6 +70,8 @@ METHOD int msix_init {
* registration or deregistration
* @peer: the RDMA peer client structure
* @res: resources to be registered or unregistered
* @returns 0 on success, EINVAL on argument issues, ENOMEM on memory
* allocation failure, EXDEV on vsi device mismatch
*/
METHOD int qset_register_request {
struct ice_rdma_peer *peer;
@@ -79,6 +83,7 @@ METHOD int qset_register_request {
* when opening or closing rdma driver
* @peer: the RDMA peer client structure
* @enable: enable or disable the rdma filter
* @returns 0 on success, EINVAL on wrong vsi
*/
METHOD int vsi_filter_update {
struct ice_rdma_peer *peer;


@@ -46,6 +46,7 @@ INTERFACE irdma;
*
* Called by the ice driver during attach to notify the RDMA client driver
* that a new PF has been initialized.
* @returns 0 on success, EIO, ENOMEM, EINVAL, EBUSY if a problem is encountered
*/
METHOD int probe {
struct ice_rdma_peer *peer;
@@ -57,6 +58,7 @@ METHOD int probe {
*
* Called by the ice driver during the if_init routine to notify the RDMA
* client driver that a PF has been activated.
* @returns 0
*/
METHOD int open {
struct ice_rdma_peer *peer;
@@ -68,6 +70,7 @@ METHOD int open {
*
* Called by the ice driver during the if_stop routine to notify the RDMA
* client driver that a PF has been deactivated.
* @returns 0
*/
METHOD int close {
struct ice_rdma_peer *peer;
@@ -79,6 +82,7 @@ METHOD int close {
*
* Called by the ice driver during detach to notify the RDMA client driver
* that a PF has been removed.
* @returns 0
*/
METHOD int remove {
struct ice_rdma_peer *peer;


@@ -203,7 +203,7 @@ enum virtchnl_ops {
VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
/* opcode 116 through 128 are reserved */
/* opcode 116 through 130 are reserved */
VIRTCHNL_OP_MAX,
};