ice(4): Update to 1.39.13-k

- Add mirror interface functionality (see the usage note below)
- Remove unused virtchnl headers
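
The new interface is managed through two per-device sysctls added in ice_lib.c, both
using a write-then-read pattern (usage inferred from the handlers in this diff): writing
"1" to dev.ice.<unit>.create_mirror_interface arms the request, and a subsequent read of
the same sysctl performs the creation; dev.ice.<unit>.destroy_mirror_interface tears the
interface down the same way.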

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

MFC-with:	768329961d
MFC after:	3 days
Sponsored by:	Intel Corporation
Tested by:	jeffrey.e.pieper@intel.com
Differential Revision:	https://reviews.freebsd.org/D44004
Author: Eric Joyner, 2024-02-12 22:26:26 -08:00
parent 015f8cc5b0
commit 9e54973fc3
27 changed files with 2134 additions and 784 deletions

sys/dev/ice/ice_adminq_cmd.h

@ -152,6 +152,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_WOL_PROXY 0x0008
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VMDQ 0x0014
#define ICE_AQC_CAPS_802_1QBG 0x0015
#define ICE_AQC_CAPS_802_1BR 0x0016
#define ICE_AQC_CAPS_VSI 0x0017
@ -184,6 +185,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_DYN_FLATTENING 0x008A
#define ICE_AQC_CAPS_OROM_RECOVERY_UPDATE 0x0090
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@ -358,6 +361,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@ -2198,6 +2203,14 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
#define ICE_AQC_NVM_CMPO_MOD_ID 0x153
/* Cage Max Power override NVM module */
struct ice_aqc_nvm_cmpo {
__le16 length;
#define ICE_AQC_NVM_CMPO_ENABLE BIT(8)
__le16 cages_cfg[8];
};
/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
* type field is excluded from the section when reading and writing from
@ -2509,11 +2522,13 @@ enum ice_lut_type {
ICE_LUT_VSI = 0,
ICE_LUT_PF = 1,
ICE_LUT_GLOBAL = 2,
ICE_LUT_TYPE_MASK = 3
ICE_LUT_TYPE_MASK = 3,
ICE_LUT_PF_SMALL = 5, /* yields ICE_LUT_PF when &= ICE_LUT_TYPE_MASK */
};
enum ice_lut_size {
ICE_LUT_VSI_SIZE = 64,
ICE_LUT_PF_SMALL_SIZE = 128,
ICE_LUT_GLOBAL_SIZE = 512,
ICE_LUT_PF_SIZE = 2048,
};
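
For illustration, a hedged sketch of the round trip the new small-PF LUT values enable
(ice_lut_type_to_size() and ice_lut_size_to_type() are updated in ice_common.c later in
this diff; expected results are noted in comments):

	u16 size = ice_lut_type_to_size(ICE_LUT_PF_SMALL);      /* 128 */
	int type = ice_lut_size_to_type(ICE_LUT_PF_SMALL_SIZE); /* ICE_LUT_PF_SMALL */
	/* __ice_aq_get_set_rss_lut() computes the size from the raw type, then
	 * masks it for the AQ command: ICE_LUT_PF_SMALL & ICE_LUT_TYPE_MASK
	 * yields ICE_LUT_PF, as the enum comment above notes.
	 */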
@ -2796,7 +2811,7 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
u8 cluster_id;
__le16 cluster_id; /* Expresses next cluster ID in response */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
@ -2809,7 +2824,7 @@ struct ice_aqc_debug_dump_internals {
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
u8 reserved;
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS 22
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
__le32 addr_high;
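
Widening cluster_id from u8 to __le16 absorbs the former reserved byte, so the command
descriptor layout keeps its size while letting firmware echo back the next cluster ID to
read.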

sys/dev/ice/ice_bitops.h

@ -402,7 +402,7 @@ static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size)
}
/**
* ice_cp_bitmap - copy bitmaps.
* ice_cp_bitmap - copy bitmaps
* @dst: bitmap destination
* @src: bitmap to copy from
* @size: Size of the bitmaps in bits
@ -460,7 +460,7 @@ ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
}
/**
* ice_cmp_bitmap - compares two bitmaps.
* ice_cmp_bitmap - compares two bitmaps
* @bmp1: the bitmap to compare
* @bmp2: the bitmap to compare with bmp1
* @size: Size of the bitmaps in bits

sys/dev/ice/ice_common.c

@ -2319,6 +2319,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VMDQ:
caps->vmdq = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %d\n", prefix, caps->vmdq);
break;
case ICE_AQC_CAPS_802_1QBG:
caps->evb_802_1_qbg = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
@ -2404,7 +2408,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
break;
case ICE_AQC_CAPS_ROCEV2_LAG:
caps->roce_lag = (number == 1);
caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
prefix, caps->roce_lag);
break;
@ -2726,6 +2730,10 @@ ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
ice_info(hw, "PF is configured in %s mode with IP instance ID %d\n",
(dev_p->nac_topo.mode == 0) ? "primary" : "secondary",
dev_p->nac_topo.id);
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
@ -3060,7 +3068,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
* ice_aq_set_port_params - set physical port parameters.
* ice_aq_set_port_params - set physical port parameters
* @pi: pointer to the port info struct
* @bad_frame_vsi: defines the VSI to which bad frames are forwarded
* @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
@ -4058,6 +4066,8 @@ static u16 ice_lut_type_to_size(u16 lut_type)
return ICE_LUT_GLOBAL_SIZE;
case ICE_LUT_PF:
return ICE_LUT_PF_SIZE;
case ICE_LUT_PF_SMALL:
return ICE_LUT_PF_SMALL_SIZE;
default:
return 0;
}
@ -4089,6 +4099,8 @@ int ice_lut_size_to_type(int lut_size)
return ICE_LUT_GLOBAL;
case ICE_LUT_PF_SIZE:
return ICE_LUT_PF;
case ICE_LUT_PF_SMALL_SIZE:
return ICE_LUT_PF_SMALL;
default:
return -1;
}
@ -4116,8 +4128,8 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
vsi_handle = params->vsi_handle;
lut = params->lut;
lut_type = params->lut_type;
lut_size = ice_lut_type_to_size(lut_type);
lut_size = ice_lut_type_to_size(params->lut_type);
lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
cmd_resp = &desc.params.get_set_rss_lut;
if (lut_type == ICE_LUT_GLOBAL)
glob_lut_idx = params->global_lut_id;
@ -4773,6 +4785,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
* @buf: dump buffer
* @buf_size: dump buffer size
* @ret_buf_size: return buffer size (returned by FW)
* @ret_next_cluster: next cluster to read (returned by FW)
* @ret_next_table: next block to read (returned by FW)
* @ret_next_index: next index to read (returned by FW)
* @cd: pointer to command details structure
@ -4780,10 +4793,10 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
* Get internal FW/HW data (0xFF08) for debug purposes.
*/
enum ice_status
ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_table, u32 *ret_next_index,
struct ice_sq_cd *cd)
u16 *ret_next_cluster, u16 *ret_next_table,
u32 *ret_next_index, struct ice_sq_cd *cd)
{
struct ice_aqc_debug_dump_internals *cmd;
struct ice_aq_desc desc;
@ -4796,7 +4809,7 @@ ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
cmd->cluster_id = cluster_id;
cmd->cluster_id = CPU_TO_LE16(cluster_id);
cmd->table_id = CPU_TO_LE16(table_id);
cmd->idx = CPU_TO_LE32(start);
@ -4805,6 +4818,8 @@ ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
if (!status) {
if (ret_buf_size)
*ret_buf_size = LE16_TO_CPU(desc.datalen);
if (ret_next_cluster)
*ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
if (ret_next_table)
*ret_next_table = LE16_TO_CPU(cmd->table_id);
if (ret_next_index)
@ -6051,7 +6066,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*/
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
@ -6183,8 +6198,6 @@ static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
if (hw->fw_min_ver == min && hw->fw_patch >= patch)
return true;
}
} else if (hw->fw_branch > branch) {
return true;
}
return false;
@ -6591,10 +6604,14 @@ u32 ice_get_link_speed(u16 index)
*/
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
ICE_FW_FEC_DIS_AUTO_MAJ,
ICE_FW_FEC_DIS_AUTO_MIN,
ICE_FW_FEC_DIS_AUTO_PATCH);
ICE_FW_FEC_DIS_AUTO_PATCH) ||
ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
ICE_FW_FEC_DIS_AUTO_MAJ_E82X,
ICE_FW_FEC_DIS_AUTO_MIN_E82X,
ICE_FW_FEC_DIS_AUTO_PATCH_E82X);
}
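
With this change, either an E810-branch or an E82X-branch firmware can satisfy the
FEC-disable-in-auto minimum; the old catch-all in ice_is_fw_min_ver() that accepted any
newer firmware branch (removed above) is gone, so each hardware family is now matched
against its own explicit minimum version.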
/**

sys/dev/ice/ice_common.h

@ -88,10 +88,10 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
enum ice_status
ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_table, u32 *ret_next_index,
struct ice_sq_cd *cd);
u16 *ret_next_cluster, u16 *ret_next_table,
u32 *ret_next_index, struct ice_sq_cd *cd);
enum ice_status ice_set_mac_type(struct ice_hw *hw);
@ -352,7 +352,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,

sys/dev/ice/ice_controlq.c

@ -482,7 +482,7 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
* ice_aq_ver_check - Check the reported AQ API version.
* ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@ -1037,12 +1037,18 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (cq->sq.next_to_use == cq->sq.count)
cq->sq.next_to_use = 0;
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
/* Wait a short time before initial ice_sq_done() check, to allow
* hardware time for completion.
*/
ice_usec_delay(5, false);
do {
if (ice_sq_done(hw, cq))
break;
ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
ice_usec_delay(10, false);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);

sys/dev/ice/ice_controlq.h

@ -60,8 +60,7 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 100000 /* Count 100000 times */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
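
The rebalanced polling keeps the same worst-case budget: 100000 iterations at the new
10us interval in ice_sq_send_cmd_nolock() is the same ~1s as the old 10000 x 100us
scheme, but with the initial 5us settle and finer-grained checks, completions are
noticed sooner.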

sys/dev/ice/ice_drv_info.h

@ -62,16 +62,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "1.37.11-k";
const char ice_driver_version[] = "1.39.13-k";
const uint8_t ice_major_version = 1;
const uint8_t ice_minor_version = 37;
const uint8_t ice_patch_version = 11;
const uint8_t ice_minor_version = 39;
const uint8_t ice_patch_version = 13;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 1.37.11-k")
PVID(vendor, devid, name " - 1.39.13-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.11-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.39.13-k")
/**
* @var ice_vendor_info_array

sys/dev/ice/ice_features.h

@ -89,7 +89,6 @@ enum feat_list {
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif

sys/dev/ice/ice_flex_pipe.c

@ -672,6 +672,7 @@ enum ice_status ice_replay_tunnels(struct ice_hw *hw)
if (status) {
ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
status, port);
hw->tnl.tbl[i].ref = refs;
break;
}

sys/dev/ice/ice_flow.c

@ -404,12 +404,11 @@ struct ice_flow_prof_params {
};
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
ICE_FLOW_SEG_HDR_ARP)
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
@ -483,15 +482,13 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
src = i ?
(const ice_bitmap_t *)ice_ptypes_ipv4_il :
src = i ? (const ice_bitmap_t *)ice_ptypes_ipv4_il :
(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
src = i ?
(const ice_bitmap_t *)ice_ptypes_ipv6_il :
src = i ? (const ice_bitmap_t *)ice_ptypes_ipv6_il :
(const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
@ -645,8 +642,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
case ICE_FLOW_FIELD_IDX_ICMP_CODE:
/* ICMP type and code share the same extraction seq. entry */
prot_id = (params->prof->segs[seg].hdrs &
ICE_FLOW_SEG_HDR_IPV4) ?
prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
ICE_FLOW_FIELD_IDX_ICMP_CODE :
@ -1301,20 +1297,20 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
/* set outer most header */
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
return ICE_ERR_PARAM;
@ -1418,11 +1414,14 @@ ice_get_rss_hdr_type(struct ice_flow_prof *prof)
if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
hdr_type = ICE_RSS_OUTER_HEADERS;
} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
const struct ice_flow_seg_info *s;
s = &prof->segs[ICE_RSS_OUTER_HEADERS];
if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
hdr_type = ICE_RSS_INNER_HEADERS;
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
}
@ -1529,13 +1528,14 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
* [62:63] - Encapsulation flag:
* 0 if non-tunneled
* 1 if tunneled
* 2 for tunneled with outer ipv4
* 3 for tunneled with outer ipv6
* 2 for tunneled with outer IPv4
* 3 for tunneled with outer IPv6
*/
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & \
ICE_FLOW_PROF_ENCAP_M)))
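
For illustration, a hedged sketch of how a caller packs these fields into a 64-bit
profile ID (the exact call site is an assumption; the argument names mirror those used
elsewhere in this file, e.g. by ice_add_rss_cfg_sync()):

	/* pack hash fields, segment headers, and the [62:63]
	 * encapsulation flag described above into one 64-bit ID
	 */
	u64 prof_id = ICE_FLOW_GEN_PROFID(cfg->hash_flds,
					  segs[segs_cnt - 1].hdrs,
					  cfg->hdr_type);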
/**
* ice_add_rss_cfg_sync - add an RSS configuration
@ -1559,7 +1559,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
return ICE_ERR_PARAM;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
ICE_FLOW_SEG_SINGLE :
ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
@ -1663,18 +1664,16 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
ice_acquire_lock(&hw->rss_locks);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
ice_acquire_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
ice_release_lock(&hw->rss_locks);
} else {
ice_acquire_lock(&hw->rss_locks);
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
@ -1682,8 +1681,8 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
status = ice_add_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
ice_release_lock(&hw->rss_locks);
}
ice_release_lock(&hw->rss_locks);
return status;
}
@ -1707,7 +1706,8 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
u8 segs_cnt;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
ICE_FLOW_SEG_SINGLE :
ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
if (!segs)
@ -1762,8 +1762,8 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
@ -1774,7 +1774,6 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
} else {
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle,

sys/dev/ice/ice_flow.h

@ -188,14 +188,14 @@ enum ice_flow_avf_hdr_field {
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
/* take inner headers as inputset for packet with outer ipv4. */
/* take inner headers as inputset for packet with outer IPv4. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
/* take inner headers as inputset for packet with outer ipv6. */
/* take inner headers as inputset for packet with outer IPv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
/* take inner as inputset for GTPoGRE with outer ipv4 + gre. */
/* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
/* take inner as inputset for GTPoGRE with outer ipv6 + gre. */
/* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};

sys/dev/ice/ice_hw_autogen.h

@ -5476,6 +5476,7 @@
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1)
#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_MNG_FWSM_FW_MODES_S 0
#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
#define GL_MNG_FWSM_RSV0_S 3

sys/dev/ice/ice_iflib.h

@ -192,6 +192,29 @@ struct ice_rx_queue {
struct if_irq que_irq;
};
/**
* @struct ice_mirr_if
* @brief structure representing a mirroring interface
*/
struct ice_mirr_if {
struct ice_softc *back;
struct ifnet *ifp;
struct ice_vsi *vsi;
device_t subdev;
if_ctx_t subctx;
if_softc_ctx_t subscctx;
u16 num_irq_vectors;
u16 *if_imap;
u16 *os_imap;
struct ice_irq_vector *rx_irqvs;
u32 state;
bool if_attached;
};
/**
* @struct ice_softc
* @brief main structure representing one device
@ -262,7 +285,7 @@ struct ice_softc {
struct ice_resmgr rx_qmgr;
/* Interrupt allocation manager */
struct ice_resmgr imgr;
struct ice_resmgr dev_imgr;
u16 *pf_imap;
int lan_vectors;
@ -302,7 +325,7 @@ struct ice_softc {
/* NVM link override settings */
struct ice_link_default_override_tlv ldo_tlv;
u16 fw_debug_dump_cluster_mask;
u32 fw_debug_dump_cluster_mask;
struct sx *iflib_ctx_lock;
@ -310,6 +333,11 @@ struct ice_softc {
ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
struct ice_resmgr os_imgr;
/* For mirror interface */
struct ice_mirr_if *mirr_if;
int extra_vectors;
int last_rid;
};
#endif /* _ICE_IFLIB_H_ */

sys/dev/ice/ice_iflib_txrx.c

@ -44,6 +44,18 @@
/* Tx/Rx hotpath utility functions */
#include "ice_common_txrx.h"
/*
* Driver private implementations
*/
static int _ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi);
static int _ice_ift_txd_credits_update(struct ice_softc *sc, struct ice_tx_queue *txq, bool clear);
static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
uint64_t *paddrs, uint16_t count);
static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
uint32_t pidx);
/*
* iflib txrx method declarations
*/
@ -55,6 +67,13 @@ static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
static int ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear);
static int ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi);
static void ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static int ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri);
static void ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru);
static void ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
* advanced 32byte Rx descriptors.
@ -82,8 +101,27 @@ struct if_txrx ice_txrx = {
};
/**
* ice_ift_txd_encap - prepare Tx descriptors for a packet
* @arg: the iflib softc structure pointer
* @var ice_subif_txrx
* @brief Tx/Rx operations for the iflib stack, for subinterfaces
*
* Structure defining the Tx and Rx related operations that iflib can request
* the subinterface driver to perform. These are the main entry points for the
* hot path of the transmit and receive paths in the iflib driver.
*/
struct if_txrx ice_subif_txrx = {
.ift_txd_credits_update = ice_ift_txd_credits_update_subif,
.ift_txd_encap = ice_ift_txd_encap_subif,
.ift_txd_flush = ice_ift_txd_flush_subif,
.ift_rxd_available = ice_ift_rxd_available_subif,
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get_subif,
.ift_rxd_refill = ice_ift_rxd_refill_subif,
.ift_rxd_flush = ice_ift_rxd_flush_subif,
.ift_txq_select_v2 = NULL,
};
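
Leaving ift_txq_select_v2 NULL presumably lets iflib fall back to its default queue
selection for subinterfaces; the TC-aware ice_ift_queue_select() below is only wired up
for the main PF interface.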
/**
* _ice_ift_txd_encap - prepare Tx descriptors for a packet
* @txq: driver's TX queue context
* @pi: packet info
*
* Prepares and encapsulates the given packet into Tx descriptors, in
@ -94,10 +132,8 @@ struct if_txrx ice_txrx = {
* Return 0 on success, non-zero error code on failure.
*/
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
_ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct ice_tx_desc *txd = NULL;
@ -156,6 +192,27 @@ ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
return (0);
}
/**
* ice_ift_txd_encap - prepare Tx descriptors for a packet
* @arg: the iflib softc structure pointer
* @pi: packet info
*
* Prepares and encapsulates the given packet into Tx descriptors, in
* preparation for sending to the transmit engine. Sets the necessary context
* descriptors for TSO and other offloads, and prepares the last descriptor
* for the writeback status.
*
* Return 0 on success, non-zero error code on failure.
*/
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
return _ice_ift_txd_encap(txq, pi);
}
/**
* ice_ift_txd_flush - Flush Tx descriptors to hardware
* @arg: device specific softc pointer
@ -176,9 +233,9 @@ ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
}
/**
* ice_ift_txd_credits_update - cleanup Tx descriptors
* @arg: device private softc
* @txqid: the Tx queue to update
* _ice_ift_txd_credits_update - cleanup Tx descriptors
* @sc: device private softc
* @txq: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* If clear is false, iflib is asking if we *could* clean up any Tx
@ -186,13 +243,12 @@ ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
*
* If clear is true, iflib is requesting to cleanup and reclaim used Tx
* descriptors.
*
* Called by other txd_credits_update functions passed to iflib.
*/
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
_ice_ift_txd_credits_update(struct ice_softc *sc __unused, struct ice_tx_queue *txq, bool clear)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
qidx_t processed = 0;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
@ -235,9 +291,28 @@ ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
}
/**
* ice_ift_rxd_available - Return number of available Rx packets
* ice_ift_txd_credits_update - cleanup PF VSI Tx descriptors
* @arg: device private softc
* @rxqid: the Rx queue id
* @txqid: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
* belong to the PF VSI.
*
* @see _ice_ift_txd_credits_update()
*/
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
return _ice_ift_txd_credits_update(sc, txq, clear);
}
/**
* _ice_ift_rxd_available - Return number of available Rx packets
* @rxq: RX queue driver structure
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
@ -245,10 +320,8 @@ ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
* of the given budget.
*/
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
_ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
union ice_32b_rx_flex_desc *rxd;
uint16_t status0;
int cnt, i, nrxd;
@ -270,21 +343,54 @@ ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
return (cnt);
}
/**
* ice_ift_rxd_available - Return number of available Rx packets
* @arg: device private softc
* @rxqid: the Rx queue id
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
* Wrapper for _ice_ift_rxd_available() that provides a function pointer
* that iflib requires for RX processing.
*/
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
return _ice_ift_rxd_available(rxq, pidx, budget);
}
/**
* ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
* @arg: device specific softc
* @ri: receive packet info
*
* Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
* used by iflib for RX packet processing.
*/
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
return _ice_ift_rxd_pkt_get(rxq, ri);
}
/**
* _ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
* @rxq: RX queue driver structure
* @ri: receive packet info
*
* This function is called by iflib, and executes in ithread context. It is
* called by iflib to obtain data which has been DMA'ed into host memory.
* Returns zero on success, and EBADMSG on failure.
*/
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
_ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
if_softc_ctx_t scctx = sc->scctx;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, ptype;
bool eop;
@ -341,7 +447,7 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Get packet type and set checksum flags */
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
if ((if_getcapenable(ri->iri_ifp) & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
@ -357,16 +463,14 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
* @arg: device specific softc structure
* @iru: the Rx descriptor update structure
*
* Update the Rx descriptor indices for a given queue, assigning new physical
* addresses to the descriptors, preparing them for re-use by the hardware.
* Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
* used by iflib for RX packet processing.
*/
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq;
uint32_t next_pidx;
int i;
uint64_t *paddrs;
uint32_t pidx;
uint16_t qsidx, count;
@ -378,6 +482,26 @@ ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
rxq = &(sc->pf_vsi.rx_queues[qsidx]);
_ice_ift_rxd_refill(rxq, pidx, paddrs, count);
}
/**
* _ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
* @rxq: RX queue driver structure
* @pidx: first index to refill
* @paddrs: physical addresses to use
* @count: number of descriptors to refill
*
* Update the Rx descriptor indices for a given queue, assigning new physical
* addresses to the descriptors, preparing them for re-use by the hardware.
*/
static void
_ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
uint64_t *paddrs, uint16_t count)
{
uint32_t next_pidx;
int i;
for (i = 0, next_pidx = pidx; i < count; i++) {
rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
if (++next_pidx == (uint32_t)rxq->desc_count)
@ -392,8 +516,8 @@ ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
* @flidx: unused parameter
* @pidx: descriptor index to advance tail to
*
* Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
* software is done with the descriptor and it can be recycled.
* Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
* used by iflib for RX packet processing.
*/
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
@ -401,11 +525,36 @@ ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
struct ice_hw *hw = &sc->hw;
wr32(hw, rxq->tail, pidx);
_ice_ift_rxd_flush(sc, rxq, (uint32_t)pidx);
}
/**
* _ice_ift_rxd_flush - Flush Rx descriptors to hardware
* @sc: device specific softc pointer
* @rxq: RX queue driver structure
* @pidx: descriptor index to advance tail to
*
* Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
* software is done with the descriptor and it can be recycled.
*/
static void
_ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq, uint32_t pidx)
{
wr32(&sc->hw, rxq->tail, pidx);
}
/**
* ice_ift_queue_select - Select queue index to transmit packet on
* @arg: device specific softc
* @m: transmit packet data
* @pi: transmit packet metadata
*
* Called by iflib to determine which queue index to transmit the packet
* pointed to by @m on. In particular, ensures packets go out on the right
* queue index for the right transmit class when multiple traffic classes are
* enabled in the driver.
*/
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
@ -456,3 +605,146 @@ ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
else
return (tc_base_queue);
}
/**
* ice_ift_txd_credits_update_subif - cleanup subinterface VSI Tx descriptors
* @arg: subinterface private structure (struct ice_mirr_if)
* @txqid: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
* do not belong to the PF VSI.
*
* See _ice_ift_txd_credits_update().
*/
static int
ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_softc *sc = mif->back;
struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
return _ice_ift_txd_credits_update(sc, txq, clear);
}
/**
* ice_ift_txd_encap_subif - prepare Tx descriptors for a packet
* @arg: subinterface private structure (struct ice_mirr_if)
* @pi: packet info
*
* Wrapper for _ice_ift_txd_encap() meant for TX queues that
* do not belong to the PF VSI.
*
* See _ice_ift_txd_encap().
*/
static int
ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_tx_queue *txq = &mif->vsi->tx_queues[pi->ipi_qsidx];
return _ice_ift_txd_encap(txq, pi);
}
/**
* ice_ift_txd_flush_subif - Flush Tx descriptors to hardware
* @arg: subinterface private structure (struct ice_mirr_if)
* @txqid: the Tx queue to flush
* @pidx: descriptor index to advance tail to
*
* Advance the Transmit Descriptor Tail (TDT). Functionally identical to
* the ice_ift_txd_flush() meant for the main PF VSI, but provides a function
* pointer to iflib for use with non-main-PF VSI TX queues.
*/
static void
ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
struct ice_hw *hw = &mif->back->hw;
wr32(hw, txq->tail, pidx);
}
/**
* ice_ift_rxd_available_subif - Return number of available Rx packets
* @arg: subinterface private structure (struct ice_mirr_if)
* @rxqid: the Rx queue id
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
* Determines how many Rx packets are available on the queue, up to a maximum
* of the given budget.
*
* See _ice_ift_rxd_available().
*/
static int
ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];
return _ice_ift_rxd_available(rxq, pidx, budget);
}
/**
* ice_ift_rxd_pkt_get_subif - Called by iflib to send data to upper layer
* @arg: subinterface private structure (struct ice_mirr_if)
* @ri: receive packet info
*
* Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
* used by iflib for RX packet processing, for iflib subinterfaces.
*/
static int
ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_rx_queue *rxq = &mif->vsi->rx_queues[ri->iri_qsidx];
return _ice_ift_rxd_pkt_get(rxq, ri);
}
/**
* ice_ift_rxd_refill_subif - Prepare Rx descriptors for re-use by hardware
* @arg: subinterface private structure (struct ice_mirr_if)
* @iru: the Rx descriptor update structure
*
* Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
* used by iflib for RX packet processing, for iflib subinterfaces.
*/
static void
ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_rx_queue *rxq = &mif->vsi->rx_queues[iru->iru_qsidx];
uint64_t *paddrs;
uint32_t pidx;
uint16_t count;
paddrs = iru->iru_paddrs;
pidx = iru->iru_pidx;
count = iru->iru_count;
_ice_ift_rxd_refill(rxq, pidx, paddrs, count);
}
/**
* ice_ift_rxd_flush_subif - Flush Rx descriptors to hardware
* @arg: subinterface private structure (struct ice_mirr_if)
* @rxqid: the Rx queue to flush
* @flidx: unused parameter
* @pidx: descriptor index to advance tail to
*
* Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
* used by iflib for RX packet processing.
*/
static void
ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx __unused,
qidx_t pidx)
{
struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];
_ice_ift_rxd_flush(mif->back, rxq, pidx);
}

sys/dev/ice/ice_lib.c

@ -179,8 +179,9 @@ ice_add_dscp2tc_map_sysctls(struct ice_softc *sc,
static void ice_set_default_local_mib_settings(struct ice_softc *sc);
static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg);
static void ice_start_dcbx_agent(struct ice_softc *sc);
static void ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
struct sbuf *sbuf, u16 cluster_id);
static u16 ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
struct sbuf *sbuf, u16 cluster_id);
static void ice_remove_vsi_mirroring(struct ice_vsi *vsi);
static int ice_module_init(void);
static int ice_module_exit(void);
@ -242,6 +243,8 @@ static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS);
/**
* ice_map_bar - Map PCIe BAR memory
@ -355,6 +358,10 @@ ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
sc->all_vsi[idx] = vsi;
vsi->dynamic = dynamic;
/* Set default mirroring rule information */
vsi->rule_mir_ingress = ICE_INVAL_MIRROR_RULE_ID;
vsi->rule_mir_egress = ICE_INVAL_MIRROR_RULE_ID;
/* Setup the VSI tunables now */
ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
}
@ -382,7 +389,7 @@ ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type)
return NULL;
}
vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO);
vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_NOWAIT | M_ZERO);
if (!vsi) {
device_printf(sc->dev, "Unable to allocate VSI memory\n");
return NULL;
@ -550,6 +557,7 @@ ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
case ICE_VSI_VMDQ2:
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
@ -571,6 +579,8 @@ ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
*
* Configures the context for the given VSI, setting up how the firmware
* should map the queues for this VSI.
*
* @pre vsi->qmap_type is set to a valid type
*/
static int
ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
@ -620,11 +630,96 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
return 0;
}
/**
* ice_setup_vsi_mirroring -- Setup a VSI for mirroring PF VSI traffic
* @vsi: VSI to setup
*
* @pre vsi->mirror_src_vsi is set to the SW VSI num that traffic is to be
* mirrored from
*
* Returns 0 on success, EINVAL on failure.
*/
int
ice_setup_vsi_mirroring(struct ice_vsi *vsi)
{
struct ice_mir_rule_buf rule = { };
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_status status;
u16 rule_id, dest_vsi;
u16 count = 1;
rule.vsi_idx = ice_get_hw_vsi_num(hw, vsi->mirror_src_vsi);
rule.add = true;
dest_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
rule_id = ICE_INVAL_MIRROR_RULE_ID;
status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_INGRESS,
dest_vsi, count, &rule, NULL,
&rule_id);
if (status) {
device_printf(dev,
"Could not add INGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
rule.vsi_idx, dest_vsi, ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
return (EINVAL);
}
vsi->rule_mir_ingress = rule_id;
rule_id = ICE_INVAL_MIRROR_RULE_ID;
status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_EGRESS,
dest_vsi, count, &rule, NULL, &rule_id);
if (status) {
device_printf(dev,
"Could not add EGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
rule.vsi_idx, dest_vsi, ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
return (EINVAL);
}
vsi->rule_mir_egress = rule_id;
return (0);
}
/**
* ice_remove_vsi_mirroring -- Teardown any VSI mirroring rules
* @vsi: VSI to remove mirror rules from
*/
static void
ice_remove_vsi_mirroring(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->sc->hw;
enum ice_status status = ICE_SUCCESS;
bool keep_alloc = false;
if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID)
status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_ingress, keep_alloc, NULL);
if (status)
device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
status = ICE_SUCCESS;
if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID)
status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL);
if (status)
device_printf(vsi->sc->dev, "Could not remove mirror VSI egress rule, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
/**
* ice_initialize_vsi - Initialize a VSI for use
* @vsi: the vsi to initialize
*
* Initialize a VSI over the adminq and prepare it for operation.
*
* @pre vsi->num_tx_queues is set
* @pre vsi->num_rx_queues is set
*/
int
ice_initialize_vsi(struct ice_vsi *vsi)
@ -640,6 +735,9 @@ ice_initialize_vsi(struct ice_vsi *vsi)
case ICE_VSI_PF:
ctx.flags = ICE_AQ_VSI_TYPE_PF;
break;
case ICE_VSI_VMDQ2:
ctx.flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
default:
return (ENODEV);
}
@ -754,6 +852,9 @@ ice_release_vsi(struct ice_vsi *vsi)
ice_del_vsi_sysctl_ctx(vsi);
/* Remove the configured mirror rule, if it exists */
ice_remove_vsi_mirroring(vsi);
/*
* If we unload the driver after a reset fails, we do not need to do
* this step.
@ -1264,6 +1365,10 @@ ice_configure_all_rxq_interrupts(struct ice_vsi *vsi)
ice_configure_rxq_interrupt(hw, vsi->rx_qmap[rxq->me],
rxq->irqv->me, ICE_RX_ITR);
ice_debug(hw, ICE_DBG_INIT,
"RXQ(%d) intr enable: me %d rxqid %d vector %d\n",
i, rxq->me, vsi->rx_qmap[rxq->me], rxq->irqv->me);
}
ice_flush(hw);
@ -1462,6 +1567,9 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VMDQ2:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
return (ENODEV);
}
@ -4458,6 +4566,14 @@ ice_add_device_sysctls(struct ice_softc *sc)
OID_AUTO, "link_active_on_if_down", CTLTYPE_U8 | CTLFLAG_RWTUN,
sc, 0, ice_sysctl_set_link_active, "CU", ICE_SYSCTL_HELP_SET_LINK_ACTIVE);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "create_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
sc, 0, ice_sysctl_create_mirror_interface, "A", "");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "destroy_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
sc, 0, ice_sysctl_destroy_mirror_interface, "A", "");
ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list);
/* Differentiate software and hardware statistics, by keeping hw stats
@ -6247,16 +6363,18 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
return (0);
}
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID (0xFFFFFF)
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
"\nSelect clusters to dump with \"dump\" sysctl" \
"\nFlags:" \
"\n\t 0x1 - Switch" \
"\n\t 0x2 - ACL" \
"\n\t 0x4 - Tx Scheduler" \
"\n\t 0x8 - Profile Configuration" \
"\n\t 0x20 - Link" \
"\n\t 0x80 - DCB" \
"\n\t 0x100 - L2P" \
"\n\t 0x1 - Switch" \
"\n\t 0x2 - ACL" \
"\n\t 0x4 - Tx Scheduler" \
"\n\t 0x8 - Profile Configuration" \
"\n\t 0x20 - Link" \
"\n\t 0x80 - DCB" \
"\n\t 0x100 - L2P" \
"\n\t 0x400000 - Manageability Transactions" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@ -6273,7 +6391,7 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
u16 clusters;
u32 clusters;
int ret;
UNREFERENCED_PARAMETER(arg2);
@ -6287,15 +6405,15 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
clusters = sc->fw_debug_dump_cluster_mask;
ret = sysctl_handle_16(oidp, &clusters, 0, req);
ret = sysctl_handle_32(oidp, &clusters, 0, req);
if ((ret) || (req->newptr == NULL))
return (ret);
if (!clusters ||
(clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK))) {
if (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK)) {
device_printf(dev,
"%s: ERROR: Incorrect settings requested\n",
__func__);
sc->fw_debug_dump_cluster_mask = ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID;
return (EINVAL);
}
@ -6319,7 +6437,7 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
* @remark Only intended to be used by the sysctl handler
* ice_sysctl_fw_debug_dump_do_dump
*/
static void
static u16
ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id)
{
struct ice_hw *hw = &sc->hw;
@ -6330,20 +6448,21 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
int counter = 0;
u8 *data_buf;
/* Other setup */
data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
if (!data_buf)
return;
/* Input parameters / loop variables */
u16 table_id = 0;
u32 offset = 0;
/* Output from the Get Internal Data AQ command */
u16 ret_buf_size = 0;
u16 ret_next_cluster = 0;
u16 ret_next_table = 0;
u32 ret_next_index = 0;
/* Other setup */
data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
if (!data_buf)
return ret_next_cluster;
ice_debug(hw, ICE_DBG_DIAG, "%s: dumping cluster id %d\n", __func__,
cluster_id);
@ -6363,7 +6482,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
status = ice_aq_get_internal_data(hw, cluster_id, table_id,
offset, data_buf, data_buf_size, &ret_buf_size,
&ret_next_table, &ret_next_index, NULL);
&ret_next_cluster, &ret_next_table, &ret_next_index, NULL);
if (status) {
device_printf(dev,
"%s: ice_aq_get_internal_data in cluster %d: err %s aq_err %s\n",
@ -6429,6 +6548,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
}
free(data_buf, M_ICE);
return ret_next_cluster;
}
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
@ -6437,6 +6557,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
"\nthis data is opaque and not a string."
#define ICE_FW_DUMP_BASE_TEXT_SIZE (1024 * 1024)
#define ICE_FW_DUMP_ALL_TEXT_SIZE (10 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST0_TEXT_SIZE (2 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST1_TEXT_SIZE (128 * 1024)
#define ICE_FW_DUMP_CLUST2_TEXT_SIZE (2 * 1024 * 1024)
@ -6493,9 +6614,9 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
* sysctl read call.
*/
if (input_buf[0] == '1') {
if (!sc->fw_debug_dump_cluster_mask) {
if (sc->fw_debug_dump_cluster_mask == ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID) {
device_printf(dev,
"%s: Debug Dump failed because no cluster was specified with the \"clusters\" sysctl.\n",
"%s: Debug Dump failed because an invalid cluster was specified.\n",
__func__);
return (EINVAL);
}
@ -6513,12 +6634,16 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
/* Caller just wants the upper bound for size */
if (req->oldptr == NULL && req->newptr == NULL) {
size_t est_output_len = ICE_FW_DUMP_BASE_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask & 0x1)
est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask & 0x2)
est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask & 0x4)
est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask == 0)
est_output_len += ICE_FW_DUMP_ALL_TEXT_SIZE;
else {
if (sc->fw_debug_dump_cluster_mask & 0x1)
est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask & 0x2)
est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
if (sc->fw_debug_dump_cluster_mask & 0x4)
est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
}
ret = SYSCTL_OUT(req, 0, est_output_len);
return (ret);
@ -6529,9 +6654,17 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
sizeof(sc->fw_debug_dump_cluster_mask) * 8)
ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
if (sc->fw_debug_dump_cluster_mask) {
for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
sizeof(sc->fw_debug_dump_cluster_mask) * 8)
ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
} else {
u16 next_cluster_id = 0;
/* We don't support QUEUE_MNG and FULL_CSR_SPACE */
do {
next_cluster_id = ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
} while (next_cluster_id != 0 && next_cluster_id < ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG);
}
sbuf_finish(sbuf);
sbuf_delete(sbuf);
@ -6711,7 +6844,7 @@ ice_add_debug_sysctls(struct ice_softc *sc)
dump_list = SYSCTL_CHILDREN(dump_node);
SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
ICE_CTLFLAG_DEBUG | CTLTYPE_U16 | CTLFLAG_RW, sc, 0,
ICE_CTLFLAG_DEBUG | CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
ice_sysctl_fw_debug_dump_cluster_setting, "SU",
ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING);
@ -6837,6 +6970,7 @@ ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_VF:
case ICE_VSI_VMDQ2:
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_lut_type = ICE_LUT_VSI;
break;
@ -10034,7 +10168,7 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
int err;
/* Initialize the interrupt allocation manager */
err = ice_resmgr_init_contig_only(&sc->imgr,
err = ice_resmgr_init_contig_only(&sc->dev_imgr,
hw->func_caps.common_cap.num_msix_vectors);
if (err) {
device_printf(dev, "Unable to initialize PF interrupt manager: %s\n",
@ -10066,7 +10200,7 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
return (0);
free_imgr:
ice_resmgr_destroy(&sc->imgr);
ice_resmgr_destroy(&sc->dev_imgr);
return (err);
}
@ -10083,19 +10217,21 @@ void
ice_free_intr_tracking(struct ice_softc *sc)
{
if (sc->pf_imap) {
ice_resmgr_release_map(&sc->imgr, sc->pf_imap,
ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
sc->lan_vectors);
free(sc->pf_imap, M_ICE);
sc->pf_imap = NULL;
}
if (sc->rdma_imap) {
ice_resmgr_release_map(&sc->imgr, sc->rdma_imap,
ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
sc->lan_vectors);
free(sc->rdma_imap, M_ICE);
sc->rdma_imap = NULL;
}
ice_resmgr_destroy(&sc->imgr);
ice_resmgr_destroy(&sc->dev_imgr);
ice_resmgr_destroy(&sc->os_imgr);
}
/**
@ -10869,6 +11005,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
/* Returned arguments from the Admin Queue */
u16 ret_buf_size = 0;
u16 ret_next_cluster = 0;
u16 ret_next_table = 0;
u32 ret_next_index = 0;
@ -10935,7 +11072,8 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
memset(ddc->data, 0, ifd_len - sizeof(*ddc));
status = ice_aq_get_internal_data(hw, ddc->cluster_id, ddc->table_id, ddc->offset,
(u8 *)ddc->data, ddc->data_size, &ret_buf_size, &ret_next_table, &ret_next_index, NULL);
(u8 *)ddc->data, ddc->data_size, &ret_buf_size,
&ret_next_cluster, &ret_next_table, &ret_next_index, NULL);
ice_debug(hw, ICE_DBG_DIAG, "%s: ret_buf_size %d, ret_next_table %d, ret_next_index %d\n",
__func__, ret_buf_size, ret_next_table, ret_next_index);
if (status) {
@ -10950,6 +11088,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
ddc->table_id = ret_next_table;
ddc->offset = ret_next_index;
ddc->data_size = ret_buf_size;
ddc->cluster_id = ret_next_cluster;
/* Copy the possibly modified contents of the handled request out */
err = copyout(ddc, ifd->ifd_data, ifd->ifd_len);
@ -11087,3 +11226,146 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
return sysctl_handle_8(oidp, &resp.data.s0f0.temp, 0, req);
}
/**
* ice_sysctl_create_mirror_interface - Create a new ifnet that monitors
* traffic from the main PF VSI
*/
static int
ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
int ret;
UNREFERENCED_PARAMETER(arg2);
ret = priv_check(curthread, PRIV_DRIVER);
if (ret)
return (ret);
if (ice_driver_is_detaching(sc))
return (ESHUTDOWN);
/* If the user hasn't written "1" to this sysctl yet: */
if (!ice_test_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC)) {
/* Avoid output on the first set of reads to this sysctl in
* order to prevent a null byte from being written to the
* end result when called via sysctl(8).
*/
if (req->oldptr == NULL && req->newptr == NULL) {
ret = SYSCTL_OUT(req, 0, 0);
return (ret);
}
char input_buf[2] = "";
ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
if ((ret) || (req->newptr == NULL))
return (ret);
/* If we get '1', then indicate we'll create the interface in
* the next sysctl read call.
*/
if (input_buf[0] == '1') {
if (sc->mirr_if) {
device_printf(dev,
"Mirror interface %s already exists!\n",
if_name(sc->mirr_if->ifp));
return (EEXIST);
}
ice_set_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC);
return (0);
}
return (EINVAL);
}
/* --- "Do Create Mirror Interface" is set --- */
/* Caller just wants the upper bound for size */
if (req->oldptr == NULL && req->newptr == NULL) {
ret = SYSCTL_OUT(req, 0, 128);
return (ret);
}
device_printf(dev, "Creating new mirroring interface...\n");
ret = ice_create_mirror_interface(sc);
if (ret)
return (ret);
ice_clear_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC);
ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface attached"), 0, req);
return (ret);
}
/**
* ice_sysctl_destroy_mirror_interface - Destroy network interface that monitors
* traffic from the main PF VSI
*/
static int
ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
int ret;
UNREFERENCED_PARAMETER(arg2);
ret = priv_check(curthread, PRIV_DRIVER);
if (ret)
return (ret);
if (ice_driver_is_detaching(sc))
return (ESHUTDOWN);
/* If the user hasn't written "1" to this sysctl yet: */
if (!ice_test_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC)) {
/* Avoid output on the first set of reads to this sysctl in
* order to prevent a null byte from being written to the
* end result when called via sysctl(8).
*/
if (req->oldptr == NULL && req->newptr == NULL) {
ret = SYSCTL_OUT(req, 0, 0);
return (ret);
}
char input_buf[2] = "";
ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
if ((ret) || (req->newptr == NULL))
return (ret);
/* If we get '1', then indicate we'll destroy the interface in
* the next sysctl read call.
*/
if (input_buf[0] == '1') {
if (!sc->mirr_if) {
device_printf(dev,
"No mirror interface exists!\n");
return (EINVAL);
}
ice_set_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC);
return (0);
}
return (EINVAL);
}
/* --- "Do Destroy Mirror Interface" is set --- */
/* Caller just wants the upper bound for size */
if (req->oldptr == NULL && req->newptr == NULL) {
ret = SYSCTL_OUT(req, 0, 128);
return (ret);
}
device_printf(dev, "Destroying mirroring interface...\n");
ice_destroy_mirror_interface(sc);
ice_clear_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC);
ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface destroyed"), 0, req);
return (ret);
}

sys/dev/ice/ice_lib.h

@ -54,6 +54,7 @@
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <sys/bitstring.h>
@ -264,6 +265,10 @@ struct ice_bar_info {
*/
#define ICE_DEFAULT_VF_QUEUES 4
/*
* An invalid VSI number to indicate that mirroring should be disabled.
*/
#define ICE_INVALID_MIRROR_VSI ((u16)-1)
/*
* The maximum number of RX queues allowed per TC in a VSI.
*/
@ -372,7 +377,7 @@ enum ice_rx_dtype {
* Only certain cluster IDs are valid for the FW debug dump functionality,
* so define a mask of those here.
*/
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x1af
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x4001AF
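
A hedged sketch of the validity test this mask implies, assuming bit N of the mask corresponds to cluster ID N (consistent with the new MNG_TRANSACTIONS cluster, ID 22, adding bit 0x400000):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only; the driver's actual check is not shown in this diff. */
static bool
ice_fw_debug_dump_cluster_valid(uint16_t cluster_id)
{
	if (cluster_id >= 32)
		return (false);
	return ((ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK & (1u << cluster_id)) != 0);
}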
struct ice_softc;
@ -513,8 +518,6 @@ struct ice_vsi {
u16 *tx_qmap; /* Tx VSI to PF queue mapping */
u16 *rx_qmap; /* Rx VSI to PF queue mapping */
bitstr_t *vmap; /* Vector(s) assigned to VSI */
enum ice_resmgr_alloc_type qmap_type;
struct ice_tx_queue *tx_queues; /* Tx queue array */
@ -556,6 +559,11 @@ struct ice_vsi {
/* VSI-level stats */
struct ice_vsi_hw_stats hw_stats;
/* VSI mirroring details */
u16 mirror_src_vsi;
u16 rule_mir_ingress;
u16 rule_mir_egress;
};
/**
@ -564,7 +572,7 @@ struct ice_vsi {
*/
struct ice_debug_dump_cmd {
u32 offset; /* offset to read/write from table, in bytes */
u16 cluster_id;
u16 cluster_id; /* also used to get next cluster id */
u16 table_id;
u16 data_size; /* size of data field, in bytes */
u16 reserved1;
@ -587,6 +595,7 @@ enum ice_state {
ICE_STATE_RESET_OICR_RECV,
ICE_STATE_RESET_PFR_REQ,
ICE_STATE_PREPARED_FOR_RESET,
ICE_STATE_SUBIF_NEEDS_REINIT,
ICE_STATE_RESET_FAILED,
ICE_STATE_DRIVER_INITIALIZED,
ICE_STATE_NO_MEDIA,
@ -601,6 +610,8 @@ enum ice_state {
ICE_STATE_DO_FW_DEBUG_DUMP,
ICE_STATE_LINK_ACTIVE_ON_DOWN,
ICE_STATE_FIRST_INIT_LINK,
ICE_STATE_DO_CREATE_MIRR_INTFC,
ICE_STATE_DO_DESTROY_MIRR_INTFC,
/* This entry must be last */
ICE_STATE_LAST,
};
@ -797,6 +808,16 @@ void ice_request_stack_reinit(struct ice_softc *sc);
/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);
/* Details of how to setup/teardown a mirror interface */
/**
* @brief Create an interface for mirroring
*/
int ice_create_mirror_interface(struct ice_softc *sc);
/**
* @brief Destroy created mirroring interface
*/
void ice_destroy_mirror_interface(struct ice_softc *sc);
const char * ice_fw_module_str(enum ice_aqc_fw_logging_mod module);
void ice_add_fw_logging_tunables(struct ice_softc *sc,
struct sysctl_oid *parent);
@ -904,5 +925,6 @@ void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
int ice_setup_vsi_mirroring(struct ice_vsi *vsi);
#endif /* _ICE_LIB_H_ */

View File

@ -296,7 +296,7 @@ ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
}
/**
* ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
* ice_check_sr_access_params - verify params for Shadow RAM R/W operations
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to access
@ -356,7 +356,7 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
* ice_write_sr_aq - Writes Shadow RAM.
* ice_write_sr_aq - Writes Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to write
@ -425,7 +425,6 @@ enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (hw->flash.blank_nvm_mode)
return ICE_SUCCESS;
@ -873,7 +872,7 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
* @nvm: pointer to NVM info structure
*
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the nvm info structure.
* in the NVM info structure.
*/
static enum ice_status
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
@ -1006,7 +1005,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
orom_data, hw->flash.banks.orom_size);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
goto exit_error;
}
/* Scan the memory buffer to locate the CIVD data section */
@ -1030,7 +1029,8 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
goto err_invalid_checksum;
status = ICE_ERR_NVM;
goto exit_error;
}
*civd = *tmp;
@ -1038,11 +1038,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
return ICE_SUCCESS;
}
status = ICE_ERR_NVM;
ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
err_invalid_checksum:
exit_error:
ice_free(hw, orom_data);
return ICE_ERR_NVM;
return status;
}
/**
@ -1200,7 +1201,7 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
}
/**
* ice_discover_flash_size - Discover the available flash size.
* ice_discover_flash_size - Discover the available flash size
* @hw: pointer to the HW struct
*
* The device flash could be up to 16MB in size. However, it is possible that
@ -1457,6 +1458,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
return ICE_SUCCESS;
}
@ -1708,7 +1710,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
ice_release_nvm(hw);
if (!status)
@ -1771,19 +1772,19 @@ ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
enum ice_status status;
enum ice_status err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = ICE_LO_BYTE(cmd_flags);
cmd->offset_high = ICE_HI_BYTE(cmd_flags);
cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status && response_flags)
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err && response_flags)
*response_flags = cmd->cmd_flags;
return status;
return err;
}
/**

View File

@ -108,6 +108,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
enum ice_status
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);

View File

@ -179,6 +179,7 @@ enum ice_prot_id {
#define ICE_TUN_FLAG_MDID_OFF(word) \
(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_FROM_NETWORK_FLAG_MASK 0x8
#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_IN_VLAN_MASK 0x80 /* VLAN inside tunneled header */
#define ICE_TUN_FLAG_VLAN_MASK 0x01

View File

@ -55,9 +55,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return ICE_ERR_NO_MEMORY;
/* coverity[suspicious_sizeof] */
root->children = (struct ice_sched_node **)
ice_calloc(hw, hw->max_children[0], sizeof(*root));
ice_calloc(hw, hw->max_children[0], sizeof(*root->children));
if (!root->children) {
ice_free(hw, root);
return ICE_ERR_NO_MEMORY;
@ -213,9 +212,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return ICE_ERR_NO_MEMORY;
if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */
node->children = (struct ice_sched_node **)
ice_calloc(hw, hw->max_children[layer], sizeof(*node));
ice_calloc(hw, hw->max_children[layer],
sizeof(*node->children));
if (!node->children) {
ice_free(hw, node);
return ICE_ERR_NO_MEMORY;
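
Both hunks above correct a Coverity-flagged allocation size: the element of the children array is a pointer, so the size expression must be sizeof(*node->children) rather than sizeof(*node) (the old form over-allocated, since a scheduler node is larger than a pointer). A standalone sketch of the corrected idiom, using hypothetical names:

#include <stdlib.h>

struct demo_node {
	struct demo_node **children;
};

static int
demo_alloc_children(struct demo_node *node, size_t max_children)
{
	/* Wrong: allocates max_children * sizeof(struct demo_node). */
	/* node->children = calloc(max_children, sizeof(*node)); */

	/* Right: allocates max_children * sizeof(struct demo_node *). */
	node->children = calloc(max_children, sizeof(*node->children));
	return (node->children == NULL ? -1 : 0);
}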

View File

@ -1018,6 +1018,8 @@ ice_state_to_str(enum ice_state state)
return "RESET_PFR_REQ";
case ICE_STATE_PREPARED_FOR_RESET:
return "PREPARED_FOR_RESET";
case ICE_STATE_SUBIF_NEEDS_REINIT:
return "SUBIF_NEEDS_REINIT";
case ICE_STATE_RESET_FAILED:
return "RESET_FAILED";
case ICE_STATE_DRIVER_INITIALIZED:
@ -1046,6 +1048,10 @@ ice_state_to_str(enum ice_state state)
return "LINK_ACTIVE_ON_DOWN";
case ICE_STATE_FIRST_INIT_LINK:
return "FIRST_INIT_LINK";
case ICE_STATE_DO_CREATE_MIRR_INTFC:
return "DO_CREATE_MIRR_INTFC";
case ICE_STATE_DO_DESTROY_MIRR_INTFC:
return "DO_DESTROY_MIRR_INTFC";
case ICE_STATE_LAST:
return NULL;
}

View File

@ -1180,6 +1180,7 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*
* In all other cases, the LAN enable has to be set to false.
*/
if (hw->evb_veb) {
if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
fi->lkup_type == ICE_SW_LKUP_PROMISC ||
@ -1196,6 +1197,13 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
/* To be able to receive packets coming from the VF on the same PF,
* unicast filter needs to be added without LB_EN bit
*/
if (fi->flag & ICE_FLTR_RX_LB) {
fi->lb_en = false;
fi->lan_en = true;
}
}
/**
@ -2023,7 +2031,7 @@ ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
new_fltr = &f_entry->fltr_info;
if (new_fltr->flag & ICE_FLTR_RX)
new_fltr->src = lport;
else if (new_fltr->flag & ICE_FLTR_TX)
else if (new_fltr->flag & (ICE_FLTR_TX | ICE_FLTR_RX_LB))
new_fltr->src =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@ -3259,12 +3267,15 @@ static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
{
u16 vid = fi->l_data.mac_vlan.vlan_id;
u8 *macaddr = fi->l_data.mac.mac_addr;
bool is_rx_lb_fltr = false;
bool is_tx_fltr = false;
ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
if (fi->flag == ICE_FLTR_TX)
is_tx_fltr = true;
if (fi->flag == ICE_FLTR_RX_LB)
is_rx_lb_fltr = true;
if (IS_BROADCAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_BCAST_TX
@ -3273,8 +3284,12 @@ static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
ice_set_bit(is_tx_fltr ? ICE_PROMISC_MCAST_TX
: ICE_PROMISC_MCAST_RX, promisc_mask);
} else if (IS_UNICAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_UCAST_TX
: ICE_PROMISC_UCAST_RX, promisc_mask);
if (is_tx_fltr)
ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask);
else if (is_rx_lb_fltr)
ice_set_bit(ICE_PROMISC_UCAST_RX_LB, promisc_mask);
else
ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask);
}
if (vid) {
@ -3510,7 +3525,7 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
enum ice_status status = ICE_SUCCESS;
bool is_tx_fltr;
bool is_tx_fltr, is_rx_lb_fltr;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
@ -3547,6 +3562,7 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
pkt_type = 0;
is_tx_fltr = false;
is_rx_lb_fltr = false;
if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_RX,
p_mask)) {
@ -3569,6 +3585,10 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
p_mask)) {
pkt_type = BCAST_FLTR;
is_tx_fltr = true;
} else if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_RX_LB,
p_mask)) {
pkt_type = UCAST_FLTR;
is_rx_lb_fltr = true;
}
/* Check for VLAN promiscuous flag */
@ -3596,6 +3616,9 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
if (is_tx_fltr) {
new_fltr.flag |= ICE_FLTR_TX;
new_fltr.src = hw_vsi_id;
} else if (is_rx_lb_fltr) {
new_fltr.flag |= ICE_FLTR_RX_LB;
new_fltr.src = hw_vsi_id;
} else {
new_fltr.flag |= ICE_FLTR_RX;
new_fltr.src = lport;

View File

@ -38,9 +38,11 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_MAX_SW 256
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_RX_LB BIT(2)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_PROFID_IPV4_GTPC_TEID 41
#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
@ -412,6 +414,7 @@ enum ice_promisc_flags {
ICE_PROMISC_BCAST_TX,
ICE_PROMISC_VLAN_RX,
ICE_PROMISC_VLAN_TX,
ICE_PROMISC_UCAST_RX_LB,
/* Max value */
ICE_PROMISC_MAX,
};

View File

@ -322,6 +322,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_VMDQ2 = 2,
ICE_VSI_LB = 6,
};
@ -452,6 +453,9 @@ struct ice_hw_common_caps {
/* SR-IOV virtualization */
u8 sr_iov_1_1; /* SR-IOV enabled */
/* VMDQ */
u8 vmdq; /* VMDQ supported */
/* EVB capabilities */
u8 evb_802_1_qbg; /* Edge Virtual Bridging */
u8 evb_802_1_qbh; /* Bridge Port Extension */
@ -1171,8 +1175,11 @@ struct ice_hw {
struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
struct ice_lock rss_locks; /* protect RSS configuration */
struct LIST_HEAD_TYPE rss_list_head;
u16 vsi_owning_pf_lut; /* SW IDX of VSI that acquired PF RSS LUT */
struct ice_mbx_snapshot mbx_snapshot;
u8 dvm_ena;
bool subscribable_recipes_supported;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@ -1428,11 +1435,17 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
/* FW branch number for hardware families */
#define ICE_FW_VER_BRANCH_E82X 0
#define ICE_FW_VER_BRANCH_E810 1
/* FW version for FEC disable in Auto FEC mode */
#define ICE_FW_FEC_DIS_AUTO_BRANCH 1
#define ICE_FW_FEC_DIS_AUTO_MAJ 7
#define ICE_FW_FEC_DIS_AUTO_MIN 0
#define ICE_FW_FEC_DIS_AUTO_PATCH 5
#define ICE_FW_FEC_DIS_AUTO_MAJ_E82X 7
#define ICE_FW_FEC_DIS_AUTO_MIN_E82X 1
#define ICE_FW_FEC_DIS_AUTO_PATCH_E82X 2
/* AQ API version for FW health reports */
#define ICE_FW_API_HEALTH_REPORT_MAJ 1

File diff suppressed because it is too large

View File

@ -1,594 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VIRTCHNL_INLINE_IPSEC_H_
#define _VIRTCHNL_INLINE_IPSEC_H_
#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SA_DESTROY 0
#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
#define VIRTCHNL_CIPHER 2
#define VIRTCHNL_AEAD 3
/* caps enabled */
#define VIRTCHNL_IPSEC_ESN_ENA BIT(0)
#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1)
#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2)
#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3)
#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4)
#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5)
#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6)
#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7)
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */
#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
#define VIRTCHNL_PROTO_AH 2
#define VIRTCHNL_PROTO_RSVD1 3
/* sa mode */
#define VIRTCHNL_SA_MODE_TRANSPORT 1
#define VIRTCHNL_SA_MODE_TUNNEL 2
#define VIRTCHNL_SA_MODE_TRAN_TUN 3
#define VIRTCHNL_SA_MODE_UNKNOWN 4
/* sa direction */
#define VIRTCHNL_DIR_INGRESS 1
#define VIRTCHNL_DIR_EGRESS 2
#define VIRTCHNL_DIR_INGRESS_EGRESS 3
/* sa termination */
#define VIRTCHNL_TERM_SOFTWARE 1
#define VIRTCHNL_TERM_HARDWARE 2
/* sa ip type */
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* for virtchnl_ipsec_resp */
enum inline_ipsec_resp {
INLINE_IPSEC_SUCCESS = 0,
INLINE_IPSEC_FAIL = -1,
INLINE_IPSEC_ERR_FIFO_FULL = -2,
INLINE_IPSEC_ERR_NOT_READY = -3,
INLINE_IPSEC_ERR_VF_DOWN = -4,
INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
INLINE_IPSEC_ERR_NO_MEM = -6,
};
/* Detailed opcodes for DPDK and IPsec use */
enum inline_ipsec_ops {
INLINE_IPSEC_OP_GET_CAP = 0,
INLINE_IPSEC_OP_GET_STATUS = 1,
INLINE_IPSEC_OP_SA_CREATE = 2,
INLINE_IPSEC_OP_SA_UPDATE = 3,
INLINE_IPSEC_OP_SA_DESTROY = 4,
INLINE_IPSEC_OP_SP_CREATE = 5,
INLINE_IPSEC_OP_SP_DESTROY = 6,
INLINE_IPSEC_OP_SA_READ = 7,
INLINE_IPSEC_OP_EVENT = 8,
INLINE_IPSEC_OP_RESP = 9,
};
#pragma pack(1)
/* Not all fields are valid; if a certain field is invalid, set all of its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
u16 block_size;
u16 min_key_size;
u16 max_key_size;
u16 inc_key_size;
u16 min_iv_size;
u16 max_iv_size;
u16 inc_iv_size;
u16 min_digest_size;
u16 max_digest_size;
u16 inc_digest_size;
u16 min_aad_size;
u16 max_aad_size;
u16 inc_aad_size;
};
#pragma pack()
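
A hedged sketch of the all-ones "invalid field" convention noted above, using a hypothetical helper for a capability entry that does not support AAD (assumes the header's types are in scope):

/* Hypothetical helper; not part of the original header. */
static void
virtchnl_mark_aad_invalid(struct virtchnl_algo_cap *cap)
{
	cap->min_aad_size = 0xFFFF;
	cap->max_aad_size = 0xFFFF;
	cap->inc_aad_size = 0xFFFF;
}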
/* VF records the crypto capabilities received from the PF via virtchnl */
struct virtchnl_sym_crypto_cap {
u8 crypto_type;
u8 algo_cap_num;
struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
};
/* VIRTCHNL_OP_GET_IPSEC_CAP
 * VF passes virtchnl_ipsec_cap to the PF,
 * and the PF returns the IPsec capabilities via virtchnl.
*/
#pragma pack(1)
struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
u8 virtchnl_protocol_type;
/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
u8 virtchnl_sa_mode;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 termination_mode;
/* number of supported crypto capability */
u8 crypto_cap_num;
/* descriptor ID */
u16 desc_id;
/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
u32 caps_enabled;
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
u32 algo_type;
/* Length of valid IV data. */
u16 iv_len;
/* Length of digest */
u16 digest_len;
/* SA salt */
u32 salt;
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
#pragma pack()
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_CREATE
 * VF sends this SA configuration to the PF using virtchnl;
 * the PF creates the SA as configured, and the PF driver returns
 * a unique index (sa_idx) for the created SA.
*/
struct virtchnl_ipsec_sa_cfg {
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* type of outer IP - IPv4/IPv6 */
u8 virtchnl_ip_type;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* udp encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* reserved */
u8 reserved1;
/* SA security parameter index */
u32 spi;
/* outer src ip address */
u8 src_addr[16];
/* outer dst ip address */
u8 dst_addr[16];
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
/* When enabled, sa_index must be valid */
u8 sa_index_en;
/* SA index when sa_index_en is true */
u32 sa_index;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When enabled, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
	/* anti-replay window check - enable/disable
* When enabled, arw_size must be valid.
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* no ip offload mode - enable/disable
* When enabled, ip type and address must not be valid.
*/
u8 no_ip_offload_en;
	/* SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
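
The arw_size field above is encoded off by one: 0 denotes a window of 1 and 127 a window of 128. A hypothetical decode helper:

#include <stdint.h>

/* Hypothetical helper; not part of the original header. */
static inline uint16_t
virtchnl_arw_window_size(uint8_t arw_size)
{
	return ((uint16_t)arw_size + 1);
}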
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
 * VF sends the SA index and new configuration to the PF;
 * the PF will update the SA according to the configuration.
*/
struct virtchnl_ipsec_sa_update {
u32 sa_index; /* SA to update */
u32 esn_hi; /* high 32 bits of esn */
u32 esn_low; /* low 32 bits of esn */
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
 * VF sends the indices of SAs to destroy to the PF;
 * the PF will destroy the SAs according to the configuration.
 * The flag bitmap indicates whether all SAs or just the
 * selected SAs will be destroyed.
*/
struct virtchnl_ipsec_sa_destroy {
	/* An all-zero bitmap indicates that all SAs will be destroyed.
	 * A non-zero bitmap indicates that the SAs selected in
	 * array sa_index will be destroyed.
*/
u8 flag;
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
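
A hedged usage sketch for the destroy message, assuming bit i of flag selects sa_index[i] (the comment above does not spell out the bit-to-index mapping; the header's types are assumed to be in scope):

#include <string.h>

/* Hypothetical helper: request destruction of two specific SAs. */
static void
virtchnl_fill_sa_destroy(struct virtchnl_ipsec_sa_destroy *msg,
    u32 first_sa, u32 second_sa)
{
	memset(msg, 0, sizeof(*msg));
	msg->flag = 0x3;	/* assumed: bits 0 and 1 select entries 0 and 1 */
	msg->sa_index[0] = first_sa;
	msg->sa_index[1] = second_sa;
}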
/* VIRTCHNL_OP_IPSEC_SA_READ
 * VF sends this SA read request to the PF using virtchnl;
 * the PF reads the SA and returns the configuration of the created SA.
*/
struct virtchnl_ipsec_sa_read {
/* SA valid - invalid/valid */
u8 valid;
/* SA active - inactive/active */
u8 active;
/* SA SN rollover - not_rollover/rollover */
u8 sn_rollover;
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When set to limit, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
/* anti-replay window check - enable/disable
* When set to check, arw_size, arw_top, and arw must be valid
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* reserved */
u8 reserved1;
/* top of anti-replay-window */
u64 arw_top;
/* anti-replay-window */
u8 arw[16];
/* packets processed */
u64 packets_processed;
/* bytes processed */
u64 bytes_processed;
/* packets dropped */
u32 packets_dropped;
/* authentication failures */
u32 auth_fails;
/* ARW check failures */
u32 arw_fails;
/* type of esn - enable/disable */
u8 esn;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* SA security parameter index */
u32 spi;
/* SA salt */
u32 salt;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
	/* SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* Add allowlist entry in IES */
struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
/* Drop frame if true or redirect to QAT if false. */
u8 drop;
/* Congestion domain. For future use. */
u8 cgd;
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
/* 0 for NAT-T unsupported, 1 for NAT-T supported */
u8 is_udp;
/* reserved */
u8 reserved;
/* NAT-T UDP port number. Only valid in case NAT-T supported */
u16 udp_port;
};
#pragma pack(1)
/* Delete allowlist entry in IES */
struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
u32 rule_id;
};
#pragma pack()
/* Response from IES to allowlist operations */
struct virtchnl_ipsec_sp_cfg_resp {
u32 rule_id;
};
struct virtchnl_ipsec_sa_cfg_resp {
u32 sa_handle;
};
#define INLINE_IPSEC_EVENT_RESET 0x1
#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
struct virtchnl_ipsec_event {
u32 ipsec_event_data;
};
#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
struct virtchnl_ipsec_status {
u32 status;
};
struct virtchnl_ipsec_resp {
u32 resp;
};
/* Internal message descriptor for VF <-> IPsec communication */
struct inline_ipsec_msg {
u16 ipsec_opcode;
u16 req_id;
union {
/* IPsec request */
struct virtchnl_ipsec_sa_cfg sa_cfg[0];
struct virtchnl_ipsec_sp_cfg sp_cfg[0];
struct virtchnl_ipsec_sa_update sa_update[0];
struct virtchnl_ipsec_sa_destroy sa_destroy[0];
struct virtchnl_ipsec_sp_destroy sp_destroy[0];
/* IPsec response */
struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
struct virtchnl_ipsec_cap ipsec_cap[0];
struct virtchnl_ipsec_status ipsec_status[0];
/* response to del_sa, del_sp, update_sa */
struct virtchnl_ipsec_resp ipsec_resp[0];
/* IPsec event (no req_id is required) */
struct virtchnl_ipsec_event event[0];
/* Reserved */
struct virtchnl_ipsec_sa_read sa_read[0];
} ipsec_data;
};
static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
{
u16 valid_len = sizeof(struct inline_ipsec_msg);
switch (opcode) {
case INLINE_IPSEC_OP_GET_CAP:
case INLINE_IPSEC_OP_GET_STATUS:
break;
case INLINE_IPSEC_OP_SA_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
break;
case INLINE_IPSEC_OP_SP_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
break;
case INLINE_IPSEC_OP_SA_UPDATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_update);
break;
case INLINE_IPSEC_OP_SA_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
break;
case INLINE_IPSEC_OP_SP_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
break;
	/* Only for msg length calculation of the response to the VF in
	 * case of inline IPsec failure.
*/
case INLINE_IPSEC_OP_RESP:
valid_len += sizeof(struct virtchnl_ipsec_resp);
break;
default:
valid_len = 0;
break;
}
return valid_len;
}
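
A hedged sketch of how a receiver might use this helper to reject unknown or truncated messages (the validation policy is assumed, not taken from this header):

/* Hypothetical receive-side check. */
static int
virtchnl_validate_ipsec_msg(const struct inline_ipsec_msg *msg, u16 msglen)
{
	u16 valid_len = virtchnl_inline_ipsec_val_msg_len(msg->ipsec_opcode);

	if (valid_len == 0 || msglen < valid_len)
		return (-1);	/* unknown opcode or truncated message */
	return (0);
}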
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */