Update to the shared code for Intel I40E drivers in preparation for the ixl 1.3.0 and ixlv 1.2.0 revisions.

MFC after:	1 week
Jack F Vogel 2015-01-12 18:32:45 +00:00
parent 4b12fb6103
commit f247dc2523
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=277082
10 changed files with 973 additions and 273 deletions

View file

@ -589,10 +589,10 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_asq;
if (i40e_is_vf(hw)) /* VF has no need of firmware */
goto init_adminq_exit;
/* There are some cases where the firmware may not be quite ready
/* VF has no need of firmware */
if (i40e_is_vf(hw))
goto init_adminq_exit;
/* There are some cases where the firmware may not be quite ready
* for AdminQ operations, so we retry the AdminQ setup a few times
* if we see timeouts in this first AQ call.
*/
@ -600,6 +600,7 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
ret_code = i40e_aq_get_firmware_version(hw,
&hw->aq.fw_maj_ver,
&hw->aq.fw_min_ver,
&hw->aq.fw_build,
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);
@ -625,7 +626,8 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
hw->aq.nvm_busy = FALSE;
hw->aq.nvm_release_on_done = FALSE;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@ -767,12 +769,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
status = I40E_ERR_NVM;
goto asq_send_command_exit;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
i40e_memcpy(details,
@ -924,9 +920,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
if (!status && i40e_is_nvm_update_op(desc))
hw->aq.nvm_busy = TRUE;
asq_send_command_error:
i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
@ -1042,7 +1035,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
i40e_release_spinlock(&hw->aq.arq_spinlock);
if (i40e_is_nvm_update_op(&e->desc)) {
hw->aq.nvm_busy = FALSE;
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = FALSE;

View file

@ -36,6 +36,7 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@ -100,9 +101,9 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_busy;
bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
@ -115,7 +116,7 @@ struct i40e_adminq_info {
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);

View file

@ -263,6 +263,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
i40e_aqc_opc_lldp_stop = 0x0A05,
i40e_aqc_opc_lldp_start = 0x0A06,
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@ -275,6 +278,8 @@ enum i40e_admin_queue_opc {
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@ -283,7 +288,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@ -417,6 +421,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
#define I40E_AQ_CAP_ID_ISCSI 0x0022
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
@ -461,8 +466,11 @@ struct i40e_aqc_arp_proxy_data {
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
};
I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
@ -488,6 +496,8 @@ struct i40e_aqc_ns_proxy_data {
u8 ipv6_addr_1[16];
};
I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
@ -498,6 +508,8 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
@ -569,6 +581,8 @@ struct i40e_aqc_get_switch_config_header_resp {
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@ -594,6 +608,8 @@ struct i40e_aqc_switch_config_element_resp {
__le16 element_info;
};
I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
/* Get Switch Configuration (indirect 0x0200)
* an array of elements are returned in the response buffer
* the first in the array is the header, remainder are elements
@ -603,6 +619,8 @@ struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_switch_config_element_resp element[1];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
@ -668,6 +686,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@ -1099,6 +1119,8 @@ struct i40e_aqc_remove_tag {
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
@ -1214,7 +1236,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@ -1247,7 +1269,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
@ -1366,6 +1388,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 reserved1[28];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
@ -1377,6 +1401,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
__le16 qs_handles[8];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
@ -1390,6 +1416,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
u8 reserved3[23];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
@ -1401,6 +1429,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
__le16 tc_bw_max[2];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
@ -1428,6 +1458,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved2[96];
};
I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
@ -1439,6 +1471,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
@ -1450,6 +1484,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 reserved1[20];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
@ -1460,6 +1496,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 reserved2[23];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
@ -1475,6 +1513,8 @@ struct i40e_aqc_query_port_ets_config_resp {
u8 reserved3[32];
};
I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
@ -1489,6 +1529,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
__le16 tc_bw_max[2];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
@ -1502,6 +1544,8 @@ struct i40e_aqc_configure_partition_bw_data {
u8 max_bw[16]; /* bandwidth limit */
};
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@ -1584,6 +1628,8 @@ struct i40e_aqc_module_desc {
u8 reserved2[8];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
@ -1612,6 +1658,8 @@ struct i40e_aq_get_phy_abilities_resp {
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
@ -1795,12 +1843,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define ANVM_READ_SINGLE_FEATURE 0
#define ANVM_READ_MULTIPLE_FEATURES 1
#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
__le16 element_id; /* Feature/field ID */
u8 reserved[2];
__le16 element_id; /* Feature/field ID */
__le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
};
@ -1818,21 +1866,31 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
__le16 instance_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
struct i40e_aqc_nvm_config_data_immediate_field {
#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
__le16 field_id;
__le16 instance_id;
__le32 field_id;
__le32 field_value;
__le16 field_options;
__le16 field_value;
__le16 reserved;
};
I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@ -1995,10 +2053,78 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Apply MIB changes (0x0A07)
* uses the generic struc as it contains no data
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
*/
#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
u8 oper_prio_tc[4];
u8 reserved2;
u8 oper_tc_bw[8];
u8 oper_pfc_en;
u8 reserved3;
__le16 oper_app_prio;
u8 reserved4;
__le16 tlv_status;
};
I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
struct i40e_aqc_get_cee_dcb_cfg_resp {
u8 oper_num_tc;
u8 oper_prio_tc[4];
u8 oper_tc_bw[8];
u8 oper_pfc_en;
__le16 oper_app_prio;
__le32 tlv_status;
u8 reserved[12];
};
I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
/* Set Local LLDP MIB (indirect 0x0A08)
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
u8 type;
u8 reserved0;
__le16 length;
u8 reserved1[4];
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
/* Stop/Start LLDP Agent (direct 0x0A09)
* Used for stopping/starting specific LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
@ -2073,7 +2199,8 @@ struct i40e_aqc_oem_param_change {
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
u8 param_value2[8];
__le16 param_value2;
u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@ -2087,6 +2214,28 @@ struct i40e_aqc_oem_state_change {
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* Initialize OCSD (0xFE02, direct) */
struct i40e_aqc_opc_oem_ocsd_initialize {
u8 type_status;
u8 reserved1[3];
__le32 ocsd_memory_block_addr_high;
__le32 ocsd_memory_block_addr_low;
__le32 requested_update_interval;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
/* Initialize OCBB (0xFE03, direct) */
struct i40e_aqc_opc_oem_ocbb_initialize {
u8 type_status;
u8 reserved1[3];
__le32 ocbb_memory_block_addr_high;
__le32 ocbb_memory_block_addr_low;
u8 reserved2[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
/* debug commands */
/* get device id (0xFF00) uses the generic structure */

View file

@ -95,47 +95,51 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
u8 *buf = (u8 *)buffer;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
aq_desc->retval);
LE16_TO_CPU(aq_desc->opcode),
LE16_TO_CPU(aq_desc->flags),
LE16_TO_CPU(aq_desc->datalen),
LE16_TO_CPU(aq_desc->retval));
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
aq_desc->cookie_high, aq_desc->cookie_low);
LE32_TO_CPU(aq_desc->cookie_high),
LE32_TO_CPU(aq_desc->cookie_low));
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
aq_desc->params.internal.param0,
aq_desc->params.internal.param1);
LE32_TO_CPU(aq_desc->params.internal.param0),
LE32_TO_CPU(aq_desc->params.internal.param1));
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
aq_desc->params.external.addr_high,
aq_desc->params.external.addr_low);
LE32_TO_CPU(aq_desc->params.external.addr_high),
LE32_TO_CPU(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
i40e_memset(data, 0, sizeof(data), I40E_NONDMA_MEM);
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
i40e_debug(hw, mask,
"\t0x%04X %08X %08X %08X %08X\n",
i - 15, data[0], data[1], data[2],
data[3]);
i40e_memset(data, 0, sizeof(data),
I40E_NONDMA_MEM);
}
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
i, buf[i], buf[i+1], buf[i+2], buf[i+3],
buf[i+4], buf[i+5], buf[i+6], buf[i+7],
buf[i+8], buf[i+9], buf[i+10], buf[i+11],
buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
/* write whatever's left over without overrunning the buffer */
if (i < len) {
char d_buf[80];
int j = 0;
memset(d_buf, 0, sizeof(d_buf));
j += sprintf(d_buf, "\t0x%04X ", i);
while (i < len)
j += sprintf(&d_buf[j], " %02X", buf[i++]);
i40e_debug(hw, mask, "%s\n", d_buf);
}
if ((i % 16) != 0)
i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
i - (i % 16), data[0], data[1], data[2],
data[3]);
}
}
@ -545,6 +549,30 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
};
/**
* i40e_validate_mac_addr - Validate unicast MAC address
* @mac_addr: pointer to MAC address
*
* Tests a MAC address to ensure it is a valid Individual Address
**/
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
{
enum i40e_status_code status = I40E_SUCCESS;
DEBUGFUNC("i40e_validate_mac_addr");
/* Broadcast addresses ARE multicast addresses
* Make sure it is not a multicast address
* Reject the zero address
*/
if (I40E_IS_MULTICAST(mac_addr) ||
(mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
status = I40E_ERR_INVALID_MAC_ADDR;
return status;
}
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@ -560,7 +588,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
{
enum i40e_status_code status = I40E_SUCCESS;
u32 reg;
u32 port, ari, func_rid;
DEBUGFUNC("i40e_init_shared_code");
@ -575,18 +603,17 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = TRUE;
/* Determine port number */
reg = rd32(hw, I40E_PFGEN_PORTNUM);
reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
hw->port = (u8)reg;
/* Determine the PF number based on the PCI fn */
reg = rd32(hw, I40E_GLPCI_CAPSUP);
if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
/* Determine port number and PF number*/
port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
>> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
hw->port = (u8)port;
ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
func_rid = rd32(hw, I40E_PF_FUNC_RID);
if (ari)
hw->pf_id = (u8)(func_rid & 0xff);
else
hw->pf_id = (u8)hw->bus.func;
hw->pf_id = (u8)(func_rid & 0x7);
status = i40e_init_nvm(hw);
return status;
@ -728,25 +755,60 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
}
/**
* i40e_validate_mac_addr - Validate unicast MAC address
* @mac_addr: pointer to MAC address
* i40e_read_pba_string - Reads part number string from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number string from the EEPROM
* @pba_num_size: part number string buffer length
*
* Tests a MAC address to ensure it is a valid Individual Address
* Reads the part number string from the EEPROM.
**/
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
enum i40e_status_code status = I40E_SUCCESS;
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
u16 i = 0;
DEBUGFUNC("i40e_validate_mac_addr");
status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
return status;
}
/* Broadcast addresses ARE multicast addresses
* Make sure it is not a multicast address
* Reject the zero address
status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
if (status != I40E_SUCCESS) {
DEBUGOUT("Failed to read PBA Block pointer.\n");
return status;
}
status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
if (status != I40E_SUCCESS) {
DEBUGOUT("Failed to read PBA Block size.\n");
return status;
}
/* Subtract one to get PBA word count (PBA Size word is included in
* total size)
*/
if (I40E_IS_MULTICAST(mac_addr) ||
(mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
status = I40E_ERR_INVALID_MAC_ADDR;
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
DEBUGOUT("Buffer to small for PBA data.\n");
return I40E_ERR_PARAM;
}
for (i = 0; i < pba_size; i++) {
status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
if (status != I40E_SUCCESS) {
DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
return status;
}
pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
pba_num[(i * 2) + 1] = pba_word & 0xFF;
}
pba_num[(pba_size * 2)] = '\0';
return status;
}
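
For reference, a minimal caller sketch for the i40e_read_pba_string() helper added above; the attach-time context, the "dev" handle and the 32-byte buffer size are illustrative assumptions, not part of this commit. Per the size check in the function, the buffer must hold two characters per PBA data word plus a terminating NUL.

/* Hypothetical caller sketch -- not part of the commit. */
u8 pba[32];	/* assumed size; must be >= (pba_size * 2) + 1 */
enum i40e_status_code status;

status = i40e_read_pba_string(hw, pba, sizeof(pba));
if (status == I40E_SUCCESS)
	device_printf(dev, "PBA: %s\n", pba);
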
@ -799,7 +861,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
#define I40E_PF_RESET_WAIT_COUNT 100
#define I40E_PF_RESET_WAIT_COUNT 110
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@ -818,8 +880,9 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@ -1010,8 +1073,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
return gpio_val;
}
#define I40E_LED0 22
#define I40E_COMBINED_ACTIVITY 0xA
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
#define I40E_LED0 22
/**
* i40e_led_get - return current on/off mode
@ -1024,6 +1090,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
u32 current_mode = 0;
u32 mode = 0;
int i;
@ -1036,6 +1103,20 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
/* ignore gpio LED src mode entries related to the activity
* LEDs
*/
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
switch (current_mode) {
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
continue;
default:
break;
}
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@ -1055,6 +1136,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@ -1069,6 +1151,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
/* ignore gpio LED src mode entries related to the activity
* LEDs
*/
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
switch (current_mode) {
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
continue;
default:
break;
}
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@ -1077,8 +1173,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (mode == I40E_LINK_ACTIVITY)
blink = FALSE;
gpio_val |= (blink ? 1 : 0) <<
I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
if (blink)
gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@ -1207,7 +1305,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
memset(&config, 0, sizeof(config));
/* clear the old pause settings */
config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
~(I40E_AQ_PHY_FLAG_PAUSE_RX);
@ -1230,14 +1328,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
status = i40e_update_link_info(hw, TRUE);
status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
i40e_msec_delay(1000);
status = i40e_update_link_info(hw, TRUE);
status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@ -1375,7 +1473,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
/* save off old link status information */
i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
sizeof(struct i40e_link_status), I40E_NONDMA_TO_NONDMA);
sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
@ -1412,7 +1510,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(struct i40e_link_status),
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
I40E_NONDMA_TO_NONDMA);
/* flag cleared so helper functions don't call AQ again */
@ -1422,36 +1520,6 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
return status;
}
/**
* i40e_update_link_info
* @hw: pointer to the hw struct
* @enable_lse: enable/disable LinkStatusEvent reporting
*
* Returns the link status of the adapter
**/
enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
bool enable_lse)
{
struct i40e_aq_get_phy_abilities_resp abilities;
enum i40e_status_code status;
status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
if (status)
return status;
status = i40e_aq_get_phy_capabilities(hw, FALSE, false,
&abilities, NULL);
if (status)
return status;
if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
hw->phy.link_info.an_enabled = TRUE;
else
hw->phy.link_info.an_enabled = FALSE;
return status;
}
/**
* i40e_aq_set_phy_int_mask
* @hw: pointer to the hw struct
@ -1904,6 +1972,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
* @fw_minor_version: firmware minor version
* @fw_build: firmware build number
* @api_major_version: major queue version
* @api_minor_version: minor queue version
* @cmd_details: pointer to command details structure or NULL
@ -1912,6 +1981,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
@ -1929,6 +1999,8 @@ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
*fw_major_version = LE16_TO_CPU(resp->fw_major);
if (fw_minor_version != NULL)
*fw_minor_version = LE16_TO_CPU(resp->fw_minor);
if (fw_build != NULL)
*fw_build = LE32_TO_CPU(resp->fw_build);
if (api_major_version != NULL)
*api_major_version = LE16_TO_CPU(resp->api_major);
if (api_minor_version != NULL)
@ -1969,7 +2041,7 @@ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_SI);
desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
cmd->driver_major_ver = dv->major_version;
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
@ -2173,7 +2245,7 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
@ -2215,7 +2287,7 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
@ -2255,7 +2327,7 @@ enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !v_list || !hw)
return I40E_ERR_PARAM;
buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
buf_size = count * sizeof(*v_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
@ -2295,7 +2367,7 @@ enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !v_list || !hw)
return I40E_ERR_PARAM;
buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
buf_size = count * sizeof(*v_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
@ -2352,6 +2424,41 @@ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
return status;
}
/**
* i40e_aq_debug_read_register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the register using the admin queue commands
**/
enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd_resp =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
enum i40e_status_code status;
if (reg_val == NULL)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
cmd_resp->address = CPU_TO_LE32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (status == I40E_SUCCESS) {
*reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
(u64)LE32_TO_CPU(cmd_resp->value_low);
}
return status;
}
/**
* i40e_aq_debug_write_register
* @hw: pointer to the hw struct
@ -2565,6 +2672,77 @@ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
return status;
}
/**
* i40e_aq_read_nvm_config - read an nvm config block
* @hw: pointer to the hw struct
* @cmd_flags: NVM access admin command bits
* @field_id: field or feature id
* @data: buffer for result
* @buf_size: buffer size
* @element_count: pointer to count of elements read by FW
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, u32 field_id, void *data,
u16 buf_size, u16 *element_count,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_config_read *cmd =
(struct i40e_aqc_nvm_config_read *)&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
else
cmd->element_id_msw = 0;
status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
if (!status && element_count)
*element_count = LE16_TO_CPU(cmd->element_count);
return status;
}
/**
* i40e_aq_write_nvm_config - write an nvm config block
* @hw: pointer to the hw struct
* @cmd_flags: NVM access admin command bits
* @data: buffer for result
* @buf_size: buffer size
* @element_count: count of elements to be written
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, void *data, u16 buf_size,
u16 element_count,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_config_write *cmd =
(struct i40e_aqc_nvm_config_write *)&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
cmd->element_count = CPU_TO_LE16(element_count);
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
return status;
}
/**
* i40e_aq_erase_nvm
* @hw: pointer to the hw struct
@ -2621,6 +2799,7 @@ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
#define I40E_DEV_FUNC_CAP_VSI 0x17
#define I40E_DEV_FUNC_CAP_DCB 0x18
#define I40E_DEV_FUNC_CAP_FCOE 0x21
#define I40E_DEV_FUNC_CAP_ISCSI 0x22
#define I40E_DEV_FUNC_CAP_RSS 0x40
#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
@ -2649,6 +2828,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
enum i40e_admin_queue_opc list_type_opc)
{
struct i40e_aqc_list_capabilities_element_resp *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u32 i = 0;
@ -2719,6 +2899,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number == 1)
p->fcoe = TRUE;
break;
case I40E_DEV_FUNC_CAP_ISCSI:
if (number == 1)
p->iscsi = TRUE;
break;
case I40E_DEV_FUNC_CAP_RSS:
p->rss = TRUE;
p->rss_table_size = number;
@ -2778,11 +2962,36 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
}
/* Software override ensuring FCoE is disabled if npar or mfp
* mode because it is not supported in these modes.
/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
p->fcoe = FALSE;
/* count the enabled ports (aka the "not disabled" ports) */
hw->num_ports = 0;
for (i = 0; i < 4; i++) {
u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
u64 port_cfg = 0;
/* use AQ read to get the physical register offset instead
* of the port relative offset
*/
i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
hw->num_ports++;
}
valid_functions = p->valid_functions;
num_functions = 0;
while (valid_functions) {
if (valid_functions & 1)
num_functions++;
valid_functions >>= 1;
}
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
if (p->npar_enable || p->mfp_mode_1)
p->fcoe = FALSE;
hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
hw->num_partitions = num_functions / hw->num_ports;
/* additional HW specific goodies that might
* someday be HW version specific
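
A worked example of the port/partition bookkeeping introduced in the hunk above, using hypothetical numbers:

/* Hypothetical device: 4 enabled ports and 8 enabled PCI functions
 * (valid_functions bitmap 0xFF), so:
 *   num_functions      = 8
 *   hw->num_ports      = 4
 *   hw->num_partitions = 8 / 4 = 2
 * and a PF with pf_id == 5 gets hw->partition_id = (5 / 4) + 1 = 2.
 */
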
@ -2939,6 +3148,45 @@ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
return status;
}
/**
* i40e_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the hw struct
* @mib_type: Local, Remote or both Local and Remote MIBs
* @buff: pointer to a user supplied buffer to store the MIB block
* @buff_size: size of the buffer (in bytes)
* @cmd_details: pointer to command details structure or NULL
*
* Set the LLDP MIB.
**/
enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_set_local_mib *cmd =
(struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
enum i40e_status_code status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
/* Indirect Command */
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buff_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
desc.datalen = CPU_TO_LE16(buff_size);
cmd->type = mib_type;
cmd->length = CPU_TO_LE16(buff_size);
cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
return status;
}
/**
* i40e_aq_cfg_lldp_mib_change_event
* @hw: pointer to the hw struct
@ -3167,6 +3415,64 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_get_cee_dcb_config
* @hw: pointer to the hw struct
* @buff: response buffer that stores CEE operational configuration
* @buff_size: size of the buffer passed
* @cmd_details: pointer to command details structure or NULL
*
* Get CEE DCBX mode operational configuration from firmware
**/
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
cmd_details);
return status;
}
/**
* i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
* @hw: pointer to the hw struct
* @start_agent: True if DCBx Agent needs to be Started
* False if DCBx Agent needs to be Stopped
* @cmd_details: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent
**/
enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
bool start_agent,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
(struct i40e_aqc_lldp_stop_start_specific_agent *)
&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_stop_start_spec_agent);
if (start_agent)
cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
@ -3195,7 +3501,7 @@ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
if (!status && filter_index)
*filter_index = resp->index;
return status;
@ -3246,8 +3552,7 @@ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
(struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
enum i40e_status_code status;
u16 length = count
* sizeof(struct i40e_aqc_switch_resource_alloc_element_resp);
u16 length = count * sizeof(*buf);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_resource_alloc);
@ -3258,7 +3563,7 @@ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
if (!status)
if (!status && num_entries)
*num_entries = cmd_resp->num_entries;
return status;
@ -3673,7 +3978,7 @@ enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
if (!status && stat_index)
*stat_index = LE16_TO_CPU(cmd_resp->stat_index);
return status;
@ -4260,8 +4565,7 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
filter_count;
buff_len = filter_count * sizeof(*filters);
desc.datalen = CPU_TO_LE16(buff_len);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
@ -4298,8 +4602,7 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
filter_count;
buff_len = filter_count * sizeof(*filters);
desc.datalen = CPU_TO_LE16(buff_len);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
@ -4514,7 +4817,7 @@ enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
cmd->cmd_flags = CPU_TO_LE16(bios_mode);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
if (!status)
if (!status && reset_needed)
*reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
@ -4669,7 +4972,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
{
enum i40e_status_code status;
struct i40e_aq_desc desc;
u16 bwd_size = sizeof(struct i40e_aqc_configure_partition_bw_data);
u16 bwd_size = sizeof(*bw_data);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
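
A minimal sketch of how a caller might use the i40e_aq_debug_read_register() helper added in this file; the register offset and debug mask are illustrative only.

/* Hypothetical caller sketch -- reads a CSR through the admin queue
 * instead of MMIO; I40E_PRTGEN_CNF is just an example offset.
 */
u64 val = 0;
enum i40e_status_code status;

status = i40e_aq_debug_read_register(hw, I40E_PRTGEN_CNF, &val, NULL);
if (status == I40E_SUCCESS)
	i40e_debug(hw, I40E_DEBUG_ALL, "PRTGEN_CNF = 0x%016llx\n",
	    (unsigned long long)val);
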

View file

@ -864,7 +864,7 @@ static void i40e_write_dword(u8 *hmc_bits,
if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1;
else
mask = 0xFFFFFFFF;
mask = ~(u32)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@ -916,7 +916,7 @@ static void i40e_write_qword(u8 *hmc_bits,
if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1;
else
mask = 0xFFFFFFFFFFFFFFFFUL;
mask = ~(u64)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@ -1046,7 +1046,7 @@ static void i40e_read_dword(u8 *hmc_bits,
if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1;
else
mask = 0xFFFFFFFF;
mask = ~(u32)0;
/* shift to correct alignment */
mask <<= shift_width;
@ -1099,7 +1099,7 @@ static void i40e_read_qword(u8 *hmc_bits,
if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1;
else
mask = 0xFFFFFFFFFFFFFFFFUL;
mask = ~(u64)0;
/* shift to correct alignment */
mask <<= shift_width;

View file

@ -34,6 +34,18 @@
#include "i40e_prototype.h"
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 words, void *data,
bool last_command);
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@ -71,7 +83,7 @@ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
} else { /* Blank programming mode */
nvm->blank_nvm_mode = TRUE;
ret_code = I40E_ERR_NVM_BLANK_MODE;
DEBUGOUT("NVM init error: unsupported blank mode.\n");
i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
}
return ret_code;
@ -90,7 +102,7 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u64 gtime, timeout;
u64 time = 0;
u64 time_left = 0;
DEBUGFUNC("i40e_acquire_nvm");
@ -98,40 +110,39 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
goto i40e_i40e_acquire_nvm_exit;
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0, &time, NULL);
0, &time_left, NULL);
/* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* Store the timeout */
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
if (ret_code != I40E_SUCCESS) {
/* Set the polling timeout */
if (time > I40E_MAX_NVM_TIMEOUT)
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ gtime;
else
timeout = hw->nvm.hw_semaphore_timeout;
if (ret_code)
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
access, time_left, ret_code, hw->aq.asq_last_status);
if (ret_code && time_left) {
/* Poll until the current NVM owner timeouts */
while (gtime < timeout) {
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
while ((gtime < timeout) && time_left) {
i40e_msec_delay(10);
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
ret_code = i40e_aq_request_resource(hw,
I40E_NVM_RESOURCE_ID,
access, 0, &time,
access, 0, &time_left,
NULL);
if (ret_code == I40E_SUCCESS) {
hw->nvm.hw_semaphore_timeout =
I40E_MS_TO_GTIME(time) + gtime;
I40E_MS_TO_GTIME(time_left) + gtime;
break;
}
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
}
if (ret_code != I40E_SUCCESS) {
hw->nvm.hw_semaphore_timeout = 0;
hw->nvm.hw_semaphore_wait =
I40E_MS_TO_GTIME(time) + gtime;
DEBUGOUT1("NVM acquire timed out, wait %llu ms before trying again.\n",
time);
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
time_left, ret_code, hw->aq.asq_last_status);
}
}
@ -176,7 +187,7 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
i40e_usec_delay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
DEBUGOUT("Done bit in GLNVM_SRCTL not set");
i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
return ret_code;
}
@ -190,14 +201,30 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
**/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
DEBUGFUNC("i40e_read_nvm_srctl");
DEBUGFUNC("i40e_read_nvm_word_srctl");
if (offset >= hw->nvm.sr_size) {
DEBUGOUT("NVM read error: Offset beyond Shadow RAM limit.\n");
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: Offset %d beyond Shadow RAM limit %d\n",
offset, hw->nvm.sr_size);
ret_code = I40E_ERR_PARAM;
goto read_nvm_exit;
}
@ -220,13 +247,35 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
}
if (ret_code != I40E_SUCCESS)
DEBUGOUT1("NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
offset);
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
offset);
read_nvm_exit:
return ret_code;
}
/**
* i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the AdminQ.
**/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
DEBUGFUNC("i40e_read_nvm_word_aq");
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
*data = LE16_TO_CPU(*(__le16 *)data);
return ret_code;
}
/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
@ -240,16 +289,33 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
**/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
DEBUGFUNC("i40e_read_nvm_buffer");
DEBUGFUNC("i40e_read_nvm_buffer_srctl");
/* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
ret_code = i40e_read_nvm_word(hw, index, &data[word]);
ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
if (ret_code != I40E_SUCCESS)
break;
}
@ -259,6 +325,114 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
return ret_code;
}
/**
* i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code;
u16 read_size = *words;
bool last_cmd = FALSE;
u16 words_read = 0;
u16 i = 0;
DEBUGFUNC("i40e_read_nvm_buffer_aq");
do {
/* Calculate the number of words to read in this step.
* The FVL AQ does not allow reading more than one page at a time or
* crossing page boundaries.
*/
if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
read_size = min(*words,
(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
(offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
else
read_size = min((*words - words_read),
I40E_SR_SECTOR_SIZE_IN_WORDS);
/* Check if this is last command, if so set proper flag */
if ((words_read + read_size) >= *words)
last_cmd = TRUE;
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
data + words_read, last_cmd);
if (ret_code != I40E_SUCCESS)
goto read_nvm_buffer_aq_exit;
/* Increment counter for words already read and move offset to
* new read location
*/
words_read += read_size;
offset += read_size;
} while (words_read < *words);
for (i = 0; i < *words; i++)
data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
read_nvm_buffer_aq_exit:
*words = words_read;
return ret_code;
}
/**
* i40e_read_nvm_aq - Read Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to read
* @data: buffer for the words read from the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Reads a 16 bit word buffer from the Shadow RAM using the admin command.
**/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 words, void *data,
bool last_command)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
DEBUGFUNC("i40e_read_nvm_aq");
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
* Firmware will check the module-based model.
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can read only up to 4KB (one sector) in one AQ read */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read fail error: tried to read %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single read cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
data, last_command, NULL);
return ret_code;
}
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@ -317,6 +491,8 @@ enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
{
DEBUGFUNC("i40e_write_nvm_word");
*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
/* Value 0x00 below means that we treat SR as a flat mem */
return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}
@ -338,8 +514,15 @@ enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
u16 words, void *data)
{
__le16 *le_word_ptr = (__le16 *)data;
u16 *word_ptr = (u16 *)data;
u32 i = 0;
DEBUGFUNC("i40e_write_nvm_buffer");
for (i = 0; i < words; i++)
le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
/* Here we will only write one buffer as the size of the modules
* mirrored in the Shadow RAM is always less than 4K.
*/
@ -360,14 +543,21 @@ enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
u16 word = 0;
u32 i = 0;
u16 *data;
u16 i = 0;
DEBUGFUNC("i40e_calc_nvm_checksum");
ret_code = i40e_allocate_virt_mem(hw, &vmem,
I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
if (ret_code)
goto i40e_calc_nvm_checksum_exit;
data = (u16 *)vmem.va;
/* read pointer to VPD area */
ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
@ -377,7 +567,7 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
/* read pointer to PCIe Alt Auto-load module */
ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
&pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@ -387,33 +577,39 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
* except the VPD and PCIe ALT Auto-load modules
*/
for (i = 0; i < hw->nvm.sr_size; i++) {
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
i++;
/* Skip VPD module (convert byte size to word count) */
if (i == (u32)vpd_module) {
i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
if (i >= hw->nvm.sr_size)
break;
}
/* Skip PCIe ALT module (convert byte size to word count) */
if (i == (u32)pcie_alt_module) {
i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
if (i >= hw->nvm.sr_size)
break;
/* Read SR page */
if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
}
ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
continue;
/* Skip VPD module (convert byte size to word count) */
if ((i >= (u32)vpd_module) &&
(i < ((u32)vpd_module +
(I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
continue;
}
checksum_local += word;
/* Skip PCIe ALT module (convert byte size to word count) */
if ((i >= (u32)pcie_alt_module) &&
(i < ((u32)pcie_alt_module +
(I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
continue;
}
checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
}
*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
i40e_free_virt_mem(hw, &vmem);
return ret_code;
}
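For reference, a compact standalone model of the checksum rule implemented above: sum every Shadow RAM word except the software checksum word itself and the VPD and PCIe-ALT auto-load modules, then subtract the sum from the checksum base (assumed here to be the driver's usual 0xBABA):

#include <stdint.h>
#include <stdio.h>

#define SR_SIZE_WORDS    64u      /* toy Shadow RAM size; the real code uses hw->nvm.sr_size */
#define SW_CHECKSUM_WORD 0x3Fu    /* assumed I40E_SR_SW_CHECKSUM_WORD */
#define CHECKSUM_BASE    0xBABAu  /* assumed I40E_SR_SW_CHECKSUM_BASE */

/* Sum all words except the checksum word and the two skipped module ranges,
 * then fold the running sum into the base value, all in 16-bit arithmetic. */
static uint16_t calc_checksum(const uint16_t *sr,
			      uint32_t vpd_module, uint32_t vpd_words,
			      uint32_t alt_module, uint32_t alt_words)
{
	uint16_t sum = 0;
	uint32_t i;

	for (i = 0; i < SR_SIZE_WORDS; i++) {
		if (i == SW_CHECKSUM_WORD)
			continue;
		if (i >= vpd_module && i < vpd_module + vpd_words)
			continue;
		if (i >= alt_module && i < alt_module + alt_words)
			continue;
		sum = (uint16_t)(sum + sr[i]);
	}
	return (uint16_t)(CHECKSUM_BASE - sum);
}

int main(void)
{
	uint16_t sr[SR_SIZE_WORDS] = { 0x1111, 0x2222, 0x3333 }; /* rest zero */

	printf("checksum = 0x%04x\n", calc_checksum(sr, 16, 8, 32, 8)); /* 0x5454 */
	return 0;
}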

View file

@@ -58,6 +58,7 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
u16 i40e_clean_asq(struct i40e_hw *hw);
void i40e_free_adminq_asq(struct i40e_hw *hw);
void i40e_free_adminq_arq(struct i40e_hw *hw);
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
@@ -86,11 +87,15 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -124,8 +129,6 @@ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
bool enable_lse);
enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
@@ -193,6 +196,14 @@ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, u32 field_id, void *data,
u16 buf_size, u16 *element_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, void *data, u16 buf_size,
u16 element_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
void *buff, u16 buff_size, u16 *data_size,
enum i40e_admin_queue_opc list_type_opc,
@@ -205,6 +216,9 @@ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
@@ -225,6 +239,12 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
bool start_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
@@ -324,6 +344,8 @@ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
@@ -363,16 +385,15 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_aqc_configure_partition_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,

View file

@@ -319,6 +319,10 @@
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
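The SHIFT/MASK pairs added here are consumed together when decoding register reads; a short standalone sketch of extracting the RXQNUM field from a raw I40E_PRTDCB_RUPTQ value, with I40E_MASK assumed to be the driver's shifted-mask macro:

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK(mask, shift)     ((mask) << (shift)) /* assumed definition */
#define PRTDCB_RUPTQ_RXQNUM_SHIFT  0
#define PRTDCB_RUPTQ_RXQNUM_MASK   I40E_MASK(0x3FFF, PRTDCB_RUPTQ_RXQNUM_SHIFT)

int main(void)
{
	uint32_t reg = 0xDEAD0123; /* pretend this was read from the register */
	uint32_t rxqnum = (reg & PRTDCB_RUPTQ_RXQNUM_MASK) >> PRTDCB_RUPTQ_RXQNUM_SHIFT;

	printf("RXQNUM = %u\n", (unsigned)rxqnum); /* 291 (0x123) */
	return 0;
}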
#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -430,6 +434,8 @@
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -493,7 +499,9 @@
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -557,9 +565,6 @@
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1075,7 +1080,7 @@
#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1180,7 +1185,7 @@
#define I40E_VFINT_ITRN_MAX_INDEX 2
#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1812,9 +1817,6 @@
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1911,6 +1913,11 @@
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2383,20 +2390,20 @@
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2629,10 +2636,6 @@
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_TDPC_MAX_INDEX 3
#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2999,9 +3002,6 @@
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3267,7 +3267,7 @@
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
@@ -3375,4 +3375,4 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
#endif
#endif /* _I40E_REGISTER_H_ */

View file

@@ -246,12 +246,12 @@ struct i40e_link_status {
u8 an_info;
u8 ext_info;
u8 loopback;
bool an_enabled;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
u16 max_frame_size;
bool crc_enable;
u8 pacing;
u8 requested_speeds;
};
struct i40e_phy_info {
@@ -285,6 +285,7 @@ struct i40e_hw_capabilities {
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
@@ -335,8 +336,7 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
u64 hw_semaphore_wait; /* - || - */
u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
@@ -455,9 +455,18 @@ struct i40e_fc_info {
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DCBX_MAX_APPS 32
#define I40E_LLDPDU_SIZE 1500
#define I40E_TLV_STATUS_OPER 0x1
#define I40E_TLV_STATUS_SYNC 0x2
#define I40E_TLV_STATUS_ERR 0x4
#define I40E_CEE_OPER_MAX_APPS 3
#define I40E_APP_PROTOID_FCOE 0x8906
#define I40E_APP_PROTOID_ISCSI 0x0cbc
#define I40E_APP_PROTOID_FIP 0x8914
#define I40E_APP_SEL_ETHTYPE 0x1
#define I40E_APP_SEL_TCPIP 0x2
/* IEEE 802.1Qaz ETS Configuration data */
struct i40e_ieee_ets_config {
/* CEE or IEEE 802.1Qaz ETS Configuration data */
struct i40e_dcb_ets_config {
u8 willing;
u8 cbs;
u8 maxtcs;
@@ -466,34 +475,30 @@ struct i40e_ieee_ets_config {
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
/* IEEE 802.1Qaz ETS Recommendation data */
struct i40e_ieee_ets_recommend {
u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
/* IEEE 802.1Qaz PFC Configuration data */
struct i40e_ieee_pfc_config {
/* CEE or IEEE 802.1Qaz PFC Configuration data */
struct i40e_dcb_pfc_config {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcenable;
};
/* IEEE 802.1Qaz Application Priority data */
struct i40e_ieee_app_priority_table {
/* CEE or IEEE 802.1Qaz Application Priority data */
struct i40e_dcb_app_priority_table {
u8 priority;
u8 selector;
u16 protocolid;
};
struct i40e_dcbx_config {
u8 dcbx_mode;
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u32 numapps;
struct i40e_ieee_ets_config etscfg;
struct i40e_ieee_ets_recommend etsrec;
struct i40e_ieee_pfc_config pfc;
struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
struct i40e_dcb_ets_config etscfg;
struct i40e_dcb_ets_config etsrec;
struct i40e_dcb_pfc_config pfc;
struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
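As an illustration of how the renamed app table is typically consulted (a sketch only, assuming the structures and protocol-ID defines above are in scope via i40e_type.h), a hypothetical helper that returns the priority negotiated for FCoE:

/* Illustrative helper, not part of the shared code: scan the application
 * priority table for the FCoE ethertype entry and return its priority,
 * or -1 if FCoE does not appear in the negotiated DCBX configuration. */
static int dcbx_fcoe_priority(const struct i40e_dcbx_config *cfg)
{
	u32 i;

	for (i = 0; i < cfg->numapps && i < I40E_DCBX_MAX_APPS; i++) {
		if (cfg->app[i].selector == I40E_APP_SEL_ETHTYPE &&
		    cfg->app[i].protocolid == I40E_APP_PROTOID_FCOE)
			return cfg->app[i].priority;
	}
	return -1;
}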
/* Port hardware description */
@@ -501,7 +506,7 @@ struct i40e_hw {
u8 *hw_addr;
void *back;
/* function pointer structs */
/* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -528,6 +533,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
/* for multi-function MACs */
u16 partition_id;
u16 num_partitions;
u16 num_ports;
/* Closest numa node to the device */
u16 numa_node;
@@ -653,13 +663,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
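The status bit positions above are tested against the status field pulled out of the receive descriptor's qword1; a minimal standalone sketch of checking the filter-match (FLM) bit, with the shift value copied from the enum:

#include <stdint.h>
#include <stdio.h>

#define RX_DESC_STATUS_FLM_SHIFT 11u /* matches I40E_RX_DESC_STATUS_FLM_SHIFT above */

int main(void)
{
	/* pretend this is the status field extracted from a completed descriptor */
	uint32_t rx_status = (1u << RX_DESC_STATUS_FLM_SHIFT) | 0x3u; /* extra low bits for illustration */

	if (rx_status & (1u << RX_DESC_STATUS_FLM_SHIFT))
		printf("descriptor matched a flow director filter\n");
	return 0;
}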
@@ -1194,6 +1205,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
/* Statistics collected per VEB per TC */
struct i40e_veb_tc_stats {
u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
};
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1261,17 +1280,23 @@ struct i40e_hw_port_stats {
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
#define I40E_SR_NVM_IMAGE_VERSION 0x18
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
#define I40E_SR_NVM_MAP_VERSION 0x29
#define I40E_SR_NVM_IMAGE_VERSION 0x2A
#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PXE_SETUP_PTR 0x30
#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
@@ -1284,6 +1309,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
@@ -1404,6 +1432,18 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
#define I40E_NVM_LLDP_CFG_PTR 0xD
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
u16 msgfasttx;
u16 msgtxinterval;
u16 txparams;
u16 timers;
u16 crc8;
};
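The i40e_read_lldp_cfg() prototype added earlier in this change fills in this structure from the NVM area pointed to by I40E_NVM_LLDP_CFG_PTR. A hedged usage sketch (the helper name and the adminstatus interpretation are illustrative, not part of the shared code):

/* Illustrative caller only; assumes i40e_prototype.h / i40e_type.h are included. */
static bool lldp_agent_enabled_in_nvm(struct i40e_hw *hw)
{
	struct i40e_lldp_variables lldp_cfg;
	enum i40e_status_code ret;

	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
	if (ret != I40E_SUCCESS)
		return FALSE;

	/* a non-zero adminstatus is assumed here to mean the agent is enabled */
	return (lldp_cfg.adminstatus != 0);
}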
/* Offsets into Alternate Ram */
#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */

View file

@@ -67,31 +67,29 @@
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
/* VF sends req. to pf for the following
* ops.
/* The PF sends status change events to VFs using
* the I40E_VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
I40E_VIRTCHNL_OP_RESET_VF,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_VIRTCHNL_OP_ENABLE_QUEUES,
I40E_VIRTCHNL_OP_DISABLE_QUEUES,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/
I40E_VIRTCHNL_OP_EVENT,
I40E_VIRTCHNL_OP_RESET_VF = 2,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
I40E_VIRTCHNL_OP_ADD_VLAN = 12,
I40E_VIRTCHNL_OP_DEL_VLAN = 13,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
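Pinning explicit values on these opcodes keeps the PF/VF wire protocol stable even if entries are later added or reordered in the header. A small illustrative helper (not part of the shared code) that maps an opcode to a printable name, e.g. for message tracing:

/* Hypothetical debug helper; assumes i40e_virtchnl.h is included. */
static const char *virtchnl_op_str(enum i40e_virtchnl_ops op)
{
	switch (op) {
	case I40E_VIRTCHNL_OP_VERSION:           return "VERSION";
	case I40E_VIRTCHNL_OP_RESET_VF:          return "RESET_VF";
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:  return "GET_VF_RESOURCES";
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: return "CONFIG_VSI_QUEUES";
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:     return "ENABLE_QUEUES";
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:    return "DISABLE_QUEUES";
	case I40E_VIRTCHNL_OP_GET_STATS:         return "GET_STATS";
	case I40E_VIRTCHNL_OP_EVENT:             return "EVENT";
	default:                                 return "OTHER";
	}
}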
/* Virtual channel message descriptor. This overlays the admin queue