Removed support for QLNX_RCV_IN_TASKQ; Rx is now handled only in the taskqueue.

Added support for LLDP passthru.
Upgraded ECORE to version 8.33.5.0.
Upgraded STORMFW to version 8.33.7.0.
Added support for SRIOV.

MFC after: 5 days
David C Somayajulu 2018-07-25 02:36:55 +00:00
parent eed76687f0
commit 217ec20885
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=336695
91 changed files with 86617 additions and 62582 deletions


@ -34,7 +34,7 @@
#include "ecore_status.h"
#include <sys/bitstring.h>
#if __FreeBSD_version >= 1200000
#if __FreeBSD_version >= 1200032
#include <linux/bitmap.h>
#else
#if __FreeBSD_version >= 1100090
@ -62,6 +62,7 @@ extern void qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
extern void qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
uint32_t reg_value);
extern int qlnx_pci_find_capability(void *ecore_dev, int cap);
extern int qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap);
extern uint32_t qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr);
extern void qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value);
@ -72,6 +73,7 @@ extern void qlnx_reg_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value);
extern void qlnx_reg_wr16(void *p_hwfn, uint32_t reg_addr, uint16_t value);
extern void qlnx_dbell_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value);
extern void qlnx_dbell_wr32_db(void *p_hwfn, void *reg_addr, uint32_t value);
extern void *qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys,
uint32_t size);
@ -89,6 +91,16 @@ extern void qlnx_get_protocol_stats(void *cdev, int proto_type,
extern void qlnx_sp_isr(void *arg);
extern void qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
void *p_sw_info);
extern void qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id);
extern int qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params);
extern int qlnx_iov_update_vport(void *p_hwfn, uint8_t vfid, void *params,
uint16_t *tlvs);
extern int qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id);
extern void qlnx_vf_flr_update(void *p_hwfn);
#define nothing do {} while(0)
#ifdef ECORE_PACKAGE
/* Memory Types */
@ -130,7 +142,6 @@ rounddown_pow_of_two(unsigned long x)
((type)(val1) < (type)(val2) ? (type)(val1) : (val2))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define nothing do {} while(0)
#define BUILD_BUG_ON(cond) nothing
#endif /* #ifndef QLNX_RDMA */
@ -209,6 +220,7 @@ typedef struct osal_list_t
#define DIRECT_REG_WR(p_hwfn, addr, value) qlnx_direct_reg_wr32(p_hwfn, addr, value)
#define DIRECT_REG_WR64(p_hwfn, addr, value) \
qlnx_direct_reg_wr64(p_hwfn, addr, value)
#define DIRECT_REG_WR_DB(p_hwfn, addr, value) qlnx_dbell_wr32_db(p_hwfn, addr, value)
#define DIRECT_REG_RD(p_hwfn, addr) qlnx_direct_reg_rd32(p_hwfn, addr)
#define REG_RD(hwfn, addr) qlnx_reg_rd32(hwfn, addr)
#define DOORBELL(hwfn, addr, value) \
@ -238,7 +250,8 @@ typedef struct osal_list_t
#define OSAL_DPC_ALLOC(hwfn) malloc(PAGE_SIZE, M_QLNXBUF, M_NOWAIT)
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_SCHEDULE_RECOVERY_HANDLER(x) nothing
extern void qlnx_schedule_recovery(void *p_hwfn);
#define OSAL_SCHEDULE_RECOVERY_HANDLER(x) do {qlnx_schedule_recovery(x);} while(0)
#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
#define OSAL_DPC_SYNC(hwfn) nothing
@ -371,7 +384,9 @@ do { \
#define OSAL_PCI_WRITE_CONFIG_DWORD(dev, reg, value) \
qlnx_pci_write_config_dword(dev, reg, value);
#define OSAL_PCI_FIND_CAPABILITY(dev, cap) qlnx_pci_find_capability(dev, cap);
#define OSAL_PCI_FIND_CAPABILITY(dev, cap) qlnx_pci_find_capability(dev, cap)
#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, ext_cap) \
qlnx_pci_find_ext_capability(dev, ext_cap)
#define OSAL_MMIOWB(dev) qlnx_barrier(dev)
#define OSAL_BARRIER(dev) qlnx_barrier(dev)
@ -390,8 +405,7 @@ do { \
#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
find_first_zero_bit(bitmap, length)
#define OSAL_LINK_UPDATE(hwfn) qlnx_link_update(hwfn)
#define OSAL_VF_FLR_UPDATE(hwfn)
#define OSAL_LINK_UPDATE(hwfn, ptt) qlnx_link_update(hwfn)
#define QLNX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define QLNX_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
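Both macros rely on the standard ceiling-division idiom: adding (d - 1) before an integer divide rounds the quotient up instead of down. A minimal standalone sketch of the identity (the test values are illustrative, not from the driver):

#include <assert.h>

#define QLNX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define QLNX_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	/* 10 bytes need 3 four-byte words, i.e. 12 bytes once rounded up. */
	assert(QLNX_DIV_ROUND_UP(10, 4) == 3);
	assert(QLNX_ROUNDUP(10, 4) == 12);
	/* Exact multiples are left unchanged. */
	assert(QLNX_ROUNDUP(8, 4) == 8);
	return 0;
}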
@ -536,9 +550,37 @@ OSAL_CRC8(u8 * cdu_crc8_table, u8 * data_to_crc, int data_to_crc_len, u8 init_va
#define OSAL_HW_INFO_CHANGE(p_hwfn, offset)
#define OSAL_MFW_TLV_REQ(p_hwfn)
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, req, vf_sw_info) {};
#define OSAL_LLDP_RX_TLVS(p_hwfn, buffer, len)
#define OSAL_MFW_CMD_PREEMPT(p_hwfn)
#define OSAL_TRANSCEIVER_UPDATE(p_hwfn)
#define OSAL_MFW_FILL_TLV_DATA(p_hwfn, group, data) (0)
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, res) (0)
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, req, vf_sw_info) \
qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, req, vf_sw_info)
#define OSAL_IOV_PF_RESP_TYPE(p_hwfn, relative_vf_id, status)
#define OSAL_IOV_VF_CLEANUP(p_hwfn, relative_vf_id) \
qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id)
#define OSAL_IOV_VF_ACQUIRE(p_hwfn, relative_vf_id) ECORE_SUCCESS
#define OSAL_IOV_GET_OS_TYPE() VFPF_ACQUIRE_OS_FREEBSD
#define OSAL_IOV_PRE_START_VPORT(p_hwfn, relative_vf_id, params) ECORE_SUCCESS
#define OSAL_IOV_POST_START_VPORT(p_hwfn, relative_vf_id, vport_id, opaque_fid)
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, x, y, z) ECORE_SUCCESS
#define OSAL_IOV_CHK_UCAST(p_hwfn, vfid, params) \
qlnx_iov_chk_ucast(p_hwfn, vfid, params);
#define OSAL_PF_VF_MALICIOUS(p_hwfn, relative_vf_id)
#define OSAL_IOV_VF_MSG_TYPE(p_hwfn, relative_vf_id, type)
#define OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vfid, params, tlvs) \
qlnx_iov_update_vport(p_hwfn, vfid, params, tlvs)
#define OSAL_PF_VF_MSG(p_hwfn, relative_vf_id) \
qlnx_pf_vf_msg(p_hwfn, relative_vf_id)
#define OSAL_VF_FLR_UPDATE(p_hwfn) qlnx_vf_flr_update(p_hwfn)
#define OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf)
#endif /* #ifdef ECORE_PACKAGE */
#endif /* #ifdef __BCM_OSAL_ECORE_PACKAGE */


@ -104,8 +104,8 @@
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 30
#define FW_REVISION_VERSION 0
#define FW_MINOR_VERSION 33
#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
@ -113,76 +113,69 @@
/***********************/
/* PCI functions */
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_E5 (MAX_NUM_PORTS_K2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_E5 (4)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
#define MAX_NUM_PFS_BB (8)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_E5 (MAX_NUM_PFS_K2)
#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_PFS_BB (8)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_E5 (16)
#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
#define MAX_NUM_VFS_E5 (240)
#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
#define MAX_NUM_VFS_E5 (240)
#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
/* In both BB and K2, the VF number starts from 16, so for arrays containing all */
/* possible PFs and VFs - we need a constant for this size */
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
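With the values above, for example, MAX_FUNCTION_NUMBER_BB = MAX_NUM_PFS + MAX_NUM_VFS_BB = 16 + 120 = 136 slots: the PF range is padded to 16 entries even on BB (which has only 8 PFs), so that VF indices can start at 16 on every chip, as the comment notes.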
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
#define MAX_NUM_VPORTS_E5 (256)
#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
#define MAX_NUM_VPORTS_E5 (256)
#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_E5 (320) /* TODO_E5_VITALY - fix to 512 */
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_E5)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_PHYS_TCS_4PORT_E5 (6)
#define NUM_OF_PHYS_TCS (8)
#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_TCS_4PORT_E5 (NUM_PHYS_TCS_4PORT_E5 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* Num of possible traffic priority values */
#define NUM_OF_PRIO (8)
#define NUM_PHYS_TCS_4PORT_K2 4
#define NUM_PHYS_TCS_4PORT_TX_E5 6
#define NUM_PHYS_TCS_4PORT_RX_E5 4
#define NUM_OF_PHYS_TCS 8
#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_TCS_4PORT_TX_E5 (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
#define NUM_TCS_4PORT_RX_E5 (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
#define NUM_OF_CONNECTION_TYPES_E4 (8)
#define NUM_OF_CONNECTION_TYPES_E5 (16)
#define NUM_OF_TASK_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
/* Clock values */
#define MASTER_CLK_FREQ_E4 (375e6)
#define STORM_CLK_FREQ_E4 (1000e6)
#define CLK25M_CLK_FREQ_E4 (25e6)
#define STORM_CLK_DUAL_CORE_FREQ_E5 (3000e6)
#define NUM_OF_TASK_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
#define NUM_OF_GTT 19
#define GTT_DWORD_SIZE_BITS 10
#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
#define NUM_OF_GTT 19
#define GTT_DWORD_SIZE_BITS 10
#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
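Taken together, these constants say each GTT window spans 1 << 10 = 1024 dwords, and the +2 converts the dword count to a byte count: 1 << 12 = 4096 bytes per window. A quick standalone check of that arithmetic:

#include <assert.h>

#define GTT_DWORD_SIZE_BITS 10
#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)

int main(void)
{
	assert(GTT_DWORD_SIZE == 1024); /* dwords per GTT window */
	assert((1 << GTT_BYTE_SIZE_BITS) == 4096); /* bytes per GTT window */
	return 0;
}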
/* Tools Version */
#define TOOLS_VERSION 10
@ -417,7 +410,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
#define PIS_PER_SB_E5 8
#define MAX_PIS_PER_SB OSAL_MAX_T(u8, PIS_PER_SB_E4, PIS_PER_SB_E5)
#define MAX_PIS_PER_SB OSAL_MAX_T(PIS_PER_SB_E4,PIS_PER_SB_E5)
#define CAU_HC_STOPPED_STATE 3 /* fsm is stopped or not valid for this sb */
@ -548,10 +541,6 @@
/* VF BAR */
#define PXP_VF_BAR0 0
#define PXP_VF_BAR0_START_GRC 0x3E00
#define PXP_VF_BAR0_GRC_LENGTH 0x200
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
@ -582,9 +571,17 @@
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_GRC 0x3E00
#define PXP_VF_BAR0_GRC_LENGTH 0x200
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
#define PXP_VF_BAR0_START_IGU2 0x10000
#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
@ -593,13 +590,14 @@
// ILT Records
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS OSAL_MAX_T(u16, PXP_NUM_ILT_RECORDS_BB,PXP_NUM_ILT_RECORDS_K2)
#define MAX_NUM_ILT_RECORDS OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB,PXP_NUM_ILT_RECORDS_K2)
#define PXP_NUM_ILT_RECORDS_E5 13664
// Host Interface
#define PXP_QUEUES_ZONE_MAX_NUM 320
#define PXP_QUEUES_ZONE_MAX_NUM_E4 320
#define PXP_QUEUES_ZONE_MAX_NUM_E5 512
/*****************/
@ -650,18 +648,6 @@
#define PRS_GFT_CAM_LINES_NO_MATCH 31
/*
* Async data KCQ CQE
*/
struct async_data
{
__le32 cid /* Context ID of the connection */;
__le16 itid /* Task Id of the task (for an error that happened on a task) */;
u8 error_code /* error code - relevant only if the opcode indicates it's an error */;
u8 fw_debug_param /* internal fw debug parameter */;
};
/*
* Interrupt coalescing TimeSet
*/
@ -692,24 +678,30 @@ struct eth_rx_prod_data
};
struct regpair
struct tcp_ulp_connect_done_params
{
__le32 lo /* low word for reg-pair */;
__le32 hi /* high word for reg-pair */;
__le16 mss;
u8 snd_wnd_scale;
u8 flags;
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
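The _MASK/_SHIFT pairs follow the usual ecore bit-field convention: a field is read by shifting the containing byte down and then masking. A hedged standalone sketch of extracting the ts_en bit from flags (the GET_FIELD helper below mirrors the common ecore pattern; treat its exact form as an assumption):

#include <stdint.h>

#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0

/* Shift down, then mask - the same pattern ecore's GET_FIELD() uses. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

/* Returns 1 if TCP timestamps were negotiated on this connection, else 0. */
static int tcp_ts_enabled(uint8_t flags)
{
	return GET_FIELD(flags, TCP_ULP_CONNECT_DONE_PARAMS_TS_EN);
}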
/*
* Event Ring VF-PF Channel data
*/
struct vf_pf_channel_eqe_data
struct iscsi_connect_done_results
{
struct regpair msg_addr /* VF-PF message address */;
__le16 icid /* Context ID of the connection */;
__le16 conn_id /* Driver connection ID */;
struct tcp_ulp_connect_done_params params /* decided tcp params after connect done */;
};
struct iscsi_eqe_data
{
__le32 cid /* Context ID of the connection */;
__le16 conn_id /* Task Id of the task (for an error that happened on a task) */;
__le16 icid /* Context ID of the connection */;
__le16 conn_id /* Driver connection ID */;
__le16 reserved;
u8 error_code /* error code - relevant only if the opcode indicates it's an error */;
u8 error_pdu_opcode_reserved;
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F /* The processed PDU's opcode on which the error happened - updated for specific error codes, by default=0xFF */
@ -720,78 +712,6 @@ struct iscsi_eqe_data
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
/*
* RoCE Destroy Event Data
*/
struct rdma_eqe_destroy_qp
{
__le32 cid /* Dedicated field RoCE destroy QP event */;
u8 reserved[4];
};
/*
* RDMA Event Data Union
*/
union rdma_eqe_data
{
struct regpair async_handle /* Host handle for the Async Completions */;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data /* RoCE Destroy Event Data */;
};
/*
* Event Ring malicious VF data
*/
struct malicious_vf_eqe_data
{
u8 vf_id /* Malicious VF ID */;
u8 err_id /* Malicious VF error */;
__le16 reserved[3];
};
/*
* Event Ring initial cleanup data
*/
struct initial_cleanup_eqe_data
{
u8 vf_id /* VF ID */;
u8 reserved[7];
};
/*
* Event Data Union
*/
union event_ring_data
{
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
};
/*
* Event Ring Entry
*/
struct event_ring_entry
{
u8 protocol_id /* Event Protocol ID */;
u8 opcode /* Event Opcode */;
__le16 reserved0 /* Reserved */;
__le16 echo /* Echo value from ramrod data on the host */;
u8 fw_return_code /* FW return code for SP ramrods */;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
/*
* Multi function mode
@ -824,6 +744,31 @@ enum protocol_type
};
struct regpair
{
__le32 lo /* low word for reg-pair */;
__le32 hi /* high word for reg-pair */;
};
/*
* RoCE Destroy Event Data
*/
struct rdma_eqe_destroy_qp
{
__le32 cid /* Dedicated field RoCE destroy QP event */;
u8 reserved[4];
};
/*
* RDMA Event Data Union
*/
union rdma_eqe_data
{
struct regpair async_handle /* Host handle for the Async Completions */;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data /* RoCE Destroy Event Data */;
};
/*
@ -843,7 +788,6 @@ struct ustorm_queue_zone
};
/*
* status block structure
*/
@ -893,6 +837,17 @@ struct cau_sb_entry
};
/*
* Igu cleanup bit values to distinguish between clean or producer consumer update.
*/
enum command_type_bit
{
IGU_COMMAND_TYPE_NOP=0,
IGU_COMMAND_TYPE_SET=1,
MAX_COMMAND_TYPE_BIT
};
/*
* core doorbell data
*/
@ -1339,89 +1294,73 @@ struct pxp_vf_zone_a_permission
*/
struct rdif_task_context
{
__le32 initialRefTag;
__le16 appTagValue;
__le16 appTagMask;
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
u8 flags0;
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 /* 0 = IP checksum, 1 = CRC */
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 /* 1/2/3 - Protection Type */
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 /* 0=0x0000, 1=0xffff */
#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 /* Keep reference tag constant */
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
u8 partialDifData[7];
__le16 partialCrcValue;
__le16 partialChecksumValue;
__le32 offsetInIO;
#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 /* 0 = IP checksum, 1 = CRC */
#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 /* 1/2/3 - Protection Type */
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 /* 0=0x0000, 1=0xffff */
#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 /* Keep reference tag constant */
#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
u8 partial_dif_data[7];
__le16 partial_crc_value;
__le16 partial_checksum_value;
__le32 offset_in_io;
__le16 flags1;
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 /* 0=None, 1=DIF, 2=DIX */
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 /* DIF tag right at the beginning of DIF interval */
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 /* 0=None, 1=DIF */
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 /* Forward application tag with mask */
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 /* Forward reference tag with mask */
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 /* 0=None, 1=DIF, 2=DIX */
#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 /* DIF tag right at the beginning of DIF interval */
#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 /* 0=None, 1=DIF */
#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 /* Forward application tag with mask */
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 /* Forward reference tag with mask */
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF /* mask for reference tag handling */
#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF /* mask for reference tag handling */
#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
/*
* RSS hash type
*/
enum rss_hash_type
{
RSS_HASH_TYPE_DEFAULT=0,
RSS_HASH_TYPE_IPV4=1,
RSS_HASH_TYPE_TCP_IPV4=2,
RSS_HASH_TYPE_IPV6=3,
RSS_HASH_TYPE_TCP_IPV6=4,
RSS_HASH_TYPE_UDP_IPV4=5,
RSS_HASH_TYPE_UDP_IPV6=6,
MAX_RSS_HASH_TYPE
};
/*
* status block structure
*/
@ -1469,85 +1408,85 @@ struct status_block_e5
*/
struct tdif_task_context
{
__le32 initialRefTag;
__le16 appTagValue;
__le16 appTagMask;
__le16 partialCrcValueB;
__le16 partialChecksumValueB;
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
__le16 partial_crc_value_b;
__le16 partial_checksum_value_b;
__le16 stateB;
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 /* 0 = IP checksum, 1 = CRC */
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 /* 1/2/3 - Protection Type */
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 /* 0=0x0000, 1=0xffff */
#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 /* 0 = IP checksum, 1 = CRC */
#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 /* 1/2/3 - Protection Type */
#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 /* 0=0x0000, 1=0xffff */
#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 /* 0=None, 1=DIF, 2=DIX */
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 /* DIF tag right at the beginning of DIF interval */
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */
#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 /* 0=None, 1=DIF */
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF /* mask for reference tag handling */
#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 /* Forward application tag with mask */
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 /* Forward reference tag with mask */
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 /* Keep reference tag constant */
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
__le32 offsetInIOB;
__le16 partialCrcValueA;
__le16 partialChecksumValueA;
__le32 offsetInIOA;
u8 partialDifDataA[8];
u8 partialDifDataB[8];
#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 /* 0=None, 1=DIF, 2=DIX */
#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 /* DIF tag right at the beginning of DIF interval */
#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */
#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 /* 0=None, 1=DIF */
#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF /* mask for reference tag handling */
#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 /* Forward application tag with mask */
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 /* Forward reference tag with mask */
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 /* Keep reference tag constant */
#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
__le32 offset_in_io_b;
__le16 partial_crc_value_a;
__le16 partial_checksum_value_a;
__le32 offset_in_io_a;
u8 partial_dif_data_a[8];
u8 partial_dif_data_b[8];
};
@ -1602,7 +1541,7 @@ struct timers_context
/*
* Enum for next_protocol field of tunnel_parsing_flags
* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc
*/
enum tunnel_next_protocol
{


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,6 +31,7 @@
#ifndef __ECORE_H
#define __ECORE_H
#include "ecore_status.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "ecore_hsi_init_func.h"
@ -39,8 +40,8 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 30
#define ECORE_REVISION_VERSION 0
#define ECORE_MINOR_VERSION 33
#define ECORE_REVISION_VERSION 5
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
@ -58,6 +59,7 @@
/* Constants */
#define ECORE_WID_SIZE (1024)
#define ECORE_MIN_WIDS (4)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
@ -80,11 +82,14 @@ enum ecore_nvm_cmd {
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
ECORE_ENCRYPT_PASSWORD = DRV_MSG_CODE_ENCRYPT_PASSWORD,
ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
};
#ifndef LINUX_REMOVE
#if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \
!defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI)
!defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI) && \
!defined(CONFIG_ECORE_IWARP)
#define CONFIG_ECORE_L2
#define CONFIG_ECORE_SRIOV
#define CONFIG_ECORE_ROCE
@ -93,8 +98,10 @@ enum ecore_nvm_cmd {
#define CONFIG_ECORE_ISCSI
#define CONFIG_ECORE_LL2
#endif
#endif
/* helpers */
#ifndef __EXTRACT__LINUX__IF__
#define MASK_FIELD(_name, _value) \
((_value) &= (_name##_MASK))
@ -115,9 +122,10 @@ do { \
#define SET_MFW_FIELD(name, field, value) \
do { \
(name) &= ~((field ## _MASK) << (field ## _OFFSET)); \
(name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)
#endif
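The change to SET_MFW_FIELD above is subtle: the old clear step shifted the mask by the field's offset, while the new one applies the mask as-is, which is only correct when the MFW _MASK constants are defined pre-shifted (already aligned to their _OFFSET). A standalone sketch of the new convention (the DEMO_FIELD names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Pre-shifted mask: the field occupies bits 4..7. */
#define DEMO_FIELD_MASK 0x000000f0
#define DEMO_FIELD_OFFSET 4

#define SET_MFW_FIELD(name, field, value) \
do { \
	(name) &= ~(field ## _MASK); \
	(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)

int main(void)
{
	uint32_t reg = 0xffffffff;

	SET_MFW_FIELD(reg, DEMO_FIELD, 0x5);
	assert(reg == 0xffffff5f); /* only bits 4..7 were rewritten */
	return 0;
}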
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
@ -139,6 +147,7 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
((sizeof(type_name) + (u32)(1<<(p_hwfn->p_dev->cache_shift))-1) & \
~((1<<(p_hwfn->p_dev->cache_shift))-1))
#ifndef LINUX_REMOVE
#ifndef U64_HI
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#endif
@ -146,7 +155,9 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
#ifndef U64_LO
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif
#endif
#ifndef __EXTRACT__LINUX__IF__
#ifndef UEFI
/* Debug print definitions */
#define DP_ERR(p_dev, fmt, ...) \
@ -203,6 +214,7 @@ enum DP_LEVEL {
#define ECORE_LOG_NOTICE_MASK (0x80000000)
enum DP_MODULE {
#ifndef LINUX_REMOVE
ECORE_MSG_DRV = 0x0001,
ECORE_MSG_PROBE = 0x0002,
ECORE_MSG_LINK = 0x0004,
@ -218,6 +230,7 @@ enum DP_MODULE {
ECORE_MSG_PKTDATA = 0x1000,
ECORE_MSG_HW = 0x2000,
ECORE_MSG_WOL = 0x4000,
#endif
ECORE_MSG_SPQ = 0x10000,
ECORE_MSG_STATS = 0x20000,
ECORE_MSG_DCB = 0x40000,
@ -232,6 +245,7 @@ enum DP_MODULE {
ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
@ -252,6 +266,7 @@ struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
struct ecore_llh_info;
struct ecore_rt_data {
u32 *init_val;
@ -321,6 +336,15 @@ struct ecore_qm_iids {
u32 tids;
};
/* The PCI relaxed ordering is either taken care of by the management FW or can
 * be enabled/disabled by the ecore client.
*/
enum ecore_pci_rlx_odr {
ECORE_DEFAULT_RLX_ODR,
ECORE_ENABLE_RLX_ODR,
ECORE_DISABLE_RLX_ODR
};
#define MAX_PF_PER_PORT 8
/* HW / FW resources, output of features supported below, most information
@ -387,6 +411,7 @@ enum ecore_dev_cap {
ECORE_DEV_CAP_IWARP
};
#ifndef __EXTRACT__LINUX__IF__
enum ecore_hw_err_type {
ECORE_HW_ERR_FAN_FAIL,
ECORE_HW_ERR_MFW_RESP_FAIL,
@ -395,6 +420,7 @@ enum ecore_hw_err_type {
ECORE_HW_ERR_RAMROD_FAIL,
ECORE_HW_ERR_FW_ASSERT,
};
#endif
enum ecore_wol_support {
ECORE_WOL_SUPPORT_NONE,
@ -404,6 +430,7 @@ enum ecore_wol_support {
enum ecore_db_rec_exec {
DB_REC_DRY_RUN,
DB_REC_REAL_DEAL,
DB_REC_ONCE,
};
struct ecore_hw_info {
@ -468,8 +495,10 @@ struct ecore_hw_info {
u32 hw_mode;
unsigned long device_capabilities;
#ifndef __EXTRACT__LINUX__THROW__
/* Default DCBX mode */
u8 dcbx_mode;
#endif
u16 mtu;
@ -480,8 +509,10 @@ struct ecore_hw_info {
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_dmae_info {
/* Mutex for synchronizing access to functions */
osal_mutex_t mutex;
/* Spinlock for synchronizing access to functions */
osal_spinlock_t lock;
bool b_mem_ready;
u8 channel;
@ -560,14 +591,68 @@ struct ecore_fw_data {
u32 init_ops_size;
};
enum ecore_mf_mode_bit {
/* Supports PF-classification based on tag */
ECORE_MF_OVLAN_CLSS,
/* Supports PF-classification based on MAC */
ECORE_MF_LLH_MAC_CLSS,
/* Supports PF-classification based on protocol type */
ECORE_MF_LLH_PROTO_CLSS,
/* Requires a default PF to be set */
ECORE_MF_NEED_DEF_PF,
/* Allow LL2 to multicast/broadcast */
ECORE_MF_LL2_NON_UNICAST,
/* Allow Cross-PF [& child VFs] Tx-switching */
ECORE_MF_INTER_PF_SWITCH,
/* TODO - if we ever re-utilize any of this logic, we can rename */
ECORE_MF_UFP_SPECIFIC,
ECORE_MF_DISABLE_ARFS,
/* Use vlan for steering */
ECORE_MF_8021Q_TAGGING,
/* Use stag for steering */
ECORE_MF_8021AD_TAGGING,
};
enum ecore_ufp_mode {
ECORE_UFP_MODE_ETS,
ECORE_UFP_MODE_VNIC_BW,
ECORE_UFP_MODE_UNKNOWN
};
enum ecore_ufp_pri_type {
ECORE_UFP_PRI_OS,
ECORE_UFP_PRI_VNIC,
ECORE_UFP_PRI_UNKNOWN
};
struct ecore_ufp_info {
enum ecore_ufp_pri_type pri_type;
enum ecore_ufp_mode mode;
u8 tc;
};
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
};
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
#define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
#define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
u8 port_id;
bool b_active;
@ -576,7 +661,6 @@ struct ecore_hwfn {
char name[NAME_SIZE];
void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
@ -588,6 +672,11 @@ struct ecore_hwfn {
u64 db_phys_addr;
unsigned long db_size;
#ifndef LINUX_REMOVE
u64 reg_offset;
u64 db_offset;
#endif
/* PTT pool */
struct ecore_ptt_pool *p_ptt_pool;
@ -646,6 +735,7 @@ struct ecore_hwfn {
struct ecore_pf_iov *pf_iov_info;
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
struct ecore_ufp_info ufp_info;
struct ecore_dmae_info dmae_info;
@ -679,17 +769,22 @@ struct ecore_hwfn {
struct ecore_db_recovery_info db_recovery_info;
};
#ifndef __EXTRACT__LINUX__THROW__
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
ECORE_MF_UFP,
};
#endif
#ifndef __EXTRACT__LINUX__IF__
enum ecore_dev_type {
ECORE_DEV_TYPE_BB,
ECORE_DEV_TYPE_AH,
ECORE_DEV_TYPE_E5,
};
#endif
struct ecore_dev {
u32 dp_module;
@ -709,7 +804,7 @@ struct ecore_dev {
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
#define ECORE_IS_E5(dev) ((dev)->type == ECORE_DEV_TYPE_E5)
#define ECORE_E5_MISSING_CODE OSAL_BUILD_BUG_ON(false)
@ -722,55 +817,60 @@ struct ecore_dev {
#define ECORE_DEV_ID_MASK_E5 0x8100
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 0
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
u8 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 0
#ifndef ASIC_ONLY
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_EMUL_B0(_p_dev))
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev))
#define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
#define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_FPGA_A0(_p_dev) || \
!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev) || \
(_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev))
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) \
(CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev))
#define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
#define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \
(!(_p_dev)->chip_rev && !(_p_dev)->chip_metal))
#define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \
((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal))
#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_A0(_p_dev) \
(!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)
#define CHIP_REV_IS_B0(_p_dev) \
((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)
#endif
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u8 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 0
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xff
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports;
u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
unsigned long mf_bits;
#ifndef __EXTRACT__LINUX__THROW__
enum ecore_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
#endif
int pcie_width;
int pcie_speed;
@ -804,13 +904,29 @@ struct ecore_dev {
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* Engine affinity */
u8 l2_affin_hint;
u8 fir_affin;
u8 iwarp_affin;
/* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */
#define ECORE_FIR_AFFIN_HWFN(dev) (&dev->hwfns[dev->fir_affin])
/* Macro for getting the engine-affinitized hwfn for iWARP */
#define ECORE_IWARP_AFFIN_HWFN(dev) (&dev->hwfns[dev->iwarp_affin])
/* Generic macro for getting the engine-affinitized hwfn */
#define ECORE_AFFIN_HWFN(dev) \
(ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \
ECORE_IWARP_AFFIN_HWFN(dev) : \
ECORE_FIR_AFFIN_HWFN(dev))
/* Macro for getting the index (0/1) of the engine-affinitized hwfn */
#define ECORE_AFFIN_HWFN_IDX(dev) \
(IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1)
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
#ifdef CONFIG_ECORE_SW_CHANNEL
bool b_hw_channel;
#endif
struct ecore_tunnel_info tunnel;
bool b_is_vf;
bool b_dont_override_vf_msix;
@ -820,6 +936,7 @@ struct ecore_dev {
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
u8 ilt_page_size;
struct ecore_eth_stats *reset_stats;
struct ecore_fw_data *fw_data;
@ -841,6 +958,9 @@ struct ecore_dev {
#ifndef ASIC_ONLY
bool b_is_emul_full;
#endif
/* LLH info */
u8 ppfid_bitmap;
struct ecore_llh_info *p_llh_info;
};
#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
@ -854,7 +974,9 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
#ifndef LINUX_REMOVE
#define CRC8_TABLE_SIZE 256
#endif
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
@ -892,7 +1014,6 @@ int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
int ecore_device_get_port_id(struct ecore_dev *p_dev);
void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
u8 *mac);
@ -917,7 +1038,7 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
/* doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
enum ecore_db_rec_exec);
enum ecore_db_rec_exec db_exec);
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
@ -926,6 +1047,29 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
ecore_device_num_ports((_p_hwfn)->p_dev))
/* The PFID<->PPFID calculation is based on the relative index of a PF on its
* port. In BB there is a bug in the LLH in which the PPFID is actually engine
* based, and thus it equals the PFID.
*/
#define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \
(ECORE_IS_BB((_p_hwfn)->p_dev) ? \
(abs_ppfid) : \
(abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \
MFW_PORT(_p_hwfn))
#define ECORE_PPFID_BY_PFID(_p_hwfn) \
(ECORE_IS_BB((_p_hwfn)->p_dev) ? \
(_p_hwfn)->rel_pf_id : \
(_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine)
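As a worked example of the non-BB branch (the values are illustrative): with 2 ports per engine, abs_ppfid 3 on MFW port 1 maps to PFID 3 * 2 + 1 = 7, and rel_pf_id 7 maps back to PPFID 7 / 2 = 3. A standalone sketch of the same arithmetic, with hypothetical helper names standing in for the macros:

#include <assert.h>

/* Hypothetical stand-ins for the AH/K2 branch of the macros above. */
static int pfid_by_ppfid(int abs_ppfid, int ports_in_engine, int mfw_port)
{
	return abs_ppfid * ports_in_engine + mfw_port;
}

static int ppfid_by_pfid(int rel_pf_id, int ports_in_engine)
{
	return rel_pf_id / ports_in_engine;
}

int main(void)
{
	assert(pfid_by_ppfid(3, 2, 1) == 7);
	assert(ppfid_by_pfid(7, 2) == 3);
	return 0;
}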
enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 addr,
u32 val);
/* Utility functions for dumping the content of the NIG LLH filters */
enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
#endif /* __ECORE_H */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -196,11 +196,13 @@ static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
return p_chain->u.chain16.prod_idx;
}
#ifndef LINUX_REMOVE
static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u32(p_chain));
return p_chain->u.chain32.prod_idx;
}
#endif
static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
{
@ -232,8 +234,10 @@ static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
(u32)(p_chain->u.chain16.prod_idx)) -
(u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
used -= (((u32)ECORE_U16_MAX + 1) / p_chain->elem_per_page +
p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
p_chain->u.chain16.cons_idx / p_chain->elem_per_page) %
p_chain->page_cnt;
return (u16)(p_chain->capacity - used);
}
@ -249,12 +253,15 @@ ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
(u64)(p_chain->u.chain32.prod_idx)) -
(u64)p_chain->u.chain32.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
used -= (((u64)ECORE_U32_MAX + 1) / p_chain->elem_per_page +
p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
p_chain->u.chain32.cons_idx / p_chain->elem_per_page) %
p_chain->page_cnt;
return p_chain->capacity - used;
}
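The added modulo term fixes the next-ptr accounting across index wraparound: once prod_idx wraps past zero while cons_idx has not, the old per-page subtraction went negative. Folding in the index-space size (2^16 or 2^32 divided by elem_per_page) and reducing modulo page_cnt keeps the count of in-flight next-ptr elements correct. A standalone sketch of the corrected arithmetic for the u16 variant (the chain geometry here is illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t elem_per_page = 8, page_cnt = 4;
	const uint16_t prod_idx = 2;     /* producer wrapped past 0xffff */
	const uint16_t cons_idx = 65530; /* consumer has not wrapped yet */

	/* used = prod - cons, computed modulo 2^16 as in the driver */
	uint32_t used = (uint32_t)(((uint32_t)65536 + prod_idx) - cons_idx);

	/* one page boundary was crossed, so one next-ptr element is in use */
	used -= ((65536u / elem_per_page) +
	    prod_idx / elem_per_page -
	    cons_idx / elem_per_page) % page_cnt;

	assert(used == 7); /* the old formula would have yielded 8199 */
	return 0;
}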
#ifndef LINUX_REMOVE
static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
@ -278,6 +285,7 @@ u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_per_page;
}
#endif
static OSAL_INLINE
u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
@ -291,10 +299,12 @@ u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
return p_chain->elem_unusable;
}
#ifndef LINUX_REMOVE
static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
{
return p_chain->size;
}
#endif
static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
{
@ -373,6 +383,7 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
} \
} while (0)
#ifndef LINUX_REMOVE
/**
* @brief ecore_chain_return_multi_produced -
*
@ -391,6 +402,7 @@ void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
p_chain->u.chain32.cons_idx += num;
test_and_skip(p_chain, cons_idx);
}
#endif
/**
* @brief ecore_chain_return_produced -
@ -551,7 +563,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
/* Use (page_cnt - 1) as a reset value for the prod/cons page's
/* Use "page_cnt-1" as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
@ -750,6 +762,21 @@ static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
/* Use "prod_idx-1" since ecore_chain_produce() advances the
* page index before the producer index when getting to
* "next_page_mask".
*/
u32 elem_idx =
(prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
u32 page_idx = elem_idx / p_chain->elem_per_page;
if (is_chain_u16(p_chain))
p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)page_idx;
else
p_chain->pbl.c.pbl_u32.prod_page_idx = page_idx;
}
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
@ -757,6 +784,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
p_chain->p_prod_elem = p_prod_elem;
}
/**
* @brief ecore_chain_set_cons - sets the cons to the given value
*
* @param cons_idx
* @param p_cons_elem
*/
static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
u32 cons_idx, void *p_cons_elem)
{
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
/* Use "cons_idx-1" since ecore_chain_consume() advances the
* page index before the consumer index when getting to
* "next_page_mask".
*/
u32 elem_idx =
(cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
u32 page_idx = elem_idx / p_chain->elem_per_page;
if (is_chain_u16(p_chain))
p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)page_idx;
else
p_chain->pbl.c.pbl_u32.cons_page_idx = page_idx;
}
if (is_chain_u16(p_chain))
p_chain->u.chain16.cons_idx = (u16)cons_idx;
else
p_chain->u.chain32.cons_idx = cons_idx;
p_chain->p_cons_elem = p_cons_elem;
}
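As a worked example of the "-1" adjustment in both setters: with 8 elements per page and a capacity of 32, setting prod_idx (or cons_idx) to 16 gives elem_idx = (16 - 1 + 32) % 32 = 15 and page_idx = 15 / 8 = 1, i.e. the page holding the last element actually produced or consumed. This matches ecore_chain_produce()/ecore_chain_consume() advancing the page index before the element index at the next_page_mask boundary.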
/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_roce.h"
#include "ecore_rdma.h"
#include "ecore_mcp.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
@ -72,8 +72,6 @@ __FBSDID("$FreeBSD$");
#define TM_ELEM_SIZE 4
/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
@ -117,6 +115,7 @@ struct src_ent {
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@ -152,16 +151,6 @@ struct ecore_conn_type_cfg {
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
enum ilt_clients {
ILT_CLI_CDUC,
ILT_CLI_CDUT,
ILT_CLI_QM,
ILT_CLI_TM,
ILT_CLI_SRC,
ILT_CLI_TSDM,
ILT_CLI_MAX
};
struct ilt_cfg_pair {
u32 reg;
u32 val;
@ -256,6 +245,7 @@ struct ecore_cxt_mngr {
/* total number of SRQ's for this hwfn */
u32 srq_count;
u32 xrc_srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
@ -440,18 +430,58 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn,
u32 num_srqs, u32 num_xrc_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
p_mgr->xrc_srq_count = num_xrc_srqs;
}
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
return p_hwfn->p_cxt_mngr->srq_count;
}
return p_mgr->srq_count;
u32 ecore_cxt_get_xrc_srq_count(struct ecore_hwfn *p_hwfn)
{
return p_hwfn->p_cxt_mngr->xrc_srq_count;
}
u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn,
enum ilt_clients ilt_client)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}
static u32 ecore_cxt_srqs_per_page(struct ecore_hwfn *p_hwfn)
{
u32 page_size;
page_size = ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
return page_size / SRQ_CXT_SIZE;
}
u32 ecore_cxt_get_total_srq_count(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
u32 total_srqs;
total_srqs = p_mgr->srq_count;
/* XRC SRQs use the first and only the first SRQ ILT page. So if XRC
* SRQs are requested we need to allocate an extra SRQ ILT page for
* them. For that we increase the number of regular SRQs to cause the
* allocation of that extra page.
*/
if (p_mgr->xrc_srq_count)
total_srqs += ecore_cxt_srqs_per_page(p_hwfn);
return total_srqs;
}
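Concretely, ecore_cxt_srqs_per_page() is the TSDM ILT page size divided by SRQ_CXT_SIZE, so any non-zero XRC SRQ request inflates the regular SRQ count by exactly one page's worth of contexts. A standalone sketch of that sizing (the 64 KB page and 128-byte context size are assumptions for illustration):

#include <assert.h>

int main(void)
{
	const unsigned page_size = 64 * 1024; /* assumed default ILT page */
	const unsigned srq_cxt_size = 128;    /* assumed context size */
	const unsigned srq_count = 1000, xrc_srq_count = 10;

	unsigned srqs_per_page = page_size / srq_cxt_size; /* 512 */
	unsigned total = srq_count;

	/* Any non-zero XRC SRQ request reserves one extra full ILT page. */
	if (xrc_srq_count)
		total += srqs_per_page;

	assert(total == 1512);
	return 0;
}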
/* set the iids (cid/tid) count per protocol */
@ -854,8 +884,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
}
/* TSDM (SRQ CONTEXT) */
total = ecore_cxt_get_srq_count(p_hwfn);
total = ecore_cxt_get_total_srq_count(p_hwfn);
if (total) {
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
@ -965,7 +994,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->t2_num_pages *
sizeof(struct ecore_dma_mem));
if (!p_mngr->t2) {
DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
@ -1053,6 +1082,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
if (p_mngr->ilt_shadow == OSAL_NULL)
return;
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
@ -1066,6 +1098,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
p_dma->p_virt = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
p_mngr->ilt_shadow = OSAL_NULL;
}
static enum _ecore_status_t ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
@ -1131,8 +1164,8 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
size * sizeof(struct ecore_dma_mem));
if (!p_mngr->ilt_shadow) {
DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table\n");
if (p_mngr->ilt_shadow == OSAL_NULL) {
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
@ -1175,12 +1208,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].cid_map = OSAL_NULL;
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@ -1257,7 +1292,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_cxt_mngr'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
return ECORE_NOMEM;
}
@ -1289,7 +1324,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
/* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
p_mngr->clients[i].p_size.val = p_hwfn->p_dev->ilt_page_size;
/* Initialize task sizes */
p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
@ -1317,21 +1352,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate the T2 table */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = ecore_cid_map_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}
@ -1559,22 +1594,28 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool is_pf_loading)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_mcp_link_state *p_link;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
is_pf_loading,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
qm_info->num_vf_pqs,
qm_info->start_vport,
qm_info->num_vports, qm_info->pf_wfq, qm_info->pf_rl,
qm_info->num_vports, qm_info->pf_wfq,
qm_info->pf_rl, p_link->speed,
p_hwfn->qm_info.qm_pq_params,
p_hwfn->qm_info.qm_vport_params);
}
@ -1752,7 +1793,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
if (p_shdw[line].p_virt != OSAL_NULL) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
(unsigned long long)(p_shdw[line].p_phys >> 12));
DP_VERBOSE(
p_hwfn, ECORE_MSG_ILT,
@ -1941,7 +1982,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_qm_init_pf(p_hwfn, p_ptt, true);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
@ -2128,12 +2169,9 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_pf_params *p_params,
u32 num_tasks)
{
u32 num_cons, num_qps, num_srqs;
u32 num_cons, num_qps;
enum protocol_type proto;
/* Override personality with rdma flavor */
num_srqs = OSAL_MIN_T(u32, ECORE_RDMA_MAX_SRQS, p_params->num_srqs);
/* The only case RDMA personality can be overriden is if NVRAM is
* configured with ETH_RDMA or if no rdma protocol was requested
*/
@ -2168,7 +2206,6 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
num_cons += ECORE_IWARP_PREALLOC_CNT;
#endif
proto = PROTOCOLID_IWARP;
p_params->roce_edpm_mode = false;
break;
case ECORE_PCI_ETH_ROCE:
num_qps = OSAL_MIN_T(u32, ROCE_MAX_QPS, p_params->num_qps);
@ -2180,6 +2217,8 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
}
if (num_cons && num_tasks) {
u32 num_srqs, num_xrc_srqs, max_xrc_srqs, page_size;
ecore_cxt_set_proto_cid_count(p_hwfn, proto,
num_cons, 0);
@ -2191,7 +2230,18 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
1, /* RoCE segment type */
num_tasks,
false); /* !force load */
ecore_cxt_set_srq_count(p_hwfn, num_srqs);
num_srqs = OSAL_MIN_T(u32, ECORE_RDMA_MAX_SRQS,
p_params->num_srqs);
/* XRC SRQs populate a single ILT page */
page_size = ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
max_xrc_srqs = page_size / XRC_SRQ_CXT_SIZE;
max_xrc_srqs = OSAL_MIN_T(u32, max_xrc_srqs, ECORE_RDMA_MAX_XRC_SRQS);
num_xrc_srqs = OSAL_MIN_T(u32, p_params->num_xrc_srqs,
max_xrc_srqs);
ecore_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
} else {
DP_INFO(p_hwfn->p_dev,
@ -2223,6 +2273,8 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
}
case ECORE_PCI_ETH:
{
u32 count = 0;
struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
@ -2231,7 +2283,12 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons,
p_params->num_vf_cons);
p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
count = p_params->num_arfs_filters;
if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
&p_hwfn->p_dev->mf_bits))
p_hwfn->p_cxt_mngr->arfs_count = count;
break;
}
@ -2358,10 +2415,17 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUC_BLK];
break;
case ECORE_ELEM_SRQ:
/* The first ILT page is not used for regular SRQs. Skip it. */
iid += ecore_cxt_srqs_per_page(p_hwfn);
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK];
break;
case ECORE_ELEM_XRC_SRQ:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = XRC_SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK];
break;
case ECORE_ELEM_TASK:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@ -2384,7 +2448,9 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
* This section can be run in parallel from different contexts and thus
* mutex protection is needed.
*/
#ifdef _NTDDK_
#pragma warning(suppress : 28121)
#endif
OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
@ -2421,7 +2487,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
TDIF_TASK_CONTEXT_REFTAGMASK , 0xf);
TDIF_TASK_CONTEXT_REF_TAG_MASK , 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
@ -2444,7 +2510,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
0 /* no flags */);
OSAL_NULL /* default parameters */);
if (elem_type == ECORE_ELEM_CXT) {
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@ -2558,7 +2624,7 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
(u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
0 /* no flags */);
OSAL_NULL /* default parameters */);
}
ecore_ptt_release(p_hwfn, p_ptt);


@ -43,7 +43,18 @@
enum ecore_cxt_elem_type {
ECORE_ELEM_CXT,
ECORE_ELEM_SRQ,
ECORE_ELEM_TASK
ECORE_ELEM_TASK,
ECORE_ELEM_XRC_SRQ,
};
enum ilt_clients {
ILT_CLI_CDUC,
ILT_CLI_CDUT,
ILT_CLI_QM,
ILT_CLI_TM,
ILT_CLI_SRC,
ILT_CLI_TSDM,
ILT_CLI_MAX
};
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
@ -55,8 +66,11 @@ u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
u32 ecore_cxt_get_xrc_srq_count(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
@ -138,8 +152,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
* @param is_pf_loading
*/
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
@ -237,4 +253,9 @@ enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u8 ctx_type,
void **task_ctx);
u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn,
enum ilt_clients ilt_client);
u32 ecore_cxt_get_total_srq_count(struct ecore_hwfn *p_hwfn);
#endif /* _ECORE_CID_ */

File diff suppressed because it is too large


@ -68,6 +68,21 @@ u32 ecore_dbg_get_fw_func_ver(void);
*/
enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_read_regs - Reads registers into a buffer (using GRC).
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for reading the registers.
* @param buf - Destination buffer.
* @param addr - Source GRC address in dwords.
* @param len - Number of registers to read.
*/
void ecore_read_regs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf,
u32 addr,
u32 len);
/**
* @brief ecore_dbg_bus_reset - Resets the Debug block.
*
@ -198,12 +213,12 @@ enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
enum block_id block,
u8 line_num,
u8 cycle_en,
u8 right_shift,
u8 force_valid,
u8 force_frame);
enum block_id block,
u8 line_num,
u8 cycle_en,
u8 right_shift,
u8 force_valid,
u8 force_frame);
/**
* @brief ecore_dbg_bus_enable_storm - Enables recording of the specified Storm

File diff suppressed because it is too large


@ -39,7 +39,7 @@ __FBSDID("$FreeBSD$");
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#include "ecore_rdma.h"
#endif
#include "ecore_iov_api.h"
@ -194,46 +194,61 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
}
}
u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
{
struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
u8 i;
if (!dscp->enabled)
return ECORE_DCBX_DSCP_DISABLED;
for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
if (pri == dscp->dscp_pri_map[i])
return i;
return ECORE_DCBX_DSCP_DISABLED;
}
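A self-contained sketch of the reverse lookup above, assuming the 64-entry map and the 0xFF sentinel defined elsewhere in this file: dscp_pri_map[] maps a DSCP index to a priority, so the lookup returns the first DSCP index carrying the requested priority.

#include <stdint.h>

#define DSCP_SIZE	64	/* stands in for ECORE_DCBX_DSCP_SIZE */
#define DSCP_DISABLED	0xFF	/* stands in for ECORE_DCBX_DSCP_DISABLED */

/* Return the first DSCP index mapped to 'pri', or DSCP_DISABLED. */
static uint8_t dscp_for_pri(const uint8_t map[DSCP_SIZE], uint8_t pri)
{
	uint8_t i;

	for (i = 0; i < DSCP_SIZE; i++)
		if (map[i] == pri)
			return i;

	return DSCP_DISABLED;
}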
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
/* PF update ramrod data */
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
p_data->arr[type].dscp_enable = dscp->enabled;
if (p_data->arr[type].dscp_enable) {
u8 i;
p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio);
if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) {
p_data->arr[type].dscp_enable = false;
p_data->arr[type].dscp_val = 0;
} else
p_data->arr[type].dscp_enable = enable;
for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
if (prio == dscp->dscp_pri_map[i]) {
p_data->arr[type].dscp_val = i;
break;
}
}
p_data->arr[type].update = UPDATE_DCB_DSCP;
if (enable && p_data->arr[type].dscp_enable)
p_data->arr[type].update = UPDATE_DCB_DSCP;
else if (enable)
p_data->arr[type].update = UPDATE_DCB;
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
/* Do not add vlan tag 0 when DCB is enabled and port is in UFP mode */
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
p_hwfn->hw_info.offload_tc = tc;
/* Configure dcbx vlan priority in doorbell block for roce EDPM */
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
(type == DCBX_PROTOCOL_ROCE)) {
ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
}
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
@ -249,7 +264,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
ecore_dcbx_set_params(p_data, p_hwfn, enable,
ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@ -302,27 +317,27 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
*type = DCBX_PROTOCOL_IWARP;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
DP_ERR(p_hwfn,
"No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
id, app_prio_bitmap);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"No action required, App TLV entry = 0x%x\n",
app_prio_bitmap);
return false;
}
return true;
}
/* Parse app TLVs to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
{
enum dcbx_protocol_type type;
bool enable, ieee, eth_tlv;
u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
u8 priority;
enum _ecore_status_t rc = ECORE_SUCCESS;
@ -333,6 +348,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
eth_tlv = false;
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
@ -356,13 +372,22 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
enable = !(type == DCBX_PROTOCOL_ETH);
if (type == DCBX_PROTOCOL_ETH) {
enable = false;
eth_tlv = true;
} else
enable = true;
ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
enable, priority, tc, type);
}
}
/* If Eth TLV is not detected, use UFP TC as default TC */
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
&p_hwfn->p_dev->mf_bits) && !eth_tlv)
p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLVs are not detected.
* The enabled field has a different logic for ethernet as only for
@ -375,8 +400,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
/* if no app tlv was present, don't override in FW */
ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
priority, tc, type);
}
@ -387,7 +412,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
@ -411,7 +436,7 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
rc = ecore_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
@ -448,6 +473,12 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
p_data->addr, p_data->size);
prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
} else if (type == ECORE_DCBX_LLDP_TLVS) {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs,
p_data->addr, p_data->size);
prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num;
suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num;
} else {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
p_data->addr, p_data->size);
@ -527,7 +558,7 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
DCBX_APP_NUM_ENTRIES);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_params->app_entry[i];
if (ieee) {
u8 sf_ieee;
@ -685,7 +716,32 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
params->remote.valid = true;
}
static enum _ecore_status_t
static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_dscp_params *p_dscp;
struct dcb_dscp_map *p_dscp_map;
int i, j, entry;
u32 pri_map;
p_dscp = &params->dscp;
p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
/* MFW encodes 64 dscp entries into an 8-element array of u32 words,
* where each word holds the 4-bit priority map for 8 dscp entries.
*/
for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
pri_map = p_dscp_map->dscp_pri_map[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
entry, pri_map);
for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
(j * 4)) & 0xf;
}
}
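A standalone sketch of that unpacking, under the same 64-entry assumption: eight u32 words, each carrying eight 4-bit priorities, expand into one priority per DSCP code point.

#include <stdint.h>

/* Expand 8 packed u32 words into 64 4-bit DSCP-to-priority entries. */
static void decode_dscp_map(const uint32_t words[8], uint8_t pri[64])
{
	int i, j, entry = 0;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 8; j++, entry++)
			pri[entry] = (words[i] >> (j * 4)) & 0xf;
}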
static void
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
@ -708,7 +764,7 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->enabled = enabled;
p_operational->valid = false;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
return ECORE_INVAL;
return;
}
p_feat = &p_hwfn->p_dcbx_info->operational.features;
@ -741,33 +797,6 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
return ECORE_SUCCESS;
}
static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_dscp_params *p_dscp;
struct dcb_dscp_map *p_dscp_map;
int i, j, entry;
u32 pri_map;
p_dscp = &params->dscp;
p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
/* MFW encodes 64 dscp entries into 8 element array of u32 entries,
* where each entry holds the 4bit priority map for 8 dscp entries.
*/
for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
entry, pri_map);
for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
(j * 4)) & 0xf;
}
}
static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
@ -779,9 +808,9 @@ static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_local.local_chassis_id,
p_local->local_chassis_id,
OSAL_ARRAY_SIZE(p_local->local_chassis_id));
sizeof(params->lldp_local.local_chassis_id));
OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
OSAL_ARRAY_SIZE(p_local->local_port_id));
sizeof(params->lldp_local.local_port_id));
}
static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
@ -793,9 +822,9 @@ static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
p_remote->peer_chassis_id,
OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
sizeof(params->lldp_remote.peer_chassis_id));
OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
OSAL_ARRAY_SIZE(p_remote->peer_port_id));
sizeof(params->lldp_remote.peer_port_id));
}
static enum _ecore_status_t
@ -975,7 +1004,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
rc = ecore_dcbx_process_mib_info(p_hwfn);
rc = ecore_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@ -1001,10 +1030,18 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_dcbx_results *p_data;
u16 val;
/* Update the DSCP to TC mapping bit if required */
/* Update the DSCP to TC mapping enable bit if required */
if (p_hwfn->p_dcbx_info->dscp_nig_update) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE,
0x1);
u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE;
rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to update the DSCP to TC mapping enable bit\n");
return rc;
}
p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
@ -1026,17 +1063,19 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
#ifndef __EXTRACT__LINUX__
OSAL_BUILD_BUG_ON(ECORE_LLDP_CHASSIS_ID_STAT_LEN !=
LLDP_CHASSIS_ID_STAT_LEN);
OSAL_BUILD_BUG_ON(ECORE_LLDP_PORT_ID_STAT_LEN !=
LLDP_PORT_ID_STAT_LEN);
OSAL_BUILD_BUG_ON(ECORE_DCBX_MAX_APP_PROTOCOL !=
DCBX_MAX_APP_PROTOCOL);
#endif
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_dcbx_info'");
return ECORE_NOMEM;
}
@ -1062,6 +1101,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_tc = p_src->arr[type].tc;
p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
p_data->dscp_val = p_src->arr[type].dscp_val;
p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
@ -1071,8 +1111,6 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct protocol_dcb_data *p_dcb_data;
u8 update_flag;
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
p_dest->update_fcoe_dcb_data_mode = update_flag;
@ -1115,16 +1153,15 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
rc = ECORE_TIMEOUT;
DP_ERR(p_hwfn, "rc = %d\n", rc);
return rc;
}
if (!p_ptt)
return ECORE_TIMEOUT;
rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
if (rc != ECORE_SUCCESS)
goto out;
ecore_dcbx_get_dscp_params(p_hwfn, p_get);
rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
out:
@ -1236,7 +1273,7 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
p_app->flags |= (u32)p_params->num_app_entries <<
DCBX_APP_NUM_ENTRIES_OFFSET;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry = 0;
if (ieee) {
@ -1280,14 +1317,14 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
*entry |= ((u32)p_params->app_entry[i].proto_id <<
DCBX_APP_PROTOCOL_ID_OFFSET);
*entry &= ~DCBX_APP_PRI_MAP_MASK;
*entry |= ((u32)(p_params->app_entry[i].prio) <<
*entry |= ((u32)(1 << p_params->app_entry[i].prio) <<
DCBX_APP_PRI_MAP_OFFSET);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
static enum _ecore_status_t
static void
ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
struct dcbx_local_params *local_admin,
struct ecore_dcbx_set *params)
@ -1305,6 +1342,9 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
} else
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n",
local_admin->config);
if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@ -1316,8 +1356,6 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
&params->config.params, ieee);
return ECORE_SUCCESS;
}
static enum _ecore_status_t
@ -1341,12 +1379,18 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
(j * 4));
p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
p_dscp_map->dscp_pri_map[i] = val;
}
p_hwfn->p_dcbx_info->dscp_nig_update = true;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);
return ECORE_SUCCESS;
}
@ -1362,15 +1406,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
u32 resp = 0, param = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (!hw_commit) {
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
sizeof(p_hwfn->p_dcbx_info->set));
if (!hw_commit)
return ECORE_SUCCESS;
}
/* clear set-params cache */
OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
sizeof(struct ecore_dcbx_set));
OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
@ -1406,7 +1445,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params)
{
struct ecore_dcbx_get *dcbx_info;
int rc;
enum _ecore_status_t rc;
if (p_hwfn->p_dcbx_info->set.config.valid) {
OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@ -1416,10 +1455,8 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*dcbx_info));
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
if (!dcbx_info)
return ECORE_NOMEM;
}
OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
@ -1439,9 +1476,12 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp,
&p_hwfn->p_dcbx_info->get.dscp,
sizeof(struct ecore_dcbx_dscp_params));
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
sizeof(struct ecore_dcbx_admin_params));
sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@ -1451,3 +1491,330 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_lldp_agent agent,
u8 tlv_type)
{
u32 mb_param = 0, mcp_resp = 0, mcp_param = 0, val = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
switch (agent) {
case ECORE_LLDP_NEAREST_BRIDGE:
val = LLDP_NEAREST_BRIDGE;
break;
case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
val = LLDP_NEAREST_NON_TPMR_BRIDGE;
break;
case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
val = LLDP_NEAREST_CUSTOMER_BRIDGE;
break;
default:
DP_ERR(p_hwfn, "Invalid agent type %d\n", agent);
return ECORE_INVAL;
}
SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_TLV_RX_TYPE, tlv_type);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX,
mb_param, &mcp_resp, &mcp_param);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to register TLV\n");
return rc;
}
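A hedged usage sketch for the new passthru hook; TLV type 127 (organizationally specific) is only an illustrative choice, and the caller is assumed to already hold a PTT window.

static enum _ecore_status_t
lldp_passthru_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	/* 127 = org-specific TLV type; illustrative value only */
	return ecore_lldp_register_tlv(p_hwfn, p_ptt,
				       ECORE_LLDP_NEAREST_BRIDGE, 127);
}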
enum _ecore_status_t
ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
struct lldp_received_tlvs_s tlvs;
int i, j;
for (i = 0; i < LLDP_MAX_LLDP_AGENTS; i++) {
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, lldp_received_tlvs[i]);
data.lldp_tlvs = &tlvs;
data.size = sizeof(tlvs);
rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data,
ECORE_DCBX_LLDP_TLVS);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "Failed to read lldp TLVs\n");
return rc;
}
if (!tlvs.length)
continue;
for (j = 0; j < MAX_TLV_BUFFER; j++)
tlvs.tlvs_buffer[j] =
OSAL_CPU_TO_BE32(tlvs.tlvs_buffer[j]);
OSAL_LLDP_RX_TLVS(p_hwfn, tlvs.tlvs_buffer, tlvs.length);
}
return rc;
}
enum _ecore_status_t
ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_config_params *p_params)
{
struct lldp_config_params_s lldp_params;
u32 addr, val;
int i;
switch (p_params->agent) {
case ECORE_LLDP_NEAREST_BRIDGE:
val = LLDP_NEAREST_BRIDGE;
break;
case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
val = LLDP_NEAREST_NON_TPMR_BRIDGE;
break;
case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
val = LLDP_NEAREST_CUSTOMER_BRIDGE;
break;
default:
DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
return ECORE_INVAL;
}
addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, lldp_config_params[val]);
ecore_memcpy_from(p_hwfn, p_ptt, &lldp_params, addr,
sizeof(lldp_params));
p_params->tx_interval = GET_MFW_FIELD(lldp_params.config,
LLDP_CONFIG_TX_INTERVAL);
p_params->tx_hold = GET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD);
p_params->tx_credit = GET_MFW_FIELD(lldp_params.config,
LLDP_CONFIG_MAX_CREDIT);
p_params->rx_enable = GET_MFW_FIELD(lldp_params.config,
LLDP_CONFIG_ENABLE_RX);
p_params->tx_enable = GET_MFW_FIELD(lldp_params.config,
LLDP_CONFIG_ENABLE_TX);
OSAL_MEMCPY(p_params->chassis_id_tlv, lldp_params.local_chassis_id,
sizeof(p_params->chassis_id_tlv));
for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
p_params->chassis_id_tlv[i] =
OSAL_BE32_TO_CPU(p_params->chassis_id_tlv[i]);
OSAL_MEMCPY(p_params->port_id_tlv, lldp_params.local_port_id,
sizeof(p_params->port_id_tlv));
for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
p_params->port_id_tlv[i] =
OSAL_BE32_TO_CPU(p_params->port_id_tlv[i]);
return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_config_params *p_params)
{
u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
struct lldp_config_params_s lldp_params;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 addr, val;
int i;
switch (p_params->agent) {
case ECORE_LLDP_NEAREST_BRIDGE:
val = LLDP_NEAREST_BRIDGE;
break;
case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
val = LLDP_NEAREST_NON_TPMR_BRIDGE;
break;
case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
val = LLDP_NEAREST_CUSTOMER_BRIDGE;
break;
default:
DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
return ECORE_INVAL;
}
SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, lldp_config_params[val]);
OSAL_MEMSET(&lldp_params, 0, sizeof(lldp_params));
SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_TX_INTERVAL,
p_params->tx_interval);
SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD, p_params->tx_hold);
SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_MAX_CREDIT,
p_params->tx_credit);
SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_RX,
!!p_params->rx_enable);
SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_TX,
!!p_params->tx_enable);
for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
p_params->chassis_id_tlv[i] =
OSAL_CPU_TO_BE32(p_params->chassis_id_tlv[i]);
OSAL_MEMCPY(lldp_params.local_chassis_id, p_params->chassis_id_tlv,
sizeof(lldp_params.local_chassis_id));
for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
p_params->port_id_tlv[i] =
OSAL_CPU_TO_BE32(p_params->port_id_tlv[i]);
OSAL_MEMCPY(lldp_params.local_port_id, p_params->port_id_tlv,
sizeof(lldp_params.local_port_id));
ecore_memcpy_to(p_hwfn, p_ptt, addr, &lldp_params, sizeof(lldp_params));
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
mb_param, &mcp_resp, &mcp_param);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
return rc;
}
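These two entry points are meant to be used read-modify-write; a sketch, with the agent choice and interval as illustrative values:

static enum _ecore_status_t
lldp_set_tx_interval(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     u8 interval)
{
	struct ecore_lldp_config_params cfg;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&cfg, 0, sizeof(cfg));
	cfg.agent = ECORE_LLDP_NEAREST_BRIDGE;

	rc = ecore_lldp_get_params(p_hwfn, p_ptt, &cfg);
	if (rc != ECORE_SUCCESS)
		return rc;

	cfg.tx_interval = interval;
	return ecore_lldp_set_params(p_hwfn, p_ptt, &cfg);
}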
enum _ecore_status_t
ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_sys_tlvs *p_params)
{
u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
struct lldp_system_tlvs_buffer_s lld_tlv_buf;
u32 addr, *p_val;
u8 len;
int i;
p_val = (u32 *)p_params->buf;
for (i = 0; i < ECORE_LLDP_SYS_TLV_SIZE / 4; i++)
p_val[i] = OSAL_CPU_TO_BE32(p_val[i]);
OSAL_MEMSET(&lld_tlv_buf, 0, sizeof(lld_tlv_buf));
SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_VALID, 1);
SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_MANDATORY,
!!p_params->discard_mandatory_tlv);
SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_LENGTH,
p_params->buf_size);
len = ECORE_LLDP_SYS_TLV_SIZE / 2;
OSAL_MEMCPY(lld_tlv_buf.data, p_params->buf, len);
addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, system_lldp_tlvs_buf);
ecore_memcpy_to(p_hwfn, p_ptt, addr, &lld_tlv_buf, sizeof(lld_tlv_buf));
if (p_params->buf_size > len) {
addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, system_lldp_tlvs_buf2);
ecore_memcpy_to(p_hwfn, p_ptt, addr, &p_params->buf[len],
ECORE_LLDP_SYS_TLV_SIZE / 2);
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
mb_param, &mcp_resp, &mcp_param);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
return rc;
}
enum _ecore_status_t
ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
u8 dscp_index, u8 *p_dscp_pri)
{
struct ecore_dcbx_get *p_dcbx_info;
enum _ecore_status_t rc;
if (dscp_index >= ECORE_DCBX_DSCP_SIZE) {
DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index);
return ECORE_INVAL;
}
p_dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_dcbx_info));
if (!p_dcbx_info)
return ECORE_NOMEM;
OSAL_MEMSET(p_dcbx_info, 0, sizeof(*p_dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, p_dcbx_info,
ECORE_DCBX_OPERATIONAL_MIB);
if (rc) {
OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
return rc;
}
*p_dscp_pri = p_dcbx_info->dscp.dscp_pri_map[dscp_index];
OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 dscp_index, u8 pri_val)
{
struct ecore_dcbx_set dcbx_set;
enum _ecore_status_t rc;
if (dscp_index >= ECORE_DCBX_DSCP_SIZE ||
pri_val >= ECORE_MAX_PFC_PRIORITIES) {
DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n",
dscp_index, pri_val);
return ECORE_INVAL;
}
OSAL_MEMSET(&dcbx_set, 0, sizeof(dcbx_set));
rc = ecore_dcbx_get_config_params(p_hwfn, &dcbx_set);
if (rc)
return rc;
dcbx_set.override_flags = ECORE_DCBX_OVERRIDE_DSCP_CFG;
dcbx_set.dscp.dscp_pri_map[dscp_index] = pri_val;
return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1);
}
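The get/set pair composes naturally; a minimal sketch of remapping one DSCP code point (the early return when the priority already matches is just an optimization):

static enum _ecore_status_t
dscp_remap(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	   u8 dscp, u8 new_pri)
{
	u8 cur_pri = 0;
	enum _ecore_status_t rc;

	rc = ecore_dcbx_get_dscp_priority(p_hwfn, dscp, &cur_pri);
	if (rc != ECORE_SUCCESS || cur_pri == new_pri)
		return rc;

	return ecore_dcbx_set_dscp_priority(p_hwfn, p_ptt, dscp, new_pri);
}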
enum _ecore_status_t
ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_stats *p_params)
{
u32 mcp_resp = 0, mcp_param = 0, addr, val;
struct lldp_stats_stc lldp_stats;
enum _ecore_status_t rc;
switch (p_params->agent) {
case ECORE_LLDP_NEAREST_BRIDGE:
val = LLDP_NEAREST_BRIDGE;
break;
case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
val = LLDP_NEAREST_NON_TPMR_BRIDGE;
break;
case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
val = LLDP_NEAREST_CUSTOMER_BRIDGE;
break;
default:
DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
return ECORE_INVAL;
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_LLDP_STATS,
val << DRV_MB_PARAM_LLDP_STATS_AGENT_OFFSET,
&mcp_resp, &mcp_param);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "GET_LLDP_STATS failed, error = %d\n", rc);
return rc;
}
addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb, union_data);
ecore_memcpy_from(p_hwfn, p_ptt, &lldp_stats, addr, sizeof(lldp_stats));
p_params->tx_frames = lldp_stats.tx_frames_total;
p_params->rx_frames = lldp_stats.rx_frames_total;
p_params->rx_discards = lldp_stats.rx_frames_discarded;
p_params->rx_age_outs = lldp_stats.rx_age_outs;
return ECORE_SUCCESS;
}
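A sketch of pulling the per-agent counters; only the agent field has to be set on input, and the nearest-bridge agent is an illustrative choice.

static void
lldp_dump_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_lldp_stats stats;

	OSAL_MEMSET(&stats, 0, sizeof(stats));
	stats.agent = ECORE_LLDP_NEAREST_BRIDGE;

	if (ecore_lldp_get_stats(p_hwfn, p_ptt, &stats) != ECORE_SUCCESS)
		return;

	DP_INFO(p_hwfn->p_dev, "lldp: tx %u rx %u discards %u age-outs %u\n",
		stats.tx_frames, stats.rx_frames, stats.rx_discards,
		stats.rx_age_outs);
}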


@ -39,6 +39,8 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
#define ECORE_DCBX_DSCP_DISABLED 0xFF
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@ -57,6 +59,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_mib_meta_data {
struct lldp_config_params_s *lldp_local;
struct lldp_status_params_s *lldp_remote;
struct lldp_received_tlvs_s *lldp_tlvs;
struct dcbx_local_params *local_admin;
struct dcb_dscp_map *dscp_map;
struct dcbx_mib *mib;
@ -66,14 +69,18 @@ struct ecore_dcbx_mib_meta_data {
/* ECORE local interface routines */
enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
enum ecore_mib_read_type);
ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type);
enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
struct ecore_ptt *);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_info_free(struct ecore_hwfn *);
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
/* Returns the DSCP value mapped to a given priority */
u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
enum _ecore_status_t
ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#endif /* __ECORE_DCBX_H__ */


@ -40,7 +40,8 @@ enum ecore_mib_read_type {
ECORE_DCBX_REMOTE_MIB,
ECORE_DCBX_LOCAL_MIB,
ECORE_DCBX_REMOTE_LLDP_MIB,
ECORE_DCBX_LOCAL_LLDP_MIB
ECORE_DCBX_LOCAL_LLDP_MIB,
ECORE_DCBX_LLDP_TLVS
};
struct ecore_dcbx_app_data {
@ -50,8 +51,10 @@ struct ecore_dcbx_app_data {
u8 tc; /* Traffic Class */
bool dscp_enable; /* DSCP enabled */
u8 dscp_val; /* DSCP value */
bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#ifndef __EXTRACT__LINUX__IF__
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
@ -122,7 +125,6 @@ struct ecore_dcbx_params {
bool ets_willing;
bool ets_enabled;
bool ets_cbs;
bool valid; /* Indicate validity of params */
u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
@ -164,6 +166,7 @@ struct ecore_dcbx_get {
struct ecore_dcbx_admin_params local;
struct ecore_dcbx_dscp_params dscp;
};
#endif
#define ECORE_DCBX_VERSION_DISABLED 0
#define ECORE_DCBX_VERSION_IEEE 1
@ -195,18 +198,84 @@ struct ecore_dcbx_app_metadata {
enum ecore_pci_personality personality;
};
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
struct ecore_dcbx_get *,
enum ecore_mib_read_type);
enum ecore_lldp_agent {
ECORE_LLDP_NEAREST_BRIDGE = 0,
ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE,
ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE,
ECORE_LLDP_MAX_AGENTS
};
enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
struct ecore_dcbx_set *);
struct ecore_lldp_config_params {
enum ecore_lldp_agent agent;
u8 tx_interval;
u8 tx_hold;
u8 tx_credit;
bool rx_enable;
bool tx_enable;
u32 chassis_id_tlv[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
u32 port_id_tlv[ECORE_LLDP_PORT_ID_STAT_LEN];
};
enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
struct ecore_ptt *,
struct ecore_dcbx_set *,
bool);
#define ECORE_LLDP_SYS_TLV_SIZE 256
struct ecore_lldp_sys_tlvs {
bool discard_mandatory_tlv;
u8 buf[ECORE_LLDP_SYS_TLV_SIZE];
u16 buf_size;
};
struct ecore_lldp_stats {
enum ecore_lldp_agent agent;
u32 tx_frames;
u32 rx_frames;
u32 rx_discards;
u32 rx_age_outs;
};
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_get,
enum ecore_mib_read_type type);
enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set
*params);
enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_set *params,
bool hw_commit);
enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_lldp_agent agent,
u8 tlv_type);
enum _ecore_status_t
ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_config_params *p_params);
enum _ecore_status_t
ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_config_params *p_params);
enum _ecore_status_t
ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_sys_tlvs *p_params);
/* Returns priority value for a given dscp index */
enum _ecore_status_t
ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
u8 dscp_index, u8 *p_dscp_pri);
/* Sets priority value for a given dscp index */
enum _ecore_status_t
ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 dscp_index, u8 pri_val);
enum _ecore_status_t
ecore_lldp_get_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_stats *p_params);
#ifndef __EXTRACT__LINUX__C__
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
@ -215,5 +284,6 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
{DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
#endif
#endif /* __ECORE_DCBX_API_H__ */

File diff suppressed because it is too large


@ -35,6 +35,8 @@
#include "ecore_chain.h"
#include "ecore_int_api.h"
#define ECORE_DEFAULT_ILT_PAGE_SIZE 4
struct ecore_wake_info {
u32 wk_info;
u32 wk_details;
@ -61,7 +63,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
*
* @param p_dev
*/
void ecore_init_struct(struct ecore_dev *p_dev);
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -
@ -86,6 +88,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
enum ecore_mfw_timeout_fallback {
ECORE_TO_FALLBACK_TO_NONE,
ECORE_TO_FALLBACK_TO_DEFAULT,
ECORE_TO_FALLBACK_FAIL_LOAD,
};
enum ecore_override_force_load {
ECORE_OVERRIDE_FORCE_LOAD_NONE,
ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
@ -108,6 +116,11 @@ struct ecore_drv_load_params {
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
/* Action to take in case the MFW doesn't support timeout values other
* than default and none.
*/
enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
@ -127,11 +140,17 @@ struct ecore_hw_init_params {
/* NPAR tx switching to be used for vports configured for tx-switching */
bool allow_npar_tx_switch;
/* PCI relax ordering to be configured by MFW or ecore client */
enum ecore_pci_rlx_odr pci_rlx_odr_mode;
/* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
/* Avoid engine affinity for RoCE/storage in case of CMT mode */
bool avoid_eng_affin;
};
/**
@ -174,6 +193,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*/
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
#ifndef LINUX_REMOVE
/**
* @brief ecore_hw_hibernate_prepare -should be called when
* the system is going into the hibernate state
@ -192,6 +212,7 @@ void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);
*/
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);
#endif
/**
* @brief ecore_hw_start_fastpath -restart fastpath traffic,
@ -329,7 +350,18 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
/**
* @brief ecore_get_dev_name - get device name, e.g., "BB B0"
*
* @param p_dev
* @param name - buffer into which the name is written
* @param max_chars - maximum chars that can be written to name including '\0'
*/
void ecore_get_dev_name(struct ecore_dev *p_dev,
u8 *name,
u8 max_chars);
#ifndef __EXTRACT__LINUX__IF__
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
@ -392,6 +424,7 @@ struct ecore_eth_stats_common {
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
u64 link_change_count;
};
struct ecore_eth_stats_bb {
@ -439,11 +472,17 @@ enum ecore_dmae_address_type_t {
#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
#define ECORE_DMAE_FLAG_VF_DST 0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
#define ECORE_DMAE_FLAG_PORT 0x00000010
#define ECORE_DMAE_FLAG_PF_SRC 0x00000020
#define ECORE_DMAE_FLAG_PF_DST 0x00000040
struct ecore_dmae_params {
u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
u8 port_id;
u8 src_pfid;
u8 dst_pfid;
};
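The old flags word of the DMAE helpers becomes this optional struct; a sketch of a caller that needs a VF source id, with OSAL_NULL still meaning "use defaults":

static enum _ecore_status_t
dmae_copy_from_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		  dma_addr_t src, dma_addr_t dst, u32 dwords, u8 vfid)
{
	struct ecore_dmae_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC;
	params.src_vfid = vfid;

	return ecore_dmae_host2host(p_hwfn, p_ptt, src, dst, dwords, &params);
}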
/**
@ -455,7 +494,9 @@ struct ecore_dmae_params {
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
* @param p_params (default parameters will be used in case of OSAL_NULL)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
@ -463,7 +504,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
u32 flags);
struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_grc2host - Read data from dmae data offset
@ -473,7 +514,9 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
* @param flags - one of the flags defined above
* @param p_params (default parameters will be used in case of OSAL_NULL)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
@ -481,7 +524,7 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
u32 grc_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
u32 flags);
struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_host2host - copy data from source address to destination address
@ -492,7 +535,9 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
* @param p_params (default parameters will be used in case of OSAL_NULL)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
@ -573,28 +618,79 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 *dst_id);
/**
* @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
* @brief ecore_llh_get_num_ppfid - Return the allocated number of LLH filter
* banks that are allocated to the PF.
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to add
* @param p_dev
*
* @return u8 - Number of LLH filter banks
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_filter);
u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);
enum ecore_eng {
ECORE_ENG0,
ECORE_ENG1,
ECORE_BOTH_ENG,
};
/**
* @brief ecore_llh_remove_mac_filter - removes a MAC filtre from llh
* @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to remove
* @param p_dev
*
* @return enum ecore_eng - L2 affinity hint
*/
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_filter);
enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);
enum ecore_llh_port_filter_type_t {
/**
* @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
* LLH filter bank.
*
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
* @param eng
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
u8 ppfid, enum ecore_eng eng);
/**
* @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
*
* @param p_dev
* @param eng
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
enum ecore_eng eng);
/**
* @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
* bank.
*
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
* @param mac_addr - MAC to add
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
u8 mac_addr[ETH_ALEN]);
/**
* @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
* filter bank.
*
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
* @param mac_addr - MAC to remove
*/
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
u8 mac_addr[ETH_ALEN]);
enum ecore_llh_prot_filter_type_t {
ECORE_LLH_FILTER_ETHERTYPE,
ECORE_LLH_FILTER_TCP_SRC_PORT,
ECORE_LLH_FILTER_TCP_DEST_PORT,
@ -605,45 +701,52 @@ enum ecore_llh_port_filter_type_t {
};
/**
* @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
* @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
* given filter bank.
*
* @param p_hwfn
* @param p_ptt
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
* @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
* @param type - type of filters and comparing
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 source_port_or_eth_type,
u16 dest_port,
enum ecore_llh_port_filter_type_t type);
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
enum ecore_llh_prot_filter_type_t type,
u16 source_port_or_eth_type, u16 dest_port);
/**
* @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
* @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
* the given filter bank.
*
* @param p_hwfn
* @param p_ptt
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
* @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
* @param type - type of filters and comparing
*/
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 source_port_or_eth_type,
u16 dest_port,
enum ecore_llh_port_filter_type_t type);
void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
enum ecore_llh_prot_filter_type_t type,
u16 source_port_or_eth_type,
u16 dest_port);
/**
* @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
* @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
* filter bank.
*
* @param p_hwfn
* @param p_ptt
* @param p_dev
* @param ppfid - relative within the allocated ppfids ('0' is the default one).
*/
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);
/**
* @brief ecore_llh_clear_all_filters - Remove all LLH filters
*
* @param p_dev
*/
void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);
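A sketch of wiring the default filter bank through the reworked per-ppfid API; bank 0 is the default ppfid per the comments above, and TCP port 4789 is just an example value:

static enum _ecore_status_t
llh_setup_default_bank(struct ecore_dev *p_dev, u8 mac_addr[ETH_ALEN])
{
	enum _ecore_status_t rc;

	rc = ecore_llh_add_mac_filter(p_dev, 0, mac_addr);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Steer TCP destination port 4789 as well (example value) */
	return ecore_llh_add_protocol_filter(p_dev, 0,
					     ECORE_LLH_FILTER_TCP_DEST_PORT,
					     0, 4789);
}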
/**
* @brief ecore_llh_set_function_as_default - set function as default per port
@ -669,6 +772,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
/**
* @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
*
* @param p_hwfn
* @param coal - where the coalesce value read from the hardware is stored.
* @param handle
*
* @return enum _ecore_status_t
**/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
void *handle);
/**
* @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
* Tx queue. The fact that we can configure coalescing to up to 511, but on
@ -700,20 +817,19 @@ ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_change_pci_hwfn - Enable or disable PCI BUS MASTER
* @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
*
* @param p_hwfn
* @param p_ptt
* @param enable - true/false
* @param b_enable - true/false
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 enable);
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_enable);
#ifndef __EXTRACT__LINUX__
#ifndef __EXTRACT__LINUX__IF__
enum ecore_db_rec_width {
DB_REC_WIDTH_32B,
DB_REC_WIDTH_64B,
@ -753,4 +869,30 @@ enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
void OSAL_IOMEM *db_addr,
void *db_data);
#ifndef __EXTRACT__LINUX__THROW__
static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
{
return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
}
#endif
/**
* @brief ecore_set_dev_access_enable - Enable or disable access to the device
*
* @param p_dev
* @param b_enable - true/false
*/
void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable);
/**
* @brief ecore_set_ilt_page_size - Set ILT page size
*
* @param p_dev
* @param ilt_size
*/
void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_size);
#endif


@ -42,12 +42,24 @@ struct ecore_fcoe_info {
osal_list_t free_list;
};
#ifdef CONFIG_ECORE_FCOE
enum _ecore_status_t ecore_fcoe_alloc(struct ecore_hwfn *p_hwfn);
void ecore_fcoe_setup(struct ecore_hwfn *p_hwfn);
void ecore_fcoe_free(struct ecore_hwfn *p_hwfn);
#else
static inline enum _ecore_status_t ecore_fcoe_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
return ECORE_INVAL;
}
static inline void ecore_fcoe_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static inline void ecore_fcoe_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
#endif
#ifndef __EXTRACT__LINUX__THROW__
enum _ecore_status_t
ecore_sp_fcoe_conn_offload(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn,
@ -59,6 +71,7 @@ ecore_sp_fcoe_conn_destroy(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
#endif
#endif /*__ECORE_FCOE_H__*/

View file

@ -33,6 +33,7 @@
#include "ecore_sp_api.h"
#ifndef __EXTRACT__LINUX__C__
struct ecore_fcoe_conn {
osal_list_entry_t list_entry;
bool free_on_delete;
@ -75,8 +76,10 @@ struct ecore_fcoe_conn {
struct fc_addr_nw d_id;
u8 flags;
u8 def_q_idx;
};
};
#endif
#ifndef __EXTRACT__LINUX__IF__
struct ecore_fcoe_stats {
u64 fcoe_rx_byte_cnt;
u64 fcoe_rx_data_pkt_cnt;
@ -93,6 +96,7 @@ struct ecore_fcoe_stats {
u64 fcoe_tx_xfer_pkt_cnt;
u64 fcoe_tx_other_pkt_cnt;
};
#endif
enum _ecore_status_t
ecore_fcoe_acquire_connection(struct ecore_hwfn *p_hwfn,


@ -94,6 +94,7 @@ enum core_event_opcode
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
@ -175,6 +176,7 @@ enum core_ramrod_cmd_id
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@ -263,7 +265,7 @@ enum core_rx_cqe_type
*/
struct core_rx_fast_path_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum core_rx_cqe_type) */;
u8 placement_offset /* Offset (in bytes) of the packet from start of the buffer */;
struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
__le16 packet_length /* Total packet length (from the parser) */;
@ -279,7 +281,7 @@ struct core_rx_fast_path_cqe
*/
struct core_rx_gsi_offload_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum core_rx_cqe_type) */;
u8 data_length_error /* set if gsi data is bigger than buff */;
struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
__le16 data_length /* Total packet length (from the parser) */;
@ -287,7 +289,8 @@ struct core_rx_gsi_offload_cqe
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
__le16 qp_id /* These are the lower 16 bit of QP id in RoCE BTH header */;
__le32 gid_dst[4] /* Gid destination address */;
__le32 src_qp /* Source QP from DETH header */;
__le32 reserved[3];
};
/*
@ -295,7 +298,7 @@ struct core_rx_gsi_offload_cqe
*/
struct core_rx_slow_path_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum core_rx_cqe_type) */;
u8 ramrod_cmd_id;
__le16 echo;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
@ -330,14 +333,15 @@ struct core_rx_start_ramrod_data
u8 complete_event_flg /* post completion to the event ring if set */;
u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
u8 inner_vlan_removal_en /* if set, 802.1q tags will be removed and copied to CQE */;
u8 inner_vlan_stripping_en /* if set, 802.1q tags will be removed and copied to CQE */;
u8 report_outer_vlan /* if set and inner vlan does not exist, the outer vlan will be copied to CQE as inner vlan. Should be used in MF_OVLAN mode only. */;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
u8 mf_si_bcast_accept_all /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
u8 mf_si_mcast_accept_all /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
struct core_rx_action_on_error action_on_error /* Specifies how ll2 should deal with packets errors: packet_too_big and no_buff */;
u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
u8 reserved[7];
u8 reserved[6];
};
@ -360,30 +364,32 @@ struct core_rx_stop_ramrod_data
struct core_tx_bd_data
{
__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 /* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 /* Insert VLAN into packet */
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
#define CORE_TX_BD_DATA_START_BD_MASK 0x1 /* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 /* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 /* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 /* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 /* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol: 0-TCP, 1-UDP */
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 /* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) */
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_DATA_NBDS_MASK 0xF /* Number of BDs that make up one packet - width wide enough to present CORE_LL2_TX_MAX_BDS_PER_PACKET */
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 /* Use roce_flavor enum - Differentiate between Roce flavors is valid when connType is ROCE (use enum core_roce_flavor_type) */
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 /* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 /* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 /* Insert VLAN into packet. Cannot be set for LB packets (tx_dst == CORE_TX_DEST_LB) */
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
#define CORE_TX_BD_DATA_START_BD_MASK 0x1 /* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 /* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 /* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 /* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 /* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol: 0-TCP, 1-UDP */
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 /* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) */
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_DATA_NBDS_MASK 0xF /* Number of BDs that make up one packet - width wide enough to present CORE_LL2_TX_MAX_BDS_PER_PACKET */
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 /* Use roce_flavor enum - Differentiate between Roce flavors is valid when connType is ROCE (use enum core_roce_flavor_type) */
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 /* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1 /* disables the STAG insertion, relevant only in MF OVLAN mode. */
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
};
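The MASK/SHIFT pairs above pack several independent flags into the single as_bitfield word. A minimal sketch of composing it on the host side follows; the helper name is illustrative and not a driver symbol, and htole16() is the stock FreeBSD byte-order conversion.

/* Illustrative only: build as_bitfield for the first BD of a packet
 * that requests L4 checksum offload, using the macros defined above. */
static inline uint16_t
qlnx_core_tx_bd_bitfield(uint8_t nbds)
{
	uint16_t v = 0;

	v |= (1 & CORE_TX_BD_DATA_START_BD_MASK) <<
	    CORE_TX_BD_DATA_START_BD_SHIFT;
	v |= (1 & CORE_TX_BD_DATA_L4_CSUM_MASK) <<
	    CORE_TX_BD_DATA_L4_CSUM_SHIFT;
	v |= (nbds & CORE_TX_BD_DATA_NBDS_MASK) <<
	    CORE_TX_BD_DATA_NBDS_SHIFT;
	return (htole16(v));
}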
/*
@ -428,7 +434,7 @@ struct core_tx_start_ramrod_data
u8 sb_index /* Status block protocol index */;
u8 stats_en /* Statistics Enable */;
u8 stats_id /* Statistics Counter ID */;
u8 conn_type /* connection type that loaded ll2 */;
u8 conn_type /* connection type that loaded ll2 (use enum protocol_type) */;
__le16 pbl_size /* Number of BD pages pointed by PBL */;
__le16 qm_pq_id /* QM PQ ID */;
u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
@ -446,14 +452,26 @@ struct core_tx_stop_ramrod_data
/*
* Enum flag for what type of dcb data to update
* Ramrod data for tx queue update ramrod
*/
struct core_tx_update_ramrod_data
{
u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
u8 reserved0;
__le16 qm_pq_id /* Updated QM PQ ID */;
__le32 reserved1[1];
};
/*
* Enum flag for what type of DCB data to update
*/
enum dcb_dscp_update_mode
{
DONT_UPDATE_DCB_DSCP /* use when no change should be done to dcb data */,
UPDATE_DCB /* use to update only l2 (vlan) priority */,
UPDATE_DSCP /* use to update only l3 dscp */,
UPDATE_DCB_DSCP /* update vlan pri and dscp */,
DONT_UPDATE_DCB_DSCP /* use when no change should be done to DCB data */,
UPDATE_DCB /* use to update only L2 (vlan) priority */,
UPDATE_DSCP /* use to update only IP DSCP */,
UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
MAX_DCB_DSCP_UPDATE_MODE
};
@ -490,7 +508,7 @@ struct xstorm_core_conn_st_ctx
struct e4_xstorm_core_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
@ -1452,6 +1470,7 @@ struct e5_core_conn_context
struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct regpair xstorm_st_padding[2] /* padding */;
struct e5_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct e5_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct e5_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
@ -1562,6 +1581,66 @@ struct eth_ustorm_per_queue_stat
};
/*
* Event Ring VF-PF Channel data
*/
struct vf_pf_channel_eqe_data
{
struct regpair msg_addr /* VF-PF message address */;
};
/*
* Event Ring malicious VF data
*/
struct malicious_vf_eqe_data
{
u8 vf_id /* Malicious VF ID */;
u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
__le16 reserved[3];
};
/*
* Event Ring initial cleanup data
*/
struct initial_cleanup_eqe_data
{
u8 vf_id /* VF ID */;
u8 reserved[7];
};
/*
* Event Data Union
*/
union event_ring_data
{
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
struct iscsi_connect_done_results iscsi_conn_done_info /* Dedicated fields to iscsi connect done results */;
union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
};
/*
* Event Ring Entry
*/
struct event_ring_entry
{
u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
u8 opcode /* Event Opcode */;
__le16 reserved0 /* Reserved */;
__le16 echo /* Echo value from ramrod data on the host */;
u8 fw_return_code /* FW return code for SP ramrods */;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
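As a hedged sketch, an EQE consumer first tests the ASYNC flag and then branches on protocol_id; handle_vf_msg() and complete_ramrod() below are hypothetical stand-ins, and PROTOCOLID_COMMON is assumed to come from enum protocol_type.

extern void handle_vf_msg(struct ecore_hwfn *,
    struct vf_pf_channel_eqe_data *);		/* hypothetical */
extern void complete_ramrod(struct ecore_hwfn *, uint16_t, uint8_t);

static void
eqe_dispatch(struct ecore_hwfn *p_hwfn, struct event_ring_entry *eqe)
{
	if (eqe->flags & (EVENT_RING_ENTRY_ASYNC_MASK <<
	    EVENT_RING_ENTRY_ASYNC_SHIFT)) {
		/* asynchronous event, e.g. a VF-PF channel message */
		if (eqe->protocol_id == PROTOCOLID_COMMON)
			handle_vf_msg(p_hwfn, &eqe->data.vf_pf_channel);
	} else {
		/* synchronous completion: match the SP ramrod by echo */
		complete_ramrod(p_hwfn, le16toh(eqe->echo),
		    eqe->fw_return_code);
	}
}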
/*
* Event Ring Next Page Address
*/
@ -1582,6 +1661,7 @@ union event_ring_element
/*
* Ports mode
*/
@ -1593,6 +1673,20 @@ enum fw_flow_ctrl_mode
};
/*
* GFT profile type.
*/
enum gft_profile_type
{
GFT_PROFILE_TYPE_4_TUPLE /* tunnel type, inner 4 tuple, IP type and L4 type match. */,
GFT_PROFILE_TYPE_L4_DST_PORT /* tunnel type, inner L4 destination port, IP type and L4 type match. */,
GFT_PROFILE_TYPE_IP_DST_ADDR /* tunnel type, inner IP destination address and IP type match. */,
GFT_PROFILE_TYPE_IP_SRC_ADDR /* tunnel type, inner IP source address and IP type match. */,
GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
MAX_GFT_PROFILE_TYPE
};
/*
* Major and Minor hsi Versions
*/
@ -1603,6 +1697,7 @@ struct hsi_fp_ver_struct
};
/*
* Integration Phase
*/
@ -1628,6 +1723,7 @@ enum iwarp_ll2_tx_queues
};
/*
* Malicious VF error ID
*/
@ -1654,6 +1750,7 @@ enum malicious_vf_error_id
ETH_TUNN_IPV6_EXT_NBD_ERR /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
ETH_PACKET_SIZE_TOO_LARGE /* packet scanned is too large (can be 9700 at most) */,
MAX_MALICIOUS_VF_ERROR_ID
};
@ -1678,6 +1775,28 @@ struct mstorm_vf_zone
};
/*
* vlan header including TPID and TCI fields
*/
struct vlan_header
{
__le16 tpid /* Tag Protocol Identifier */;
__le16 tci /* Tag Control Information */;
};
/*
* outer tag configurations
*/
struct outer_tag_config_struct
{
u8 enable_stag_pri_change /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
u8 pri_map_valid /* If inner_to_outer_pri_map is initialize then set pri_map_valid */;
u8 reserved[2];
struct vlan_header outer_tag /* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol identifier and outer tag control information */;
u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when init map */;
};
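A minimal sketch of filling the new structure for MF_OVLAN follows; the 0x8100 TPID and the zeroed priority map are illustrative assumptions, not the driver's defaults.

static void
fill_outer_tag(struct outer_tag_config_struct *cfg, uint16_t vlan_id)
{
	cfg->outer_tag.tpid = htole16(0x8100);		/* assumed TPID */
	cfg->outer_tag.tci = htole16(vlan_id & 0xfff);	/* pri/CFI left 0 */
	cfg->enable_stag_pri_change = 0;	/* not UFP/Bette Davis */
	cfg->pri_map_valid = 0;			/* priority map left unset */
}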
/*
* personality per PF
*/
@ -1702,11 +1821,11 @@ struct pf_start_tunnel_config
{
u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - FW will use a default port */;
u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - FW will use a default port */;
u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
u8 tunnel_clss_l2geneve /* Rx classification scheme for l2 GENEVE tunnel. */;
u8 tunnel_clss_ipgeneve /* Rx classification scheme for ip GENEVE tunnel. */;
u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_l2geneve /* Rx classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_ipgeneve /* Rx classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
u8 reserved;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */;
@ -1720,7 +1839,6 @@ struct pf_start_ramrod_data
struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
struct regpair consolid_q_pbl_addr /* PBL address of consolidation queue */;
struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
__le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
u8 base_vf_id /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */;
u8 num_vfs /* Amount of vfs owned by PF */;
@ -1729,30 +1847,29 @@ struct pf_start_ramrod_data
u8 path_id /* HW path ID (engine ID) */;
u8 warning_as_error /* In FW asserts, treat warning as error */;
u8 dont_log_ramrods /* If not set - throw a warning for each ramrod (for debug) */;
u8 personality /* define what type of personality is new PF */;
u8 personality /* define what type of personality is new PF (use enum personality_type) */;
__le16 log_type_mask /* Log type mask. Each bit set enables a corresponding event type logging. Event types are defined as ASSERT_LOG_TYPE_xxx */;
u8 mf_mode /* Multi function mode */;
u8 integ_phase /* Integration phase */;
u8 mf_mode /* Multi function mode (use enum mf_mode) */;
u8 integ_phase /* Integration phase (use enum integ_phase) */;
u8 allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode */;
u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when init map */;
u8 pri_map_valid /* If inner_to_outer_pri_map is initialize then set pri_map_valid */;
__le32 outer_tag /* In case mf_mode is MF_OVLAN, this field specifies the outer vlan (lower 16 bits) and ethType to use (higher 16 bits) */;
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
struct outer_tag_config_struct outer_tag_config /* Outer tag configurations */;
};
/*
* Data for port update ramrod
* Per protocol DCB data
*/
struct protocol_dcb_data
{
u8 dcb_enable_flag /* dcbEnable flag value */;
u8 dscp_enable_flag /* If set use dscp value */;
u8 dcb_priority /* dcbPri flag value */;
u8 dcb_tc /* dcb TC value */;
u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
u8 reserved0;
u8 dcb_enable_flag /* Enable DCB */;
u8 dscp_enable_flag /* Enable updating DSCP value */;
u8 dcb_priority /* DCB priority */;
u8 dcb_tc /* DCB TC */;
u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
u8 dcb_dont_add_vlan0 /* When DCB is enabled - if this flag is set, don't add VLAN 0 tag to untagged frames */;
};
/*
@ -1765,11 +1882,11 @@ struct pf_update_tunnel_config
u8 update_rx_def_non_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with non unicast outer MAC in NPAR mode. */;
u8 set_vxlan_udp_port_flg /* Update VXLAN tunnel UDP destination port. */;
u8 set_geneve_udp_port_flg /* Update GENEVE tunnel UDP destination port. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
u8 tunnel_clss_l2geneve /* Classification scheme for l2 GENEVE tunnel. */;
u8 tunnel_clss_ipgeneve /* Classification scheme for ip GENEVE tunnel. */;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_l2geneve /* Classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_ipgeneve /* Classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
__le16 reserved;
@ -1780,14 +1897,14 @@ struct pf_update_tunnel_config
*/
struct pf_update_ramrod_data
{
u8 pf_id;
u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
u8 update_rroce_dcb_data_mode /* Update RROCE (RoceV2) DCB data indication */;
u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_eth_dcb_data_mode /* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_rroce_dcb_data_mode /* Update RROCE (RoceV2) DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication (use enum dcb_dscp_update_mode) */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
u8 update_enable_stag_pri_change /* Update Enable STAG Priority Change indication */;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */;
@ -1795,7 +1912,8 @@ struct pf_update_ramrod_data
struct protocol_dcb_data rroce_dcb_data /* core roce related fields */;
struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */;
__le16 mf_vlan /* new outer vlan id value */;
__le16 reserved;
u8 enable_stag_pri_change /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
u8 reserved;
struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
};
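For example, here is a hedged sketch of requesting an L2-priority-only DCB update for the Ethernet protocol through this ramrod data; the function name and the prio/tc parameters are assumptions for illustration.

static void
request_eth_dcb_update(struct pf_update_ramrod_data *p_data, uint8_t prio,
    uint8_t tc)
{
	p_data->update_eth_dcb_data_mode = UPDATE_DCB; /* vlan pri only */
	p_data->eth_dcb_data.dcb_enable_flag = 1;
	p_data->eth_dcb_data.dcb_priority = prio;	/* 0..7 */
	p_data->eth_dcb_data.dcb_tc = tc;
}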
@ -1864,7 +1982,7 @@ struct ramrod_header
{
__le32 cid /* Slowpath Connection CID */;
u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
u8 protocol_id /* Ramrod Protocol ID */;
u8 protocol_id /* Ramrod Protocol ID (use enum protocol_type) */;
__le16 echo /* Ramrod echo */;
};
@ -1942,6 +2060,7 @@ struct tstorm_per_port_stat
struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
struct regpair eth_vxlan_tunn_filter_discard /* VXLAN dropped packets */;
struct regpair eth_geneve_tunn_filter_discard /* GENEVE dropped packets */;
struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
};
@ -2011,6 +2130,7 @@ struct vf_pf_channel_data
};
/*
* Ramrod data for VF start ramrod
*/
@ -2019,7 +2139,7 @@ struct vf_start_ramrod_data
u8 vf_id /* VF ID */;
u8 enable_flr_ack /* If set, initial cleanup ack will be sent to parent PF SP event queue */;
__le16 opaque_fid /* VF opaque FID */;
u8 personality /* define what type of personality is new VF */;
u8 personality /* define what type of personality is new VF (use enum personality_type) */;
u8 reserved[7];
struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
};
@ -2051,6 +2171,7 @@ enum vf_zone_size_mode
/*
* Attentions status block
*/
@ -2064,17 +2185,6 @@ struct atten_status_block
};
/*
* Igu cleanup bit values to distinguish between clean or producer consumer update.
*/
enum command_type_bit
{
IGU_COMMAND_TYPE_NOP=0,
IGU_COMMAND_TYPE_SET=1,
MAX_COMMAND_TYPE_BIT
};
/*
* DMAE command
*/
@ -2378,6 +2488,51 @@ struct e5_ystorm_core_conn_ag_ctx
};
struct fw_asserts_ram_section
{
__le16 section_ram_line_offset /* The offset of the section in the RAM in RAM lines (64-bit units) */;
__le16 section_ram_line_size /* The size of the section in RAM lines (64-bit units) */;
u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
u8 list_element_dword_size /* The size of an assert list element in dwords */;
u8 list_num_elements /* The number of elements in the asserts list */;
u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
};
struct fw_ver_num
{
u8 major /* Firmware major version number */;
u8 minor /* Firmware minor version number */;
u8 rev /* Firmware revision version number */;
u8 eng /* Firmware engineering version number (for bootleg versions) */;
};
struct fw_ver_info
{
__le16 tools_ver /* Tools version number */;
u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
u8 reserved1;
struct fw_ver_num num /* FW version number */;
__le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
__le32 reserved2;
};
struct fw_info
{
struct fw_ver_info ver /* FW version information */;
struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
};
struct fw_info_location
{
__le32 grc_addr /* GRC address where the fw_info struct is located. */;
__le32 size /* Size of the fw_info structure (that's located at the grc_addr). */;
};
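A hedged sketch of consuming the location record: copy fw_info out of GRC one dword at a time with the REG_RD accessor, assuming grc_addr is a byte address and the location record was already read from Storm RAM.

static void
copy_fw_info(struct ecore_hwfn *p_hwfn, struct fw_info_location *loc,
    struct fw_info *info)
{
	uint32_t i, *dst = (uint32_t *)info;

	for (i = 0; i < sizeof(*info) / sizeof(uint32_t); i++)
		dst[i] = REG_RD(p_hwfn, le32toh(loc->grc_addr) +
		    i * sizeof(uint32_t));
}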
/*
* IGU cleanup command
*/

View file

@ -122,6 +122,7 @@ enum block_addr
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
GRCBASE_PXPREQBUS = 0x56000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@ -215,6 +216,7 @@ enum block_id
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
BLOCK_PXPREQBUS,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@ -251,7 +253,7 @@ enum bin_dbg_buffer_type
*/
struct dbg_attn_bit_mapping
{
__le16 data;
u16 data;
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF /* The index of an attention in the blocks attentions list (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits (if is_unused_bit_cnt=1) */
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 /* if set, the val field indicates the number of consecutive unused attention bits */
@ -264,11 +266,11 @@ struct dbg_attn_bit_mapping
*/
struct dbg_attn_block_type_data
{
__le16 names_offset /* Offset of this block attention names in the debug attention name offsets array */;
__le16 reserved1;
u16 names_offset /* Offset of this block attention names in the debug attention name offsets array */;
u16 reserved1;
u8 num_regs /* Number of attention registers in this block */;
u8 reserved2;
__le16 regs_offset /* Offset of this blocks attention registers in the attention registers array (in dbg_attn_reg units) */;
u16 regs_offset /* Offset of this blocks attention registers in the attention registers array (in dbg_attn_reg units) */;
};
/*
@ -285,15 +287,15 @@ struct dbg_attn_block
*/
struct dbg_attn_reg_result
{
__le32 data;
u32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF /* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF /* Number of attention indexes in this register */
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
__le16 block_attn_offset /* The offset of this registers attentions within the blocks attentions list (a value in the range 0..number of block attentions-1) */;
__le16 reserved;
__le32 sts_val /* Value read from the STS attention register */;
__le32 mask_val /* Value read from the MASK attention register */;
u16 block_attn_offset /* The offset of this registers attentions within the blocks attentions list (a value in the range 0..number of block attentions-1) */;
u16 reserved;
u32 sts_val /* Value read from the STS attention register */;
u32 mask_val /* Value read from the MASK attention register */;
};
/*
@ -307,7 +309,7 @@ struct dbg_attn_block_result
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F /* Number of registers in the block in which at least one attention bit is set */
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
__le16 names_offset /* Offset of this registers block attention names in the attention name offsets array */;
u16 names_offset /* Offset of this registers block attention names in the attention name offsets array */;
struct dbg_attn_reg_result reg_results[15] /* result data for each register in the block in which at least one attention bit is set */;
};
@ -318,7 +320,7 @@ struct dbg_attn_block_result
*/
struct dbg_mode_hdr
{
__le16 data;
u16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 /* indicates if a mode expression should be evaluated (0/1) */
#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF /* offset (in bytes) in modes expression buffer. valid only if eval_mode is set. */
@ -331,14 +333,14 @@ struct dbg_mode_hdr
struct dbg_attn_reg
{
struct dbg_mode_hdr mode /* Mode header */;
__le16 block_attn_offset /* The offset of this registers attentions within the blocks attentions list (a value in the range 0..number of block attentions-1) */;
__le32 data;
u16 block_attn_offset /* The offset of this registers attentions within the blocks attentions list (a value in the range 0..number of block attentions-1) */;
u32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF /* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF /* Number of attention in this register */
#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
__le32 sts_clr_address /* STS_CLR attention register GRC address (in dwords) */;
__le32 mask_address /* MASK attention register GRC address (in dwords) */;
u32 sts_clr_address /* STS_CLR attention register GRC address (in dwords) */;
u32 mask_address /* MASK attention register GRC address (in dwords) */;
};
@ -361,7 +363,7 @@ struct dbg_bus_block
{
u8 num_of_lines /* Number of debug lines in this block (excluding signature and latency events). */;
u8 has_latency_events /* Indicates if this block has a latency events debug line (0/1). */;
__le16 lines_offset /* Offset of this blocks lines in the Debug Bus lines array. */;
u16 lines_offset /* Offset of this blocks lines in the Debug Bus lines array. */;
};
@ -372,7 +374,7 @@ struct dbg_bus_block_user_data
{
u8 num_of_lines /* Number of debug lines in this block (excluding signature and latency events). */;
u8 has_latency_events /* Indicates if this block has a latency events debug line (0/1). */;
__le16 names_offset /* Offset of this blocks lines in the debug bus line name offsets array. */;
u16 names_offset /* Offset of this blocks lines in the debug bus line name offsets array. */;
};
@ -408,12 +410,12 @@ struct dbg_dump_cond_hdr
*/
struct dbg_dump_mem
{
__le32 dword0;
u32 dword0;
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
__le32 dword1;
u32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF /* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 /* indicates if the register is wide-bus */
@ -428,7 +430,7 @@ struct dbg_dump_mem
*/
struct dbg_dump_reg
{
__le32 data;
u32 data;
#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates if the register is wide-bus */
@ -443,7 +445,7 @@ struct dbg_dump_reg
*/
struct dbg_dump_split_hdr
{
__le32 hdr;
u32 hdr;
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF /* size in dwords of the data following this header */
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF /* split type ID */
@ -457,7 +459,7 @@ struct dbg_dump_split_hdr
struct dbg_idle_chk_cond_hdr
{
struct dbg_mode_hdr mode /* Mode header */;
__le16 data_size /* size in dwords of the data following this header */;
u16 data_size /* size in dwords of the data following this header */;
};
@ -466,14 +468,14 @@ struct dbg_idle_chk_cond_hdr
*/
struct dbg_idle_chk_cond_reg
{
__le32 data;
u32 data;
#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF /* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 /* indicates if the register is wide-bus */
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF /* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
__le16 num_entries /* number of registers entries to check */;
u16 num_entries /* number of registers entries to check */;
u8 entry_size /* size of registers entry (in dwords) */;
u8 start_entry /* index of the first entry to check */;
};
@ -484,14 +486,14 @@ struct dbg_idle_chk_cond_reg
*/
struct dbg_idle_chk_info_reg
{
__le32 data;
u32 data;
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF /* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 /* indicates if the register is wide-bus */
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF /* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size /* register size in dwords */;
u16 size /* register size in dwords */;
struct dbg_mode_hdr mode /* Mode header */;
};
@ -511,8 +513,8 @@ union dbg_idle_chk_reg
*/
struct dbg_idle_chk_result_hdr
{
__le16 rule_id /* Failing rule index */;
__le16 mem_entry_id /* Failing memory entry index */;
u16 rule_id /* Failing rule index */;
u16 mem_entry_id /* Failing memory entry index */;
u8 num_dumped_cond_regs /* number of dumped condition registers */;
u8 num_dumped_info_regs /* number of dumped condition registers */;
u8 severity /* from dbg_idle_chk_severity_types enum */;
@ -531,7 +533,7 @@ struct dbg_idle_chk_result_reg_hdr
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F /* register index within the failing rule */
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
u8 start_entry /* index of the first checked entry */;
__le16 size /* register size in dwords */;
u16 size /* register size in dwords */;
};
@ -540,15 +542,15 @@ struct dbg_idle_chk_result_reg_hdr
*/
struct dbg_idle_chk_rule
{
__le16 rule_id /* Idle Check rule ID */;
u16 rule_id /* Idle Check rule ID */;
u8 severity /* value from dbg_idle_chk_severity_types enum */;
u8 cond_id /* Condition ID */;
u8 num_cond_regs /* number of condition registers */;
u8 num_info_regs /* number of info registers */;
u8 num_imms /* number of immediates in the condition */;
u8 reserved1;
__le16 reg_offset /* offset of this rules registers in the idle check register array (in dbg_idle_chk_reg units) */;
__le16 imm_offset /* offset of this rules immediate values in the immediate values array (in dwords) */;
u16 reg_offset /* offset of this rules registers in the idle check register array (in dbg_idle_chk_reg units) */;
u16 imm_offset /* offset of this rules immediate values in the immediate values array (in dwords) */;
};
@ -557,7 +559,7 @@ struct dbg_idle_chk_rule
*/
struct dbg_idle_chk_rule_parsing_data
{
__le32 data;
u32 data;
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 /* indicates if this register has a FW message */
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF /* Offset of this rules strings in the debug strings array (in bytes) */
@ -583,7 +585,7 @@ enum dbg_idle_chk_severity_types
*/
struct dbg_bus_block_data
{
__le16 data;
u16 data;
#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF /* 4-bit value: bit i set -> dword/qword i is enabled. */
#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF /* Number of dwords/qwords to shift right the debug data (0-3) */
@ -661,8 +663,8 @@ struct dbg_bus_trigger_state_data
*/
struct dbg_bus_mem_addr
{
__le32 lo;
__le32 hi;
u32 lo;
u32 hi;
};
/*
@ -672,7 +674,7 @@ struct dbg_bus_pci_buf_data
{
struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
__le32 size /* PCI buffer size in bytes */;
u32 size /* PCI buffer size in bytes */;
};
/*
@ -708,13 +710,13 @@ union dbg_bus_storm_eid_params
struct dbg_bus_storm_data
{
u8 enabled /* indicates if the Storm is enabled for recording */;
u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
u8 mode /* Storm debug mode, valid only if the Storm is enabled (use enum dbg_bus_storm_modes) */;
u8 hw_id /* HW ID associated with the Storm */;
u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
u8 eid_range_not_mask /* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is set. */;
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
union dbg_bus_storm_eid_params eid_filter_params /* EID filter params to filter on. Valid only if eid_filter_en is set. */;
__le32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
};
/*
@ -722,13 +724,13 @@ struct dbg_bus_storm_data
*/
struct dbg_bus_data
{
__le32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state (use enum dbg_bus_states) */;
u8 hw_dwords /* HW dwords per cycle */;
__le16 hw_id_mask /* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the HW ID of dword/qword i */;
u16 hw_id_mask /* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the HW ID of dword/qword i */;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
u8 target /* Output target (use enum dbg_bus_targets) */;
u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
u8 timestamp_input_en /* Indicates if timestamp recording is enabled (0/1) */;
@ -736,7 +738,7 @@ struct dbg_bus_data
u8 adding_filter /* If true, the next added constraint belongs to the filter. Otherwise, it belongs to the last added trigger state. Valid only if either filter or triggers are enabled. */;
u8 filter_pre_trigger /* Indicates if the recording filter should be applied before the trigger. Valid only if both filter and trigger are enabled (0/1) */;
u8 filter_post_trigger /* Indicates if the recording filter should be applied after the trigger. Valid only if both filter and trigger are enabled (0/1) */;
__le16 reserved;
u16 reserved;
u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */;
struct dbg_bus_trigger_state_data trigger_states[3] /* trigger states data */;
u8 next_trigger_state /* ID of next trigger state to be added */;
@ -879,8 +881,8 @@ struct dbg_grc_data
{
u8 params_initialized /* Indicates if the GRC parameters were initialized */;
u8 reserved1;
__le16 reserved2;
__le32 param_val[48] /* Value of each GRC parameter. Array size must match the enum dbg_grc_params. */;
u16 reserved2;
u32 param_val[48] /* Value of each GRC parameter. Array size must match the enum dbg_grc_params. */;
};
@ -998,7 +1000,7 @@ enum dbg_status
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
DBG_STATUS_DMAE_FAILED,
DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@ -1034,10 +1036,10 @@ enum dbg_storms
*/
struct idle_chk_data
{
__le32 buf_size /* Idle check buffer size in dwords */;
u32 buf_size /* Idle check buffer size in dwords */;
u8 buf_size_set /* Indicates if the idle check buffer size was set (0/1) */;
u8 reserved1;
__le16 reserved2;
u16 reserved2;
};
/*
@ -1053,7 +1055,8 @@ struct dbg_tools_data
u8 chip_id /* Chip ID (from enum chip_ids) */;
u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
u8 reserved;
u8 use_dmae /* Indicates if DMAE should be used */;
u32 num_regs_read /* Numbers of registers that were read since last log */;
};

View file

@ -62,7 +62,7 @@ struct xstorm_eth_conn_st_ctx
struct e4_xstorm_eth_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
@ -864,51 +864,6 @@ struct e5_xstorm_eth_conn_ag_ctx
__le16 word15 /* word15 */;
};
struct e5_ystorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 terminate_spqe /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 tx_bd_cons_upd /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_tstorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1030,6 +985,51 @@ struct e5_tstorm_eth_conn_ag_ctx
__le16 e4_reserved9 /* word4 */;
};
struct e5_ystorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 terminate_spqe /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 tx_bd_cons_upd /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_ustorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1121,10 +1121,11 @@ struct e5_eth_conn_context
struct regpair tstorm_st_padding[2] /* padding */;
struct pstorm_eth_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct xstorm_eth_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct regpair xstorm_st_padding[2] /* padding */;
struct e5_xstorm_eth_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct e5_tstorm_eth_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct ystorm_eth_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct e5_ystorm_eth_conn_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
struct e5_tstorm_eth_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct e5_ustorm_eth_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct ustorm_eth_conn_st_ctx ustorm_st_context /* ustorm storm context */;
struct mstorm_eth_conn_st_ctx mstorm_st_context /* mstorm storm context */;
@ -1184,6 +1185,7 @@ enum eth_event_opcode
ETH_EVENT_RX_DELETE_UDP_FILTER,
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@ -1206,9 +1208,9 @@ enum eth_filter_action
*/
struct eth_filter_cmd
{
u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
u8 type /* Filter Type (MAC/VLAN/Pair/VNI) (use enum eth_filter_type) */;
u8 vport_id /* the vport id */;
u8 action /* filter command action: add/remove/replace */;
u8 action /* filter command action: add/remove/replace (use enum eth_filter_action) */;
u8 reserved0;
__le32 vni;
__le16 mac_lsb;
@ -1295,6 +1297,7 @@ enum eth_ramrod_cmd_id
ETH_RAMROD_RX_DELETE_UDP_FILTER /* RX - Delete a UDP Filter to the Searcher */,
ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
ETH_RAMROD_GFT_UPDATE_FILTER /* RX - Add/Delete a GFT Filter to the Searcher */,
ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_ETH_RAMROD_CMD_ID
};
@ -1373,7 +1376,7 @@ struct eth_vport_rss_config
#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF /* if set update the rss keys */
#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
u8 rss_id /* The RSS engine ID. Must be allocated to each vport with RSS enabled. Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type. */;
u8 rss_mode /* The RSS mode for this function */;
u8 rss_mode /* The RSS mode for this function (use enum eth_vport_rss_mode) */;
u8 update_rss_key /* if set update the rss key */;
u8 update_rss_ind_table /* if set update the indirection table values */;
u8 update_rss_capabilities /* if set update the capabilities and indirection table size. */;
@ -1416,7 +1419,6 @@ struct eth_vport_rx_mode
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
__le16 reserved2[3];
};
@ -1429,8 +1431,8 @@ struct eth_vport_tpa_param
u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
u8 tpa_pkt_split_flg /* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment allowed */;
u8 tpa_hdr_data_split_flg /* If set, put header of first TPA segment on bd and data on SGE */;
u8 tpa_pkt_split_flg /* If set, start each TPA segment on new BD (GRO mode). One BD per segment allowed. */;
u8 tpa_hdr_data_split_flg /* If set, put header of first TPA segment on first BD and data on second BD. */;
u8 tpa_gro_consistent_flg /* If set, GRO data consistent will checked for TPA continue */;
u8 tpa_max_aggs_num /* maximum number of opened aggregations per v-port */;
__le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
@ -1459,12 +1461,11 @@ struct eth_vport_tx_mode
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
__le16 reserved2[3];
};
/*
* Ramrod data for rx create gft action
* GFT filter update action type.
*/
enum gft_filter_update_action
{
@ -1474,17 +1475,6 @@ enum gft_filter_update_action
};
/*
* Ramrod data for rx create gft action
*/
enum gft_logic_filter_type
{
GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
MAX_GFT_LOGIC_FILTER_TYPE
};
/*
@ -1505,7 +1495,7 @@ struct rx_add_openflow_filter_data
__le16 vlan_id /* Searcher String - Vlan ID */;
__le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type (use enum eth_ipv4_frag_type) */;
u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
__le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
@ -1541,7 +1531,7 @@ struct rx_create_openflow_action_data
struct rx_queue_start_ramrod_data
{
__le16 rx_queue_id /* ID of RX queue */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
__le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
__le16 bd_max_bytes /* maximal bytes that can be places on the bd */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* index of the protocol index */;
@ -1590,7 +1580,8 @@ struct rx_queue_update_ramrod_data
u8 complete_cqe_flg /* post completion to the CQE ring if set */;
u8 complete_event_flg /* post completion to the event ring if set */;
u8 vport_id /* ID of virtual port */;
u8 reserved[4];
u8 set_default_rss_queue /* If set, update default rss queue to this RX queue. */;
u8 reserved[3];
u8 reserved1 /* FW reserved. */;
u8 reserved2 /* FW reserved. */;
u8 reserved3 /* FW reserved. */;
@ -1607,7 +1598,7 @@ struct rx_udp_filter_data
{
__le16 action_icid /* CID of Action to run for this filter */;
__le16 vlan_id /* Searcher String - Vlan ID */;
u8 ip_type /* Searcher String - IP Type */;
u8 ip_type /* Searcher String - IP Type (use enum eth_ip_type) */;
u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
__le16 reserved1;
__le32 ip_dst_addr[4] /* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */;
@ -1619,17 +1610,22 @@ struct rx_udp_filter_data
/*
* Ramrod to add filter - filter is packet headr of type of packet wished to pass certin FW flow
* add or delete GFT filter - filter is packet header of type of packet wished to pass certain FW flow
*/
struct rx_update_gft_filter_data
{
struct regpair pkt_hdr_addr /* Pointer to Packet Header That Defines GFT Filter */;
__le16 pkt_hdr_length /* Packet Header Length */;
__le16 rx_qid_or_action_icid /* If is_rfs flag is set: Queue Id to associate filter with else: action icid */;
u8 vport_id /* Field is used if is_rfs flag is set: vport Id of which to associate filter with */;
u8 filter_type /* Use enum to set type of flow using gft HW logic blocks */;
u8 filter_action /* Use to set type of action on filter */;
__le16 action_icid /* Action icid. Valid if action_icid_valid flag set. */;
__le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
__le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
__le16 vport_id /* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */;
u8 action_icid_valid /* If set, action_icid will be used for GFT filter update. */;
u8 rx_qid_valid /* If set, rx_qid will be used for traffic steering, in addition to vport_id. flow_id_valid must be cleared. If cleared, queue ID will be selected by RSS. */;
u8 flow_id_valid /* If set, flow_id will be reported by the CQE; rx_qid_valid must be cleared. If cleared, flow_id 0 will be reported by the CQE. */;
u8 filter_action /* Use to set type of action on filter (use enum gft_filter_update_action) */;
u8 assert_on_error /* 0 - don't assert in case of error. Just return an error code. 1 - assert in case of error. */;
u8 reserved;
};
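A minimal sketch of the new valid-flag convention: steer matching packets to one RX queue, which requires flow_id reporting to stay off. GFT_ADD_FILTER is assumed to be a member of enum gft_filter_update_action, and the helper is illustrative.

static void
fill_gft_steer_to_queue(struct rx_update_gft_filter_data *f, uint16_t vport,
    uint16_t rxq)
{
	f->vport_id = htole16(vport);
	f->rx_qid = htole16(rxq);
	f->rx_qid_valid = 1;
	f->flow_id_valid = 0;	/* mutually exclusive with rx_qid_valid */
	f->action_icid_valid = 0;
	f->filter_action = GFT_ADD_FILTER;	/* assumed enum member */
	f->assert_on_error = 0;	/* return an error code, don't assert */
}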
@ -1660,7 +1656,7 @@ struct tx_queue_start_ramrod_data
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
u8 pxp_st_hint /* PXP command Steering tag hint */;
u8 pxp_st_hint /* PXP command Steering tag hint (use enum pxp_tph_st_hint) */;
u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
__le16 pxp_st_index /* PXP command Steering tag index */;
@ -1685,6 +1681,18 @@ struct tx_queue_stop_ramrod_data
};
/*
* Ramrod data for tx queue update ramrod
*/
struct tx_queue_update_ramrod_data
{
__le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
__le16 qm_pq_id /* Updated QM PQ ID */;
__le32 reserved0;
struct regpair reserved1[5];
};
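A one-step sketch of migrating a TX queue to a new QM physical queue with this ramrod data (the helper and new_pq_id are assumptions); note the LL2 core variant earlier in this commit carries the same two fields with different widths.

static void
fill_tx_queue_pq_update(struct tx_queue_update_ramrod_data *u,
    uint16_t new_pq_id)
{
	u->update_qm_pq_id_flg = htole16(1);
	u->qm_pq_id = htole16(new_pq_id);
}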
/*
* Ramrod data for vport update ramrod
@ -1718,9 +1726,9 @@ struct vport_start_ramrod_data
u8 untagged /* If set untagged filter (vlan0) is added to current Vport, otherwise port is marked as any-vlan */;
struct eth_tx_err_vals tx_err_behav /* Desired behavior per TX error type */;
u8 zero_placement_offset /* If set, ETH header padding will not inserted. placement_offset will be zero. */;
u8 ctl_frame_mac_check_en /* If set, Contorl frames will be filtered according to MAC check. */;
u8 ctl_frame_ethtype_check_en /* If set, Contorl frames will be filtered according to ethtype check. */;
u8 reserved[5];
u8 ctl_frame_mac_check_en /* If set, control frames will be filtered according to MAC check. */;
u8 ctl_frame_ethtype_check_en /* If set, control frames will be filtered according to ethtype check. */;
u8 reserved[1];
};
@ -1768,8 +1776,8 @@ struct vport_update_ramrod_data_cmn
u8 update_mtu_flg /* If set, MTU will be updated. Vport must be not active. */;
__le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
u8 update_ctl_frame_checks_en_flg /* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be updated */;
u8 ctl_frame_mac_check_en /* If set, Contorl frames will be filtered according to MAC check. */;
u8 ctl_frame_ethtype_check_en /* If set, Contorl frames will be filtered according to ethtype check. */;
u8 ctl_frame_mac_check_en /* If set, control frames will be filtered according to MAC check. */;
u8 ctl_frame_ethtype_check_en /* If set, control frames will be filtered according to ethtype check. */;
u8 reserved[15];
};
@ -1786,6 +1794,7 @@ struct vport_update_ramrod_data
struct vport_update_ramrod_data_cmn common /* Common data for all vport update ramrods */;
struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
__le32 reserved[3];
struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */;
struct vport_update_ramrod_mcast approx_mcast;
struct eth_vport_rss_config rss_config /* rss config data */;
@ -1799,7 +1808,7 @@ struct vport_update_ramrod_data
struct E4XstormEthConnAgCtxDqExtLdPart
{
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
@ -2072,7 +2081,7 @@ struct e4_mstorm_eth_conn_ag_ctx
struct e4_xstorm_eth_hw_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
@ -2806,30 +2815,30 @@ struct gft_cam_line
/*
* GFT CAM line struct (for driversim use)
* GFT CAM line struct with fields breakout
*/
struct gft_cam_line_mapped
{
__le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 /* Indication if the line is valid. */
#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 /* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 /* (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 /* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 /* (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF /* use enum gft_profile_upper_protocol_type (use enum gft_profile_upper_protocol_type) */
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF /* (use enum gft_profile_upper_protocol_type) */
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF /* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF /* (use enum gft_profile_tunnel_type) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 /* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 /* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF /* use enum gft_profile_upper_protocol_type (use enum gft_profile_upper_protocol_type) */
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF /* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25

View file

@ -159,8 +159,10 @@ struct pstorm_fcoe_conn_st_ctx
#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 /* Outer Vlan flag */
#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF
#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4
#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1 /* Indication that there should be a single vlan (for UFP mode) */
#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4
#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7
#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5
u8 did_2 /* DID FC address - Third byte that is sent to NW via PBF */;
u8 did_1 /* DID FC address - Second byte that is sent to NW via PBF */;
u8 did_0 /* DID FC address - First byte that is sent to NW via PBF */;
@ -239,7 +241,7 @@ struct xstorm_fcoe_conn_st_ctx
struct e4_xstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 fcoe_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
@ -490,7 +492,7 @@ struct ustorm_fcoe_conn_st_ctx
struct e4_tstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 fcoe_state /* state */;
u8 state /* state */;
u8 flags0;
#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
@ -670,9 +672,10 @@ struct tstorm_fcoe_conn_st_ctx
#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
u8 q_relative_offset /* CQ, RQ and CMDQ relative offset for connection */;
u8 cq_relative_offset /* CQ relative offset for connection */;
u8 cmdq_relative_offset /* CmdQ relative offset for connection */;
u8 bdq_resource_id /* The BDQ resource ID to which this function is mapped */;
u8 reserved0[5] /* Alignment to 128b */;
u8 reserved0[4] /* Alignment to 128b */;
};
struct e4_mstorm_fcoe_conn_ag_ctx
@ -719,7 +722,8 @@ struct e4_mstorm_fcoe_conn_ag_ctx
struct fcoe_mstorm_fcoe_conn_st_ctx_fp
{
__le16 xfer_prod /* XferQ producer */;
__le16 reserved1;
u8 num_cqs /* Number of CQs per function (internal to FW) */;
u8 reserved1;
u8 protection_info;
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 /* Does this connection support protection (if a couple of GOSes share this connection, it is enough that one of them supports protection) */
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -1270,7 +1274,6 @@ struct e5_fcoe_conn_context
struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_fcoe_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct e5_xstorm_fcoe_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct regpair xstorm_ag_padding[6] /* padding */;
struct ustorm_fcoe_conn_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
struct e5_tstorm_fcoe_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;

View file

@@ -38,17 +38,19 @@
#define NUM_OF_VLAN_PRIORITIES 8
/* Size of CRC8 lookup table */
#ifndef LINUX_REMOVE
#define CRC8_TABLE_SIZE 256
#endif
/*
* BRB RAM init requirements
*/
struct init_brb_ram_req
{
__le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
__le32 headroom_per_tc /* headroom size per TC, in bytes */;
__le32 min_pkt_size /* min packet size, in bytes */;
__le32 max_ports_per_engine /* max number of ports per engine */;
u32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
u32 headroom_per_tc /* headroom size per TC, in bytes */;
u32 min_pkt_size /* min packet size, in bytes */;
u32 max_ports_per_engine /* max number of ports per engine */;
u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
};
@@ -60,7 +62,7 @@ struct init_ets_tc_req
{
u8 use_sp /* if set, this TC participates in the arbitration with a strict priority (the priority is equal to the TC ID) */;
u8 use_wfq /* if set, this TC participates in the arbitration with a WFQ weight (indicated by the weight field) */;
__le16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
u16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
};
/*
@@ -68,7 +70,7 @@ struct init_ets_tc_req
*/
struct init_ets_req
{
__le32 mtu /* Max packet size (in bytes) */;
u32 mtu /* Max packet size (in bytes) */;
struct init_ets_tc_req tc_req[NUM_OF_TCS] /* ETS initialization requirements per TC. */;
};
@@ -79,10 +81,10 @@ struct init_ets_req
*/
struct init_nig_lb_rl_req
{
__le16 lb_mac_rate /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
__le16 lb_rate /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
__le32 mtu /* Max packet size (in bytes) */;
__le16 tc_rate[NUM_OF_PHYS_TCS] /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */;
u16 lb_mac_rate /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
u16 lb_rate /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
u32 mtu /* Max packet size (in bytes) */;
u16 tc_rate[NUM_OF_PHYS_TCS] /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */;
};
@@ -112,9 +114,9 @@ struct init_qm_port_params
{
u8 active /* Indicates if this port is active */;
u8 active_phys_tcs /* Vector of valid bits for active TCs used by this port */;
__le16 num_pbf_cmd_lines /* number of PBF command lines that can be used by this port */;
__le16 num_btb_blocks /* number of BTB blocks that can be used by this port */;
__le16 reserved;
u16 num_pbf_cmd_lines /* number of PBF command lines that can be used by this port */;
u16 num_btb_blocks /* number of BTB blocks that can be used by this port */;
u16 reserved;
};
@@ -135,9 +137,9 @@ struct init_qm_pq_params
*/
struct init_qm_vport_params
{
__le32 vport_rl /* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if VPORT RL is globally disabled. */;
__le16 vport_wfq /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is globally disabled. */;
__le16 first_tx_pq_id[NUM_OF_TCS] /* the first Tx PQ ID associated with this VPORT for each TC. */;
u32 vport_rl /* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if VPORT RL is globally disabled. */;
u16 vport_wfq /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is globally disabled. */;
u16 first_tx_pq_id[NUM_OF_TCS] /* the first Tx PQ ID associated with this VPORT for each TC. */;
};
#endif /* __ECORE_HSI_INIT_FUNC__ */
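The __le16/__le32 to u16/u32 switch in the structures above marks them as host-order inputs consumed by the init code, not little-endian images handed to the device. A hedged sketch of the distinction, assuming a GCC/Clang-style byte-order macro:

#include <stdint.h>

/* Host-order fields (u16/u32) are used as-is; wire-order fields (__le16)
 * would need a swap on big-endian hosts before reaching the device. */
static inline uint16_t cpu_to_le16_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return (uint16_t)((v >> 8) | (v << 8));
#else
	return v; /* little-endian host: identity */
#endif
}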

View file

@@ -55,51 +55,6 @@ enum chip_ids
};
struct fw_asserts_ram_section
{
__le16 section_ram_line_offset /* The offset of the section in the RAM in RAM lines (64-bit units) */;
__le16 section_ram_line_size /* The size of the section in RAM lines (64-bit units) */;
u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
u8 list_element_dword_size /* The size of an assert list element in dwords */;
u8 list_num_elements /* The number of elements in the asserts list */;
u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
};
struct fw_ver_num
{
u8 major /* Firmware major version number */;
u8 minor /* Firmware minor version number */;
u8 rev /* Firmware revision version number */;
u8 eng /* Firmware engineering version number (for bootleg versions) */;
};
struct fw_ver_info
{
__le16 tools_ver /* Tools version number */;
u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
u8 reserved1;
struct fw_ver_num num /* FW version number */;
__le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
__le32 reserved2;
};
struct fw_info
{
struct fw_ver_info ver /* FW version information */;
struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
};
struct fw_info_location
{
__le32 grc_addr /* GRC address where the fw_info struct is located. */;
__le32 size /* Size of the fw_info structure (thats located at the grc_addr). */;
};
enum init_modes
{
MODE_BB_A0_DEPRECATED,
@@ -149,8 +104,8 @@ enum init_split_types
*/
struct bin_buffer_hdr
{
__le32 offset /* buffer offset in bytes from the beginning of the binary file */;
__le32 length /* buffer length in bytes */;
u32 offset /* buffer offset in bytes from the beginning of the binary file */;
u32 length /* buffer length in bytes */;
};
@@ -173,7 +128,7 @@ enum bin_init_buffer_type
*/
struct init_array_raw_hdr
{
__le32 data;
u32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
@@ -185,7 +140,7 @@ struct init_array_raw_hdr
*/
struct init_array_standard_hdr
{
__le32 data;
u32 data;
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF /* Init array size (in dwords) */
@@ -197,7 +152,7 @@ struct init_array_standard_hdr
*/
struct init_array_zipped_hdr
{
__le32 data;
u32 data;
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF /* Init array zipped size (in bytes) */
@@ -209,7 +164,7 @@ struct init_array_zipped_hdr
*/
struct init_array_pattern_hdr
{
__le32 data;
u32 data;
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF /* pattern size in dword */
@@ -251,13 +206,13 @@ enum init_array_types
*/
struct init_callback_op
{
__le32 op_data;
u32 op_data;
#define INIT_CALLBACK_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
__le16 callback_id /* Callback ID */;
__le16 block_id /* Blocks ID */;
u16 callback_id /* Callback ID */;
u16 block_id /* Blocks ID */;
};
@@ -266,12 +221,12 @@ struct init_callback_op
*/
struct init_delay_op
{
__le32 op_data;
u32 op_data;
#define INIT_DELAY_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_DELAY_OP_OP_SHIFT 0
#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT 4
__le32 delay /* delay in us */;
u32 delay /* delay in us */;
};
@@ -280,15 +235,15 @@ struct init_delay_op
*/
struct init_if_mode_op
{
__le32 op_data;
u32 op_data;
#define INIT_IF_MODE_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_IF_MODE_OP_OP_SHIFT 0
#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF /* Commands to skip if the modes dont match */
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
__le16 reserved2;
__le16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
u16 reserved2;
u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
};
@@ -297,7 +252,7 @@ struct init_if_mode_op
*/
struct init_if_phase_op
{
__le32 op_data;
u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_IF_PHASE_OP_OP_SHIFT 0
#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 /* Indicates if DMAE is enabled in this phase */
@@ -306,7 +261,7 @@ struct init_if_phase_op
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF /* Commands to skip if the phases dont match */
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
u32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
@@ -333,12 +288,12 @@ enum init_mode_ops
*/
struct init_raw_op
{
__le32 op_data;
u32 op_data;
#define INIT_RAW_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
#define INIT_RAW_OP_PARAM1_SHIFT 4
__le32 param2 /* Init param 2 */;
u32 param2 /* Init param 2 */;
};
/*
@@ -346,8 +301,8 @@ struct init_raw_op
*/
struct init_op_array_params
{
__le16 size /* array size in dwords */;
__le16 offset /* array start offset in dwords */;
u16 size /* array size in dwords */;
u16 offset /* array start offset in dwords */;
};
/*
@@ -355,9 +310,9 @@ struct init_op_array_params
*/
union init_write_args
{
__le32 inline_val /* value to write, used when init source is INIT_SRC_INLINE */;
__le32 zeros_count /* number of zeros to write, used when init source is INIT_SRC_ZEROS */;
__le32 array_offset /* array offset to write, used when init source is INIT_SRC_ARRAY */;
u32 inline_val /* value to write, used when init source is INIT_SRC_INLINE */;
u32 zeros_count /* number of zeros to write, used when init source is INIT_SRC_ZEROS */;
u32 array_offset /* array offset to write, used when init source is INIT_SRC_ARRAY */;
struct init_op_array_params runtime /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */;
};
@@ -366,7 +321,7 @@ union init_write_args
*/
struct init_write_op
{
__le32 data;
u32 data;
#define INIT_WRITE_OP_OP_MASK 0xF /* init operation, from init_op_types enum */
#define INIT_WRITE_OP_OP_SHIFT 0
#define INIT_WRITE_OP_SOURCE_MASK 0x7 /* init source type, taken from init_source_types enum */
@@ -385,7 +340,7 @@ struct init_write_op
*/
struct init_read_op
{
__le32 op_data;
u32 op_data;
#define INIT_READ_OP_OP_MASK 0xF /* init operation, from init_op_types enum */
#define INIT_READ_OP_OP_SHIFT 0
#define INIT_READ_OP_POLL_TYPE_MASK 0xF /* polling type, from init_poll_types enum */
@@ -394,7 +349,7 @@ struct init_read_op
#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF /* internal (absolute) GRC address, in dwords */
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val /* expected polling value, used only when polling is done */;
u32 expected_val /* expected polling value, used only when polling is done */;
};
/*
@@ -463,11 +418,11 @@ enum init_source_types
*/
struct iro
{
__le32 base /* RAM field offset */;
__le16 m1 /* multiplier 1 */;
__le16 m2 /* multiplier 2 */;
__le16 m3 /* multiplier 3 */;
__le16 size /* RAM field size */;
u32 base /* RAM field offset */;
u16 m1 /* multiplier 1 */;
u16 m2 /* multiplier 2 */;
u16 m3 /* multiplier 3 */;
u16 size /* RAM field size */;
};
#endif /* __ECORE_HSI_INIT_TOOL__ */
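As a worked example of the iro fields above, a Storm RAM offset is formed from the base plus per-index multipliers. This is a hedged sketch, not the driver's actual accessor (which is macro-based over a global IRO array):

/* Sketch: compute a RAM offset from an iro entry; m1/m2/m3 scale
 * per-queue/per-function indices, and size bounds the field width. */
static inline uint32_t iro_offset_sketch(const struct iro *p,
    uint16_t id1, uint16_t id2, uint16_t id3)
{
	return p->base + (uint32_t)p->m1 * id1 + (uint32_t)p->m2 * id2 +
	    (uint32_t)p->m3 * id3;
}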

View file

@@ -46,7 +46,7 @@
*/
struct ystorm_iscsi_conn_st_ctx
{
__le32 reserved[4];
__le32 reserved[8];
};
/*
@@ -63,8 +63,8 @@ struct pstorm_iscsi_tcp_conn_st_ctx
*/
struct xstorm_iscsi_tcp_conn_st_ctx
{
__le32 reserved_iscsi[40];
__le32 reserved_tcp[4];
__le32 reserved_iscsi[44];
};
struct e4_xstorm_iscsi_conn_ag_ctx
@@ -411,7 +411,7 @@ struct e4_tstorm_iscsi_conn_ag_ctx
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 rx_tcp_checksum_err_cnt /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
@@ -498,7 +498,7 @@ struct e4_ustorm_iscsi_conn_ag_ctx
*/
struct tstorm_iscsi_conn_st_ctx
{
__le32 reserved[40];
__le32 reserved[44];
};
struct e4_mstorm_iscsi_conn_ag_ctx
@@ -545,7 +545,7 @@ struct e4_mstorm_iscsi_conn_ag_ctx
struct mstorm_iscsi_tcp_conn_st_ctx
{
__le32 reserved_tcp[20];
__le32 reserved_iscsi[8];
__le32 reserved_iscsi[12];
};
/*
@@ -562,7 +562,6 @@ struct ustorm_iscsi_conn_st_ctx
struct e4_iscsi_conn_context
{
struct ystorm_iscsi_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct regpair pstorm_st_padding[2] /* padding */;
struct pb_context xpb2_context /* xpb2 context */;

View file

@@ -65,7 +65,7 @@ struct pstorm_iwarp_conn_st_ctx
*/
struct xstorm_iwarp_conn_st_ctx
{
__le32 reserved[44];
__le32 reserved[48];
};
struct e4_xstorm_iwarp_conn_ag_ctx
@@ -331,85 +331,85 @@ struct e4_tstorm_iwarp_conn_ag_ctx
u8 reserved0 /* cdu_validation */;
u8 state /* state */;
u8 flags0;
#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit3 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 /* bit5 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit3 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 /* bit5 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 /* timer1cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 /* timer2cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 /* timer1cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 /* timer2cf */
#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf9 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 /* cf1en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 /* cf2en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 /* cf9 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 /* cf10 */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 /* cf1en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 /* cf2en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf9en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_EN_MASK 0x1 /* cf9en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_EN_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 /* cf10en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 /* rule6en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 /* rule6en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 unaligned_nxt_seq /* reg2 */;
@@ -428,7 +428,7 @@ struct e4_tstorm_iwarp_conn_ag_ctx
__le16 conn_dpi /* conn_dpi */;
__le16 rq_prod /* word3 */;
__le32 snd_seq /* reg9 */;
__le32 reg10 /* reg10 */;
__le32 last_hq_sequence /* reg10 */;
};
/*
@@ -896,21 +896,23 @@ struct e5_iwarp_conn_context
struct iwarp_create_qp_ramrod_data
{
u8 flags;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
u8 reserved1 /* Basic/Enhanced */;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_SHIFT 6
#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 7
u8 reserved1 /* Basic/Enhanced (use enum mpa_negotiation_mode) */;
__le16 pd;
__le16 sq_num_pages;
__le16 rq_num_pages;
@@ -963,13 +965,13 @@ struct iwarp_eqe_data_tcp_async_completion
enum iwarp_eqe_sync_opcode
{
IWARP_EVENT_TYPE_TCP_OFFLOAD=11 /* iWARP event queue response after option 2 offload Ramrod */,
IWARP_EVENT_TYPE_TCP_ABORT,
IWARP_EVENT_TYPE_MPA_OFFLOAD /* Synchronous completion for MPA offload Request */,
IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
IWARP_EVENT_TYPE_CREATE_QP,
IWARP_EVENT_TYPE_QUERY_QP,
IWARP_EVENT_TYPE_MODIFY_QP,
IWARP_EVENT_TYPE_DESTROY_QP,
IWARP_EVENT_TYPE_ABORT_TCP_OFFLOAD,
MAX_IWARP_EQE_SYNC_OPCODE
};
@@ -998,6 +1000,8 @@ enum iwarp_fw_return_code
IWARP_EXCEPTION_DETECTED_LLP_RESET /* LLP has Reset (either because of an RST, or a bad-close condition) - Used for async IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED */,
IWARP_EXCEPTION_DETECTED_IRQ_FULL /* Peer sent more outstanding Read Requests than IRD - Used for async IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED */,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY /* SEND request received with RQ empty - Used for async IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED */,
IWARP_EXCEPTION_DETECTED_SRQ_EMPTY /* SEND request received with SRQ empty - Used for async IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED */,
IWARP_EXCEPTION_DETECTED_SRQ_LIMIT /* Number of SRQ wqes is below the limit */,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT /* TCP Retransmissions timed out - Used for async IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED */,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR /* Peers Remote Access caused error */,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW /* CQ overflow detected */,
@@ -1046,22 +1050,25 @@ enum iwarp_modify_qp_new_state_type
*/
struct iwarp_modify_qp_ramrod_data
{
__le16 transition_to_state;
__le16 transition_to_state /* (use enum iwarp_modify_qp_new_state_type) */;
__le16 flags;
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 /* change QP state as per transition_to_state field */
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 /* If set, the rdma_rd/wr/atomic_en should be updated */
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
__le32 reserved3[3];
__le32 reserved4[8];
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 /* change QP state as per transition_to_state field */
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 /* If set, the rdma_rd/wr/atomic_en should be updated */
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 /* If set, the physicalQ1Val/physicalQ0Val/regularLatencyPhyQueue should be updated */
#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 5
#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x3FF
#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 6
__le16 physical_q0 /* Updated physicalQ0Val */;
__le16 physical_q1 /* Updated physicalQ1Val */;
__le32 reserved1[10];
};
@@ -1103,7 +1110,7 @@ struct iwarp_mpa_offload_ramrod_data
{
struct mpa_outgoing_params common;
__le32 tcp_cid;
u8 mode /* Basic/Enhanced */;
u8 mode /* Basic/Enhanced (use enum mpa_negotiation_mode) */;
u8 tcp_connect_side /* Passive/Active. use enum tcp_connect_mode */;
u8 rtr_pref;
#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 /* (use enum mpa_rtr_type) */
@@ -1114,9 +1121,10 @@ struct iwarp_mpa_offload_ramrod_data
struct mpa_ulp_buffer incoming_ulp_buffer /* host buffer for placing the incoming MPA reply */;
struct regpair async_eqe_output_buf /* host buffer for async tcp/mpa completion information - must have space for at least 8 bytes */;
struct regpair handle_for_async /* a host cookie that will be echoed back with in every qp-specific async EQE */;
struct regpair shared_queue_addr /* Address of shared queue adress that consist of SQ/RQ and FW internal queues (IRQ/ORQ/HQ) */;
struct regpair shared_queue_addr /* Address of the shared queue that consists of SQ/RQ and FW internal queues (IRQ/ORQ/HQ) */;
__le16 rcv_wnd /* TCP window after scaling */;
u8 stats_counter_id /* Statistics counter ID to use */;
u8 reserved3[15];
u8 reserved3[13];
};
@@ -1131,7 +1139,7 @@ struct iwarp_offload_params
__le16 physical_q0 /* Physical QM queue to be tied to logical Q0 */;
__le16 physical_q1 /* Physical QM queue to be tied to logical Q1 */;
u8 stats_counter_id /* Statistics counter ID to use */;
u8 mpa_mode /* Basic/Enhanced. Used for verification of incoming MPA request */;
u8 mpa_mode /* Basic/Enhanced. Used for verification of incoming MPA request (use enum mpa_negotiation_mode) */;
u8 reserved[10];
};
@@ -1165,13 +1173,13 @@ struct iwarp_query_qp_ramrod_data
enum iwarp_ramrod_cmd_id
{
IWARP_RAMROD_CMD_ID_TCP_OFFLOAD=11 /* iWARP TCP connection offload ramrod */,
IWARP_RAMROD_CMD_ID_TCP_ABORT /* Abort TCP connection without changing the QP state. */,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD /* iWARP MPA offload ramrod */,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
IWARP_RAMROD_CMD_ID_QUERY_QP,
IWARP_RAMROD_CMD_ID_MODIFY_QP,
IWARP_RAMROD_CMD_ID_DESTROY_QP,
IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD,
MAX_IWARP_RAMROD_CMD_ID
};
@@ -1240,8 +1248,10 @@ struct unaligned_opaque_data
u8 flags;
#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 /* packet reached window right edge */
#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x7F
#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 1
#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 /* Indication that the connection is closed. Clean the entire connections database. */
#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
__le32 cid;
};
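A short hedged sketch of consuming the new CONNECTION_CLOSED bit; the helper name is hypothetical:

static inline int unaligned_conn_closed_sketch(uint8_t flags)
{
	/* non-zero when FW flagged the connection closed, i.e. the
	 * unaligned-MPA state for it can be cleaned up */
	return (flags >> UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT) &
	    UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK;
}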

View file

@@ -57,8 +57,8 @@ struct e4_ystorm_rdma_task_ag_ctx
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 /* bit3 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
@@ -114,8 +114,8 @@ struct e4_mstorm_rdma_task_ag_ctx
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 /* bit3 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
@@ -606,7 +606,10 @@ struct rdma_init_func_hdr
u8 cq_ring_mode /* 0 for 32 bit cq producer and consumer counters and 1 for 16 bit */;
u8 vf_id /* This field should be assigned to Virtual Function ID if vf_valid == 1. Otherwise its dont care */;
u8 vf_valid;
u8 reserved[3];
u8 relaxed_ordering /* 1 for using relaxed ordering PCI writes */;
__le16 first_reg_srq_id /* The SRQ ID of the first regular (non XRC) SRQ */;
__le32 reg_srq_base_addr /* Logical base address of first regular (non XRC) SRQ */;
__le32 reserved;
};
@@ -645,29 +648,29 @@ enum rdma_ramrod_cmd_id
*/
struct rdma_register_tid_ramrod_data
{
__le32 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
__le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
@@ -685,14 +688,15 @@ struct rdma_register_tid_ramrod_data
u8 vf_id /* This field should be assigned to Virtual Function ID if vf_valid == 1. Otherwise its dont care */;
u8 vf_valid;
__le16 pd;
__le16 reserved2;
__le32 length_lo /* lower 32 bits of the registered MR length. */;
__le32 itid;
__le32 reserved2;
__le32 reserved3;
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr /* DIF TX IO writes error information to this location when memory region is invalidated. */;
struct regpair dif_runt_addr /* DIF RX IO writes runt value to this location when last RDMA Read of the IO has completed. */;
__le32 reserved3[2];
__le32 reserved4[2];
};
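Since the register-TID flags above shrank from 32 to 16 bits with renumbered shifts, here is a hedged sketch of building the new flags word (function name hypothetical; the real ramrod fill lives in the ecore RDMA code):

static inline uint16_t rdma_tid_flags_sketch(uint16_t page_size_log,
    int remote_read, int remote_write)
{
	uint16_t flags = 0;

	flags |= (page_size_log &
	    RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK) <<
	    RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT;
	flags |= ((remote_read ? 1 : 0) &
	    RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK) <<
	    RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT;
	flags |= ((remote_write ? 1 : 0) &
	    RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK) <<
	    RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT;
	return flags; /* stored little-endian into the ramrod's __le16 flags */
}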
@@ -727,7 +731,7 @@ struct rdma_resize_cq_ramrod_data
/*
* The rdma storm context of Mstorm
* The rdma SRQ context
*/
struct rdma_srq_context
{
@@ -740,13 +744,23 @@ struct rdma_srq_context
*/
struct rdma_srq_create_ramrod_data
{
u8 flags;
#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_SHIFT 0
#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 /* Only applicable when xrc_flag is set */
#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 1
#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_MASK 0x3F
#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_SHIFT 2
u8 reserved2;
__le16 xrc_domain /* Only applicable when xrc_flag is set */;
__le32 xrc_srq_cq_cid /* Only applicable when xrc_flag is set */;
struct regpair pbl_base_addr /* SRQ PBL base address */;
__le16 pages_in_srq_pbl /* Number of pages in PBL */;
__le16 pd_id;
struct rdma_srq_id srq_id /* SRQ Index */;
__le16 page_size /* Page size in SGE (16 bytes) elements. Supports up to 2M bytes page size */;
__le16 reserved1;
__le32 reserved2;
__le16 reserved3;
__le32 reserved4;
struct regpair producers_addr /* SRQ PBL base address */;
};
@@ -784,6 +798,15 @@ enum rdma_tid_type
};
/*
* The rdma XRC SRQ context
*/
struct rdma_xrc_srq_context
{
struct regpair temp[9];
};
struct E4XstormRoceConnAgCtxDqExtLdPart
@@ -1951,8 +1974,8 @@ struct e5_xstorm_rdma_conn_ag_ctx
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
#define E5_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit13 */
#define E5_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 /* bit14 */
#define E5_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
#define E5_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 /* bit15 */

View file

@@ -145,8 +145,8 @@ struct roce_create_qp_req_ramrod_data
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_SHIFT 7
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
@@ -172,11 +172,11 @@ struct roce_create_qp_req_ramrod_data
__le16 udp_src_port /* Only relevant in RRoCE */;
__le32 src_gid[4] /* BE order. In case of RRoCE on IPv4 the high register will hold the address. Low registers must be zero! */;
__le32 dst_gid[4] /* BE order. In case of RRoCE on IPv4 the high register will hold the address. Low registers must be zero! */;
__le32 cq_cid;
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id /* Statistics counter ID to use */;
u8 reserved3[7];
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -187,7 +187,7 @@ struct roce_create_qp_req_ramrod_data
*/
struct roce_create_qp_resp_ramrod_data
{
__le16 flags;
__le32 flags;
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 /* Use roce_flavor enum */
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
@@ -206,6 +206,11 @@ struct roce_create_qp_resp_ramrod_data
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17
__le16 xrc_domain /* SRC domain. Only applicable when xrc_flag is set */;
u8 max_ird;
u8 traffic_class /* In case of RRoCE on IPv4 will be used as TOS */;
u8 hop_limit /* In case of RRoCE on IPv4 will be used as TTL */;
@@ -231,13 +236,32 @@ struct roce_create_qp_resp_ramrod_data
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
__le16 low_latency_phy_queue;
u8 reserved2[6];
u8 reserved2[2];
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
};
/*
* roce DCQCN received statistics
*/
struct roce_dcqcn_received_stats
{
struct regpair ecn_pkt_rcv /* The number of total packets with ECN indication received */;
struct regpair cnp_pkt_rcv /* The number of total RoCE packets with CNP opcode received */;
};
/*
* roce DCQCN sent statistics
*/
struct roce_dcqcn_sent_stats
{
struct regpair cnp_pkt_sent /* The number of total RoCE packets with CNP opcode sent */;
};
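The DCQCN counters above are regpairs (two 32-bit halves); a minimal sketch of folding one into a host 64-bit value, assuming the low/high layout ecore uses elsewhere:

static inline uint64_t regpair_to_u64_sketch(uint32_t lo, uint32_t hi)
{
	/* lo/hi halves of a struct regpair, already in host order */
	return ((uint64_t)hi << 32) | lo;
}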
/*
* RoCE destroy qp requester output params
*/
@@ -277,7 +301,7 @@ struct roce_destroy_qp_resp_ramrod_data
/*
* roce func init ramrod data
* roce special events statistics
*/
struct roce_events_stats
{
@@ -355,8 +379,10 @@ struct roce_modify_qp_req_ramrod_data
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
@@ -370,7 +396,9 @@ struct roce_modify_qp_req_ramrod_data
__le32 ack_timeout_val;
__le16 mtu;
__le16 reserved2;
__le32 reserved3[3];
__le32 reserved3[2];
__le16 low_latency_phy_queue;
__le16 regular_latency_phy_queue;
__le32 src_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
__le32 dst_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
};
@@ -402,8 +430,10 @@ struct roce_modify_qp_resp_ramrod_data
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
@@ -415,7 +445,9 @@ struct roce_modify_qp_resp_ramrod_data
__le16 p_key;
__le32 flow_label;
__le16 mtu;
__le16 reserved2;
__le16 low_latency_phy_queue;
__le16 regular_latency_phy_queue;
u8 reserved2[6];
__le32 src_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
__le32 dst_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
};
@@ -665,7 +697,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
__le16 snd_sq_cons /* word1 */;
__le16 word2 /* conn_dpi */;
__le16 conn_dpi /* conn_dpi */;
__le16 word3 /* word3 */;
__le32 reg9 /* reg9 */;
__le32 reg10 /* reg10 */;
@@ -1314,10 +1346,10 @@ struct e4_xstorm_roce_resp_conn_ag_ctx
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 /* rule11en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 /* rule10en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
@@ -1364,12 +1396,12 @@ struct e4_xstorm_roce_resp_conn_ag_ctx
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 word1 /* physical_q1 */;
__le16 irq_prod /* physical_q2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le16 irq_prod_shadow /* physical_q1 */;
__le16 word2 /* physical_q2 */;
__le16 irq_cons /* word3 */;
__le16 irq_prod /* word4 */;
__le16 e5_reserved1 /* word5 */;
__le16 irq_cons /* conn_dpi */;
__le16 conn_dpi /* conn_dpi */;
u8 rxmit_opcode /* byte3 */;
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
@@ -1948,100 +1980,100 @@ struct e5_tstorm_roce_resp_conn_ag_ctx
u8 byte0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit4 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit4 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 /* timer2cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 /* cf4 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 /* timer2cf */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 /* cf4 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 /* cf5 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 /* cf5 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_ROCE_RESP_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 tx_async_error_type /* byte2 */;
__le16 rq_cons /* word0 */;
__le32 psn_and_rxmit_id_echo /* reg0 */;


@ -40,6 +40,13 @@ __FBSDID("$FreeBSD$");
#include "ecore_utils.h"
#include "ecore_iov_api.h"
#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif
#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
@ -63,6 +70,12 @@ struct ecore_ptt_pool {
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
static void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
}
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
@ -90,10 +103,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
__ecore_ptt_pool_free(p_hwfn);
return ECORE_NOMEM;
}
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
}
@ -114,11 +129,10 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
__ecore_ptt_pool_free(p_hwfn);
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt;
unsigned int i;
@ -159,7 +173,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
@ -226,8 +240,8 @@ static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
enum reserved_ptts ptt_idx)
{
if (ptt_idx >= RESERVED_PTT_MAX) {
DP_NOTICE(p_hwfn, true,
@ -538,11 +552,16 @@ enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
#endif /* HW locks logic */
/* DMAE */
#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \
((params) != OSAL_NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))
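The OSAL_NULL guard in this macro is what lets callers further down pass OSAL_NULL as "default parameters". A minimal standalone sketch (stand-in struct and flag values, not the real ecore definitions) shows the behavior:

#include <stdio.h>

/* Hypothetical stand-ins for the ecore names; values are illustrative only. */
#define ECORE_DMAE_FLAG_PF_SRC 0x1
#define ECORE_DMAE_FLAG_VF_SRC 0x2

struct demo_dmae_params {
	unsigned int flags;
};

/* Same shape as ECORE_DMAE_FLAGS_IS_SET: NULL params safely read as "flag clear". */
#define DEMO_DMAE_FLAGS_IS_SET(params, flag) \
	((params) != NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))

int main(void)
{
	struct demo_dmae_params p = { .flags = ECORE_DMAE_FLAG_PF_SRC };

	printf("%d\n", DEMO_DMAE_FLAGS_IS_SET((struct demo_dmae_params *)NULL, PF_SRC)); /* 0 */
	printf("%d\n", DEMO_DMAE_FLAGS_IS_SET(&p, PF_SRC)); /* 1 */
	printf("%d\n", DEMO_DMAE_FLAGS_IS_SET(&p, VF_SRC)); /* 0 */
	return 0;
}

Short-circuit evaluation guarantees the flags field is never dereferenced when the pointer is NULL, which is the design point of the macro.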
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct ecore_dmae_params *p_params)
{
u8 src_pfid, dst_pfid, port_id;
u16 opcode_b = 0;
u32 opcode = 0;
@ -553,17 +572,21 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
src_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
p_params->src_pfid : p_hwfn->rel_pf_id;
opcode |= (src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT;
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
dst_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
p_params->dst_pfid : p_hwfn->rel_pf_id;
opcode |= (dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT;
/* DMAE_E4_TODO need to check which value to specifiy here. */
/* DMAE_E4_TODO need to check which value to specify here. */
/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT;*/
/* Whether to write a completion word to the completion destination:
@ -574,7 +597,7 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT;
if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
/* swapping mode 3 - big endian there should be a define ifdefed in
@ -582,7 +605,9 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
*/
opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
p_params->port_id : p_hwfn->port_id;
opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;
/* reset source address in next go */
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
@ -593,14 +618,14 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
DMAE_CMD_DST_ADDR_RESET_SHIFT;
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
} else {
opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
}
if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
@ -690,7 +715,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
if (*p_comp == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `p_completion_word'\n");
goto err;
}
@ -699,7 +724,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct dmae_cmd'\n");
goto err;
}
@ -708,12 +733,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(u32) * DMAE_MAX_RW_SIZE);
if (*p_buff == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `intermediate_buffer'\n");
goto err;
}
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
p_hwfn->dmae_info.b_mem_ready = true;
return ECORE_SUCCESS;
err:
@ -725,8 +751,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
dma_addr_t p_phys;
/* Just make sure no one is in the middle */
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
p_hwfn->dmae_info.b_mem_ready = false;
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
@ -751,8 +778,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
}
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}
static enum _ecore_status_t
@ -844,17 +869,20 @@ static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *
}
cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
#ifndef __EXTRACT__LINUX__
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length_dw * sizeof(u32), false);
#endif
ecore_dmae_post_command(p_hwfn, p_ptt);
ecore_status = ecore_dmae_operation_wait(p_hwfn);
#ifndef __EXTRACT__LINUX__
/* TODO - is it true ? */
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
@ -862,12 +890,12 @@ static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length_dw * sizeof(u32), true);
#endif
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, ECORE_MSG_HW,
"Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
(unsigned long long)src_addr,
(unsigned long long)dst_addr, length_dw,
(unsigned long long)src_addr, (unsigned long long)dst_addr, length_dw,
(unsigned long long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
return ecore_status;
}
@ -895,6 +923,14 @@ static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
if (!p_hwfn->dmae_info.b_mem_ready) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"No buffers allocated. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
(unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
size_in_dwords);
return ECORE_NOMEM;
}
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
@ -906,6 +942,13 @@ static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn
return ECORE_SUCCESS;
}
if (!cmd) {
DP_NOTICE(p_hwfn, true,
"ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
(unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);
return ECORE_INVAL;
}
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC),
@ -925,7 +968,7 @@ static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
if (src_type == ECORE_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
@ -968,24 +1011,20 @@ enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
u32 flags)
struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct ecore_dmae_params params;
enum _ecore_status_t rc;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_GRC,
size_in_dwords, &params);
size_in_dwords, p_params);
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@ -995,23 +1034,19 @@ enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
u32 grc_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
u32 flags)
struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct ecore_dmae_params params;
enum _ecore_status_t rc;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
OSAL_MUTEX_ACQUIRE(&(p_hwfn->dmae_info.mutex));
OSAL_SPIN_LOCK(&(p_hwfn->dmae_info.lock));
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
size_in_dwords, &params);
size_in_dwords, p_params);
OSAL_MUTEX_RELEASE(&(p_hwfn->dmae_info.mutex));
OSAL_SPIN_UNLOCK(&(p_hwfn->dmae_info.lock));
return rc;
}
@ -1025,7 +1060,7 @@ enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
{
enum _ecore_status_t rc;
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
@ -1034,7 +1069,7 @@ enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
size_in_dwords,
p_params);
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@ -1052,3 +1087,105 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}
enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
const char *phase)
{
u32 size = OSAL_PAGE_SIZE / 2, val;
enum _ecore_status_t rc = ECORE_SUCCESS;
dma_addr_t p_phys;
void *p_virt;
u32 *p_tmp;
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
if (!p_virt) {
DP_NOTICE(p_hwfn, false,
"DMAE sanity [%s]: failed to allocate memory\n",
phase);
return ECORE_NOMEM;
}
/* Fill the bottom half of the allocated memory with a known pattern */
for (p_tmp = (u32 *)p_virt;
p_tmp < (u32 *)((u8 *)p_virt + size);
p_tmp++) {
/* Save the address itself as the value */
val = (u32)(osal_uintptr_t)p_tmp;
*p_tmp = val;
}
/* Zero the top half of the allocated memory */
OSAL_MEM_ZERO((u8 *)p_virt + size, size);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
phase, (unsigned long long)p_phys, p_virt,
(unsigned long long)(p_phys + size), (u8 *)p_virt + size,
size);
rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
size / 4 /* size_in_dwords */,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
phase, rc);
goto out;
}
/* Verify that the top half of the allocated memory has the pattern */
for (p_tmp = (u32 *)((u8 *)p_virt + size);
p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
p_tmp++) {
/* The corresponding address in the bottom half */
val = (u32)(osal_uintptr_t)p_tmp - size;
if (*p_tmp != val) {
DP_NOTICE(p_hwfn, false,
"DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
phase,
(unsigned long long)(p_phys + (u32)((u8 *)p_tmp - (u8 *)p_virt)),
p_tmp, *p_tmp, val);
rc = ECORE_UNKNOWN_ERROR;
goto out;
}
}
out:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
return rc;
}
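The fill-copy-verify pattern above does not depend on the DMAE engine itself; a host-only sketch (memcpy standing in for the ecore_dmae_host2host() transfer, buffer size assumed) reproduces the same check:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	uint32_t size = 4096 / 2, *p_tmp; /* assume a 4 KB page, split in half */
	uint8_t *p_virt = malloc(2 * size);

	if (p_virt == NULL)
		return 1;

	/* Bottom half: each dword holds its own (truncated) address as the pattern. */
	for (p_tmp = (uint32_t *)p_virt; p_tmp < (uint32_t *)(p_virt + size); p_tmp++)
		*p_tmp = (uint32_t)(uintptr_t)p_tmp;

	memset(p_virt + size, 0, size);

	/* memcpy plays the role of the bottom-half-to-top-half DMAE copy. */
	memcpy(p_virt + size, p_virt, size);

	/* Top half: every dword must match the corresponding bottom-half address. */
	for (p_tmp = (uint32_t *)(p_virt + size); p_tmp < (uint32_t *)(p_virt + 2 * size); p_tmp++)
		if (*p_tmp != (uint32_t)((uintptr_t)p_tmp - size)) {
			printf("mismatch at %p\n", (void *)p_tmp);
			free(p_virt);
			return 1;
		}

	printf("sanity pass\n");
	free(p_virt);
	return 0;
}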
void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u32 hw_addr, u32 val)
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
ecore_fid_pretend(p_hwfn, p_ptt,
pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
ecore_wr(p_hwfn, p_ptt, hw_addr, val);
ecore_fid_pretend(p_hwfn, p_ptt,
p_hwfn->rel_pf_id <<
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
}
u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u32 hw_addr)
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
u32 val;
ecore_fid_pretend(p_hwfn, p_ptt,
pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
val = ecore_rd(p_hwfn, p_ptt, hw_addr);
ecore_fid_pretend(p_hwfn, p_ptt,
p_hwfn->rel_pf_id <<
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
return val;
}
#ifdef _NTDDK_
#pragma warning(pop)
#endif


@ -112,15 +112,6 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
*
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get PTT's external BAR address
*
@ -158,8 +149,8 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
* @param val
* @param hw_addr
* @param val
*/
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -171,7 +162,6 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
* @param val
* @param hw_addr
*/
u32 ecore_rd(struct ecore_hwfn *p_hwfn,
@ -277,4 +267,33 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type);
enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
const char *phase);
/**
* @brief ecore_ppfid_wr - Write value to BAR using the given ptt while
* pretending to be the PF to which the given PPFID pertains.
*
* @param p_hwfn
* @param p_ptt
* @param abs_ppfid
* @param hw_addr
* @param val
*/
void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u32 hw_addr, u32 val);
/**
* @brief ecore_ppfid_rd - Read value from BAR using the given ptt while
* pretending to be the PF to which the given PPFID pertains.
*
* @param p_hwfn
* @param p_ptt
* @param abs_ppfid
* @param hw_addr
*/
u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u32 hw_addr);
#endif /* __ECORE_HW_H__ */


@ -84,16 +84,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
/* 0.7 * upper bound (62500000) */
#define QM_WFQ_MAX_INC_VAL 43750000
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16
/* RL constants: */
/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND 62500000
/* Period in us */
#define QM_RL_PERIOD 5
@ -101,17 +98,29 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* 0.7 * upper bound (62500000) */
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - rate is specified in mbps. the factor of 1.01 was
* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
* although the credit increment value was the correct one and FW calculated
* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
* this point.
*/
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
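As a quick recomputation of the new macro (host-side arithmetic only, not driver code; QM_RL_PERIOD is 5 us and rates are in Mbps), a 25 Gbps limit yields 25000 * 5 * 101 / 800 = 15781 credit units, and a rate of 0 now falls back to 100000 Mbps instead of the earlier 1000000:

#include <stdio.h>

#define QM_RL_PERIOD 5 /* Period in us */

/* Mirror of QM_RL_INC_VAL's arithmetic, with OSAL_MAX_T expanded. */
static unsigned int qm_rl_inc_val(unsigned int rate_mbps)
{
	unsigned int inc = (rate_mbps ? rate_mbps : 100000u) * QM_RL_PERIOD * 101u / (8u * 100u);

	return inc > 1u ? inc : 1u;
}

int main(void)
{
	printf("25 Gbps -> %u\n", qm_rl_inc_val(25000)); /* 15781 */
	printf("0 (default) -> %u\n", qm_rl_inc_val(0)); /* 63125 */
	return 0;
}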
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@ -163,11 +172,11 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
#define QM_INIT_TX_PQ_MAP(map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) OSAL_MEMSET(&map, 0, sizeof(map)); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))
#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) OSAL_MEMSET(&map, 0, sizeof(map)); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4
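The packed PQ info word can be checked host-side; this standalone sketch reproduces the PQ_INFO_ELEMENT bit layout with arbitrary field values (bit 23 is left unused by the packing):

#include <stdint.h>
#include <stdio.h>

/* Same packing as PQ_INFO_ELEMENT: vp[11:0], pf[15:12], tc[19:16], port[21:20], rl_valid[22], rl[31:24]. */
#define DEMO_PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))

int main(void)
{
	uint32_t pq_info = DEMO_PQ_INFO_ELEMENT(5u, 2u, 3u, 1u, 1u, 10u);

	printf("pq_info = 0x%08x\n", pq_info); /* 0x0a532005 */
	return 0;
}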
/******************** INTERNAL IMPLEMENTATION *********************/
@ -205,7 +214,7 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_RL_UPPER_BOUND);
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_PF_RL_UPPER_BOUND);
}
}
@ -233,7 +242,7 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_RL_UPPER_BOUND);
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@ -374,24 +383,25 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
num_pqs = num_pf_pqs + num_vf_pqs;
@ -443,17 +453,22 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare PQ map entry */
if (ECORE_IS_E5(p_hwfn->p_dev)) {
struct qm_rf_pq_map_e5 tx_pq_map;
QM_INIT_TX_PQ_MAP(tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
}
else {
struct qm_rf_pq_map_e4 tx_pq_map;
QM_INIT_TX_PQ_MAP(tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
}
/* Set base address */
/* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);
/* Clear PQ pointer table entry (64 bit) */
if (is_pf_loading)
for (j = 0; j < 2; j++)
STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + (pq_id * 2) + j, 0);
/* Write PQ info to RAM */
if (WRITE_PQ_INFO_TO_RAM != 0)
{
u32 pq_info = 0;
@ -479,13 +494,14 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 pf_id,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
u8 pf_id,
bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
u16 i, pq_id, pq_group;
u16 i, j, pq_id, pq_group;
/* A single other PQ group is used in each PF, where PQ group i is used
* in PF i.
@ -501,9 +517,15 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));
/* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
/* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);
/* Clear PQ pointer table entry */
if (is_pf_loading)
for (j = 0; j < 2; j++)
STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET + (pq_id * 2) + j, 0);
mem_addr_4kb += pq_mem_4kb;
}
}
@ -535,10 +557,11 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
(ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
(pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
@ -552,13 +575,13 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
@ -605,6 +628,7 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
@ -617,14 +641,14 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? vport_params[i].vport_rl : link_speed);
if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_VP_RL_UPPER_BOUND(link_speed) | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
}
@ -672,10 +696,10 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@ -726,22 +750,24 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
u32 other_mem_size_4kb;
u8 tc, i;
@ -755,12 +781,12 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, num_tids, 0);
#endif
/* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, is_pf_loading, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
@ -776,7 +802,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
return -1;
/* Set VPORT RL */
if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, vport_params))
if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, link_speed, vport_params))
return -1;
return 0;
@ -808,7 +834,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
return -1;
}
@ -847,7 +873,8 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl)
u32 vport_rl,
u32 link_speed)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
@ -856,8 +883,8 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
return -1;
}
inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
return -1;
}
@ -1239,19 +1266,6 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* Update NIG register */
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* Update PBF register */
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
@ -1263,7 +1277,8 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
#define SET_TUNNEL_TYPE_ENABLE_BIT(var,offset,enable) var = ((var) & ~(1 << (offset))) | ( (enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -1289,8 +1304,16 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) /* TODO: handle E5 init */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
if (reg_val) /* TODO: handle E5 init */
{
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
{
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
}
/* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@ -1313,8 +1336,16 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) /* TODO: handle E5 init */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
if (reg_val) /* TODO: handle E5 init */
{
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
{
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
}
/* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@ -1354,8 +1385,16 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) /* TODO: handle E5 init */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
if (reg_val) /* TODO: handle E5 init */
{
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
{
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
}
/* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
@ -1370,6 +1409,39 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool enable)
{
u32 reg_val, cfg_mask;
/* read PRS config register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
/* set VXLAN_NO_L2_ENABLE mask */
cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
if (enable)
{
/* set VXLAN_NO_L2_ENABLE flag */
reg_val |= cfg_mask;
/* update PRS FIC register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
}
else
{
/* clear VXLAN_NO_L2_ENABLE flag */
reg_val &= ~cfg_mask;
}
/* write PRS config register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#ifndef UNUSED_HSI_FUNC
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
@ -1381,32 +1453,21 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
#define REG_SIZE sizeof(u32)
void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id)
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id)
{
union gft_cam_line_union cam_line;
struct gft_ram_line ram_line;
u32 i, *ram_line_ptr;
/* disable gft search for PF */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
ram_line_ptr = (u32*)&ram_line;
/* Clean ram & cam for next gft session */
/* Stop using gft logic, disable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
/* Zero camline */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, 0);
/* Clean ram & cam for next rfs/gft session */
/* Zero camline */
OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
/* Zero ramline */
OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
/* Each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
/* Zero ramline */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id, 0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + REG_SIZE, 0);
}
@ -1422,89 +1483,110 @@ void ecore_set_gft_event_id_cm_hdr (struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
bool ipv6)
bool ipv6,
enum gft_profile_type profile_type)
{
u32 rfs_cm_hdr_event_id, *ram_line_ptr;
union gft_cam_line_union cam_line;
struct gft_ram_line ram_line;
int i;
u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
ram_line_ptr = (u32*)&ram_line;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
if (profile_type >= MAX_GFT_PROFILE_TYPE)
DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6\n");
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - udp or tcp\n");
/* Set RFS event ID to be awakened in Tstorm by PRS */
reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
/* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
/* Configure Registers for RFS mode */
/* Do not load context, only cid, in PRS on match. */
ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
/* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
/* Do not use tenant ID exist bit for gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
/* Do not load context, only cid, in PRS on match. */
ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
/* Cam line is now valid!! */
cam_line.cam_line_mapped.camline = 0;
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1);
/* Set Cam */
cam_line = 0;
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
/* Filters are per PF!! */
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
/* Filters are per PF!! */
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
else
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
}
if (!(tcp && udp)) {
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
else
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
else
SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
}
if (!(ipv4 && ipv6)) {
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
else
SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
cam_line.cam_line_mapped.camline = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id);
/* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line);
cam_line = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id);
/* Write line to RAM - compare to filter 4 tuple */
ram_line.lo = 0;
ram_line.hi= 0;
SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
/* Write line to RAM - compare to filter 4 tuple */
ram_line_lo = 0;
ram_line_hi = 0;
/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
/* Each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE)
{
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
}
else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT)
{
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
}
else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR)
{
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
}
else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR)
{
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
}
else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE)
{
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
}
/* Set default profile so that no filter match will happen */
ram_line.lo = 0xffffffff;
ram_line.hi = 0x3ff;
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id, ram_line_lo);
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + REG_SIZE, ram_line_hi);
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH + i*REG_SIZE, *(ram_line_ptr + i));
/* Set default profile so that no filter match will happen */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
/* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
@ -1563,7 +1645,9 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
return offset;
}
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type/region/cid */
@ -1621,8 +1705,7 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
}
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid)
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@ -1639,8 +1722,7 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
u32 tid)
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@ -1707,3 +1789,59 @@ void ecore_enable_context_validation(struct ecore_hwfn * p_hwfn, struct ecore_pt
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
#define RSS_IND_TABLE_BASE_ADDR 4112
#define RSS_IND_TABLE_VPORT_SIZE 16
#define RSS_IND_TABLE_ENTRY_PER_LINE 8
/* Update RSS indirection table entry. */
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn * p_hwfn,
struct ecore_ptt *p_ptt,
u8 rss_id,
u8 ind_table_index,
u16 ind_table_value)
{
u32 cnt, rss_addr;
u32 *reg_val;
u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
/* get entry address */
rss_addr = RSS_IND_TABLE_BASE_ADDR +
RSS_IND_TABLE_VPORT_SIZE * rss_id +
ind_table_index/RSS_IND_TABLE_ENTRY_PER_LINE;
/* prepare update command */
ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++)
{
if (cnt == ind_table_index)
{
rss_ind_entry[cnt] = ind_table_value;
rss_ind_mask[cnt] = 0xFFFF;
}
else
{
rss_ind_entry[cnt] = 0;
rss_ind_mask[cnt] = 0;
}
}
/* Update entry in HW */
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
reg_val = (u32 *)rss_ind_mask;
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
reg_val = (u32 *)rss_ind_entry;
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}
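The addressing and mask construction above reduce to plain arithmetic; a standalone sketch (no register writes, example indices assumed) computes the RAM line address and the entry/mask pairs for one update:

#include <stdint.h>
#include <stdio.h>

#define RSS_IND_TABLE_BASE_ADDR 4112
#define RSS_IND_TABLE_VPORT_SIZE 16
#define RSS_IND_TABLE_ENTRY_PER_LINE 8

int main(void)
{
	uint32_t rss_id = 3, ind_table_index = 21, ind_table_value = 0xab, cnt, rss_addr;
	uint16_t entry[RSS_IND_TABLE_ENTRY_PER_LINE], mask[RSS_IND_TABLE_ENTRY_PER_LINE];

	/* One RAM line holds 8 entries, so the line address advances every 8 indices. */
	rss_addr = RSS_IND_TABLE_BASE_ADDR + RSS_IND_TABLE_VPORT_SIZE * rss_id +
	    ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
	ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;

	/* Only the targeted slot gets a non-zero mask; the other seven stay untouched. */
	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
		entry[cnt] = (cnt == ind_table_index) ? (uint16_t)ind_table_value : 0;
		mask[cnt] = (cnt == ind_table_index) ? 0xFFFF : 0;
	}

	printf("rss_addr = %u (slot %u)\n", rss_addr, ind_table_index); /* 4162 (slot 5) */
	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++)
		printf("slot %u: entry 0x%04x, mask 0x%04x\n", cnt, entry[cnt], mask[cnt]);
	return 0;
}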


@ -49,10 +49,10 @@ struct init_qm_pq_params;
* @return The required host memory size in 4KB units.
*/
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for the
@ -87,6 +87,8 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param is_pf_loading - indicates if the PF is currently loading,
* i.e. it has no allocated QM resources.
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@ -102,6 +104,7 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @param pf_rl - rate limit in Mb/sec units. a value of 0
* means don't configure. ignored if PF RL is
* globally disabled.
* @param link_speed - link speed in Mbps.
* @param pq_params - array of size (num_pf_pqs + num_vf_pqs) with
* parameters for each Tx PQ associated with the
* specified PF.
@ -111,22 +114,24 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq - Initializes the WFQ weight of the specified PF
@ -179,17 +184,19 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
* VPORT.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
* @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl);
u32 vport_rl,
u32 link_speed);
/**
* @brief ecore_send_qm_stop_cmd - Sends a stop command to the QM
@ -292,16 +299,6 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - Initializes Nig,Prs,Pbf and llh
* ethType Regs to input ethType. Should Be called once per engine if engine
* is in BD mode.
*
* @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
* input ethType. should Be called once per port.
@ -309,7 +306,8 @@ void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
* @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
u32 ethType);
#endif /* UNUSED_HSI_FUNC */
@ -374,6 +372,16 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable,
bool ip_geneve_enable);
/**
* @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing
*
* @param p_ptt - ptt window used for writing the registers.
* @param enable - VXLAN no L2 enable flag.
*/
void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool enable);
#ifndef UNUSED_HSI_FUNC
/**
@ -386,34 +394,36 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
* @brief ecore_gft_disable - Disable GFT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to disable RFS.
* @param pf_id - pf on which to disable GFT.
*/
void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id);
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id);
/**
* @brief ecore_set_rfs_mode_enable - Enable and configure HW for RFS
* @brief ecore_gft_config - Enable and configure HW for GFT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable RFS.
* @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
* @param udp - set profile udp packet.
* @param ipv4 - set profile ipv4 packet.
* @param ipv6 - set profile ipv6 packet.
* @param profile_type - defines which packet fields must match; use enum gft_profile_type.
*/
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
bool ipv6);
bool ipv6,
enum gft_profile_type profile_type);
#endif /* UNUSED_HSI_FUNC */
@ -479,8 +489,10 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
* @param ctx_type - context type.
* @param cid - context cid.
*/
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid);
void ecore_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
/**
* @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
@ -491,7 +503,9 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
* @param ctx_type - context type.
* @param tid - context tid.
*/
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
void ecore_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 tid);
/**
@ -519,4 +533,20 @@ void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
/**
* @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table entry.
* The function must run in exclusive mode to prevent wrong RSS configuration.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param rss_id - RSS engine ID.
* @param ind_table_index - RSS indirect table index.
* @param ind_table_value - RSS indirect table new value.
*/
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn * p_hwfn,
struct ecore_ptt *p_ptt,
u8 rss_id,
u8 ind_table_index,
u16 ind_table_value);
#endif


@ -141,7 +141,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
addr + (i << 2), segment,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS)
return rc;
@ -204,10 +205,11 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
for (i = 0; i < size; i++)
ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
} else {
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
dmae_data_offset),
addr, size, 0);
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
dmae_data_offset),
addr, size,
OSAL_NULL /* default parameters */);
}
return rc;
@ -218,13 +220,15 @@ static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
struct ecore_dmae_params params;
OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
OSAL_MEMSET(&params, 0, sizeof(params));
params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
return ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(&(zero_buffer[0])),
addr, fill_count,
ECORE_DMAE_FLAG_RW_REPL_SRC);
addr, fill_count, &params);
}
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
@ -357,10 +361,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
ecore_init_rt(p_hwfn, p_ptt, addr,
OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
b_must_dmae);
rc = ecore_init_rt(p_hwfn, p_ptt, addr,
OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
b_must_dmae);
break;
}
@ -431,12 +435,32 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr,
OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_callback_op *p_cmd)
{
enum _ecore_status_t rc;
switch (p_cmd->callback_id) {
case DMAE_READY_CB:
rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
break;
default:
DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
p_cmd->callback_id);
return ECORE_INVAL;
}
return rc;
}
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
@ -479,12 +503,12 @@ static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
INIT_IF_PHASE_OP_CMD_OFFSET);
return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
@ -545,8 +569,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}

File diff suppressed because it is too large

View file

@ -310,98 +310,103 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 tmp;
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS2);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
DP_INFO(p_hwfn, "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
tmp,
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
DP_NOTICE(p_hwfn, false,
"Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
tmp,
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
}
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS2);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
DP_INFO(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
tmp,
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
DP_NOTICE(p_hwfn, false,
"Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
tmp,
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
}
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
DP_NOTICE(p_hwfn, false, "ICPL eror - %08x\n", tmp);
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
tmp, addr_hi, addr_lo);
DP_NOTICE(p_hwfn, false,
"ICPL error - %08x [Address %08x:%08x]\n",
tmp, addr_hi, addr_lo);
}
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
DP_INFO(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
details, tmp, addr_hi, addr_lo);
DP_NOTICE(p_hwfn, false,
"ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@ -481,6 +486,14 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
int_sts);
/* int_sts may be zero since all PFs were interrupted for doorbell
* overflow but another one already handled it. Can abort here. If
* this PF also requires overflow recovery we will be interrupted again.
* The masked almost full indication may also be set. Ignoring.
*/
if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return ECORE_SUCCESS;
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@ -598,7 +611,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
{ /* After Invert 2 */
{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
{"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@ -962,18 +975,16 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
"%s parity attention is set [address 0x%08x, bit %d]\n",
p_aeu->bit_name, aeu_en_reg, bit_index);
if (block_id == MAX_BLOCK_ID)
return;
if (block_id != MAX_BLOCK_ID) {
ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
ecore_int_attn_print(p_hwfn, block_id,
ATTN_TYPE_PARITY, false);
/* In A0, there's a single parity bit for several blocks */
if (block_id == BLOCK_BTB) {
ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
ATTN_TYPE_PARITY, false);
ecore_int_attn_print(p_hwfn, BLOCK_MCP,
ATTN_TYPE_PARITY, false);
/* In A0, there's a single parity bit for several blocks */
if (block_id == BLOCK_BTB) {
ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
ATTN_TYPE_PARITY, false);
ecore_int_attn_print(p_hwfn, BLOCK_MCP,
ATTN_TYPE_PARITY, false);
}
}
/* Prevent this parity error from being re-asserted */
@ -1404,7 +1415,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true, "Failed to allocate `struct ecore_sb_attn_info'\n");
DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@ -1412,7 +1423,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_dev, true, "Failed to allocate status block (attentions)\n");
DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@ -1534,10 +1545,12 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
igu_sb_id * sizeof(u64), 2, 0);
igu_sb_id * sizeof(u64), 2,
OSAL_NULL /* default parameters */);
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
igu_sb_id * sizeof(u64), 2, 0);
igu_sb_id * sizeof(u64), 2,
OSAL_NULL /* default parameters */);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@ -1792,7 +1805,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sb_info'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
@ -1801,7 +1814,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
&p_phys,
SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
return ECORE_NOMEM;
}
@ -1925,15 +1938,6 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 tmp;
/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
* attentions. Since we're waiting for BRCM answer regarding this
* attention, in the meanwhile we simply mask it.
*/
tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
tmp &= ~0x800;
ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
ecore_int_igu_enable_attn(p_hwfn, p_ptt);
@ -2157,14 +2161,13 @@ int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
p_info->usage.cnt, vfs);
return ECORE_INVAL;
}
/* Currently cap the number of VFs SBs by the
* number of VFs.
*/
p_info->usage.iov_cnt = vfs;
}
}
/* Cap the number of VFs SBs by the number of VFs */
if (IS_PF_SRIOV(p_hwfn))
p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
/* Mark all SBs as free, now in the right PF/VFs division */
p_info->usage.free_cnt = p_info->usage.cnt;
p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
@ -2441,6 +2444,12 @@ ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_INVAL;
}
if (p_block == OSAL_NULL) {
DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
"SB address (p_block) is NULL\n");
return ECORE_INVAL;
}
/* At this point, p_block points to the SB we want to relocate */
if (b_to_vf) {
p_block->status &= ~ECORE_IGU_STATUS_PF;
@ -2637,7 +2646,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64),
(u64)(osal_uintptr_t)&sb_entry, 2, 0);
(u64)(osal_uintptr_t)&sb_entry, 2,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@ -2650,8 +2660,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64), 2, 0);
CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
return rc;

View file

@ -286,5 +286,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#endif /* __ECORE_INT_H__ */

View file

@ -119,18 +119,28 @@ static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
{
struct igu_prod_cons_update igu_ack = { 0 };
#ifndef ECORE_CONFIG_DIRECT_HWFN
u32 val;
#endif
#ifndef LINUX_REMOVE
if (sb_info->p_dev->int_mode == ECORE_INT_MODE_POLL)
return;
#endif
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
OSAL_CPU_TO_LE32((sb_info->sb_ack <<
IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
(int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
(IGU_SEG_ACCESS_REG <<
IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
#ifdef ECORE_CONFIG_DIRECT_HWFN
DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
igu_ack.sb_id_and_flags);
#else
DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
val = OSAL_LE32_TO_CPU(igu_ack.sb_id_and_flags);
DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, val);
#endif
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.

View file

@ -86,6 +86,7 @@ struct ecore_mcp_link_capabilities;
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
#define VFPF_ACQUIRE_OS_FREEBSD (5)
struct ecore_vf_acquire_sw_info {
u32 driver_version;
@ -325,6 +326,7 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
*/
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
#endif
/**
* @brief Check if given VF ID @vfid is valid
@ -343,6 +345,7 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
#ifndef LINUX_REMOVE
/**
* @brief Get VF's public info structure
*
@ -742,6 +745,20 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
#ifdef CONFIG_ECORE_SW_CHANNEL
/**
* @brief Set whether PF should communicate with VF using SW/HW channel
* Needs to be called for an enabled VF before acquire is over
* [latest good point for doing that is OSAL_IOV_VF_ACQUIRE()]
*
* @param p_hwfn
* @param vfid - relative vf index
* @param b_is_hw - true iff PF is to use HW channel for communication
*/
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
bool b_is_hw);
#endif
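
A minimal sketch of where the channel selector above would be invoked — per the comment, from the OSAL_IOV_VF_ACQUIRE() hook before the VF's acquire completes. The hook body and the policy flag are illustrative assumptions, not part of this commit:

	/* Hypothetical OSAL acquire hook: pin this VF to the HW channel. */
	static void qlnx_osal_iov_vf_acquire(struct ecore_hwfn *p_hwfn, int vfid)
	{
		bool b_use_hw = true;	/* assumed policy */

		ecore_iov_set_vf_hw_channel(p_hwfn, vfid, b_use_hw);
	}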
#else
#ifndef LINUX_REMOVE
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev OSAL_UNUSED *p_dev, u8 OSAL_UNUSED to_disable) {}
@ -755,7 +772,15 @@ static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct e
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED rel_vf_id, bool OSAL_UNUSED b_enabled_only) {return false;}
#endif
static OSAL_INLINE bool
ecore_iov_is_valid_vfid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED rel_vf_id,
bool OSAL_UNUSED b_enabled_only,
bool OSAL_UNUSED b_non_malicious)
{
return false;
}
#ifndef LINUX_REMOVE
static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vfid) {}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u64 OSAL_UNUSED *events) {}
@ -794,6 +819,12 @@ static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct e
#endif
static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED vxlan_port, u16 OSAL_UNUSED geneve_port) { return; }
static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) { return MAX_NUM_VFS_E4; }
#ifdef CONFIG_ECORE_SW_CHANNEL
static OSAL_INLINE void
ecore_iov_set_vf_hw_channel(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
int OSAL_UNUSED vfid, bool OSAL_UNUSED b_is_hw) {}
#endif
#endif
#define ecore_for_each_vf(_p_hwfn, _i) \

View file

@ -178,5 +178,11 @@
/* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
/* DCQCN Received Statistics */
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
/* DCQCN Sent Statistics */
#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
#endif /* __IRO_H__ */
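
As a worked example of how these IRO macros expand — using the new iro_arr entry { 0xa3a0, 0x10, ... } that this commit adds for IRO[49] later in the diff:

	/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)
	 *   = IRO[49].base + roce_pf_id * IRO[49].m1
	 *   = 0xa3a0 + roce_pf_id * 0x10,
	 * i.e. 0xa3c0 for roce_pf_id == 2.
	 */
	u32 off = YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(2);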

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,56 +31,58 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
ARRAY_DECL struct iro iro_arr[49] = {
ARRAY_DECL struct iro iro_arr[51] = {
{ 0x0, 0x0, 0x0, 0x0, 0x8}, /* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x4cb0, 0x80, 0x0, 0x0, 0x80}, /* TSTORM_PORT_STAT_OFFSET(port_id) */
{ 0x6518, 0x20, 0x0, 0x0, 0x20}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
{ 0x4cb8, 0x88, 0x0, 0x0, 0x88}, /* TSTORM_PORT_STAT_OFFSET(port_id) */
{ 0x6530, 0x20, 0x0, 0x0, 0x20}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4}, /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xa80, 0x8, 0x0, 0x0, 0x4}, /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x2}, /* USTORM_EQE_CONS_OFFSET(pf_id) */
{ 0x80, 0x8, 0x0, 0x0, 0x4}, /* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2}, /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* XSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3df0, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x29b0, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c38, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4990, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x7e48, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c48, 0x0, 0x0, 0x0, 0x78}, /* XSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3e38, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x2b78, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4998, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x7f50, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */
{ 0xa28, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0x61f8, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x6210, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x95b8, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x4b60, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x96c0, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x4b68, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
{ 0x53a0, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0xc7c8, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x8050, 0x40, 0x0, 0x0, 0x30}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x53a8, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0xc7d0, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */
{ 0x4ba8, 0x80, 0x0, 0x0, 0x20}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x8158, 0x40, 0x0, 0x0, 0x30}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60}, /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0xf1b0, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x2d10, 0x80, 0x0, 0x0, 0x38}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0xf2b8, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0xaef8, 0x0, 0x0, 0x0, 0xf0}, /* TSTORM_ETH_PRS_INPUT_OFFSET */
{ 0xafe8, 0x8, 0x0, 0x0, 0x8}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
{ 0xaf20, 0x0, 0x0, 0x0, 0xf0}, /* TSTORM_ETH_PRS_INPUT_OFFSET */
{ 0xb010, 0x8, 0x0, 0x0, 0x8}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8}, /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0xac0, 0x8, 0x0, 0x0, 0x8}, /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
{ 0x2578, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
{ 0x24f8, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x200, 0x18, 0x8, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0x400, 0x18, 0x8, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x18, 0x8, 0x0, 0x2}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xd9a8, 0x38, 0x0, 0x0, 0x24}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0xa580, 0x38, 0x0, 0x0, 0x10}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x86f8, 0x30, 0x0, 0x0, 0x18}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x101f8, 0x10, 0x0, 0x0, 0x10}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xde28, 0x48, 0x0, 0x0, 0x38}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0x10660, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x2b80, 0x80, 0x0, 0x0, 0x10}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5020, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0xc9b0, 0x30, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xeec0, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
{ 0xd898, 0x50, 0x0, 0x0, 0x3c}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12908, 0x18, 0x0, 0x0, 0x10}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa8, 0x40, 0x0, 0x0, 0x18}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0xa588, 0x50, 0x0, 0x0, 0x20}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x8700, 0x40, 0x0, 0x0, 0x28}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x10300, 0x18, 0x0, 0x0, 0x10}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xde48, 0x48, 0x0, 0x0, 0x38}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0x10768, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x2d48, 0x80, 0x0, 0x0, 0x10}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5048, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0xc9b8, 0x30, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xed90, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
{ 0xa3a0, 0x10, 0x0, 0x0, 0x10}, /* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
{ 0x13108, 0x8, 0x0, 0x0, 0x8}, /* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
};
#endif /* __IRO_VALUES_H__ */

View file

@ -39,6 +39,7 @@
#include "ecore_sp_commands.h"
#include "ecore_iscsi_api.h"
#ifndef __EXTRACT__LINUX__H__
struct ecore_iscsi_info {
osal_spinlock_t lock;
osal_list_t free_list;
@ -47,11 +48,26 @@ struct ecore_iscsi_info {
iscsi_event_cb_t event_cb;
};
#ifdef CONFIG_ECORE_ISCSI
enum _ecore_status_t ecore_iscsi_alloc(struct ecore_hwfn *p_hwfn);
void ecore_iscsi_setup(struct ecore_hwfn *p_hwfn);
void ecore_iscsi_free(struct ecore_hwfn *p_hwfn);
#else
static inline enum _ecore_status_t
ecore_iscsi_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
return ECORE_INVAL;
}
static inline void
ecore_iscsi_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static inline void
ecore_iscsi_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
#endif
#endif
void ecore_iscsi_free_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
@ -110,6 +126,26 @@ ecore_sp_iscsi_mac_update(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_stats_tcp_update - iSCSI connection's TCP stats update
*
* This ramrod retrieves (and optionally resets) the TCP statistics of an
* iSCSI connection offloaded in FW
*
* @param p_hwfn
* @param p_conn
* @param reset
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_stats_tcp_update(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
bool reset,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_conn_terminate - iSCSI connection
* terminate

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,10 +31,62 @@
#ifndef __ECORE_ISCSI_API_H__
#define __ECORE_ISCSI_API_H__
#ifndef __EXTRACT__LINUX__IF__
typedef enum _ecore_status_t (*iscsi_event_cb_t)(void *context,
u8 fw_event_code,
void *fw_handle);
struct ecore_iscsi_stats
{
u64 iscsi_rx_bytes_cnt;
u64 iscsi_rx_packet_cnt;
u64 iscsi_rx_new_ooo_isle_events_cnt;
u32 iscsi_cmdq_threshold_cnt;
u32 iscsi_rq_threshold_cnt;
u32 iscsi_immq_threshold_cnt;
u64 iscsi_rx_dropped_pdus_task_not_valid;
u64 iscsi_rx_data_pdu_cnt;
u64 iscsi_rx_r2t_pdu_cnt;
u64 iscsi_rx_total_pdu_cnt;
u64 iscsi_tx_go_to_slow_start_event_cnt;
u64 iscsi_tx_fast_retransmit_event_cnt;
u64 iscsi_tx_data_pdu_cnt;
u64 iscsi_tx_r2t_pdu_cnt;
u64 iscsi_tx_total_pdu_cnt;
u64 iscsi_tx_bytes_cnt;
u64 iscsi_tx_packet_cnt;
u64 iscsi_rx_tcp_payload_bytes_cnt;
u64 iscsi_rx_tcp_pkt_cnt;
u64 iscsi_rx_pure_ack_cnt;
u64 iscsi_rx_dup_ack_cnt;
u64 iscsi_tx_pure_ack_cnt;
u64 iscsi_tx_delayed_ack_cnt;
u64 iscsi_tx_tcp_payload_bytes_cnt;
u64 iscsi_tx_tcp_pkt_cnt;
};
struct ecore_iscsi_tcp_stats
{
u64 iscsi_tcp_tx_packets_cnt;
u64 iscsi_tcp_tx_bytes_cnt;
u64 iscsi_tcp_tx_rxmit_cnt;
u64 iscsi_tcp_rx_packets_cnt;
u64 iscsi_tcp_rx_bytes_cnt;
u64 iscsi_tcp_rx_dup_ack_cnt;
u32 iscsi_tcp_rx_chksum_err_cnt;
};
#endif
#ifndef __EXTRACT__LINUX__C__
struct ecore_iscsi_conn {
osal_list_entry_t list_entry;
bool free_on_delete;
@ -54,6 +106,8 @@ struct ecore_iscsi_conn {
struct tcp_upload_params *tcp_upload_params_virt_addr;
dma_addr_t tcp_upload_params_phys_addr;
struct iscsi_conn_stats_params *conn_stats_params_virt_addr;
dma_addr_t conn_stats_params_phys_addr;
struct scsi_terminate_extra_params *queue_cnts_virt_addr;
dma_addr_t queue_cnts_phys_addr;
dma_addr_t syn_phy_addr;
@ -62,7 +116,7 @@ struct ecore_iscsi_conn {
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
u8 tcp_flags;
u16 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
@ -148,32 +202,7 @@ struct ecore_iscsi_conn {
u8 crc_seed; /* 0=0x0000, 1=0xffff */
u8 keep_ref_tag_const;
};
struct ecore_iscsi_stats
{
u64 iscsi_rx_bytes_cnt;
u64 iscsi_rx_packet_cnt;
u64 iscsi_rx_new_ooo_isle_events_cnt;
u32 iscsi_cmdq_threshold_cnt;
u32 iscsi_rq_threshold_cnt;
u32 iscsi_immq_threshold_cnt;
u64 iscsi_rx_dropped_pdus_task_not_valid;
u64 iscsi_rx_data_pdu_cnt;
u64 iscsi_rx_r2t_pdu_cnt;
u64 iscsi_rx_total_pdu_cnt;
u64 iscsi_tx_go_to_slow_start_event_cnt;
u64 iscsi_tx_fast_retransmit_event_cnt;
u64 iscsi_tx_data_pdu_cnt;
u64 iscsi_tx_r2t_pdu_cnt;
u64 iscsi_tx_total_pdu_cnt;
u64 iscsi_tx_bytes_cnt;
u64 iscsi_tx_packet_cnt;
};
#endif
/**
* @brief ecore_iscsi_acquire_connection - allocate resources,
@ -189,6 +218,15 @@ ecore_iscsi_acquire_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_in_conn,
struct ecore_iscsi_conn **p_out_conn);
/**
* @brief ecore_iscsi_setup_connection- initialize connection data.
*
* @param p_conn container of iSCSI connection data
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_setup_connection(struct ecore_iscsi_conn *p_conn);
void OSAL_IOMEM *ecore_iscsi_get_db_addr(struct ecore_hwfn *p_hwfn,
u32 cid);
@ -267,6 +305,24 @@ enum _ecore_status_t
ecore_iscsi_update_remote_mac(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_get_tcp_stats - get and optionally reset TCP statistics
* of offloaded iSCSI connection
*
* @param p_hwfn
* @param p_conn container of iSCSI connection data
* @param p_stats - buffer to place extracted stats
* @param reset - 1 - for reset stats (after extraction of accumulated
* statistics in optionally provided buffer)
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_get_tcp_stats(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
struct ecore_iscsi_tcp_stats *p_stats,
u8 reset);
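
A hedged usage sketch for the getter above; the connection pointer, logging, and reset policy are placeholders rather than driver code:

	/* Hypothetical: read the connection's TCP counters, then clear the
	 * accumulators (reset != 0). */
	struct ecore_iscsi_tcp_stats tcp_stats;
	enum _ecore_status_t rc;

	rc = ecore_iscsi_get_tcp_stats(p_hwfn, p_conn, &tcp_stats,
				       1 /* reset */);
	if (rc == ECORE_SUCCESS)
		DP_INFO(p_hwfn, "tcp tx rxmits: %llu\n",
			(unsigned long long)tcp_stats.iscsi_tcp_tx_rxmit_cnt);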
/**
* @brief ecore_iscsi_clear_connection_sq - clear SQ
* offloaded iSCSI connection

View file

@ -0,0 +1,341 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __ECORE_IWARP_H__
#define __ECORE_IWARP_H__
enum ecore_iwarp_qp_state {
ECORE_IWARP_QP_STATE_IDLE,
ECORE_IWARP_QP_STATE_RTS,
ECORE_IWARP_QP_STATE_TERMINATE,
ECORE_IWARP_QP_STATE_CLOSING,
ECORE_IWARP_QP_STATE_ERROR,
};
enum ecore_iwarp_listener_state {
ECORE_IWARP_LISTENER_STATE_ACTIVE,
ECORE_IWARP_LISTENER_STATE_UNPAUSE,
ECORE_IWARP_LISTENER_STATE_PAUSE,
ECORE_IWARP_LISTENER_STATE_DESTROYING,
};
enum ecore_iwarp_qp_state
ecore_roce2iwarp_state(enum ecore_roce_qp_state state);
#ifdef CONFIG_ECORE_IWARP
#define ECORE_IWARP_PREALLOC_CNT ECORE_IWARP_MAX_LIS_BACKLOG
#define ECORE_IWARP_LL2_SYN_TX_SIZE (128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE (256)
#define ECORE_IWARP_MAX_SYN_PKT_SIZE (128)
#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define ECORE_MAX_OOO (16)
#define ECORE_IWARP_LL2_OOO_MAX_RX_SIZE (16384)
#define ECORE_IWARP_HANDLE_INVAL (0xff)
struct ecore_iwarp_ll2_buff {
struct ecore_iwarp_ll2_buff *piggy_buf;
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
struct ecore_iwarp_ll2_mpa_buf {
osal_list_entry_t list_entry;
struct ecore_iwarp_ll2_buff *ll2_buf;
struct unaligned_opaque_data data;
u16 tcp_payload_len;
u8 placement_offset;
};
/* In some cases an fpdu will arrive with only one byte of the header; in this
* case the fpdu_length will be partial (contain only the higher byte) and
* incomplete_bytes will contain the invalid value
*/
#define ECORE_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
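
A small sketch of the check this marker enables against the structure below; the helper name is hypothetical:

	/* Hypothetical helper: true while only part of the MPA header has
	 * arrived, i.e. incomplete_bytes still holds the invalid marker. */
	static bool ecore_iwarp_fpdu_hdr_partial(struct ecore_iwarp_fpdu *fpdu)
	{
		return fpdu->incomplete_bytes ==
		    ECORE_IWARP_INVALID_INCOMPLETE_BYTES;
	}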
struct ecore_iwarp_fpdu {
struct ecore_iwarp_ll2_buff *mpa_buf;
dma_addr_t pkt_hdr;
u8 pkt_hdr_size;
dma_addr_t mpa_frag;
void *mpa_frag_virt;
u16 mpa_frag_len;
u16 fpdu_length;
u16 incomplete_bytes;
};
struct ecore_iwarp_info {
osal_list_t listen_list; /* ecore_iwarp_listener */
osal_list_t ep_list; /* ecore_iwarp_ep */
osal_list_t ep_free_list;/* pre-allocated ep's */
osal_list_t mpa_buf_list;/* list of mpa_bufs */
osal_list_t mpa_buf_pending_list;
osal_spinlock_t iw_lock;
osal_spinlock_t qp_lock; /* for teardown races */
struct iwarp_rxmit_stats_drv stats;
u32 rcv_wnd_scale;
u16 rcv_wnd_size;
u16 max_mtu;
u16 num_ooo_rx_bufs;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
u8 tcp_flags;
u8 ll2_syn_handle;
u8 ll2_ooo_handle;
u8 ll2_mpa_handle;
u8 peer2peer;
u8 _pad;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
struct ecore_iwarp_fpdu *partial_fpdus;
struct ecore_iwarp_ll2_mpa_buf *mpa_bufs;
u8 *mpa_intermediate_buf;
u16 max_num_partial_fpdus;
/* MPA statistics */
u64 unalign_rx_comp;
};
enum ecore_iwarp_ep_state {
ECORE_IWARP_EP_INIT,
ECORE_IWARP_EP_MPA_REQ_RCVD,
ECORE_IWARP_EP_MPA_OFFLOADED,
ECORE_IWARP_EP_ESTABLISHED,
ECORE_IWARP_EP_CLOSED,
ECORE_IWARP_EP_ABORTING
};
union async_output {
struct iwarp_eqe_data_mpa_async_completion mpa_response;
struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
#define ECORE_MAX_PRIV_DATA_LEN (512)
struct ecore_iwarp_ep_memory {
u8 in_pdata[ECORE_MAX_PRIV_DATA_LEN];
u8 out_pdata[ECORE_MAX_PRIV_DATA_LEN];
union async_output async_output;
};
/* Endpoint structure represents a TCP connection. This connection can be
* associated with a QP or not (in which case QP==NULL)
*/
struct ecore_iwarp_ep {
osal_list_entry_t list_entry;
int sig;
struct ecore_rdma_qp *qp;
enum ecore_iwarp_ep_state state;
/* This contains entire buffer required for ep memories. This is the
* only one actually allocated and freed. The rest are pointers into
* this buffer
*/
struct ecore_iwarp_ep_memory *ep_buffer_virt;
dma_addr_t ep_buffer_phys;
struct ecore_iwarp_cm_info cm_info;
struct ecore_iwarp_listener *listener;
enum tcp_connect_mode connect_mode;
enum mpa_rtr_type rtr_type;
enum mpa_negotiation_mode mpa_rev;
u32 tcp_cid;
u32 cid;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
u16 mss;
bool mpa_reply_processed;
/* The event_cb function is called for asynchronous events associated
* with the ep. It is initialized at different entry points depending
* on whether the ep is the tcp connection active side or passive side
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
/* For Passive side - syn packet related data */
struct ecore_iwarp_ll2_buff *syn;
u16 syn_ip_payload_length;
dma_addr_t syn_phy_addr;
};
struct ecore_iwarp_listener {
osal_list_entry_t list_entry;
/* The event_cb function is called for connection requests.
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
osal_list_t ep_list;
osal_spinlock_t lock;
u32 max_backlog;
u8 ip_version;
u32 ip_addr[4];
u16 port;
u16 vlan;
bool drop;
bool done;
enum ecore_iwarp_listener_state state;
};
enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_start_in_params *params);
void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod);
enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn *p_hwfn);
void
ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn);
void
ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
struct ecore_rdma_create_qp_out_params *out_params);
enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
enum ecore_iwarp_qp_state new_state,
bool internal);
enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp);
enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp);
enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp *qp,
struct ecore_rdma_query_qp_out_params *out_params);
#else
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_rdma_start_in_params OSAL_UNUSED *params)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct iwarp_init_func_ramrod_data OSAL_UNUSED *p_ramrod)
{
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE void
ecore_iwarp_resc_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}
static OSAL_INLINE void
ecore_iwarp_init_devinfo(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_ptt OSAL_UNUSED *p_ptt)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_rdma_qp OSAL_UNUSED *qp,
struct ecore_rdma_create_qp_out_params OSAL_UNUSED *out_params)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_rdma_qp OSAL_UNUSED *qp,
enum ecore_iwarp_qp_state OSAL_UNUSED new_state,
bool OSAL_UNUSED internal)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_rdma_qp OSAL_UNUSED *qp)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
struct ecore_rdma_qp OSAL_UNUSED *qp)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE enum _ecore_status_t
ecore_iwarp_query_qp(struct ecore_rdma_qp OSAL_UNUSED *qp,
struct ecore_rdma_query_qp_out_params OSAL_UNUSED *out_params)
{
return ECORE_SUCCESS;
}
#endif
#endif

View file

@ -54,6 +54,13 @@ __FBSDID("$FreeBSD$");
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif
struct ecore_l2_info {
u32 queues;
unsigned long **pp_qid_usage;
@ -102,7 +109,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
return ECORE_NOMEM;
#endif
return ECORE_SUCCESS;
@ -135,6 +143,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
break;
OSAL_VFREE(p_hwfn->p_dev,
p_hwfn->p_l2_info->pp_qid_usage[i]);
p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
@ -144,6 +153,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
#endif
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
out_l2_info:
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
@ -221,6 +231,7 @@ static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
u16 opaque_fid, u32 cid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@ -239,6 +250,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->rel.queue_id = p_params->queue_id;
p_cid->rel.stats_id = p_params->stats_id;
p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
p_cid->b_is_rx = b_is_rx;
p_cid->sb_idx = p_params->sb_idx;
/* Fill-in bits related to VFs' queues if information was provided */
@ -258,7 +270,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
}
/* Calculate the engine-absolute indices of the resources.
* The would guarantee they're valid later on.
* This would guarantee they're valid later on.
* In some cases [SBs] we already have the right values.
*/
rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
@ -312,6 +324,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@ -346,7 +359,7 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
}
p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
p_params, p_vf_params);
p_params, b_is_rx, p_vf_params);
if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, cid, vfid);
@ -355,9 +368,11 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
bool b_is_rx,
struct ecore_queue_start_common_params *p_params)
{
return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
OSAL_NULL);
}
enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
@ -366,6 +381,7 @@ enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
struct eth_vport_tpa_param *p_tpa;
u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
@ -390,8 +406,8 @@ enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->vport_id = abs_vport_id;
p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
p_ramrod->untagged = p_params->only_untagged;
p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
@ -426,22 +442,22 @@ enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
/* TPA related fields */
OSAL_MEMSET(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
p_tpa = &p_ramrod->tpa_param;
OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
switch (p_params->tpa_mode) {
case ECORE_TPA_MODE_GRO:
p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
p_ramrod->tpa_param.tpa_max_size = (u16)-1;
p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu/2;
p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu/2;
p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
p_tpa->tpa_max_size = (u16)-1;
p_tpa->tpa_min_size_to_cont = p_params->mtu/2;
p_tpa->tpa_min_size_to_start = p_params->mtu/2;
p_tpa->tpa_ipv4_en_flg = 1;
p_tpa->tpa_ipv6_en_flg = 1;
p_tpa->tpa_ipv4_tunn_en_flg = 1;
p_tpa->tpa_ipv6_tunn_en_flg = 1;
p_tpa->tpa_pkt_split_flg = 1;
p_tpa->tpa_gro_consistent_flg = 1;
break;
default:
break;
@ -471,7 +487,8 @@ enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
p_params->remove_inner_vlan,
p_params->tpa_mode,
p_params->max_buffers_per_cqe,
p_params->only_untagged);
p_params->only_untagged,
p_params->zero_placement_offset);
return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
@ -482,6 +499,7 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct ecore_rss_params *p_rss)
{
struct eth_vport_rss_config *p_config;
u16 capabilities = 0;
int i, table_size;
enum _ecore_status_t rc = ECORE_SUCCESS;
@ -510,27 +528,26 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities = 0;
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4));
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6));
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
SET_FIELD(p_config->capabilities,
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
p_config->tbl_size = p_rss->rss_table_size_log;
p_config->capabilities =
OSAL_CPU_TO_LE16(p_config->capabilities);
p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
"update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
@ -663,6 +680,7 @@ ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
u16 val;
if (!p_params) {
p_ramrod->common.update_tpa_param_flg = 0;
@ -684,9 +702,12 @@ ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
p_tpa->tpa_max_size = p_params->tpa_max_size;
p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
val = p_params->tpa_max_size;
p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
val = p_params->tpa_min_size_to_start;
p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
val = p_params->tpa_min_size_to_cont;
p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}
static void
@ -703,7 +724,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
u32 *p_bins = (u32 *)p_params->bins;
u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
}
@ -1001,7 +1022,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
/* Allocate a CID for the queue */
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (p_cid == OSAL_NULL)
return ECORE_NOMEM;
@ -1043,6 +1064,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
#ifndef LINUX_REMOVE
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
(struct ecore_queue_cid **)
@ -1050,6 +1072,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
#endif
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.comp_mode = comp_mode;
@ -1083,6 +1106,50 @@ enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
return rc;
}
enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
void *p_rxq_handler,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (IS_VF(p_hwfn->p_dev))
return ECORE_NOTIMPL;
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
p_cid = (struct ecore_queue_cid *)p_rxq_handler;
/* Get SPQ entry */
init_data.cid = p_cid->cid;
init_data.opaque_fid = p_cid->opaque_fid;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
PROTOCOLID_ETH, &init_data);
if (rc != ECORE_SUCCESS)
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
p_ramrod->set_default_rss_queue = 1;
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
return rc;
}
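
A hedged caller sketch for the new default-queue helper above; the handle is assumed to be the p_cid returned when the Rx queue was started, and the blocking completion mode is an illustrative choice:

	/* Hypothetical: make a previously started rxq the default RSS queue,
	 * blocking until the ramrod completes. */
	rc = ecore_sp_eth_rx_queues_set_default(p_hwfn, rxq_handle,
						ECORE_SPQ_MODE_EBLOCK,
						OSAL_NULL);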
static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
@ -1216,7 +1283,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc;
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (p_cid == OSAL_NULL)
return ECORE_INVAL;
@ -1551,8 +1618,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0;
@ -1589,8 +1656,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
0, sizeof(p_ramrod->approx_mcast.bins));
OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
ETH_MULTICAST_MAC_BINS_IN_REGS);
OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport.
*/
@ -1599,16 +1665,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
OSAL_SET_BIT(bit, bins);
bins[bit / 32] |= 1 << (bit % 32);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
}
}
@ -1942,6 +2007,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
}
p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
link_change_count));
}
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
@ -1976,6 +2046,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
bool b_get_port_stats;
if (IS_PF(p_dev)) {
/* The main vport index is relative first */
@ -1990,8 +2061,9 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
continue;
}
b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
IS_PF(p_dev) ? true : false);
b_get_port_stats);
out:
if (IS_PF(p_dev) && p_ptt)
@ -2056,42 +2128,57 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
* Link change stat is maintained by MFW, return its value as is.
*/
if (!p_dev->reset_stats)
DP_INFO(p_dev, "Reset stats not allocated\n");
else
else {
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
p_dev->reset_stats->common.link_change_count = 0;
}
}
static enum gft_profile_type
ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
{
if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
return GFT_PROFILE_TYPE_4_TUPLE;
if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
return GFT_PROFILE_TYPE_IP_DST_ADDR;
return GFT_PROFILE_TYPE_L4_DST_PORT;
}
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
{
if (p_cfg_params->arfs_enable) {
ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
p_cfg_params->udp,
p_cfg_params->ipv4,
p_cfg_params->ipv6);
if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
return;
if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
p_cfg_params->udp,
p_cfg_params->ipv4,
p_cfg_params->ipv6,
ecore_arfs_mode_to_hsi(p_cfg_params->mode));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
"Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
p_cfg_params->tcp ? "Enable" : "Disable",
p_cfg_params->udp ? "Enable" : "Disable",
p_cfg_params->ipv4 ? "Enable" : "Disable",
p_cfg_params->ipv6 ? "Enable" : "Disable");
p_cfg_params->ipv6 ? "Enable" : "Disable",
(u32)p_cfg_params->mode);
} else {
ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Disabled Filtering\n");
ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
p_cfg_params->arfs_enable ? "Enable" : "Disable");
}
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add)
struct ecore_ntuple_filter_params *p_params)
{
struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
@ -2100,13 +2187,15 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) {
rc = ecore_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
}
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@ -2129,19 +2218,138 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_update_gft;
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
p_ramrod->vport_id = abs_vport_id;
p_ramrod->filter_type = RFS_FILTER_TYPE;
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(p_params->length);
if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) {
p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
}
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
abs_vport_id, abs_rx_q_id,
b_is_add ? "Adding" : "Removing",
(unsigned long long)p_addr, length);
p_params->b_is_add ? "Adding" : "Removing",
(unsigned long long)p_params->addr, p_params->length);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
enum _ecore_status_t rc;
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(osal_uintptr_t)&sb_entry, 2,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = ecore_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return ECORE_INVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_rx_coal = (u16)(coalesce << timer_res);
return ECORE_SUCCESS;
}
int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
enum _ecore_status_t rc;
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(osal_uintptr_t)&sb_entry, 2,
OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = ecore_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return ECORE_INVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_tx_coal = (u16)(coalesce << timer_res);
return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
void *handle)
{
struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_ptt *p_ptt;
#ifdef CONFIG_ECORE_SRIOV
if (IS_VF(p_hwfn->p_dev)) {
rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Unable to read queue calescing\n");
return rc;
}
#endif
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_AGAIN;
if (p_cid->b_is_rx) {
rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc != ECORE_SUCCESS)
goto out;
} else {
rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc != ECORE_SUCCESS)
goto out;
}
out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
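/* Illustrative sketch (assumed caller): reading back a queue's coalescing
 * value through the wrapper above. "p_handle" is the opaque queue handle
 * returned at queue-start time; the function name is hypothetical.
 */
static void example_read_queue_coalesce(struct ecore_hwfn *p_hwfn,
					void *p_handle)
{
	u16 coal = 0;

	if (ecore_get_queue_coalesce(p_hwfn, &coal, p_handle) == ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "queue coalescing = %u usecs\n", coal);
}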
#ifdef _NTDDK_
#pragma warning(pop)
#endif

View file

@ -86,6 +86,8 @@ struct ecore_queue_cid {
u32 cid;
u16 opaque_fid;
bool b_is_rx;
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
@ -118,6 +120,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
@ -162,4 +165,25 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_hw_coal);
enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_hw_coal);
#endif

View file

@ -35,6 +35,7 @@
#include "ecore_sp_api.h"
#include "ecore_int_api.h"
#ifndef __EXTRACT__LINUX__
enum ecore_rss_caps {
ECORE_RSS_IPV4 = 0x1,
ECORE_RSS_IPV6 = 0x2,
@ -67,7 +68,9 @@ enum ecore_ptp_hwtstamp_tx_type {
ECORE_PTP_HWTSTAMP_TX_OFF,
ECORE_PTP_HWTSTAMP_TX_ON,
};
#endif
#ifndef __EXTRACT__LINUX__
struct ecore_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
@ -78,6 +81,8 @@ struct ecore_queue_start_common_params {
struct ecore_sb_info *p_sb;
u8 sb_idx;
u8 tc;
};
struct ecore_rxq_start_ret_params {
@ -89,6 +94,7 @@ struct ecore_txq_start_ret_params {
void OSAL_IOMEM *p_doorbell;
void *p_handle;
};
#endif
struct ecore_rss_params {
u8 update_rss_config;
@ -180,12 +186,21 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_BCAST 0x20
};
#ifndef __EXTRACT__LINUX__
enum ecore_filter_config_mode {
ECORE_FILTER_CONFIG_MODE_DISABLE,
ECORE_FILTER_CONFIG_MODE_5_TUPLE,
ECORE_FILTER_CONFIG_MODE_L4_PORT,
ECORE_FILTER_CONFIG_MODE_IP_DEST,
};
#endif
struct ecore_arfs_config_params {
bool tcp;
bool udp;
bool ipv4;
bool ipv6;
bool arfs_enable; /* Enable or disable arfs mode */
enum ecore_filter_config_mode mode;
};
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
@ -371,7 +386,7 @@ struct ecore_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
u32 bins[8];
struct ecore_rss_params *rss_params;
struct ecore_filter_accept_flags accept_flags;
struct ecore_sge_tpa_params *sge_tpa_params;
@ -447,6 +462,27 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/**
* @brief ecore_sp_eth_rx_queues_set_default -
*
* This ramrod sets an RSS RX queue as the default one.
*
* @note Final phase API.
*
* @param p_hwfn
* @param p_rxq_handler queue handler to be updated.
* @param comp_mode
* @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
void *p_rxq_handler,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *stats,
@ -472,6 +508,30 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
#ifndef __EXTRACT__LINUX__
struct ecore_ntuple_filter_params {
/* Physically mapped address containing header of buffer to be used
* as filter.
*/
dma_addr_t addr;
/* Length of header in bytes */
u16 length;
/* Relative queue-id to receive classified packet */
#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1)
u16 qid;
/* Identifier can either be according to vport-id or vfid */
bool b_is_vf;
u8 vport_id;
u8 vf_id;
/* true iff this filter is to be added. Else to be removed */
bool b_is_add;
};
#endif
/**
* @brief - ecore_configure_rfs_ntuple_filter
*
@ -481,20 +541,10 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
* @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would
* initialize it with a cookie and callback function address; if not
* using this mode, the client must pass NULL.
* @params p_addr p_addr is an actual packet header that needs to be
* filtered. It has to be mapped with IO to read prior to
* calling this, [contains 4 tuples - src ip, dest ip,
* src port, dest port].
* @params length length of p_addr header up to past the transport header.
* @params qid receive packet will be directed to this queue.
* @params vport_id
* @params b_is_add flag to add or remove filter.
*
* @params p_params
*/
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
struct ecore_ntuple_filter_params *p_params);
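/* Illustrative sketch (hypothetical caller): filling the new parameters
 * structure for an "add" request. ECORE_RFS_NTUPLE_QID_RSS may be used as
 * qid when the classified flow should be spread by RSS rather than pinned
 * to one queue; hdr_dma_addr/hdr_len/rxq_id/vport_id are assumed inputs.
 *
 *	struct ecore_ntuple_filter_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.addr = hdr_dma_addr;	(DMA-mapped header template)
 *	params.length = hdr_len;
 *	params.qid = rxq_id;		(or ECORE_RFS_NTUPLE_QID_RSS)
 *	params.vport_id = vport_id;
 *	params.b_is_add = true;
 *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, OSAL_NULL, &params);
 */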
#endif

View file

@ -103,6 +103,7 @@ struct ecore_ll2_tx_queue {
struct ecore_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void OSAL_IOMEM *doorbell_addr;
struct core_db_data db_msg;
u16 bds_idx;
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
@ -151,6 +152,7 @@ void ecore_ll2_setup(struct ecore_hwfn *p_hwfn);
*/
void ecore_ll2_free(struct ecore_hwfn *p_hwfn);
#ifndef LINUX_REMOVE
/**
* @brief ecore_ll2_get_fragment_of_tx_packet
*
@ -168,5 +170,6 @@ ecore_ll2_get_fragment_of_tx_packet(struct ecore_hwfn *p_hwfn,
u8 connection_handle,
dma_addr_t *addr,
bool *last_fragment);
#endif
#endif /*__ECORE_LL2_H__*/

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,6 +33,7 @@
/* ECORE LL2 API: called by ECORE's upper level client */
/* must be the same as core_rx_conn_type */
#ifndef __EXTRACT__LINUX__
enum ecore_ll2_conn_type {
ECORE_LL2_TYPE_FCOE /* FCoE L2 connection */,
@ -112,7 +113,7 @@ struct ecore_ll2_comp_rx_data {
u32 opaque_data_1; /* src_mac_addr_lo */
/* GSI only */
u32 gid_dst[4];
u32 src_qp;
u16 qp_id;
};
@ -186,6 +187,7 @@ struct ecore_ll2_acquire_data {
/* Output container for LL2 connection's handle */
u8 *p_connection_handle;
};
#endif
/**
* @brief ecore_ll2_acquire_connection - allocate resources,
@ -238,6 +240,7 @@ enum _ecore_status_t ecore_ll2_post_rx_buffer(void *cxt,
void *cookie,
u8 notify_fw);
#ifndef __EXTRACT__LINUX__
struct ecore_ll2_tx_pkt_info {
u8 num_of_bds;
u16 vlan;
@ -251,7 +254,9 @@ struct ecore_ll2_tx_pkt_info {
bool enable_l4_cksum;
bool calc_ip_len;
void *cookie;
bool remove_stag;
};
#endif
/**
* @brief ecore_ll2_prepare_tx_packet - request for start Tx BD
@ -317,6 +322,10 @@ ecore_ll2_set_fragment_of_tx_packet(void *cxt,
enum _ecore_status_t ecore_ll2_terminate_connection(void *cxt,
u8 connection_handle);
enum _ecore_status_t __ecore_ll2_get_stats(void *cxt,
u8 connection_handle,
struct ecore_ll2_stats *p_stats);
/**
* @brief ecore_ll2_get_stats - get LL2 queue's statistics
*
@ -331,6 +340,6 @@ enum _ecore_status_t ecore_ll2_terminate_connection(void *cxt,
*/
enum _ecore_status_t ecore_ll2_get_stats(void *cxt,
u8 connection_handle,
struct ecore_ll2_stats *p_stats);
struct ecore_ll2_stats *p_stats);
#endif

File diff suppressed because it is too large

View file

@ -48,11 +48,7 @@
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->p_dev->num_ports_in_engine * \
ecore_device_num_engines((_p_hwfn)->p_dev)))
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
struct ecore_mcp_info {
/* List for mailbox commands which were sent and wait for a response */
@ -104,11 +100,16 @@ struct ecore_mcp_mb_params {
u32 cmd;
u32 param;
void *p_data_src;
u8 data_src_size;
void *p_data_dst;
u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
u8 data_src_size;
u8 data_dst_size;
u32 flags;
#define ECORE_MB_FLAG_CAN_SLEEP (0x1 << 0)
#define ECORE_MB_FLAG_AVOID_BLOCK (0x1 << 1)
#define ECORE_MB_FLAGS_IS_SET(params, flag) \
((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag))
};
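/* Illustrative sketch: ECORE_MB_FLAGS_IS_SET() pastes the ECORE_MB_FLAG_
 * prefix, so callers test flags by suffix, e.g. (delay path is illustrative):
 *
 *	if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
 *		OSAL_MSLEEP(1);
 */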
enum ecore_ov_eswitch {
@ -137,7 +138,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Intialize the port interface with the MCP
* @brief Initialize the port interface with the MCP
*
* @param p_hwfn
* @param p_ptt
@ -218,6 +219,28 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_load_req_params *p_params);
/**
* @brief Sends a LOAD_DONE message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Sends a CANCEL_LOAD_REQ message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
@ -453,10 +476,11 @@ enum ecore_resc_lock {
/* Locks that the MFW is aware of should be added here downwards */
/* Ecore only locks should be added here upwards */
ECORE_RESC_LOCK_PTP_PORT0,
ECORE_RESC_LOCK_PTP_PORT1,
ECORE_RESC_LOCK_PTP_PORT2,
ECORE_RESC_LOCK_PTP_PORT3,
ECORE_RESC_LOCK_IND_TABLE = 26,
ECORE_RESC_LOCK_PTP_PORT0 = 27,
ECORE_RESC_LOCK_PTP_PORT1 = 28,
ECORE_RESC_LOCK_PTP_PORT2 = 29,
ECORE_RESC_LOCK_PTP_PORT3 = 30,
ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL,
/* A dummy value to be used for auxiliary functions in need of
@ -479,7 +503,7 @@ struct ecore_resc_lock_params {
#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
/* The interval in usec between retries */
u16 retry_interval;
u32 retry_interval;
#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
/* Use sleep or delay between retries */
@ -575,4 +599,89 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
enum ecore_mcp_drv_attr_cmd {
ECORE_MCP_DRV_ATTR_CMD_READ,
ECORE_MCP_DRV_ATTR_CMD_WRITE,
ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR,
ECORE_MCP_DRV_ATTR_CMD_CLEAR,
};
struct ecore_mcp_drv_attr {
enum ecore_mcp_drv_attr_cmd attr_cmd;
u32 attr_num;
/* R/RC - will be set with the read value
* W - should hold the required value to be written
* C - DC
*/
u32 val;
/* W - mask/offset to be applied on the given value
* R/RC/C - DC
*/
u32 mask;
u32 offset;
};
/**
* @brief Handle the drivers' attributes that are kept by the MFW.
*
* @param p_hwfn
* @param p_ptt
* @param p_drv_attr
*/
enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_attr *p_drv_attr);
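/* Illustrative sketch: reading one driver attribute through the API above.
 * The attribute number and variable names are hypothetical.
 *
 *	struct ecore_mcp_drv_attr drv_attr;
 *
 *	OSAL_MEMSET(&drv_attr, 0, sizeof(drv_attr));
 *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
 *	drv_attr.attr_num = 0;
 *	rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
 *	if (rc == ECORE_SUCCESS)
 *		read_val = drv_attr.val;
 */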
/**
* @brief Read ufp config from the shared memory.
*
* @param p_hwfn
* @param p_ptt
*/
void
ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Get the engine affinity configuration.
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Get the PPFID bitmap.
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Acquire MCP lock to access to HW indirection table entries
*
* @param p_hwfn
* @param p_ptt
* @param retry_num
* @param retry_interval
*/
enum _ecore_status_t
ecore_mcp_ind_table_lock(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 retry_num,
u32 retry_interval);
/**
* @brief Release MCP lock of access to HW indirection table entries
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t
ecore_mcp_ind_table_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
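/* Illustrative sketch: bracketing an indirection-table update with the pair
 * above, reusing the resource-lock retry defaults from this header.
 *
 *	rc = ecore_mcp_ind_table_lock(p_hwfn, p_ptt,
 *				      ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT,
 *				      ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT);
 *	if (rc == ECORE_SUCCESS) {
 *		(update indirection table entries here)
 *		ecore_mcp_ind_table_unlock(p_hwfn, p_ptt);
 *	}
 */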
#endif /* __ECORE_MCP_H__ */

View file

@ -74,17 +74,24 @@ struct ecore_mcp_link_params {
struct ecore_mcp_link_capabilities {
u32 speed_capabilities;
bool default_speed_autoneg; /* In Mb/s */
u32 default_speed; /* In Mb/s */
u32 default_speed; /* In Mb/s */ /* __LINUX__THROW__ */
enum ecore_mcp_eee_mode default_eee;
u32 eee_lpi_timer;
u8 eee_speed_caps;
};
struct ecore_mcp_link_state {
bool link_up;
u32 line_speed; /* In Mb/s */
u32 min_pf_rate; /* In Mb/s */
u32 speed; /* In Mb/s */
/* Actual link speed in Mb/s */
u32 line_speed;
/* PF max speed in MB/s, deduced from line_speed
* according to PF max bandwidth configuration.
*/
u32 speed;
bool full_duplex;
bool an;
@ -561,9 +568,11 @@ union ecore_mfw_tlv_data {
struct ecore_mfw_tlv_iscsi iscsi;
};
#ifndef __EXTRACT__LINUX__
enum ecore_hw_info_change {
ECORE_HW_INFO_CHANGE_OVLAN,
};
#endif
/**
* @brief - returns the link params of the hw function
@ -572,7 +581,8 @@ enum ecore_hw_info_change {
*
* @returns pointer to link params
*/
struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn*);
struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn
*p_hwfn);
/**
* @brief - return the link state of the hw function
@ -581,7 +591,8 @@ struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn*);
*
* @returns pointer to link state
*/
struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn*);
struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn
*p_hwfn);
/**
* @brief - return the link capabilities of the hw function
@ -638,14 +649,62 @@ enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
* @param p_hwfn - ecore hw function pointer
* @param p_ptt
* @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
u32 *media_type);
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *media_type);
/**
* @brief Get transceiver data of the port.
*
* @param p_hwfn - ecore hw function pointer
* @param p_ptt
* @param p_tranceiver_type - transceiver type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_tranceiver_type);
/**
* @brief Get transceiver supported speed mask.
*
* @param p_hwfn - ecore hw function pointer
* @param p_ptt
* @param p_speed_mask - Bit mask of all supported speeds.
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_speed_mask);
/**
* @brief Get board configuration.
*
* @param p_hwfn - ecore hw function pointer
* @param p_ptt
* @param p_board_config - Board config.
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_board_config);
/**
* @brief - Sends a command to the MCP mailbox.
@ -653,9 +712,9 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP
* @param param - optional param
* @param o_mcp_resp - the MCP response code (exclude sequence)
* @param o_mcp_param - optional parameter provided by the MCP response
* @param param - Optional param
* @param o_mcp_resp - The MCP response code (exclude sequence)
* @param o_mcp_param - Optional parameter provided by the MCP response
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - operation was successful
@ -752,6 +811,17 @@ u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief A recovery handler must call this function as its first step.
* It is assumed that the handler is not run from an interrupt context.
*
* @param p_dev
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
/**
* @brief Notify MFW about the change in base device properties
*
@ -923,7 +993,7 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
* @param p_buf - nvm write buffer
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@ -936,7 +1006,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
*
* @param p_dev
* @param addr - nvm offset
* @param p_buf - nvm write buffer
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@ -1312,4 +1382,23 @@ ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t
ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *wwn);
/**
* @brief - Return whether the management firmware supports smart AN
*
* @param p_hwfn
*
* @return bool - true if feature is supported.
*/
bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn);
/**
* @brief - Return whether the management firmware supports setting of
* PCI relaxed ordering.
*
* @param p_hwfn
*
* @return bool - true if feature is supported.
*/
bool ecore_mcp_rlx_odr_supported(struct ecore_hwfn *p_hwfn);
#endif

File diff suppressed because it is too large

View file

@ -79,11 +79,26 @@ struct ecore_ooo_info {
u16 cid_base;
};
#if defined(CONFIG_ECORE_ISCSI) || defined(CONFIG_ECORE_IWARP)
enum _ecore_status_t ecore_ooo_alloc(struct ecore_hwfn *p_hwfn);
void ecore_ooo_setup(struct ecore_hwfn *p_hwfn);
void ecore_ooo_free(struct ecore_hwfn *p_hwfn);
#else
static inline enum _ecore_status_t
ecore_ooo_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn)
{
return ECORE_INVAL;
}
static inline void
ecore_ooo_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static inline void
ecore_ooo_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
#endif
void ecore_ooo_save_history_entry(struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);

View file

@ -123,11 +123,12 @@ struct ecore_iscsi_pf_params {
u8 gl_cmd_pi;
u8 debug_mode;
u8 ll2_ooo_queue_id;
u8 ooo_enable;
u8 is_target;
u8 is_tmwo_en;
u8 is_soc_en;
u8 soc_num_of_blocks_log;
u8 bdq_pbl_num_entries[3];
u8 disable_stats_collection;
};
enum ecore_rdma_protocol {
@ -143,7 +144,8 @@ struct ecore_rdma_pf_params {
*/
u32 min_dpis; /* number of requested DPIs */
u32 num_qps; /* number of requested Queue Pairs */
u32 num_srqs; /* number of requested SRQ */
u32 num_srqs; /* number of requested SRQs */
u32 num_xrc_srqs; /* number of requested XRC SRQs */
u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
u8 gl_pi; /* protocol index */

View file

@ -0,0 +1,279 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_rdma_api.h"
#include "ecore_dev_api.h"
#include "ecore_roce.h"
#include "ecore_iwarp.h"
/* Constants */
/* HW/FW RoCE Limitations (internal. For external see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY (1)
#define ECORE_RDMA_MAX_WQE (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define ECORE_RDMA_MAX_SRQS (32 * 1024) /* 32k */
/* Configurable */
/* Max CQE is derived from u16/32 size, halved and decremented by 1 to handle
* wrap properly and then decremented by 1 again. The latter decrement comes
* from a requirement to create a chain that is bigger than what the user
* requested by one:
* The CQE size is 32 bytes but the FW writes in chunks of 64
* bytes, for performance purposes. Allocating an extra entry and telling the
* FW we have less prevents overwriting the first entry in case of a wrap i.e.
* when the FW writes the last entry and the application hasn't read the first
* one.
*/
#define ECORE_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
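/* Editorial note - worked out, the two limits above are:
 *	32-bit: 2^32 / 2 - 1 - 1 = 0x7FFFFFFF - 1 = 0x7FFFFFFE
 *	16-bit: 2^16 / 2 - 1 - 1 = 0x7FFF - 1     = 0x7FFE
 * i.e. half the index space, minus one for wrap handling, minus one for the
 * extra entry hidden from the FW as described above.
 */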
#define ECORE_RDMA_MAX_XRC_SRQS (RDMA_MAX_XRC_SRQS)
/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
* SRQs is much smaller so there's no need to have that many domains.
*/
#define ECORE_RDMA_MAX_XRCDS (OSAL_ROUNDUP_POW_OF_TWO(RDMA_MAX_XRC_SRQS))
#define IS_IWARP(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
#define IS_ROCE(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_ROCE)
enum ecore_rdma_toggle_bit {
ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
ECORE_RDMA_TOGGLE_BIT_SET = 1
};
/* @@@TBD Currently we support only affiliated events
* enum ecore_rdma_unaffiliated_event_code {
* ECORE_RDMA_PORT_ACTIVE, // Link Up
* ECORE_RDMA_PORT_CHANGED, // SGID table has changed
* ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
* ECORE_RDMA_PORT_ERR, // Link down
* };
*/
#define QEDR_MAX_BMAP_NAME (10)
struct ecore_bmap {
u32 max_count;
unsigned long *bitmap;
char name[QEDR_MAX_BMAP_NAME];
};
struct ecore_rdma_info {
osal_spinlock_t lock;
struct ecore_bmap cq_map;
struct ecore_bmap pd_map;
struct ecore_bmap xrcd_map;
struct ecore_bmap tid_map;
struct ecore_bmap srq_map;
struct ecore_bmap xrc_srq_map;
struct ecore_bmap qp_map;
struct ecore_bmap tcp_cid_map;
struct ecore_bmap cid_map;
struct ecore_bmap dpi_map;
struct ecore_bmap toggle_bits;
struct ecore_rdma_events events;
struct ecore_rdma_device *dev;
struct ecore_rdma_port *port;
u32 last_tid;
u8 num_cnqs;
struct rdma_sent_stats rdma_sent_pstats;
struct rdma_rcv_stats rdma_rcv_tstats;
u32 num_qps;
u32 num_mrs;
u32 num_srqs;
u16 srq_id_offset;
u16 queue_zone_base;
u16 max_queue_zones;
struct ecore_rdma_glob_cfg glob_cfg;
enum protocol_type proto;
struct ecore_roce_info roce;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_info iwarp;
#endif
bool active;
int ref_cnt;
};
struct cq_prod {
u32 req;
u32 resp;
};
struct ecore_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
u32 qpid; /* iwarp: may differ from icid */
u16 icid;
u16 qp_idx;
enum ecore_roce_qp_state cur_state;
enum ecore_rdma_qp_type qp_type;
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_qp_state iwarp_state;
#endif
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u16 pd; /* Protection domain */
u16 pkey; /* Primary P_key index */
u32 dest_qp;
u16 mtu;
u16 srq_id;
u8 traffic_class_tos; /* IPv6/GRH traffic class; IPv4 TOS */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u16 dpi;
u32 flow_label; /* ignored in IPv4 */
u16 vlan_id;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
enum roce_mode roce_mode;
u16 udp_src_port; /* RoCEv2 only */
u8 stats_queue;
/* requester */
u8 max_rd_atomic_req;
u32 sq_psn;
u16 sq_cq_id; /* The cq to be associated with the send queue*/
u16 sq_num_pages;
dma_addr_t sq_pbl_ptr;
void *orq;
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
bool has_req;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id; /* The cq to be associated with the receive queue */
u16 rq_num_pages;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
bool has_resp;
struct cq_prod cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_ep *ep;
#endif
u16 xrcd_id;
};
static OSAL_INLINE bool ecore_rdma_is_xrc_qp(struct ecore_rdma_qp *qp)
{
if ((qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT) ||
(qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI))
return 1;
return 0;
}
enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
u32 max_count,
char *name);
void
ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
bool check);
enum _ecore_status_t
ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
u32 *id_num);
void
ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
u32 id_num);
void
ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
u32 id_num);
int
ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap,
u32 id_num);
void
ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac);
bool
ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn);
u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc);
#endif /*__ECORE_RDMA_H__*/

View file

@ -0,0 +1,970 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__
#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
#endif
#ifndef __EXTRACT__LINUX__
enum ecore_roce_ll2_tx_dest
{
ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
ECORE_ROCE_LL2_TX_DEST_MAX
};
/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size Limitation
* The CNQ size should be set as twice the amount of CQs, since for each CQ one
* element may be inserted into the CNQ and another element is used per CQ to
* accommodate for a possible race in the arm mechanism.
* The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
* that the number of QPs can reach 32k giving 64k CQs and 128k CNQ elements.
* Luckily the FW can buffer CNQ elements avoiding an overflow, at the expense
* of performance.
*/
#define ECORE_RDMA_MAX_CNQ_SIZE (0xFFFF) /* 2^16 - 1 */
/* rdma interface */
enum ecore_roce_qp_state {
ECORE_ROCE_QP_STATE_RESET, /* Reset */
ECORE_ROCE_QP_STATE_INIT, /* Initialized */
ECORE_ROCE_QP_STATE_RTR, /* Ready to Receive */
ECORE_ROCE_QP_STATE_RTS, /* Ready to Send */
ECORE_ROCE_QP_STATE_SQD, /* Send Queue Draining */
ECORE_ROCE_QP_STATE_ERR, /* Error */
ECORE_ROCE_QP_STATE_SQE /* Send Queue Error */
};
enum ecore_rdma_qp_type {
ECORE_RDMA_QP_TYPE_RC,
ECORE_RDMA_QP_TYPE_XRC_INI,
ECORE_RDMA_QP_TYPE_XRC_TGT,
ECORE_RDMA_QP_TYPE_INVAL = 0xffff,
};
enum ecore_rdma_tid_type
{
ECORE_RDMA_TID_REGISTERED_MR,
ECORE_RDMA_TID_FMR,
ECORE_RDMA_TID_MW_TYPE1,
ECORE_RDMA_TID_MW_TYPE2A
};
typedef
void (*affiliated_event_t)(void *context,
u8 fw_event_code,
void *fw_handle);
typedef
void (*unaffiliated_event_t)(void *context,
u8 event_code);
struct ecore_rdma_events {
void *context;
affiliated_event_t affiliated_event;
unaffiliated_event_t unaffiliated_event;
};
struct ecore_rdma_device {
/* Vendor specific information */
u32 vendor_id;
u32 vendor_part_id;
u32 hw_ver;
u64 fw_ver;
u64 node_guid; /* node GUID */
u64 sys_image_guid; /* System image GUID */
u8 max_cnq;
u8 max_sge; /* The maximum number of scatter/gather entries
* per Work Request supported
*/
u8 max_srq_sge; /* The maximum number of scatter/gather entries
* per Work Request supported for SRQ
*/
u16 max_inline;
u32 max_wqe; /* The maximum number of outstanding work
* requests on any Work Queue supported
*/
u32 max_srq_wqe; /* The maximum number of outstanding work
* requests on any Work Queue supported for SRQ
*/
u8 max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
* & atomic operation that can be
* outstanding per QP
*/
u8 max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
* initiation of RDMA Read
* & atomic operations
*/
u64 max_dev_resp_rd_atomic_resc;
u32 max_cq;
u32 max_qp;
u32 max_srq; /* Maximum number of SRQs */
u32 max_mr; /* Maximum number of MRs supported by this device */
u64 max_mr_size; /* Size (in bytes) of the largest contiguous memory
* block that can be registered by this device
*/
u32 max_cqe;
u32 max_mw; /* The maximum number of memory windows supported */
u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd; /* The maximum number of protection domains supported */
u32 max_ah;
u8 max_pkey;
u16 max_srq_wr; /* Maximum number of WRs per SRQ */
u8 max_stats_queues; /* Maximum number of statistics queues */
u32 dev_caps;
/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
/* Ability to support modifying the maximum number of
* outstanding work requests per QP
*/
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
/* Ability to support block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
/* Ability to support zero-based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK 0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT 14
/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
/* Ability to support loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
u64 page_size_caps;
u8 dev_ack_delay;
u32 reserved_lkey; /* Value of reserved L_key */
u32 bad_pkey_counter; /* Bad P_key counter support indicator */
struct ecore_rdma_events events;
};
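/* Illustrative sketch: dev_caps is a packed bitfield queried with the
 * MASK/SHIFT pairs above, e.g. (p_rdma_dev is a hypothetical pointer to this
 * struct):
 *
 *	bool atomic_ok = (p_rdma_dev->dev_caps >>
 *			  ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *			 ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 */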
enum ecore_port_state {
ECORE_RDMA_PORT_UP,
ECORE_RDMA_PORT_DOWN,
};
enum ecore_roce_capability {
ECORE_ROCE_V1 = 1 << 0,
ECORE_ROCE_V2 = 1 << 1,
};
struct ecore_rdma_port {
enum ecore_port_state port_state;
int link_speed;
u64 max_msg_size;
u8 source_gid_table_len;
void *source_gid_table_ptr;
u8 pkey_table_len;
void *pkey_table_ptr;
u32 pkey_bad_counter;
enum ecore_roce_capability capability;
};
struct ecore_rdma_cnq_params
{
u8 num_pbl_pages; /* Number of pages in the PBL allocated
* for this queue
*/
u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};
/* The CQ Mode affects the CQ doorbell transaction size.
* 64/32 bit machines should configure to 32/16 bits respectively.
*/
enum ecore_rdma_cq_mode {
ECORE_RDMA_CQ_MODE_16_BITS,
ECORE_RDMA_CQ_MODE_32_BITS,
};
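/* Illustrative sketch of the rule stated above, using pointer width as a
 * stand-in for machine word size ("params" is a hypothetical
 * ecore_rdma_start_in_params, declared further below):
 *
 *	params->roce.cq_mode = (sizeof(void *) == 8) ?
 *	    ECORE_RDMA_CQ_MODE_32_BITS : ECORE_RDMA_CQ_MODE_16_BITS;
 */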
struct ecore_roce_dcqcn_params {
u8 notification_point;
u8 reaction_point;
/* fields for notification point */
u32 cnp_send_timeout;
u8 cnp_dscp;
u8 cnp_vlan_priority;
/* fields for reaction point */
u32 rl_bc_rate; /* Byte Counter Limit. */
u32 rl_max_rate; /* Maximum rate in Mbps resolution */
u32 rl_r_ai; /* Active increase rate */
u32 rl_r_hai; /* Hyper active increase rate */
u32 dcqcn_gd; /* Alpha denominator */
u32 dcqcn_k_us; /* Alpha update interval */
u32 dcqcn_timeout_us;
};
struct ecore_rdma_glob_cfg {
/* global tunables affecting all QPs created after they are
* set.
*/
u8 vlan_pri_en;
u8 vlan_pri;
u8 ecn_en;
u8 ecn;
u8 dscp_en;
u8 dscp;
};
#ifndef LINUX_REMOVE
#define ECORE_RDMA_DCSP_BIT_MASK 0x01
#define ECORE_RDMA_DCSP_EN_BIT_MASK 0x02
#define ECORE_RDMA_ECN_BIT_MASK 0x04
#define ECORE_RDMA_ECN_EN_BIT_MASK 0x08
#define ECORE_RDMA_VLAN_PRIO_BIT_MASK 0x10
#define ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK 0x20
enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_glob_cfg *in_params,
u32 glob_cfg_bits);
enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_glob_cfg *out_params);
#endif /* LINUX_REMOVE */
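/* Illustrative sketch: enabling DSCP marking through the global-config API
 * above. Only the fields whose bits are passed in glob_cfg_bits are applied;
 * the DSCP value here is hypothetical.
 *
 *	struct ecore_rdma_glob_cfg cfg;
 *
 *	OSAL_MEMSET(&cfg, 0, sizeof(cfg));
 *	cfg.dscp_en = 1;
 *	cfg.dscp = 26;
 *	rc = ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
 *				     ECORE_RDMA_DCSP_EN_BIT_MASK |
 *				     ECORE_RDMA_DCSP_BIT_MASK);
 */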
#ifdef CONFIG_ECORE_IWARP
#define ECORE_IWARP_MAX_LIS_BACKLOG (256)
#define ECORE_MPA_RTR_TYPE_NONE 0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND (1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE (1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ (1 << 2)
enum ecore_mpa_rev {
ECORE_MPA_REV1,
ECORE_MPA_REV2,
};
struct ecore_iwarp_params {
u32 rcv_wnd_size;
u16 ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
u8 flags;
u8 crc_needed;
enum ecore_mpa_rev mpa_rev;
u8 mpa_rtr;
u8 mpa_peer2peer;
};
#endif
struct ecore_roce_params {
enum ecore_rdma_cq_mode cq_mode;
struct ecore_roce_dcqcn_params dcqcn_params;
u8 ll2_handle; /* required for UD QPs */
};
struct ecore_rdma_start_in_params {
struct ecore_rdma_events *events;
struct ecore_rdma_cnq_params cnq_pbl_list[128];
u8 desired_cnq;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_params iwarp;
#endif
struct ecore_roce_params roce;
};
struct ecore_rdma_add_user_out_params {
/* output variables (given to miniport) */
u16 dpi;
u64 dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 wid_count;
};
enum roce_mode
{
ROCE_V1,
ROCE_V2_IPV4,
ROCE_V2_IPV6,
MAX_ROCE_MODE
};
/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
union ecore_gid {
u8 bytes[16];
u16 words[8];
u32 dwords[4];
u64 qwords[2];
u32 ipv4_addr;
};
struct ecore_rdma_register_tid_in_params {
/* input variables (given by miniport) */
u32 itid; /* index only, 18 bit long, lkey = itid << 8 | key */
enum ecore_rdma_tid_type tid_type;
u8 key;
u16 pd;
bool local_read;
bool local_write;
bool remote_read;
bool remote_write;
bool remote_atomic;
bool mw_bind;
u64 pbl_ptr;
bool pbl_two_level;
u8 pbl_page_size_log; /* for the pages that contain the pointers
* to the MR pages
*/
u8 page_size_log; /* for the MR pages */
u32 fbo;
u64 length; /* only lower 40 bits are valid */
u64 vaddr;
bool zbva;
bool phy_mr;
bool dma_mr;
/* DIF related fields */
bool dif_enabled;
u64 dif_error_addr;
u64 dif_runt_addr;
};
/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
/* input variables (given by miniport) */
u32 cq_handle_lo; /* CQ handle to be written in CNQ */
u32 cq_handle_hi;
u32 cq_size;
u16 dpi;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log; /* for the pages that contain the
* pointers to the CQ pages
*/
u8 cnq_id;
u16 int_timeout;
};
struct ecore_rdma_create_srq_in_params {
u64 pbl_base_addr;
u64 prod_pair_addr;
u16 num_pages;
u16 pd_id;
u16 page_size;
/* XRC related only */
bool is_xrc;
u16 xrcd_id;
u32 cq_cid;
bool reserved_key_en;
};
struct ecore_rdma_destroy_cq_in_params {
/* input variables (given by miniport) */
u16 icid;
};
struct ecore_rdma_destroy_cq_out_params {
/* output variables, provided to the upper layer */
/* Sequence number of completion notification sent for the CQ on
* the associated CNQ
*/
u16 num_cq_notif;
};
#endif
struct ecore_rdma_resize_cq_in_params {
/* input variables (given by miniport) */
u16 icid;
u32 cq_size;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log; /* for the pages that contain the
* pointers to the CQ pages
*/
};
#ifndef __EXTRACT__LINUX__
struct ecore_rdma_create_qp_in_params {
/* input variables (given by miniport) */
u32 qp_handle_lo; /* QP handle to be written in CQE */
u32 qp_handle_hi;
u32 qp_handle_async_lo; /* QP handle to be written in async event */
u32 qp_handle_async_hi;
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
u16 pd;
u16 dpi;
u16 sq_cq_id;
u16 sq_num_pages;
u64 sq_pbl_ptr; /* Not relevant for iWARP */
u8 max_sq_sges;
u16 rq_cq_id;
u16 rq_num_pages;
u64 rq_pbl_ptr; /* Not relevant for iWARP */
u16 srq_id;
u8 stats_queue;
enum ecore_rdma_qp_type qp_type;
u16 xrcd_id;
};
struct ecore_rdma_create_qp_out_params {
/* output variables (given to miniport) */
u32 qp_id;
u16 icid;
void *rq_pbl_virt;
dma_addr_t rq_pbl_phys;
void *sq_pbl_virt;
dma_addr_t sq_pbl_phys;
};
struct ecore_rdma_modify_qp_in_params {
/* input variables (given by miniport) */
u32 modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
enum ecore_roce_qp_state new_state;
u16 pkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u32 dest_qp;
u16 mtu;
u8 traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u32 flow_label; /* ignored in IPv4 */
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
u16 udp_src_port; /* RoCEv2 only */
u16 vlan_id;
u32 rq_psn;
u32 sq_psn;
u8 max_rd_atomic_resp;
u8 max_rd_atomic_req;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
bool use_local_mac;
enum roce_mode roce_mode;
};
struct ecore_rdma_query_qp_out_params {
/* output variables (given to miniport) */
enum ecore_roce_qp_state state;
u32 rq_psn; /* responder */
u32 sq_psn; /* requester */
bool draining; /* send queue is draining */
u16 mtu;
u32 dest_qp;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
u32 flow_label; /* ignored in IPv4 */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u8 traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
u32 timeout;
u8 rnr_retry;
u8 retry_cnt;
u8 min_rnr_nak_timer;
u16 pkey_index;
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
bool sqd_async;
};
struct ecore_rdma_destroy_qp_out_params {
u32 sq_cq_prod;
u32 rq_cq_prod;
};
struct ecore_rdma_create_srq_out_params {
u16 srq_id;
};
struct ecore_rdma_destroy_srq_in_params {
u16 srq_id;
bool is_xrc;
};
struct ecore_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
bool is_xrc;
};
#endif
struct ecore_rdma_resize_cq_out_params {
/* output variables, provided to the upper layer */
u32 prod; /* CQ producer value on old PBL */
u32 cons; /* CQ consumer value on old PBL */
};
struct ecore_rdma_resize_cnq_in_params {
/* input variables (given by miniport) */
u32 cnq_id;
u32 pbl_page_size_log; /* for the pages that contain the
* pointers to the cnq pages
*/
u64 pbl_ptr;
};
#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
u64 sent_bytes;
u64 sent_pkts;
u64 rcv_bytes;
u64 rcv_pkts;
/* RoCE only */
u64 icrc_errors; /* wraps at 32 bits */
u64 retransmit_events; /* wraps at 32 bits */
u64 silent_drops; /* wraps at 16 bits */
u64 rnr_nacks_sent; /* wraps at 16 bits */
/* RoCE DCQCN */
u64 ecn_pkt_rcv;
u64 cnp_pkt_rcv;
u64 cnp_pkt_sent;
/* iWARP only */
u64 iwarp_tx_fast_rxmit_cnt;
u64 iwarp_tx_slow_start_cnt;
u64 unalign_rx_comp;
};
struct ecore_rdma_counters_out_params {
u64 pd_count;
u64 max_pd;
u64 dpi_count;
u64 max_dpi;
u64 cq_count;
u64 max_cq;
u64 qp_count;
u64 max_qp;
u64 tid_count;
u64 max_tid;
u64 srq_count;
u64 max_srq;
u64 xrc_srq_count;
u64 max_xrc_srq;
u64 xrcd_count;
u64 max_xrcd;
};
#endif
enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
struct ecore_rdma_add_user_out_params *out_params);
enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
u16 *pd);
enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
u32 *tid);
enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
struct ecore_rdma_create_cq_in_params *params,
u16 *icid);
/* Returns a pointer to the responder's CID, which is also a pointer to the
* ecore_qp_params struct. Returns NULL in case of failure.
*/
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_in_params *in_params,
struct ecore_rdma_create_qp_out_params *out_params);
enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_out_params *out_params);
enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
u32 tid);
enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
struct ecore_rdma_destroy_cq_in_params *in_params,
struct ecore_rdma_destroy_cq_out_params *out_params);
enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_destroy_qp_out_params *out_params);
enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);
void
ecore_rdma_free_pd(void *rdma_cxt,
u16 pd);
enum _ecore_status_t
ecore_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id);
void
ecore_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id);
void
ecore_rdma_free_tid(void *rdma_cxt,
u32 tid);
enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_modify_qp_in_params *params);
struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);
struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);
enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_query_qp_out_params *out_params);
enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
struct ecore_rdma_register_tid_in_params *params);
void ecore_rdma_remove_user(void *rdma_cxt,
u16 dpi);
enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
struct ecore_rdma_resize_cnq_in_params *in_params);
/* Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
struct ecore_rdma_resize_cq_in_params *in_params,
struct ecore_rdma_resize_cq_out_params *out_params);
/* Before calling rdma_start upper layer (VBD/qed) should fill the
* page-size and mtu in hwfn context
*/
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
struct ecore_rdma_start_in_params *params);
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);
enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
struct ecore_rdma_stats_out_params *out_parms);
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
struct ecore_rdma_counters_out_params *out_parms);
u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id);
#ifndef LINUX_REMOVE
u32 ecore_rdma_query_cau_timer_res(void);
#endif
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
struct ecore_rdma_create_srq_in_params *in_params,
struct ecore_rdma_create_srq_out_params *out_params);
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
struct ecore_rdma_destroy_srq_in_params *in_params);
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
struct ecore_rdma_modify_srq_in_params *in_params);
#ifdef CONFIG_ECORE_IWARP
/* iWARP API */
#ifndef __EXTRACT__LINUX__
enum ecore_iwarp_event_type {
ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
* ( ack on mpa response )
*/
ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP, /* Passive side will drop
* MPA requests
*/
ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
ECORE_IWARP_EVENT_DISCONNECT,
ECORE_IWARP_EVENT_CLOSE,
/* Slow/Error path events start from here */
ECORE_IWARP_EVENT_IRQ_FULL,
ECORE_IWARP_ERROR_EVENTS_START = ECORE_IWARP_EVENT_IRQ_FULL,
ECORE_IWARP_EVENT_RQ_EMPTY,
ECORE_IWARP_EVENT_LLP_TIMEOUT,
ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
ECORE_IWARP_EVENT_CQ_OVERFLOW,
ECORE_IWARP_EVENT_QP_CATASTROPHIC,
ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};
enum ecore_tcp_ip_version
{
ECORE_TCP_IPV4,
ECORE_TCP_IPV6,
};
struct ecore_iwarp_cm_info {
enum ecore_tcp_ip_version ip_version;
u32 remote_ip[4];
u32 local_ip[4];
u16 remote_port;
u16 local_port;
u16 vlan;
const void *private_data;
u16 private_data_len;
u8 ord;
u8 ird;
};
struct ecore_iwarp_cm_event_params {
enum ecore_iwarp_event_type event;
const struct ecore_iwarp_cm_info *cm_info;
void *ep_context; /* To be passed to accept call */
int status;
};
typedef int (*iwarp_event_handler)(void *context,
struct ecore_iwarp_cm_event_params *event);
/* Active Side Connect Flow:
* The upper layer driver calls ecore_iwarp_connect.
* The function is blocking, i.e. it returns after the TCP connection is
* established. After the MPA connection is established, an
* ECORE_IWARP_EVENT_ACTIVE_COMPLETE event will be passed to the upper layer
* driver using the event_cb passed in ecore_iwarp_connect_in. Information on
* the established connection will be initialized in the event data.
*/
struct ecore_iwarp_connect_in {
iwarp_event_handler event_cb;
void *cb_context;
struct ecore_rdma_qp *qp;
struct ecore_iwarp_cm_info cm_info;
u16 mss;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
};
struct ecore_iwarp_connect_out {
void *ep_context;
};
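/* Illustrative sketch of the active-side flow described above; QP creation,
 * cm_info setup and the event handler are assumed to exist elsewhere, and
 * all "my_"-prefixed names are hypothetical.
 *
 *	struct ecore_iwarp_connect_in in;
 *	struct ecore_iwarp_connect_out out;
 *
 *	OSAL_MEMSET(&in, 0, sizeof(in));
 *	in.event_cb = my_event_handler;		(receives ACTIVE_COMPLETE)
 *	in.cb_context = my_ctx;
 *	in.qp = qp;				(previously created QP)
 *	in.cm_info = cm_info;			(ip/port/vlan of the peer)
 *	rc = ecore_iwarp_connect(rdma_cxt, &in, &out);
 */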
/* Passive side connect flow:
* The upper layer driver calls ecore_iwarp_create_listen.
* Once a SYN packet that matches an IP/port being listened on arrives, ecore
* will offload the TCP connection. After an MPA Request is received on the
* offloaded connection, the event ECORE_IWARP_EVENT_MPA_REQUEST will be sent
* to the upper layer driver using the event_cb passed below. The event data
* will be placed in the event parameter. After the upper layer driver
* processes the event, ecore_iwarp_accept or ecore_iwarp_reject should be
* called to continue MPA negotiation. Once negotiation is complete, the event
* ECORE_IWARP_EVENT_PASSIVE_COMPLETE will be passed to the event_cb passed
* originally in the ecore_iwarp_listen_in structure.
*/
struct ecore_iwarp_listen_in {
iwarp_event_handler event_cb; /* Callback func for delivering events */
void *cb_context; /* passed to event_cb */
u32 max_backlog; /* Max num of pending incoming connection requests */
enum ecore_tcp_ip_version ip_version;
u32 ip_addr[4];
u16 port;
u16 vlan;
};
struct ecore_iwarp_listen_out {
void *handle; /* to be sent to destroy */
};
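/* Illustrative sketch of the passive-side flow described above; the handler
 * and address variables ("my_"-prefixed) are hypothetical.
 *
 *	struct ecore_iwarp_listen_in in;
 *	struct ecore_iwarp_listen_out out;
 *
 *	OSAL_MEMSET(&in, 0, sizeof(in));
 *	in.event_cb = my_event_handler;		(delivers MPA_REQUEST etc.)
 *	in.cb_context = my_ctx;
 *	in.max_backlog = ECORE_IWARP_MAX_LIS_BACKLOG;
 *	in.ip_version = ECORE_TCP_IPV4;
 *	in.ip_addr[0] = my_ipv4_addr;
 *	in.port = my_port;
 *	rc = ecore_iwarp_create_listen(rdma_cxt, &in, &out);
 *	(later) ecore_iwarp_destroy_listen(rdma_cxt, out.handle);
 */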
struct ecore_iwarp_accept_in {
void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
void *cb_context; /* context to be passed to event_cb */
struct ecore_rdma_qp *qp;
const void *private_data;
u16 private_data_len;
u8 ord;
u8 ird;
};
struct ecore_iwarp_reject_in {
void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
void *cb_context; /* context to be passed to event_cb */
const void *private_data;
u16 private_data_len;
};
struct ecore_iwarp_send_rtr_in {
void *ep_context;
};
struct ecore_iwarp_tcp_abort_in {
void *ep_context;
};
#endif
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
struct ecore_iwarp_connect_in *iparams,
struct ecore_iwarp_connect_out *oparams);
enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
struct ecore_iwarp_listen_in *iparams,
struct ecore_iwarp_listen_out *oparams);
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
struct ecore_iwarp_accept_in *iparams);
enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
struct ecore_iwarp_reject_in *iparams);
enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);
enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);
enum _ecore_status_t
ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, bool pause, bool comp);
#endif /* CONFIG_ECORE_IWARP */
#endif

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -28,362 +28,68 @@
*
*/
#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__
#ifndef __ECORE_ROCE_H__
#define __ECORE_ROCE_H__
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_roce_api.h"
#include "ecore_dev_api.h"
/* Constants */
/* HW/FW RoCE Limitations (internal. For external see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY (1)
#define ECORE_RDMA_MAX_WQE (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define ECORE_RDMA_MAX_SRQS (32 * 1024) /* 32k */
/* Configurable */
/* Max CQE is derived from the u16/u32 index size, halved and decremented by 1
 * to handle wrap properly, and then decremented by 1 again. The latter
 * decrement comes from a requirement to create a chain that is bigger by one
 * than what the user requested:
 * The CQE size is 32 bytes but the FW writes in chunks of 64
 * bytes, for performance purposes. Allocating an extra entry and telling the
 * FW we have one less prevents overwriting the first entry in case of a wrap,
 * i.e. when the FW writes the last entry and the application hasn't read the
 * first one.
 */
#define ECORE_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
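/* Worked example (16-bit case): a u16 index spans 0x10000 entries; halved
 * gives 0x8000, minus 1 for wrap handling gives 0x7FFF, and minus 1 more
 * for the hidden extra entry yields ECORE_RDMA_MAX_CQE_16_BIT = 0x7FFE.
 */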
enum ecore_rdma_toggle_bit {
ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
ECORE_RDMA_TOGGLE_BIT_SET = 1
};
/* @@@TBD Currently we support only affiliated events
* enum ecore_rdma_unaffiliated_event_code {
* ECORE_RDMA_PORT_ACTIVE, // Link Up
* ECORE_RDMA_PORT_CHANGED, // SGID table has changed
* ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
* ECORE_RDMA_PORT_ERR, // Link down
* };
*/
#define QEDR_MAX_BMAP_NAME (10)
struct ecore_bmap {
u32 max_count;
unsigned long *bitmap;
char name[QEDR_MAX_BMAP_NAME];
};
#define ECORE_ROCE_QP_TO_ICID(qp_idx) ((qp_idx)*2)
#define ECORE_ROCE_ICID_TO_QP(icid) ((icid)/2)
/* Functions for enabling/disabling EDPM in RDMA PFs, according to the
 * existence of QPs during a DCBx update or the BAR size.
 */
void ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void
ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#ifdef CONFIG_ECORE_IWARP
void
ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#define ECORE_IWARP_PREALLOC_CNT (256)
enum _ecore_status_t
ecore_roce_dcqcn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_roce_dcqcn_params *params,
struct roce_init_func_ramrod_data *p_ramrod,
struct ecore_ptt *p_ptt);
#define ECORE_IWARP_LL2_SYN_TX_SIZE (128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE (256)
enum _ecore_status_t
ecore_roce_setup(struct ecore_hwfn *p_hwfn);
#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define ECORE_IWARP_LL2_OOO_DEF_RX_SIZE (4096)
#define ECORE_IWARP_LL2_OOO_MAX_RX_SIZE (16384)
enum _ecore_status_t
ecore_roce_stop_rl(struct ecore_hwfn *p_hwfn);
#define ECORE_IWARP_MAX_SYN_PKT_SIZE (128)
#define ECORE_IWARP_HANDLE_INVAL (0xff)
enum _ecore_status_t
ecore_roce_stop(struct ecore_hwfn *p_hwfn);
struct ecore_iwarp_ll2_buff {
struct ecore_iwarp_ll2_buff *piggy_buf;
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
enum _ecore_status_t
ecore_roce_query_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
struct ecore_rdma_query_qp_out_params *out_params);
struct ecore_iwarp_ll2_mpa_buf {
osal_list_entry_t list_entry;
struct ecore_iwarp_ll2_buff *ll2_buf;
struct unaligned_opaque_data data;
u16 tcp_payload_len;
u8 placement_offset;
};
enum _ecore_status_t
ecore_roce_destroy_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
struct ecore_rdma_destroy_qp_out_params *out_params);
/* In some cases an FPDU will arrive with only one byte of the header; in this
 * case fpdu_length will be partial (containing only the higher byte) and
 * incomplete_bytes will contain the invalid value below. */
#define ECORE_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
struct ecore_iwarp_fpdu {
struct ecore_iwarp_ll2_buff *mpa_buf;
dma_addr_t pkt_hdr;
u8 pkt_hdr_size;
dma_addr_t mpa_frag;
void *mpa_frag_virt;
u16 mpa_frag_len;
u16 fpdu_length;
u16 incomplete_bytes;
};
struct ecore_iwarp_info {
osal_list_t listen_list; /* ecore_iwarp_listener */
osal_list_t ep_list; /* ecore_iwarp_ep */
osal_list_t ep_free_list;/* pre-allocated ep's */
osal_list_t mpa_buf_list;/* list of mpa_bufs */
osal_list_t mpa_buf_pending_list;
osal_spinlock_t iw_lock;
osal_spinlock_t qp_lock; /* for teardown races */
struct iwarp_rxmit_stats_drv stats;
u32 rcv_wnd_scale;
u16 max_mtu;
u16 num_ooo_rx_bufs;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
u8 tcp_flags;
u8 ll2_syn_handle;
u8 ll2_ooo_handle;
u8 ll2_mpa_handle;
u8 peer2peer;
u8 _pad;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
struct ecore_iwarp_fpdu *partial_fpdus;
struct ecore_iwarp_ll2_mpa_buf *mpa_bufs;
u8 *mpa_intermediate_buf;
u16 max_num_partial_fpdus;
/* MPA statistics */
u64 unalign_rx_comp;
};
#endif
enum _ecore_status_t
ecore_roce_alloc_qp_idx(struct ecore_hwfn *p_hwfn,
u16 *qp_idx16);
#define IS_ECORE_DCQCN(p_hwfn) \
(!!(p_hwfn->pf_params.rdma_pf_params.enable_dcqcn))
struct ecore_roce_info {
struct roce_events_stats event_stats;
struct roce_dcqcn_received_stats dcqcn_rx_stats;
struct roce_dcqcn_sent_stats dcqcn_tx_stats;
u8 dcqcn_enabled;
u8 dcqcn_reaction_point;
};
struct ecore_rdma_info {
osal_spinlock_t lock;
enum _ecore_status_t
ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
enum ecore_roce_qp_state prev_state,
struct ecore_rdma_modify_qp_in_params *params);
struct ecore_bmap cq_map;
struct ecore_bmap pd_map;
struct ecore_bmap tid_map;
struct ecore_bmap srq_map;
struct ecore_bmap cid_map;
struct ecore_bmap tcp_cid_map;
struct ecore_bmap real_cid_map;
struct ecore_bmap dpi_map;
struct ecore_bmap toggle_bits;
struct ecore_rdma_events events;
struct ecore_rdma_device *dev;
struct ecore_rdma_port *port;
u32 last_tid;
u8 num_cnqs;
struct rdma_sent_stats rdma_sent_pstats;
struct rdma_rcv_stats rdma_rcv_tstats;
u32 num_qps;
u32 num_mrs;
u32 num_srqs;
u16 queue_zone_base;
u16 max_queue_zones;
enum protocol_type proto;
struct ecore_roce_info roce;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_info iwarp;
#endif
};
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_qp_state {
ECORE_IWARP_QP_STATE_IDLE,
ECORE_IWARP_QP_STATE_RTS,
ECORE_IWARP_QP_STATE_TERMINATE,
ECORE_IWARP_QP_STATE_CLOSING,
ECORE_IWARP_QP_STATE_ERROR,
};
#endif
struct ecore_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
u32 qpid; /* iwarp: may differ from icid */
u16 icid;
enum ecore_roce_qp_state cur_state;
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_qp_state iwarp_state;
#endif
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u16 pd; /* Protection domain */
u16 pkey; /* Primary P_key index */
u32 dest_qp;
u16 mtu;
u16 srq_id;
u8 traffic_class_tos; /* IPv6/GRH traffic class; IPv4 TOS */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u16 dpi;
u32 flow_label; /* ignored in IPv4 */
u16 vlan_id;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
enum roce_mode roce_mode;
u16 udp_src_port; /* RoCEv2 only */
u8 stats_queue;
/* requester */
u8 max_rd_atomic_req;
u32 sq_psn;
u16 sq_cq_id; /* The cq to be associated with the send queue */
u16 sq_num_pages;
dma_addr_t sq_pbl_ptr;
void *orq;
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id; /* The cq to be associated with the receive queue */
u16 rq_num_pages;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_ep *ep;
#endif
};
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_ep_state {
ECORE_IWARP_EP_INIT,
ECORE_IWARP_EP_MPA_REQ_RCVD,
ECORE_IWARP_EP_ESTABLISHED,
ECORE_IWARP_EP_CLOSED
};
union async_output {
struct iwarp_eqe_data_mpa_async_completion mpa_response;
struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
#define ECORE_MAX_PRIV_DATA_LEN (512)
struct ecore_iwarp_ep_memory {
u8 in_pdata[ECORE_MAX_PRIV_DATA_LEN];
u8 out_pdata[ECORE_MAX_PRIV_DATA_LEN];
union async_output async_output;
};
/* The endpoint structure represents a TCP connection. This connection can be
 * associated with a QP or not (in which case QP == NULL).
 */
struct ecore_iwarp_ep {
osal_list_entry_t list_entry;
int sig;
struct ecore_rdma_qp *qp;
enum ecore_iwarp_ep_state state;
/* This contains the entire buffer required for the ep memories. It is the
 * only one actually allocated and freed; the rest are pointers into
 * this buffer.
 */
struct ecore_iwarp_ep_memory *ep_buffer_virt;
dma_addr_t ep_buffer_phys;
struct ecore_iwarp_cm_info cm_info;
enum tcp_connect_mode connect_mode;
enum mpa_rtr_type rtr_type;
enum mpa_negotiation_mode mpa_rev;
u32 tcp_cid;
u32 cid;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
u16 mss;
bool mpa_reply_processed;
/* The event_cb function is called for asynchronous events associated
 * with the ep. It is initialized at different entry points depending
 * on whether the ep is the TCP connection's active side or passive side.
 * The cb_context is passed to the event_cb function.
 */
iwarp_event_handler event_cb;
void *cb_context;
/* For Passive side - SYN packet related data */
struct ecore_iwarp_ll2_buff *syn;
u16 syn_ip_payload_length;
dma_addr_t syn_phy_addr;
};
struct ecore_iwarp_listener {
osal_list_entry_t list_entry;
/* The event_cb function is called for connection requests.
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
u32 max_backlog;
u8 ip_version;
u32 ip_addr[4];
u16 port;
u16 vlan;
};
void ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
u8 fw_event_code,
struct regpair *fw_handle,
u8 fw_return_code);
#endif /* CONFIG_ECORE_IWARP */
void ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
u8 fw_event_code,
union rdma_eqe_data *rdma_data);
#endif /*__ECORE_RDMA_H__*/
#endif /*__ECORE_ROCE_H__*/

View file

@ -31,11 +31,10 @@
#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__
#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
#ifndef __EXTRACT__LINUX__
enum ecore_roce_ll2_tx_dest
{
@ -327,7 +326,6 @@ struct ecore_rdma_create_cq_in_params {
u16 int_timeout;
};
#endif
struct ecore_rdma_resize_cq_in_params {
/* input variables (given by miniport) */
@ -342,7 +340,6 @@ struct ecore_rdma_resize_cq_in_params {
*/
};
#ifndef __EXTRACT__LINUX__
enum roce_mode
{
@ -551,7 +548,6 @@ struct ecore_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
};
#endif
struct ecore_rdma_resize_cq_out_params {
/* output variables, provided to the upper layer */
@ -568,7 +564,6 @@ struct ecore_rdma_resize_cnq_in_params {
u64 pbl_ptr;
};
#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
u64 sent_bytes;
u64 sent_pkts;
@ -599,7 +594,6 @@ struct ecore_rdma_counters_out_params {
u64 tid_count;
u64 max_tid;
};
#endif
enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
@ -707,30 +701,16 @@ ecore_rdma_query_counters(void *rdma_cxt,
u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 ecore_rdma_query_cau_timer_res(void);
u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
struct ecore_rdma_create_srq_in_params *in_params,
struct ecore_rdma_create_srq_out_params *out_params);
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
struct ecore_rdma_destroy_srq_in_params *in_params);
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
struct ecore_rdma_modify_srq_in_params *in_params);
#ifdef CONFIG_ECORE_IWARP
/* iWARP API */
#ifndef __EXTRACT__LINUX__
enum ecore_iwarp_event_type {
ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
@ -854,7 +834,6 @@ struct ecore_iwarp_tcp_abort_in {
void *ep_context;
};
#endif
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,

View file

@ -51,424 +51,513 @@
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 1049
#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 1049
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 2073
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
#define CAU_REG_PI_MEMORY_RT_OFFSET 3097
#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 7513
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 7514
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 7515
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 7516
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 7517
#define PRS_REG_SEARCH_TCP_RT_OFFSET 7518
#define PRS_REG_SEARCH_FCOE_RT_OFFSET 7519
#define PRS_REG_SEARCH_ROCE_RT_OFFSET 7520
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 7521
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 7522
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 7523
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 7524
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 7525
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 7526
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 7527
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 7528
#define SRC_REG_FIRSTFREE_RT_OFFSET 7529
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
#define SRC_REG_FIRSTFREE_RT_SIZE 2
#define SRC_REG_LASTFREE_RT_OFFSET 7531
#define SRC_REG_LASTFREE_RT_OFFSET 6527
#define SRC_REG_LASTFREE_RT_SIZE 2
#define SRC_REG_COUNTFREE_RT_OFFSET 7533
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 7534
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 7535
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 7536
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 7537
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 7538
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 7539
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 7540
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 7541
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 7542
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 7543
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 7544
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 7545
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 7546
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 7547
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 7548
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 7549
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 7550
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 7551
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 7552
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7553
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7554
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7555
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 7556
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 7557
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 7558
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 7559
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 7560
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 7561
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 7562
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 7563
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 7564
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 7565
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 7566
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 29566
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 29567
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 29568
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 29569
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 29570
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 29571
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 29572
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 29573
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 29574
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 29575
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 29576
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 29577
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 29578
#define SRC_REG_COUNTFREE_RT_OFFSET 6529
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29994
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 30602
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 30603
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 30604
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 30605
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 30606
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 30607
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 30608
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 30609
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 30610
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 30611
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 30612
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 30613
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 30614
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 30615
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 30616
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 30617
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 30618
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 30619
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 30620
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 30621
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 30622
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 30623
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 30624
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 30625
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 30626
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 30627
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 30628
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 30629
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 30630
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 30631
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 30632
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 30633
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 30634
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 30635
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 30636
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 30637
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 30638
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 30639
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 30640
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 30641
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 30642
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 30643
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 30644
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 30645
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 30646
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 30647
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 30648
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 30649
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 30650
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 30651
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 30652
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 30653
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 30654
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 30655
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 30656
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 30657
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 30658
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 30659
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 30660
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 30661
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 30662
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 30663
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 30664
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 30665
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 30666
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 30667
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 30668
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 30669
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 30797
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 30798
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 30799
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 30800
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 30801
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 30802
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 30803
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 30804
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 30805
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 30806
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 30807
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 30808
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 30809
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 30810
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 30811
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 30812
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 30813
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 30814
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 30815
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 30816
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 30817
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 30818
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 30819
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 30820
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 30821
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 30822
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 30823
#define QM_REG_PQTX2PF_0_RT_OFFSET 30824
#define QM_REG_PQTX2PF_1_RT_OFFSET 30825
#define QM_REG_PQTX2PF_2_RT_OFFSET 30826
#define QM_REG_PQTX2PF_3_RT_OFFSET 30827
#define QM_REG_PQTX2PF_4_RT_OFFSET 30828
#define QM_REG_PQTX2PF_5_RT_OFFSET 30829
#define QM_REG_PQTX2PF_6_RT_OFFSET 30830
#define QM_REG_PQTX2PF_7_RT_OFFSET 30831
#define QM_REG_PQTX2PF_8_RT_OFFSET 30832
#define QM_REG_PQTX2PF_9_RT_OFFSET 30833
#define QM_REG_PQTX2PF_10_RT_OFFSET 30834
#define QM_REG_PQTX2PF_11_RT_OFFSET 30835
#define QM_REG_PQTX2PF_12_RT_OFFSET 30836
#define QM_REG_PQTX2PF_13_RT_OFFSET 30837
#define QM_REG_PQTX2PF_14_RT_OFFSET 30838
#define QM_REG_PQTX2PF_15_RT_OFFSET 30839
#define QM_REG_PQTX2PF_16_RT_OFFSET 30840
#define QM_REG_PQTX2PF_17_RT_OFFSET 30841
#define QM_REG_PQTX2PF_18_RT_OFFSET 30842
#define QM_REG_PQTX2PF_19_RT_OFFSET 30843
#define QM_REG_PQTX2PF_20_RT_OFFSET 30844
#define QM_REG_PQTX2PF_21_RT_OFFSET 30845
#define QM_REG_PQTX2PF_22_RT_OFFSET 30846
#define QM_REG_PQTX2PF_23_RT_OFFSET 30847
#define QM_REG_PQTX2PF_24_RT_OFFSET 30848
#define QM_REG_PQTX2PF_25_RT_OFFSET 30849
#define QM_REG_PQTX2PF_26_RT_OFFSET 30850
#define QM_REG_PQTX2PF_27_RT_OFFSET 30851
#define QM_REG_PQTX2PF_28_RT_OFFSET 30852
#define QM_REG_PQTX2PF_29_RT_OFFSET 30853
#define QM_REG_PQTX2PF_30_RT_OFFSET 30854
#define QM_REG_PQTX2PF_31_RT_OFFSET 30855
#define QM_REG_PQTX2PF_32_RT_OFFSET 30856
#define QM_REG_PQTX2PF_33_RT_OFFSET 30857
#define QM_REG_PQTX2PF_34_RT_OFFSET 30858
#define QM_REG_PQTX2PF_35_RT_OFFSET 30859
#define QM_REG_PQTX2PF_36_RT_OFFSET 30860
#define QM_REG_PQTX2PF_37_RT_OFFSET 30861
#define QM_REG_PQTX2PF_38_RT_OFFSET 30862
#define QM_REG_PQTX2PF_39_RT_OFFSET 30863
#define QM_REG_PQTX2PF_40_RT_OFFSET 30864
#define QM_REG_PQTX2PF_41_RT_OFFSET 30865
#define QM_REG_PQTX2PF_42_RT_OFFSET 30866
#define QM_REG_PQTX2PF_43_RT_OFFSET 30867
#define QM_REG_PQTX2PF_44_RT_OFFSET 30868
#define QM_REG_PQTX2PF_45_RT_OFFSET 30869
#define QM_REG_PQTX2PF_46_RT_OFFSET 30870
#define QM_REG_PQTX2PF_47_RT_OFFSET 30871
#define QM_REG_PQTX2PF_48_RT_OFFSET 30872
#define QM_REG_PQTX2PF_49_RT_OFFSET 30873
#define QM_REG_PQTX2PF_50_RT_OFFSET 30874
#define QM_REG_PQTX2PF_51_RT_OFFSET 30875
#define QM_REG_PQTX2PF_52_RT_OFFSET 30876
#define QM_REG_PQTX2PF_53_RT_OFFSET 30877
#define QM_REG_PQTX2PF_54_RT_OFFSET 30878
#define QM_REG_PQTX2PF_55_RT_OFFSET 30879
#define QM_REG_PQTX2PF_56_RT_OFFSET 30880
#define QM_REG_PQTX2PF_57_RT_OFFSET 30881
#define QM_REG_PQTX2PF_58_RT_OFFSET 30882
#define QM_REG_PQTX2PF_59_RT_OFFSET 30883
#define QM_REG_PQTX2PF_60_RT_OFFSET 30884
#define QM_REG_PQTX2PF_61_RT_OFFSET 30885
#define QM_REG_PQTX2PF_62_RT_OFFSET 30886
#define QM_REG_PQTX2PF_63_RT_OFFSET 30887
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30888
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30889
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30890
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30891
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30892
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30893
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30894
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30895
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30896
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30897
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30898
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30899
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30900
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30901
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30902
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30903
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30904
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30905
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30906
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30907
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30908
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30909
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30910
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30911
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30912
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30913
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30914
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30915
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30916
#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
#define QM_REG_PTRTBLOTHER_RT_SIZE 256
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 31172
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
#define QM_REG_RLGLBLCRD_RT_OFFSET 31428
#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
#define QM_REG_RLGLBLCRD_RT_SIZE 256
#define QM_REG_RLGLBLENABLE_RT_OFFSET 31684
#define QM_REG_RLPFPERIOD_RT_OFFSET 31685
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 31686
#define QM_REG_RLPFINCVAL_RT_OFFSET 31687
#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
#define QM_REG_RLPFINCVAL_RT_SIZE 16
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 31703
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
#define QM_REG_RLPFCRD_RT_OFFSET 31719
#define QM_REG_RLPFCRD_RT_OFFSET 35389
#define QM_REG_RLPFCRD_RT_SIZE 16
#define QM_REG_RLPFENABLE_RT_OFFSET 31735
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 31736
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 31737
#define QM_REG_RLPFENABLE_RT_OFFSET 35405
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 31753
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
#define QM_REG_WFQPFCRD_RT_OFFSET 31769
#define QM_REG_WFQPFCRD_RT_OFFSET 35439
#define QM_REG_WFQPFCRD_RT_SIZE 256
#define QM_REG_WFQPFENABLE_RT_OFFSET 32025
#define QM_REG_WFQVPENABLE_RT_OFFSET 32026
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 32027
#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
#define QM_REG_TXPQMAP_RT_OFFSET 32539
#define QM_REG_TXPQMAP_RT_OFFSET 36209
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 33051
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
#define QM_REG_WFQVPCRD_RT_OFFSET 33563
#define QM_REG_WFQVPCRD_RT_OFFSET 37233
#define QM_REG_WFQVPCRD_RT_SIZE 512
#define QM_REG_WFQVPMAP_RT_OFFSET 34075
#define QM_REG_WFQVPMAP_RT_OFFSET 37745
#define QM_REG_WFQVPMAP_RT_SIZE 512
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34587
#define QM_REG_PTRTBLTX_RT_OFFSET 38257
#define QM_REG_PTRTBLTX_RT_SIZE 1024
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
#define QM_REG_VOQCRDLINE_RT_OFFSET 34907
#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
#define QM_REG_VOQCRDLINE_RT_SIZE 36
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34943
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34979
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34980
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34981
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34982
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34983
#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34984
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34985
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34986
#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34990
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34994
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34998
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34999
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 35031
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 35047
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 35063
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 35079
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 35095
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 35096
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 35097
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 35098
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 35099
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 35100
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 35101
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 35102
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 35103
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 35104
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 35105
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 35106
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 35107
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 35108
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 35109
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 35110
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 35111
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 35112
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 35113
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 35114
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 35115
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 35116
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 35117
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 35118
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 35119
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 35120
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 35121
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 35122
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 35123
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 35124
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 35125
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 35126
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 35127
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 35128
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 35129
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 35130
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 35131
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 35132
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 35133
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 35134
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 35135
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 35136
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 35137
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 35138
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 35139
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 35140
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 35141
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 35142
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 35143
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 35144
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 35145
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 35146
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 35147
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 35148
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 35149
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 35150
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 35151
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 35152
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 35153
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 35154
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 35155
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 35156
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 35157
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 35158
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 35159
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 35160
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 35161
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 35162
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 35163
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 35164
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 35165
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 35166
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 35167
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 35168
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 35169
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 35170
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 35171
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 35172
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
#define RUNTIME_ARRAY_SIZE 35173
#define RUNTIME_ARRAY_SIZE 43023
/* Init Callbacks */
#define DMAE_READY_CB 0
#endif /* __RT_DEFS_H__ */

View file

@ -48,6 +48,9 @@ __FBSDID("$FreeBSD$");
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#ifndef LINUX_REMOVE
#include "ecore_tcp_ip.h"
#endif
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
@@ -102,9 +105,9 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
"Initialized: CID %08x cmd %02x protocol %02x data_addr %llx comp_mode [%s]\n",
opaque_cid, cmd, protocol,
(unsigned long)&p_ent->ramrod,
(unsigned long long)(osal_uintptr_t)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
@@ -318,10 +321,9 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
&p_tun->ip_gre);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
@@ -331,6 +333,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
u8 i;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
@@ -359,19 +362,35 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
switch (mode) {
case ECORE_MF_DEFAULT:
case ECORE_MF_NPAR:
p_ramrod->mf_mode = MF_NPAR;
break;
case ECORE_MF_OVLAN:
if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
p_ramrod->mf_mode = MF_OVLAN;
break;
default:
DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n");
else
p_ramrod->mf_mode = MF_NPAR;
p_ramrod->outer_tag_config.outer_tag.tci =
OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits))
p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
&p_hwfn->p_dev->mf_bits)) {
p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
}
p_ramrod->outer_tag_config.pri_map_valid = 1;
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
/* enable_stag_pri_change should be set if port is in BD mode or,
* UFP with Host Control mode or, UFP with DCB over base interface.
*/
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
(p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
else
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
}
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -384,7 +403,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
&p_hwfn->p_dev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
@@ -421,8 +441,9 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, p_ramrod->outer_tag);
"Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
p_ramrod->outer_tag_config.outer_tag.tci);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
@@ -457,6 +478,57 @@ enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_UNKNOWN) {
DP_INFO(p_hwfn, "Invalid priority type %d\n",
p_hwfn->ufp_info.pri_type);
return ECORE_INVAL;
}
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_CB;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
(p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
else
p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
/* FW uses 1/64k to express gd */
#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))
static u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
{
return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
}
static u16 ecore_sp_rl_gd_denom(u32 gd)
{
return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
}
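The two helpers above encode the unit conversions noted in the comments: rate parameters arrive from the caller in Mbps while the QM rate limiter counts in 1.6 Mbps ticks, and the DCQCN alpha gain arrives as a denominator gd that FW expects as 64K/gd. A minimal standalone sketch of the arithmetic follows; the typedefs and sample values are illustrative only, not taken from the driver.

#include <stdio.h>

typedef unsigned short u16;
typedef unsigned int u32;

/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
/* FW uses 1/64k to express gd */
#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))

int main(void)
{
	u32 rl_max_rate = 25000; /* requested cap: 25 Gbps, given in Mbps */
	u32 dcqcn_gd = 1024;     /* alpha update gain denominator */

	/* 25000 Mbps / 1.6 Mbps-per-tick = 15625 QM units */
	printf("qm rate units: %u\n", (u16)QM_RL_RESOLUTION(rl_max_rate));
	/* 64K / 1024 = 64, the value programmed to FW */
	printf("fw gd value:   %u\n", (u16)FW_GD_RESOLUTION(dcqcn_gd));
	return 0;
}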
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params)
{
@@ -488,15 +560,24 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
rl_update->rl_max_rate = OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
rl_update->rl_r_ai = OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
rl_update->rl_r_hai = OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
rl_update->dcqcn_g = OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
params->dcqcn_timeuot_us);
rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
rl_update->qcn_update_param_flg, rl_update->dcqcn_update_param_flg,
rl_update->rl_init_flg, rl_update->rl_start_flg,
rl_update->rl_stop_flg, rl_update->rl_id_first,
rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
rl_update->rl_bc_rate, rl_update->rl_max_rate,
rl_update->rl_r_ai, rl_update->rl_r_hai,
rl_update->dcqcn_g, rl_update->dcqcn_k_us,
rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

View file

@@ -84,7 +84,6 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
*
@@ -94,7 +93,6 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
@@ -148,10 +146,10 @@ struct ecore_rl_update_params {
u8 rl_id_last;
u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
u16 rl_r_ai; /* Active increase rate */
u16 rl_r_hai; /* Hyper active increase rate */
u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
u32 rl_max_rate; /* Maximum rate in Mbps resolution */
u32 rl_r_ai; /* Active increase rate */
u32 rl_r_hai; /* Hyper active increase rate */
u32 dcqcn_gd; /* DCQCN Alpha update gain */
u32 dcqcn_k_us; /* DCQCN Alpha update interval */
u32 dcqcn_timeuot_us;
u32 qcn_timeuot_us;
@@ -178,4 +176,13 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn);
#endif /*__ECORE_SP_COMMANDS_H__*/

View file

@@ -44,8 +44,8 @@ __FBSDID("$FreeBSD$");
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
@@ -54,6 +54,12 @@ __FBSDID("$FreeBSD$");
#include "ecore_ooo.h"
#endif
#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif
/***************************************************************************
* Structures & Definitions
***************************************************************************/
@@ -62,7 +68,7 @@ __FBSDID("$FreeBSD$");
#define SPQ_BLOCK_DELAY_MAX_ITER (10)
#define SPQ_BLOCK_DELAY_US (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
#define SPQ_BLOCK_SLEEP_MAX_ITER (200)
#define SPQ_BLOCK_SLEEP_MS (5)
#ifndef REMOVE_DBG
@@ -89,7 +95,7 @@ static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
p_hwfn->port_id, p_eqe->opcode,
OSAL_LE16_TO_CPU(p_eqe->echo),
p_eqe->fw_return_code,
OSAL_LE32_TO_CPU(p_eqe->data.iscsi_info.cid),
OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.icid),
OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
p_eqe->data.iscsi_info.error_code);
break;
@@ -142,6 +148,10 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
: SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
iter_cnt *= 5;
#endif
while (iter_cnt--) {
OSAL_POLL_MODE_DPC(p_hwfn);
@@ -185,13 +195,12 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_NOTICE(p_hwfn, true, "ptt, failed to acquire\n");
if (!p_ptt)
return ECORE_AGAIN;
}
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = ecore_mcp_drain(p_hwfn, p_ptt);
ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;
@@ -200,20 +209,15 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
/* Retry after drain */
rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (rc == ECORE_SUCCESS)
goto out;
return ECORE_SUCCESS;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
out:
ecore_ptt_release(p_hwfn, p_ptt);
return ECORE_SUCCESS;
err:
ecore_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn, true,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
@@ -317,9 +321,9 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent)
{
struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
struct core_db_data *p_db_data = &p_spq->db_data;
u16 echo = ecore_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
struct core_db_data db;
p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
elem = ecore_chain_produce(p_chain);
@@ -328,28 +332,23 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
*elem = p_ent->elem; /* struct assignment */
*elem = p_ent->elem; /* Struct assignment */
/* send a doorbell on the slow hwfn session */
OSAL_MEMSET(&db, 0, sizeof(db));
SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
p_db_data->spq_prod =
OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
/* make sure the SPQE is updated before the doorbell */
/* Make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
/* make sure doorbell is rang */
/* Make sure doorbell was rung */
OSAL_WMB(p_hwfn->p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
db.agg_flags, ecore_chain_get_prod_idx(p_chain));
p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
return ECORE_SUCCESS;
}
@@ -362,50 +361,46 @@ static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
#ifdef CONFIG_ECORE_ROCE
case PROTOCOLID_ROCE:
{
ecore_roce_async_event(p_hwfn,
p_eqe->opcode,
&p_eqe->data.rdma_data);
return ECORE_SUCCESS;
}
#ifdef CONFIG_ECORE_IWARP
case PROTOCOLID_IWARP:
{
ecore_iwarp_async_event(p_hwfn,
p_eqe->opcode,
&p_eqe->data.rdma_data.async_handle,
p_eqe->fw_return_code);
return ECORE_SUCCESS;
}
#endif
#endif
case PROTOCOLID_COMMON:
return ecore_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo,
&p_eqe->data);
#ifdef CONFIG_ECORE_ISCSI
case PROTOCOLID_ISCSI:
if (p_hwfn->p_iscsi_info->event_cb != OSAL_NULL) {
struct ecore_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
ecore_spq_async_comp_cb cb;
return p_iscsi->event_cb(p_iscsi->event_context,
p_eqe->opcode, &p_eqe->data);
} else {
DP_NOTICE(p_hwfn,
false, "iSCSI async completion is not set\n");
return ECORE_NOTIMPL;
}
#endif
default:
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) {
return ECORE_INVAL;
}
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
if (cb) {
return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
&p_eqe->data, p_eqe->fw_return_code);
} else {
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return ECORE_INVAL;
}
}
enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
enum protocol_type protocol_id,
ecore_spq_async_comp_cb cb)
{
if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
return ECORE_INVAL;
}
p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
return ECORE_SUCCESS;
}
void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
enum protocol_type protocol_id)
{
if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
return;
}
p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}
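The two functions above complete the move from a hard-coded protocol switch to a per-protocol dispatch table indexed by protocol_id. A hedged sketch of how a protocol module would hook in; the handler name and body are illustrative, not part of this commit.

static enum _ecore_status_t
qlnx_roce_async_comp_cb(struct ecore_hwfn *p_hwfn, u8 opcode, u16 echo,
			union event_ring_data *data, u8 fw_return_code)
{
	/* A real handler would switch on the EQE opcode and decode
	 * data->rdma_data before returning.
	 */
	return ECORE_SUCCESS;
}

/* Register during protocol init, once the SPQ has been allocated: */
rc = ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
				 qlnx_roce_async_comp_cb);

/* ... and drop the hook on teardown: */
ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);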
/***************************************************************************
@@ -485,6 +480,11 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
/* Attempt to post pending requests */
OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
rc = ecore_spq_pend_post(p_hwfn);
OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
return rc;
}
@@ -495,7 +495,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
/* Allocate EQ struct */
p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_eq'\n");
return ECORE_NOMEM;
}
@@ -508,7 +508,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
num_elem,
sizeof(union event_ring_element),
&p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
@@ -579,8 +579,11 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_virt = OSAL_NULL;
struct core_db_data *p_db_data;
void OSAL_IOMEM *db_addr;
dma_addr_t p_phys = 0;
u32 i, capacity;
enum _ecore_status_t rc;
OSAL_LIST_INIT(&p_spq->pending);
OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -618,6 +621,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
/* reset the chain itself */
ecore_chain_reset(&p_spq->chain);
/* Initialize the address/data of the SPQ doorbell */
p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
p_db_data = &p_spq->db_data;
OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
/* Register the SPQ doorbell with the doorbell recovery mechanism */
db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
DB_REC_WIDTH_32B, DB_REC_KERNEL);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -631,7 +652,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_spq'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
return ECORE_NOMEM;
}
@@ -643,7 +664,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain, OSAL_NULL)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -660,7 +681,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_phys = p_phys;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
goto spq_allocate_fail;
#endif
p_hwfn->p_spq = p_spq;
@@ -675,11 +697,16 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
void OSAL_IOMEM *db_addr;
u32 capacity;
if (!p_spq)
return;
/* Delete the SPQ doorbell from the doorbell recovery mechanism */
db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
if (p_spq->p_virt) {
capacity = ecore_chain_get_capacity(&p_spq->chain);
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -711,7 +738,7 @@ enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
DP_NOTICE(p_hwfn, true, "Failed to allocate an SPQ entry for a pending ramrod\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = ECORE_NOMEM;
goto out_unlock;
}
@@ -868,7 +895,7 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
@@ -1000,7 +1027,6 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_spq_entry *tmp;
struct ecore_spq_entry *found = OSAL_NULL;
enum _ecore_status_t rc;
if (!p_hwfn) {
return ECORE_INVAL;
@@ -1080,12 +1106,7 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
*/
ecore_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
OSAL_SPIN_LOCK(&p_spq->lock);
rc = ecore_spq_pend_post(p_hwfn);
OSAL_SPIN_UNLOCK(&p_spq->lock);
return rc;
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
@@ -1095,7 +1116,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate ConsQ struct */
p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_consq'\n");
return ECORE_NOMEM;
}
@@ -1108,7 +1129,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_PAGE_SIZE/0x80,
0x80,
&p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
@@ -1135,3 +1156,7 @@ void ecore_consq_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
p_hwfn->p_consq = OSAL_NULL;
}
#ifdef _NTDDK_
#pragma warning(pop)
#endif

View file

@@ -102,6 +102,7 @@ union ramrod_data
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
struct iscsi_spe_conn_statistics iscsi_conn_statistics;
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
@@ -155,6 +156,22 @@ struct ecore_consq {
struct ecore_chain chain;
};
typedef enum _ecore_status_t
(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
u8 opcode,
u16 echo,
union event_ring_data *data,
u8 fw_return_code);
enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
enum protocol_type protocol_id,
ecore_spq_async_comp_cb cb);
void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
enum protocol_type protocol_id);
struct ecore_spq {
osal_spinlock_t lock;
@@ -194,6 +211,10 @@ struct ecore_spq {
u32 comp_count;
u32 cid;
u32 db_addr_offset;
struct core_db_data db_data;
ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
struct ecore_port;
@@ -348,5 +369,5 @@ void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
*/
void ecore_consq_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_SPQ_H__ */

File diff suppressed because it is too large

View file

@@ -139,6 +139,11 @@ struct ecore_vf_info {
struct ecore_bulletin bulletin;
dma_addr_t vf_bulletin;
#ifdef CONFIG_ECORE_SW_CHANNEL
/* Determine whether PF communicate with VF using HW/SW channel */
bool b_hw_channel;
#endif
/* PF saves a copy of the last VF acquire message */
struct vfpf_acquire_tlv acquire;
@@ -275,19 +280,6 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn);
*/
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
/**
* @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*/
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data);
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
@@ -327,13 +319,12 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
bool b_enabled_only);
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED **offset, OSAL_UNUSED u16 type, OSAL_UNUSED u16 length) {return OSAL_NULL;}
static OSAL_INLINE void *ecore_add_tlv(u8 OSAL_UNUSED **offset, OSAL_UNUSED u16 type, OSAL_UNUSED u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev OSAL_UNUSED *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED opcode, __le16 OSAL_UNUSED echo, union event_ring_data OSAL_UNUSED *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 OSAL_UNUSED crc, u8 OSAL_UNUSED *ptr, u32 OSAL_UNUSED length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u32 OSAL_UNUSED *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *p_tlvs_list, u16 OSAL_UNUSED req_type) {return OSAL_NULL;}

View file

@@ -0,0 +1,140 @@
/*
* Copyright (c) 2018-2019 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __ECORE_TCP_IP_H
#define __ECORE_TCP_IP_H
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_HLEN 14 /* Total octets in header. */
#define VLAN_HLEN 4 /* additional bytes required by VLAN */
#define MAX_VLAN_PRIO 7 /* Max vlan priority value in 802.1Q tag */
#define MAX_DSCP 63 /* Max DSCP value in IP header */
#define IPPROTO_TCP 6
#ifndef htonl
#define htonl(val) OSAL_CPU_TO_BE32(val)
#endif
#ifndef ntohl
#define ntohl(val) OSAL_BE32_TO_CPU(val)
#endif
#ifndef htons
#define htons(val) OSAL_CPU_TO_BE16(val)
#endif
#ifndef ntohs
#define ntohs(val) OSAL_BE16_TO_CPU(val)
#endif
struct ecore_ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
u16 h_proto; /* packet type ID field */
};
struct ecore_iphdr {
u8 ihl:4,
version:4;
u8 tos;
u16 tot_len;
u16 id;
u16 frag_off;
u8 ttl;
u8 protocol;
u16 check;
u32 saddr;
u32 daddr;
/*The options start here. */
};
struct ecore_vlan_ethhdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
u16 h_vlan_proto;
u16 h_vlan_TCI;
u16 h_vlan_encapsulated_proto;
};
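These definitions let ecore parse raw frames without pulling in OS network headers. A minimal sketch of their intended use, assuming only the macros and structs in this header; the helper name is illustrative.

/* Return the VLAN ID if the frame carries an 802.1Q/802.1ad tag, else -1. */
static int qlnx_frame_vlan_id(const void *frame)
{
	const struct ecore_ethhdr *eth = (const struct ecore_ethhdr *)frame;
	u16 proto = ntohs(eth->h_proto);

	if ((proto == ETH_P_8021Q) || (proto == ETH_P_8021AD)) {
		const struct ecore_vlan_ethhdr *veth =
		    (const struct ecore_vlan_ethhdr *)frame;

		return ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK;
	}
	return -1;
}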
struct ecore_in6_addr {
union {
u8 u6_addr8[16];
u16 u6_addr16[8];
u32 u6_addr32[4];
} in6_u;
};
struct ecore_ipv6hdr {
u8 priority:4,
version:4;
u8 flow_lbl[3];
u16 payload_len;
u8 nexthdr;
u8 hop_limit;
struct ecore_in6_addr saddr;
struct ecore_in6_addr daddr;
};
struct ecore_tcphdr {
u16 source;
u16 dest;
u32 seq;
u32 ack_seq;
u16 res1:4,
doff:4,
fin:1,
syn:1,
rst:1,
psh:1,
ack:1,
urg:1,
ece:1,
cwr:1;
u16 window;
u16 check;
u16 urg_ptr;
};
enum {
INET_ECN_NOT_ECT = 0,
INET_ECN_ECT_1 = 1,
INET_ECN_ECT_0 = 2,
INET_ECN_CE = 3,
INET_ECN_MASK = 3,
};
#endif

View file

@@ -56,4 +56,8 @@
#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
#ifndef USHRT_MAX
#define USHRT_MAX ((u16)(~0U))
#endif
#endif

File diff suppressed because it is too large

View file

@@ -37,9 +37,10 @@
#include "ecore_vfpf_if.h"
/* Default number of CIDs [total of both Rx and Tx] to be requested
* by default.
* by default, and maximum possible number.
*/
#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32)
#define ECORE_ETH_VF_MAX_NUM_CIDS (255)
/* This data is held in the ecore_hwfn structure for VFs only. */
struct ecore_vf_iov {
@@ -71,25 +72,39 @@ struct ecore_vf_iov {
* compatibility [with older PFs] we'd still need to store these.
*/
struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
#ifdef CONFIG_ECORE_SW_CHANNEL
/* Would be set if the VF is to try communicating with its PF
* using a hw channel.
*/
bool b_hw_channel;
#endif
/* Determines whether VF utilizes doorbells via limited register
* bar or via the doorbell bar.
*/
bool b_doorbell_bar;
};
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
/**
* @brief VF - Get coalesce per VF's relative queue.
*
* @param p_hwfn
* @param p_coal - coalesce value in micro second for VF queues.
* @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
u16 *p_coal,
struct ecore_queue_cid *p_cid);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
* Coalesce value '0' will omit the configuration.
* Coalesce value '0' will omit the configuration.
*
* @param p_hwfn
* @param rx_coal - coalesce value in micro second for rx queue
* @param tx_coal - coalesce value in micro second for tx queue
* @param queue_cid
* @param p_hwfn
* @param rx_coal - coalesce value in micro second for rx queue
* @param tx_coal - coalesce value in micro second for tx queue
* @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
@@ -254,6 +269,7 @@ void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
* @param tpa_mode
* @param max_buffers_per_cqe,
* @param only_untagged - default behavior regarding vlan acceptance
* @param zero_placement_offset - if set, zero padding will be inserted
*
* @return enum _ecore_status
*/
@@ -264,7 +280,8 @@ enum _ecore_status_t ecore_vf_pf_vport_start(
u8 inner_vlan_removal,
enum ecore_tpa_mode tpa_mode,
u8 max_buffers_per_cqe,
u8 only_untagged);
u8 only_untagged,
u8 zero_placement_offset);
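For reference, a hedged call sketch with the new trailing argument; the values and the ECORE_TPA_MODE_NONE symbol are illustrative assumptions, not mandated by this commit.

rc = ecore_vf_pf_vport_start(p_hwfn,
			     0,			/* vport_id */
			     1500,		/* mtu */
			     1,			/* inner_vlan_removal */
			     ECORE_TPA_MODE_NONE,	/* tpa_mode */
			     1,			/* max_buffers_per_cqe */
			     1,			/* only_untagged */
			     0);		/* zero_placement_offset: keep default padding */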
/**
* @brief ecore_vf_pf_vport_stop - stop the VF's vport
@@ -321,6 +338,8 @@ enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn);
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id);
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, u16 OSAL_UNUSED bd_max_bytes, dma_addr_t OSAL_UNUSED bd_chain_phys_addr, dma_addr_t OSAL_UNUSED cqe_pbl_addr, u16 OSAL_UNUSED cqe_pbl_size, void OSAL_IOMEM OSAL_UNUSED **pp_prod) {return ECORE_INVAL;}
@@ -337,16 +356,23 @@ static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn OS
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id, struct ecore_sb_info OSAL_UNUSED *p_sb) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vport_id, u16 OSAL_UNUSED mtu, u8 OSAL_UNUSED inner_vlan_removal, enum ecore_tpa_mode OSAL_UNUSED tpa_mode, u8 OSAL_UNUSED max_buffers_per_cqe, u8 OSAL_UNUSED only_untagged) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vport_id, u16 OSAL_UNUSED mtu, u8 OSAL_UNUSED inner_vlan_removal, enum ecore_tpa_mode OSAL_UNUSED tpa_mode, u8 OSAL_UNUSED max_buffers_per_cqe, u8 OSAL_UNUSED only_untagged, u8 OSAL_UNUSED zero_placement_offset) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_filter_ucast OSAL_UNUSED *p_param) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_filter_mcast OSAL_UNUSED *p_filter_cmd) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_params OSAL_UNUSED *p_params, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_state OSAL_UNUSED *p_link, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_link_caps, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_mcp_link_params OSAL_UNUSED *p_params, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_mcp_link_state OSAL_UNUSED *p_link, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities OSAL_UNUSED *p_link_caps, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_tunnel_param_update(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_tunnel_info OSAL_UNUSED *p_tunn) { return ECORE_INVAL; }
static OSAL_INLINE void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info OSAL_UNUSED *p_tun) { return; }
static OSAL_INLINE u32
ecore_vf_hw_bar_size(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
enum BAR_ID OSAL_UNUSED bar_id)
{
return 0;
}
#endif
#endif /* __ECORE_VF_H__ */

View file

@@ -91,6 +91,14 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
u8 *num_txqs);
/**
* @brief Get number of available connections [both Rx and Tx] for VF
*
* @param p_hwfn
* @param num_cids - allocated number of connections
*/
void ecore_vf_get_num_cids(struct ecore_hwfn *p_hwfn, u8 *num_cids);
/**
* @brief Get port mac address for VF
*
@@ -181,6 +189,19 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_eng);
void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
u16 *p_vxlan_port, u16 *p_geneve_port);
#ifdef CONFIG_ECORE_SW_CHANNEL
/**
* @brief set the VF to use a SW/HW channel when communicating with PF.
* NOTICE: today the likely first place to call this from VF
* would be OSAL_VF_FILL_ACQUIRE_RESC_REQ(); Might want to consider
* something a bit more appropriate.
*
* @param p_hwfn
* @param b_is_hw - true iff VF is to use a HW-channel
*/
void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw);
#endif
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *p_change) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_params OSAL_UNUSED *params) {}
@@ -188,6 +209,13 @@ static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn OSAL_UNUSED *p
static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_link_caps) {}
static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_rxqs) {}
static OSAL_INLINE void ecore_vf_get_num_txqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_txqs) {}
static OSAL_INLINE void
ecore_vf_get_num_cids(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
u8 OSAL_UNUSED *num_cids)
{
}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *port_mac) {}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_vlan_filters) {}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_mac_filters) {}
@@ -198,5 +226,11 @@ static OSAL_INLINE bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn OSAL_UNUSED *p
#endif
static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED *fw_major, u16 OSAL_UNUSED *fw_minor, u16 OSAL_UNUSED *fw_rev, u16 OSAL_UNUSED *fw_eng) {}
static OSAL_INLINE void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED *p_vxlan_port, u16 OSAL_UNUSED *p_geneve_port) { return; }
#ifdef CONFIG_ECORE_SW_CHANNEL
static OSAL_INLINE void
ecore_vf_set_hw_channel(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
bool OSAL_UNUSED b_is_hw) {}
#endif
#endif
#endif

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,8 +35,10 @@
#define T_ETH_INDIRECTION_TABLE_SIZE 128 /* @@@ TBD MichalK this should be HSI? */
#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6 /* @@@ TBD MichalK - should this be defined here?*/
#endif
#endif
/***********************************************
*
@@ -125,6 +127,12 @@ struct vfpf_acquire_tlv {
* this, and use the legacy CID scheme.
*/
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2)
/* The VF is using the physical bar. While this is mostly internal
* to the VF, might affect the number of CIDs supported assuming
* QUEUE_QIDS is set.
*/
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR (1 << 3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
@@ -214,7 +222,8 @@ struct pfvf_acquire_resp_tlv {
u16 chip_rev;
u8 dev_type;
u8 padding;
/* Doorbell bar size configured in HW: log(size) or 0 */
u8 bar_size;
struct pfvf_stats_info stats_info;
@@ -385,7 +394,8 @@ struct vfpf_vport_start_tlv {
u8 only_untagged;
u8 max_buffers_per_cqe;
u8 padding[4];
u8 zero_placement_offset;
u8 padding[3];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
@@ -413,7 +423,13 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
u64 bins[8];
/* This was a mistake; There are only 256 approx bins,
* and in HSI they're divided into 32-bit values.
* As old VFs used to set-bit to the values on its side,
* the upper half of the array is never expected to contain any data.
*/
u64 bins[4];
u64 obsolete_bins[4];
};
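Given the layout above, only bins[0..3] (256 bits in total) are meaningful on the wire. A hedged sketch of setting a bin for a multicast MAC; deriving the bin from ecore_crc32() is an assumption made here only to show the indexing.

u32 bin = ecore_crc32(0, (u8 *)mac, ETH_ALEN) & 0xff; /* one of 256 approx bins, hash choice assumed */

/* Set the bit in the lower half; obsolete_bins[] stays zero. */
p_mcast->bins[bin / 64] |= 1ULL << (bin % 64);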
struct vfpf_vport_update_accept_param_tlv {
@@ -527,6 +543,19 @@ struct vfpf_update_coalesce {
u8 padding[2];
};
struct vfpf_read_coal_req_tlv {
struct vfpf_first_tlv first_tlv;
u16 qid;
u8 is_rx;
u8 padding[5];
};
struct pfvf_read_coal_resp_tlv {
struct pfvf_tlv hdr;
u16 coal;
u8 padding[6];
};
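These two TLVs carry the request and response of the new coalesce-read channel message, surfaced to drivers through ecore_vf_pf_get_coalesce(). A short hedged usage sketch; p_rx_cid is an illustrative queue cid.

u16 rx_coal_us = 0;

rc = ecore_vf_pf_get_coalesce(p_hwfn, &rx_coal_us, p_rx_cid);
if (rc == ECORE_SUCCESS)
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Rx coalescing is %u usec\n", rx_coal_us);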
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -540,6 +569,7 @@ union vfpf_tlvs {
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -549,6 +579,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
struct pfvf_read_coal_resp_tlv read_coal_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -668,6 +699,7 @@ enum {
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.

View file

@@ -101,6 +101,9 @@
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 /* Number of etherType values configured by the driver for control frame check */
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/*
@@ -212,15 +215,6 @@ struct eth_edpm_fw_data
};
/*
* FW debug.
*/
struct eth_fast_path_cqe_fw_debug
{
__le16 reserved2 /* FW reserved. */;
};
/*
* tunneling parsing flags
*/
@@ -260,7 +254,7 @@ struct eth_pmd_flow_flags
*/
struct eth_fast_path_rx_reg_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum eth_rx_cqe_type) */;
u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 /* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -276,9 +270,9 @@ struct eth_fast_path_rx_reg_cqe
u8 placement_offset /* Offset of placement from BD start */;
struct eth_tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */;
u8 bd_num /* Number of BDs, used for packet */;
u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
u8 reserved1[3];
u8 reserved;
__le16 flow_id /* aRFS flow ID. */;
u8 reserved1[11];
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
@@ -288,7 +282,7 @@ struct eth_fast_path_rx_reg_cqe
*/
struct eth_fast_path_rx_tpa_cont_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum eth_rx_cqe_type) */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* List of the segment sizes */;
u8 reserved;
@@ -304,7 +298,7 @@ struct eth_fast_path_rx_tpa_cont_cqe
*/
struct eth_fast_path_rx_tpa_end_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum eth_rx_cqe_type) */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 total_packet_len /* Total aggregated packet length */;
u8 num_of_bds /* Total number of BDs comprising the packet */;
@@ -324,7 +318,7 @@ struct eth_fast_path_rx_tpa_end_cqe
*/
struct eth_fast_path_rx_tpa_start_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum eth_rx_cqe_type) */;
u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 /* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -342,7 +336,7 @@ struct eth_fast_path_rx_tpa_start_cqe
u8 tpa_agg_index /* TPA aggregation index */;
u8 header_len /* Packet L2+L3+L4 header length */;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE] /* Additional BDs length list. */;
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
__le16 flow_id /* aRFS flow ID. */;
u8 reserved;
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
@@ -371,7 +365,7 @@ struct eth_rx_bd
*/
struct eth_slow_path_rx_cqe
{
u8 type /* CQE type */;
u8 type /* CQE type (use enum eth_rx_cqe_type) */;
u8 ramrod_cmd_id;
u8 error_flag;
u8 reserved[25];
@@ -586,4 +580,20 @@ struct eth_db_data
__le16 bd_prod;
};
/*
* RSS hash type
*/
enum rss_hash_type
{
RSS_HASH_TYPE_DEFAULT=0,
RSS_HASH_TYPE_IPV4=1,
RSS_HASH_TYPE_TCP_IPV4=2,
RSS_HASH_TYPE_IPV6=3,
RSS_HASH_TYPE_TCP_IPV6=4,
RSS_HASH_TYPE_UDP_IPV4=5,
RSS_HASH_TYPE_UDP_IPV6=6,
MAX_RSS_HASH_TYPE
};
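The enum names the 3-bit hash-type field carried in the Rx CQEs above. A minimal extraction sketch using the mask/shift pairs from this header; the helper name is illustrative.

/* Pull the RSS hash type out of a regular fast-path Rx CQE. */
static enum rss_hash_type
qlnx_cqe_rss_hash_type(const struct eth_fast_path_rx_reg_cqe *cqe)
{
	return (enum rss_hash_type)
	    ((cqe->bitfields >> ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT) &
	     ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK);
}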
#endif /* __ETH_COMMON__ */

View file

@@ -198,7 +198,7 @@ union fcoe_dix_desc_ctx
*/
struct ystorm_fcoe_task_st_ctx
{
u8 task_type /* Task type. use enum fcoe_task_type */;
u8 task_type /* Task type. use enum fcoe_task_type (use enum fcoe_task_type) */;
u8 sgl_mode;
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 /* use enum scsi_sgl_mode (use enum scsi_sgl_mode) */
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
@@ -424,8 +424,8 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write
*/
struct fcoe_tstorm_fcoe_task_st_ctx_read_only
{
u8 task_type /* Task type. use enum fcoe_task_type */;
u8 dev_type /* Device type (disk or tape). use enum fcoe_device_type */;
u8 task_type /* Task type. use enum fcoe_task_type (use enum fcoe_task_type) */;
u8 dev_type /* Device type (disk or tape). use enum fcoe_device_type (use enum fcoe_device_type) */;
u8 conf_supported /* Confirmation supported indication */;
u8 glbl_q_num /* Global RQ/CQ num to be used for sense data placement/completion */;
__le32 cid /* CID which that tasks associated to */;
@@ -1011,10 +1011,12 @@ struct fcoe_conn_offload_ramrod_data
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 /* Does inner vlan exist */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 /* Whether a single vlan (inner/outer) should be used - UFP mode */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 /* indication for conn mode: 0=Initiator, 1=Target, 2=Both Initiator and Target */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
__le16 conn_id /* Drivers connection ID. Should be sent in EQs to speed-up drivers access to connection data. */;
u8 def_q_idx /* Default queue number to be used for unsolicited traffic */;
u8 reserved[5];
@@ -1030,6 +1032,17 @@ struct fcoe_conn_terminate_ramrod_data
};
/*
* FCoE device type
*/
enum fcoe_device_type
{
FCOE_TASK_DEV_TYPE_DISK,
FCOE_TASK_DEV_TYPE_TAPE,
MAX_FCOE_DEVICE_TYPE
};
/*
@@ -1091,6 +1104,27 @@ struct fcoe_rx_stat
/*
* FCoE SQE request type
*/
enum fcoe_sqe_request_type
{
SEND_FCOE_CMD,
SEND_FCOE_MIDPATH,
SEND_FCOE_ABTS_REQUEST,
FCOE_EXCHANGE_CLEANUP,
FCOE_SEQUENCE_RECOVERY,
SEND_FCOE_XFER_RDY,
SEND_FCOE_RSP,
SEND_FCOE_RSP_WITH_SENSE_DATA,
SEND_FCOE_TARGET_DATA,
SEND_FCOE_INITIATOR_DATA,
SEND_FCOE_XFER_CONTINUATION_RDY /* Xfer Continuation (==1) ready to be sent. Previous XFERs data received successfully. */,
SEND_FCOE_TARGET_ABTS_RSP,
MAX_FCOE_SQE_REQUEST_TYPE
};
/*
* FCoe statistics request
*/
@@ -1100,6 +1134,28 @@ struct fcoe_stat_ramrod_data
};
/*
* FCoE task type
*/
enum fcoe_task_type
{
FCOE_TASK_TYPE_WRITE_INITIATOR,
FCOE_TASK_TYPE_READ_INITIATOR,
FCOE_TASK_TYPE_MIDPATH,
FCOE_TASK_TYPE_UNSOLICITED,
FCOE_TASK_TYPE_ABTS,
FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
FCOE_TASK_TYPE_WRITE_TARGET,
FCOE_TASK_TYPE_READ_TARGET,
FCOE_TASK_TYPE_RSP,
FCOE_TASK_TYPE_RSP_SENSE_DATA,
FCOE_TASK_TYPE_ABTS_TARGET,
FCOE_TASK_TYPE_ENUM_SIZE,
MAX_FCOE_TASK_TYPE
};

View file

@@ -256,12 +256,14 @@ struct ystorm_iscsi_task_state
union iscsi_seq_num seq_num /* PDU index in sequence */;
struct iscsi_dif_flags dif_flags /* Dif flags */;
u8 flags;
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 /* local_completion */
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 /* Equals 1 if SGL is predicted and 0 otherwise. */
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 /* local_completion */
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 /* Equals 1 if SGL is predicted and 0 otherwise. */
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 /* Indication for Ystorm that TDIFs offsetInIo is not synced with buffer_offset */
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
};
/*
@@ -848,8 +850,8 @@ struct e4_ystorm_iscsi_task_ag_ctx
#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
@@ -1358,6 +1360,22 @@ struct iscsi_conn_offload_params
};
/*
* iSCSI connection statistics
*/
struct iscsi_conn_stats_params
{
struct regpair iscsi_tcp_tx_packets_cnt /* Counts number of transmitted TCP packets for this iSCSI connection */;
struct regpair iscsi_tcp_tx_bytes_cnt /* Counts number of transmitted TCP bytes for this iSCSI connection */;
struct regpair iscsi_tcp_tx_rxmit_cnt /* Counts number of TCP retransmission events for this iSCSI connection */;
struct regpair iscsi_tcp_rx_packets_cnt /* Counts number of received TCP packets for this iSCSI connection */;
struct regpair iscsi_tcp_rx_bytes_cnt /* Counts number of received TCP bytes for this iSCSI connection */;
struct regpair iscsi_tcp_rx_dup_ack_cnt /* Counts number of received TCP duplicate acks for this iSCSI connection */;
__le32 iscsi_tcp_rx_chksum_err_cnt /* Counts number of received TCP packets with checksum err for this iSCSI connection */;
__le32 reserved;
};
/*
* spe message header
*/
@@ -1414,7 +1432,7 @@ struct iscsi_conn_update_ramrod_params
struct iscsi_cqe_common
{
__le16 conn_id /* Drivers connection Id */;
u8 cqe_type /* Indicates CQE type */;
u8 cqe_type /* Indicates CQE type (use enum iscsi_cqes_type) */;
union cqe_error_status error_bitmap /* CQE error status */;
__le32 reserved[3];
union iscsi_task_hdr iscsi_hdr /* iscsi header union */;
@@ -1426,14 +1444,14 @@ struct iscsi_cqe_common
struct iscsi_cqe_solicited
{
__le16 conn_id /* Drivers connection Id */;
u8 cqe_type /* Indicates CQE type */;
u8 cqe_type /* Indicates CQE type (use enum iscsi_cqes_type) */;
union cqe_error_status error_bitmap /* CQE error status */;
__le16 itid /* initiator itt (Initiator mode) or target ttt (Target mode) */;
u8 task_type /* Task type */;
u8 fw_dbg_field /* FW debug params */;
u8 caused_conn_err /* Equals 1 if this TID caused the connection error, otherwise equals 0. */;
u8 reserved0[3];
__le32 reserved1[1];
__le32 data_truncated_bytes /* Target Mode only: Valid only if data_truncated_err equals 1: The remaining bytes till the end of the IO. */;
union iscsi_task_hdr iscsi_hdr /* iscsi header union */;
};
@@ -1443,11 +1461,11 @@ struct iscsi_cqe_solicited
struct iscsi_cqe_unsolicited
{
__le16 conn_id /* Drivers connection Id */;
u8 cqe_type /* Indicates CQE type */;
u8 cqe_type /* Indicates CQE type (use enum iscsi_cqes_type) */;
union cqe_error_status error_bitmap /* CQE error status */;
__le16 reserved0 /* Reserved */;
u8 reserved1 /* Reserved */;
u8 unsol_cqe_type /* Represent this unsolicited CQE position in a sequence of packets belonging to the same unsolicited PDU */;
u8 unsol_cqe_type /* Represent this unsolicited CQE position in a sequence of packets belonging to the same unsolicited PDU (use enum iscsi_cqe_unsolicited_type) */;
__le16 rqe_opaque /* Relevant for Unsolicited CQE only: The opaque data of RQ BDQ */;
__le16 reserved2[3] /* Reserved */;
union iscsi_task_hdr iscsi_hdr /* iscsi header union */;
@@ -1503,22 +1521,22 @@ enum iscsi_cqe_unsolicited_type
struct iscsi_debug_modes
{
u8 flags;
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 /* Assert on Rx connection error */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 /* Assert if TCP RESET arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 /* Assert if TCP FIN arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 /* Assert if cleanup flow */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 /* Assert if REJECT PDU or ASYNC PDU arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 /* Assert if NOP IN PDU or NOP OUT PDU arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 /* Assert if data digest error */
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 /* Assert if DIF error */
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 /* Assert on Rx connection error */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 /* Assert if TCP RESET arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 /* Assert if TCP FIN arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 /* Assert if cleanup flow */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 /* Assert if REJECT PDU or ASYNC PDU arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 /* Assert if NOP IN PDU or NOP OUT PDU arrived */
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 /* Assert if DIF or data digest error */
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 /* Assert if HQ corruption detected */
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
};
@@ -1535,9 +1553,9 @@ enum iscsi_eqe_opcode
ISCSI_EVENT_TYPE_CLEAR_SQ /* iSCSI response after clear sq Ramrod */,
ISCSI_EVENT_TYPE_TERMINATE_CONN /* iSCSI response after termination Ramrod */,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN /* iSCSI response after MAC address update Ramrod */,
ISCSI_EVENT_TYPE_COLLECT_STATS_CONN /* iSCSI response after collecting connection statistics Ramrod */,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE /* iSCSI response after option 2 connect completed (A-syn EQE) */,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE /* iSCSI response after option 2 termination completed (A-syn EQE) */,
RESERVED9 /* reserved9 */,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES=10 /* Never returned in EQE, used to separate Regular event types from Error event types */,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD /* iSCSI abort response after TCP RST packet receive (A-syn EQE) */,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD /* iSCSI response after close receive (A-syn EQE) */,
@@ -1629,6 +1647,7 @@ enum iscsi_ramrod_cmd_id
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN=5 /* iSCSI connection offload Ramrod. Command ID known only to FW and VBD */,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ=6 /* iSCSI connection clear-sq ramrod. */,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE=7 /* iSCSI connection update MAC address ramrod. */,
ISCSI_RAMROD_CMD_ID_CONN_STATS=8 /* iSCSI collect connection statistics ramrod. */,
MAX_ISCSI_RAMROD_CMD_ID
};
@@ -1679,6 +1698,20 @@ struct iscsi_spe_conn_offload_option2
};
/*
* ISCSI collect connection statistics request
*/
struct iscsi_spe_conn_statistics
{
struct iscsi_slow_path_hdr hdr /* spe message header. */;
__le16 conn_id /* ISCSI Connection ID. */;
__le32 fw_cid /* Context ID (cid) of the connection. */;
u8 reset_stats /* Indicates whether to reset the connection statistics. */;
u8 reserved0[7];
struct regpair stats_cnts_addr /* cmdq and unsolicited counters termination params */;
};
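A hedged sketch of filling this SPE before posting an ISCSI_RAMROD_CMD_ID_CONN_STATS ramrod; the driver is expected to DMA-allocate an iscsi_conn_stats_params buffer and hand over its physical address. All variable names are illustrative.

struct iscsi_spe_conn_statistics *p_ramrod =
    &p_ent->ramrod.iscsi_conn_statistics;

p_ramrod->conn_id = OSAL_CPU_TO_LE16(conn_id);
p_ramrod->fw_cid = OSAL_CPU_TO_LE32(fw_cid);
p_ramrod->reset_stats = reset_after_read ? 1 : 0; /* clear counters after read */
DMA_REGPAIR_LE(p_ramrod->stats_cnts_addr, stats_buf_phys_addr);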
/*
* ISCSI connection termination request
*/
@@ -1716,7 +1749,11 @@ struct iscsi_spe_func_init
u8 num_r2tq_pages_in_ring /* Number of entries in the R2TQ PBL. Provided by driver at function init spe */;
u8 num_uhq_pages_in_ring /* Number of entries in the uHQ PBL (xHQ entries is X2). Provided by driver at function init spe */;
u8 ll2_rx_queue_id /* Queue ID of the Light-L2 Rx Queue */;
u8 ooo_enable /* Enable Out Of Order mode (enable = 1) */;
u8 flags;
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 /* Enable counters - function and connection counters */
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F /* reserved */
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode /* Use iscsi_debug_mode flags */;
__le16 reserved1;
__le32 reserved2;
@ -1855,6 +1892,7 @@ struct iscsi_xhqe
struct mstorm_iscsi_stats_drv
{
struct regpair iscsi_rx_dropped_PDUs_task_not_valid /* Number of Rx silently dropped PDUs due to task not valid */;
struct regpair iscsi_rx_dup_ack_cnt /* Received Dup-ACKs - after 3 dup ACKs, the counter doesn't count the same dup ACK */;
};
@ -1878,6 +1916,9 @@ struct tstorm_iscsi_stats_drv
struct regpair iscsi_rx_bytes_cnt /* Counts the number of rx bytes that were received */;
struct regpair iscsi_rx_packet_cnt /* Counts the number of rx packets that were received */;
struct regpair iscsi_rx_new_ooo_isle_events_cnt /* Counts the number of new out-of-order isle event */;
struct regpair iscsi_rx_tcp_payload_bytes_cnt /* Received In-Order TCP Payload Bytes */;
struct regpair iscsi_rx_tcp_pkt_cnt /* Received In-Order TCP Packets */;
struct regpair iscsi_rx_pure_ack_cnt /* Received Pure-ACKs */;
__le32 iscsi_cmdq_threshold_cnt /* Counts the number of times elements in cmdQ reached threshold */;
__le32 iscsi_rq_threshold_cnt /* Counts the number of times elements in RQQ reached threshold */;
__le32 iscsi_immq_threshold_cnt /* Counts the number of times elements in immQ reached threshold */;
@ -1903,6 +1944,8 @@ struct xstorm_iscsi_stats_drv
{
struct regpair iscsi_tx_go_to_slow_start_event_cnt /* Number of times slow start event occurred */;
struct regpair iscsi_tx_fast_retransmit_event_cnt /* Number of times fast retransmit event occurred */;
struct regpair iscsi_tx_pure_ack_cnt /* Transmitted Pure-ACKs */;
struct regpair iscsi_tx_delayed_ack_cnt /* Transmitted Delayed ACKs */;
};
@ -1914,6 +1957,8 @@ struct ystorm_iscsi_stats_drv
struct regpair iscsi_tx_data_pdu_cnt /* Number of data PDUs that were transmitted */;
struct regpair iscsi_tx_r2t_pdu_cnt /* Number of R2T PDUs that were transmitted */;
struct regpair iscsi_tx_total_pdu_cnt /* Number of total PDUs that were transmitted */;
struct regpair iscsi_tx_tcp_payload_bytes_cnt /* Transmitted In-Order TCP Payload Bytes */;
struct regpair iscsi_tx_tcp_pkt_cnt /* Transmitted In-Order TCP Packets */;
};

View file

@ -44,7 +44,9 @@
#include "eth.h"
#include "pmm.h"
#include "ah_eth.h"
#include "e5_eth.h"
#endif
#include "global.h"
#include "mcp_public.h"
typedef enum active_mf_mode {
@ -66,7 +68,9 @@ enum ov_current_cfg {
};
struct dci_info_global {
enum ov_current_cfg current_cfg;
u16 mba_ver;
u8 current_cfg;
u8 extern_dci_mgmt;
u8 pci_bus_num;
u8 boot_progress;
};
@ -101,14 +105,16 @@ struct private_global {
u32 exp_rom_nvm_addr;
/* The pmm_config structure holds all active phy/link configuration */
#ifndef RECOVERY
#if (!defined MFW_SIM) && (!defined RECOVERY)
#ifdef b900
struct pmm_config eth_cfg;
#else
#elif b940
struct ah_eth eth_cfg;
#elif b510
struct e5_eth eth_cfg;
#else
#endif
#endif
u32 lldp_counter;
u32 avs_init_timestamp;
@ -120,8 +126,9 @@ struct private_global {
u32 drv_nvm_state;
/* Per PF bitmask */
#define DRV_NVM_STATE_IN_PROGRESS_MASK (0x0000ffff)
#define DRV_NVM_STATE_IN_PROGRESS_MASK (0x0001ffff)
#define DRV_NVM_STATE_IN_PROGRESS_OFFSET (0)
#define DRV_NVM_STATE_IN_PROGRESS_VAL_MFW (0x00010000)
u32 storm_fw_ver;
@ -132,6 +139,18 @@ struct private_global {
struct res_alloc_cache res_alloc;
#define G_RES_ALLOC_P (&g_spad.private_data.global.res_alloc)
u32 resource_max_values[RESOURCE_MAX_NUM];
u32 glb_counter_100ms;
/* collection of global bits and controls */
u32 flags_and_ctrl;
#define PRV_GLOBAL_FIO_BMB_INITIATED_MASK 0x00000001
#define PRV_GLOBAL_FIO_BMB_INITIATED_OFFSET 0
#define PRV_GLOBAL_ENABLE_NET_THREAD_LONG_RUN_MASK 0x00000002
#define PRV_GLOBAL_ENABLE_NET_THREAD_LONG_RUN_OFFSET 1
#ifdef b900
u32 es_fir_engines : 8, es_fir_valid_bitmap : 8, es_l2_engines : 8, es_l2_valid_bitmap : 8;
#endif
u64 ecc_events;
};
/**************************************/
@ -144,11 +163,13 @@ struct private_path {
#define RECOVERY_MAX_COUNTDOWN_SECONDS 2
u32 drv_load_vars; /* When the seconds_since_mcp_reset gets here */
#define DRV_LOAD_DEF_TIMEOUT 10
#define DRV_LOAD_TIMEOUT_MASK 0x0000ffff
#define DRV_LOAD_TIMEOUT_OFFSET 0
#define DRV_LOAD_NEED_FORCE_MASK 0xffff0000
#define DRV_LOAD_NEED_FORCE_OFFSET 16
struct load_rsp_stc drv_load_params;
u64 ecc_events;
};
@ -175,10 +196,14 @@ struct drv_port_info_t {
typedef enum _lldp_subscriber_e {
LLDP_SUBSCRIBER_MANDATORY = 0,
LLDP_SUBSCRIBER_SYSTEM,
LLDP_SUBSCRIBER_DCBX_IEEE,
LLDP_SUBSCRIBER_DCBX_CEE,
LLDP_SUBSCRIBER_EEE,
LLDP_SUBSCRIBER_CDCP,
LLDP_SUBSCRIBER_DCI,
LLDP_SUBSCRIBER_UFP,
LLDP_SUBSCRIBER_NCSI,
MAX_SUBSCRIBERS
} lldp_subscriber_e;
@ -214,7 +239,6 @@ typedef struct {
#define MAX_PACKET_SIZE (1516) /* So it can be divided by 4 */
#define LLDP_CHASSIS_ID_TLV_LEN 7
#define LLDP_PORT_ID_TLV_LEN 7
#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes*/
typedef struct {
u16 len;
u8 header[MAX_ETH_HEADER];
@ -239,7 +263,7 @@ typedef struct {
u8 *received_tlvs[MAX_TLVS];
} lldp_receive_data_s;
#define MAX_REGISTERED_TLVS 6
#define MAX_REGISTERED_TLVS 12
typedef struct {
u32 config; /* Uses same defines as local config plus some more below*/
@ -280,6 +304,89 @@ struct dci_info_port {
};
#endif
struct lldp_cdcp {
u32 flags;
#define NTPMR_TTL_EXPIRED 0x00000001
#define CDCP_TLV_RCVD 0x00000002
#define CDCP_TLV_SENT 0x00000004
u32 remote_mib;
#define CDCP_ROLE_MASK 0x00000001
#define CDCP_ROLE_OFFSET 0
#define CDCP_ROLE_BRIDGE 0x0
#define CDCP_ROLE_STATION 0x1
#define CDCP_SCOMP_MASK 0x00000002
#define CDCP_SCOMP_OFFSET 1
#define CDCP_CHAN_CAP_MASK 0x0000fff0
#define CDCP_CHAN_CAP_OFFSET 4
u32 num_of_chan;
};
/* Accommodates link-tlv size for max-pf scids (27) + end-of-tlv size (2) */
#define UFP_REQ_MAX_PAYLOAD_SIZE (32)
/* Accommodates max NIC props TLV size (117 = 5 + 16*7), link TLV (27) and
 * end TLV (2).
 */
#define UFP_RSP_MAX_PAYLOAD_SIZE (160)
struct ufp_info_port {
u8 req_payload[UFP_REQ_MAX_PAYLOAD_SIZE];
u8 rsp_payload[UFP_RSP_MAX_PAYLOAD_SIZE];
u16 req_len;
u16 rsp_len;
u8 switch_version;
u8 switch_status;
u8 flags;
#define UFP_CAP_ENABLED (1 << 0)
#define UFP_REQ_SENT (1 << 1)
#define UFP_RSP_SENT (1 << 2)
#define UFP_CAP_SENT (1 << 3)
u8 pending_flags;
#define UFP_REQ_PENDING (1 << 0)
#define UFP_RSP_PENDING (1 << 1)
};
#define UFP_ENABLED(_port_) \
(g_spad.private_data.port[_port_].ufp_port.flags & UFP_CAP_ENABLED)
/* Max 200-byte packet, accommodates UFP_RSP_MAX_PAYLOAD_SIZE */
#define ECP_MAX_PKT_SIZE (200)
/* Tx-state machine, Qbg variable names specified in comments on the right */
struct ecp_tx_state {
u8 tx_pkt[ECP_MAX_PKT_SIZE];
BOOL ulp_req_rcvd; /* requestReceived */
BOOL ack_rcvd; /* ackReceived */
u16 req_seq_num; /* sequence */
/* State used for timer-based retries */
u16 ack_timer_counter;
#define ECP_TIMEOUT_COUNT 1 /* 1 second to detect ACK timeout */
u16 num_retries; /* retries */
#define ECP_MAX_RETRIES 3
u32 tx_errors; /* txErrors */
u32 ulp_pkt_len;
};
typedef void (*ulp_rx_indication_t)(u8 port, u16 subtype, u32 pkt_len, u8 *pkt);
/* Rx state machine, Qbg variable names specified in comments on the right */
struct ecp_rx_state {
BOOL ecpdu_rcvd; /* ecpduReceived */
u16 last_req_seq; /* lastSeq */
u8 first_req_rcvd;
u8 rsvd;
ulp_rx_indication_t rx_cb_func;
};
struct ecp_state_s {
struct ecp_tx_state tx_state;
struct ecp_rx_state rx_state;
u16 subtype;
};
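/* Illustration only (not part of this file): the retry rule the Tx-state
 * fields above imply - once a ULP request is pending and unacknowledged,
 * resend every ECP_TIMEOUT_COUNT seconds up to ECP_MAX_RETRIES times, then
 * count a txError. ecp_xmit_pkt() is a hypothetical transmit helper.
 */
static void
ecp_tx_ack_timer_tick(struct ecp_tx_state *tx)
{
	if (!tx->ulp_req_rcvd || tx->ack_rcvd)
		return;
	if (++tx->ack_timer_counter < ECP_TIMEOUT_COUNT)
		return;
	tx->ack_timer_counter = 0;
	if (tx->num_retries++ < ECP_MAX_RETRIES)
		ecp_xmit_pkt(tx->tx_pkt, tx->ulp_pkt_len);
	else
		tx->tx_errors++;	/* txErrors */
}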
struct private_port {
struct drv_port_info_t port_info;
active_mf_mode_t mf_mode;
@ -297,15 +404,21 @@ struct private_port {
u32 nig_drain_end_ts;
/* time stamp of the end of NIG drain time for the TC pause drain; this timer is shared across all TCs */
u32 nig_drain_tc_end_ts;
u32 tc_drain_en_bitmap;
u32 recv_lldp_tlvs[LLDP_MAX_LLDP_AGENTS][MAX_TLV_BUFFER];
u32 tc_drain_en_bitmap;
tlv_s lldp_core_tlv_desc[LLDP_MAX_LLDP_AGENTS][MAX_REGISTERED_TLVS];
u8 current_core_tlv_num[LLDP_MAX_LLDP_AGENTS];
struct mcp_mac lldp_mac;
#ifdef CONFIG_HP_DCI_SUPPORT
struct dci_info_port dci_port;
#endif
struct lldp_cdcp cdcp_info;
struct ufp_info_port ufp_port;
struct ecp_state_s ecp_info;
struct lldp_stats_stc lldp_stats[LLDP_MAX_LLDP_AGENTS];
u32 temperature;
u8 prev_ext_lasi_status;
u8 rsvd1;
u16 rsvd2;
};
@ -339,6 +452,7 @@ struct drv_func_info_t {
u8_t unload_wol_param; /* See drv_mb_param */
u8_t eswitch_mode;
u8_t ppfid_bmp;
};
struct dci_info_func {
@ -353,12 +467,13 @@ struct dci_info_func {
u8 drv_state;
u16 fcoe_cvid;
u8 fcoe_fabric_name[8];
#define CONNECTION_ID_LENGTH 16
u8 local_conn_id[CONNECTION_ID_LENGTH];
};
struct private_func {
struct drv_func_info_t func_info;
u32 init_hw_page;
u32 num_of_msix;
struct pf_sb_t sb;
struct dci_info_func dci_func;
};

View file

@ -250,11 +250,11 @@ struct couple_mode_teaming {
/**************************************
* LLDP and DCBX HSI structures
**************************************/
#define LLDP_CHASSIS_ID_STAT_LEN 4
#define LLDP_PORT_ID_STAT_LEN 4
#define LLDP_CHASSIS_ID_STAT_LEN 4
#define LLDP_PORT_ID_STAT_LEN 4
#define DCBX_MAX_APP_PROTOCOL 32
#define MAX_SYSTEM_LLDP_TLV_DATA 32
#define MAX_SYSTEM_LLDP_TLV_DATA 32 /* In dwords. 128 in bytes */
#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes */
typedef enum _lldp_agent_e {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
@ -413,6 +413,7 @@ struct dcbx_local_params {
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
#define DCBX_CONFIG_VERSION_DYNAMIC (DCBX_CONFIG_VERSION_IEEE | DCBX_CONFIG_VERSION_CEE)
#define DCBX_CONFIG_VERSION_STATIC 4
u32 flags;
@ -435,11 +436,29 @@ struct dcbx_mib {
};
struct lldp_system_tlvs_buffer_s {
u16 valid;
u16 length;
u32 flags;
#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
/* This bit defines whether the system TLVs replace the mandatory TLVs or
 * are sent in addition to them. Set to 1 to replace the mandatory TLVs.
 */
#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
#define LLDP_SYSTEM_TLV_MANDATORY_OFFSET 1
#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
#define LLDP_SYSTEM_TLV_LENGTH_OFFSET 16
u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
/* Since this struct is written by the MFW and read by the driver, it needs
 * sequence guards (as in the case of the DCBX MIB).
 */
struct lldp_received_tlvs_s {
u32 prefix_seq_num;
u32 length;
u32 tlvs_buffer[MAX_TLV_BUFFER];
u32 suffix_seq_num;
};
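/* Illustration only (not part of this file): a consistent read of the
 * seq-guarded structure above - the MFW bumps prefix_seq_num before and
 * suffix_seq_num after updating the buffer, so the driver copies and
 * retries until both match. "shmem" is a hypothetical pointer to the
 * MFW-resident copy; memcpy handling is simplified for illustration.
 */
static inline void
lldp_rx_tlvs_read(const struct lldp_received_tlvs_s *shmem,
    struct lldp_received_tlvs_s *dst)
{
	do {
		memcpy(dst, shmem, sizeof(*dst));
	} while (dst->prefix_seq_num != dst->suffix_seq_num);
}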
struct dcb_dscp_map {
u32 flags;
#define DCB_DSCP_ENABLE_MASK 0x1
@ -455,6 +474,32 @@ struct dcb_dscp_map {
etc.*/
};
struct mcp_val64 {
u32 lo;
u32 hi;
};
/* generic_idc_msg_t is used for inter-driver communication.
 * source_pf specifies the originating PF that sends the message to all
 * target PFs; msg contains the 64-bit message value, opaque to the MFW.
 */
struct generic_idc_msg_s {
u32 source_pf;
struct mcp_val64 msg;
};
/**************************************
* Attributes commands
**************************************/
enum _attribute_commands_e {
ATTRIBUTE_CMD_READ = 0,
ATTRIBUTE_CMD_WRITE,
ATTRIBUTE_CMD_READ_CLEAR,
ATTRIBUTE_CMD_CLEAR,
ATTRIBUTE_NUM_OF_COMMANDS
};
/**************************************/
/* */
/* P U B L I C G L O B A L */
@ -635,6 +680,13 @@ struct public_port {
#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
u32 link_status1;
#define LP_PRESENCE_STATUS_OFFSET 0
#define LP_PRESENCE_STATUS_MASK 0x3
#define LP_PRESENCE_UNKNOWN 0x0
#define LP_PRESENCE_PROBING 0x1
#define LP_PRESENT 0x2
#define LP_NOT_PRESENT 0x3
u32 ext_phy_fw_version;
u32 drv_phy_cfg_addr; /* Points to struct eth_phy_cfg (For READ-ONLY) */
@ -733,6 +785,8 @@ struct public_port {
#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
@ -755,6 +809,11 @@ struct public_port {
#define EEE_LP_ADV_STATUS_MASK 0x00000f00 /* Same values as in EEE_LD_ADV, but for Link Parter */
#define EEE_LP_ADV_STATUS_OFFSET 8
#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 /* Supported speeds for EEE */
#define EEE_SUPPORTED_SPEED_OFFSET 12
#define EEE_1G_SUPPORTED (1 << 1)
#define EEE_10G_SUPPORTED (1 << 2)
u32 eee_remote; /* Used for EEE in LLDP */
#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
#define EEE_REMOTE_TW_TX_OFFSET 0
@ -771,6 +830,20 @@ struct public_port {
#define ETH_TRANSCEIVER_HAS_DIAGNOSTIC (1 << 6)
#define ETH_TRANSCEIVER_IDENT_MASK 0x0000ff00
#define ETH_TRANSCEIVER_IDENT_OFFSET 8
u32 oem_cfg_port;
#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
#define OEM_CFG_SCHED_TYPE_OFFSET 2
#define OEM_CFG_SCHED_TYPE_ETS 0x1
#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
};
/**************************************/
@ -801,7 +874,10 @@ struct public_func {
/* For PCP default value get the MSB byte of the map default */
u32 c2s_pcp_map_default;
u32 reserved[4];
/* For generic inter-driver communication channel messages between PFs via MFW */
struct generic_idc_msg_s generic_idc_msg;
u32 num_of_msix;
// replace old mf_cfg
u32 config;
@ -845,7 +921,9 @@ struct public_func {
#define FUNC_MF_CFG_BOOT_MODE_ENABLED 0x08000000
u32 status;
#define FUNC_STATUS_VLINK_DOWN 0x00000001
#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
#define FUNC_STATUS_LOGICAL_LINK_UP 0x00000002
#define FUNC_STATUS_FORCED_LINK 0x00000004
u32 mac_upper; /* MAC */
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
@ -905,6 +983,23 @@ struct public_func {
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
#define DRV_ID_DRV_INIT_HW_OFFSET 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET)
u32 oem_cfg_func;
#define OEM_CFG_FUNC_TC_MASK 0x0000000F
#define OEM_CFG_FUNC_TC_OFFSET 0
#define OEM_CFG_FUNC_TC_0 0x0
#define OEM_CFG_FUNC_TC_1 0x1
#define OEM_CFG_FUNC_TC_2 0x2
#define OEM_CFG_FUNC_TC_3 0x3
#define OEM_CFG_FUNC_TC_4 0x4
#define OEM_CFG_FUNC_TC_5 0x5
#define OEM_CFG_FUNC_TC_6 0x6
#define OEM_CFG_FUNC_TC_7 0x7
#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
};
/**************************************/
@ -924,11 +1019,6 @@ struct mcp_mac {
u32 mac_lower;
};
struct mcp_val64 {
u32 lo;
u32 hi;
};
struct mcp_file_att {
u32 nvm_start_addr;
u32 len;
@ -1012,24 +1102,24 @@ struct mdump_config_stc {
};
enum resource_id_enum {
RESOURCE_NUM_SB_E = 0,
RESOURCE_NUM_SB_E = 0,
RESOURCE_NUM_L2_QUEUE_E = 1,
RESOURCE_NUM_VPORT_E = 2,
RESOURCE_NUM_VMQ_E = 3,
RESOURCE_NUM_VMQ_E = 3,
RESOURCE_FACTOR_NUM_RSS_PF_E = 4, /* Not a real resource!! it's a factor used to calculate others */
RESOURCE_FACTOR_RSS_PER_VF_E = 5, /* Not a real resource!! it's a factor used to calculate others */
RESOURCE_NUM_RL_E = 6,
RESOURCE_NUM_PQ_E = 7,
RESOURCE_NUM_VF_E = 8,
RESOURCE_NUM_RL_E = 6,
RESOURCE_NUM_PQ_E = 7,
RESOURCE_NUM_VF_E = 8,
RESOURCE_VFC_FILTER_E = 9,
RESOURCE_ILT_E = 10,
RESOURCE_CQS_E = 11,
RESOURCE_ILT_E = 10,
RESOURCE_CQS_E = 11,
RESOURCE_GFT_PROFILES_E = 12,
RESOURCE_NUM_TC_E = 13,
RESOURCE_NUM_TC_E = 13,
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@ -1099,6 +1189,19 @@ struct mdump_retain_data_stc {
u32 status;
};
struct attribute_cmd_write_stc {
u32 val;
u32 mask;
u32 offset;
};
struct lldp_stats_stc {
u32 tx_frames_total;
u32 rx_frames_total;
u32 rx_frames_discarded;
u32 rx_age_outs;
};
union drv_union_data {
struct mcp_mac wol_mac; /* UNLOAD_DONE */
@ -1131,6 +1234,8 @@ union drv_union_data {
struct load_req_stc load_req;
struct load_rsp_stc load_rsp;
struct mdump_retain_data_stc mdump_retain;
struct attribute_cmd_write_stc attribute_cmd_write;
struct lldp_stats_stc lldp_stats;
/* ... */
};
@ -1149,8 +1254,8 @@ struct public_drv_mb {
/* - DONT_CARE - Don't flap the link if up */
#define DRV_MSG_CODE_LINK_RESET 0x23000000
/* LLDP commands */
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX 0x24100000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
/* OneView feature driver HSI*/
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
@ -1171,6 +1276,9 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OEM_UPDATE_BOOT_CFG 0x3e000000
#define DRV_MSG_CODE_OEM_RESET_TO_DEFAULT 0x3f000000
#define DRV_MSG_CODE_OV_GET_CURR_CFG 0x40000000
#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_GET_LLDP_STATS 0x42000000
#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000 /* params [31:8] - reserved, [7:0] - bitmap */
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000 /*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
@ -1288,6 +1396,23 @@ struct public_drv_mb {
#define DRV_MSG_CODE_READ_WOL_REG 0X00320000
#define DRV_MSG_CODE_WRITE_WOL_REG 0X00330000
#define DRV_MSG_CODE_GET_WOL_BUFFER 0X00340000
#define DRV_MSG_CODE_ATTRIBUTE 0x00350000 /* Param: [0:23] Attribute key, [24:31] Attribute sub command */
#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000 /* Param: Password len. Union: Plain Password */
#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 /* Param: None */
/* Pmbus commands */
#define DRV_MSG_CODE_PMBUS_READ 0x00380000 /* Param: [0:7] - Cmd, [8:9] - len */
#define DRV_MSG_CODE_PMBUS_WRITE 0x00390000 /* Param: [0:7] - Cmd, [8:9] - len, [16:31] -data*/
#define DRV_MB_PARAM_PMBUS_CMD_OFFSET 0
#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF
#define DRV_MB_PARAM_PMBUS_LEN_OFFSET 8
#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300
#define DRV_MB_PARAM_PMBUS_DATA_OFFSET 16
#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000
#define DRV_MSG_CODE_GENERIC_IDC 0x003a0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@ -1306,10 +1431,18 @@ struct public_drv_mb {
#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
/* LLDP / DCBX params*/
/* To be used with SET_LLDP command */
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0
/* To be used with SET_LLDP and REGISTER_LLDP_TLVS_RX commands */
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1
/* To be used with REGISTER_LLDP_TLVS_RX command */
#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_OFFSET 0
#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_OFFSET 4
/* To be used with SET_DCBX command */
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3
@ -1409,6 +1542,12 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FCOE_CVID_MASK 0xFFF
#define DRV_MB_PARAM_FCOE_CVID_OFFSET 0
#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
#define DRV_MB_PARAM_LLDP_STATS_AGENT_MASK 0xFF
#define DRV_MB_PARAM_LLDP_STATS_AGENT_OFFSET 0
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
@ -1454,11 +1593,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 /* driver supports SmartLinQ parameter */
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 /* driver supports EEE parameter */
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* driver supports virtual link parameter */
/* Driver attributes params */
#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
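/* Illustration only (not part of this file): composing the mailbox param
 * for DRV_MSG_CODE_ATTRIBUTE from the fields above; "key" stands for a
 * hypothetical 24-bit attribute key.
 */
u32 param = ((key << DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET) &
    DRV_MB_PARAM_ATTRIBUTE_KEY_MASK) |
    ((ATTRIBUTE_CMD_READ << DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET) &
    DRV_MB_PARAM_ATTRIBUTE_CMD_MASK);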
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@ -1482,6 +1627,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
#define FW_MSG_CODE_REGISTER_LLDP_TLVS_RX_DONE 0x24100000
#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
@ -1505,6 +1651,9 @@ struct public_drv_mb {
#define FW_MSG_CODE_UPDATE_BOOT_CFG_DONE 0x3e000000
#define FW_MSG_CODE_RESET_TO_DEFAULT_ACK 0x3f000000
#define FW_MSG_CODE_OV_GET_CURR_CFG_DONE 0x40000000
#define FW_MSG_CODE_GET_OEM_UPDATES_DONE 0x41000000
#define FW_MSG_CODE_GET_LLDP_STATS_DONE 0x42000000
#define FW_MSG_CODE_GET_LLDP_STATS_ERROR 0x42010000
#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
@ -1545,6 +1694,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_ERROR 0x00170000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
@ -1591,6 +1741,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY 0x00020000
#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD 0x00030000
#define FW_MSG_CODE_IDC_BUSY 0x00010000
u32 fw_mb_param;
/* Resource Allocation params - MFW version support */
@ -1606,11 +1760,33 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
/* get MFW feature support response */
#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 /* MFW supports SmartLinQ */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 /* MFW supports EEE */
#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 /* MFW supports SmartLinQ */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 /* MFW supports EEE */
#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004 /* MFW supports DRV_LOAD Timeout */
#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET 0x00000008 /* MFW supports early detection of LP Presence */
#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD 0x00000010 /* MFW supports relaxed ordering setting */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 /* MFW supports virtual link */
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1<<0)
#define FW_MB_PARAM_OEM_UPDATE_MASK 0xFF
#define FW_MB_PARAM_OEM_UPDATE_OFFSET 0
#define FW_MB_PARAM_OEM_UPDATE_BW 0x01
#define FW_MB_PARAM_OEM_UPDATE_S_TAG 0x02
#define FW_MB_PARAM_OEM_UPDATE_CFG 0x04
#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_OFFSET 0
#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_OFFSET 1
#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_OFFSET 2
#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET 3
#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF
#define FW_MB_PARAM_PPFID_BITMAP_OFFSET 0
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
@ -1673,6 +1849,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
MFW_DRV_MSG_GENERIC_IDC, /* Generic Inter Driver Communication message */
MFW_DRV_MSG_MAX
};
@ -1967,4 +2146,97 @@ enum tlvs {
DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
};
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60
#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2
#define SFP_EEPROM_A2_VCC_ADDR 0x62
#define SFP_EEPROM_A2_VCC_SIZE 2
#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64
#define SFP_EEPROM_A2_TX_BIAS_SIZE 2
#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66
#define SFP_EEPROM_A2_TX_POWER_SIZE 2
#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68
#define SFP_EEPROM_A2_RX_POWER_SIZE 2
#define I2C_DEV_ADDR_A0 0xa0
#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16
#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2
#define QSFP_EEPROM_A0_VCC_ADDR 0x1a
#define QSFP_EEPROM_A0_VCC_SIZE 2
#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a
#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2
#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32
#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2
#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22
#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2
/**************************************
* eDiag NETWORK Mode (DON)
**************************************/
#define ETH_DON_TYPE 0x0911 /* NETWORK Mode for QeDiag */
#define ETH_DON_TRACE_TYPE 0x0912 /* NETWORK Mode Continuous Trace */
#define DON_RESP_UNKNOWN_CMD_ID 0x10 /* Response Error */
/* Op Codes, Response is Op Code+1 */
#define DON_REG_READ_REQ_CMD_ID 0x11
#define DON_REG_WRITE_REQ_CMD_ID 0x22
#define DON_CHALLENGE_REQ_CMD_ID 0x33
#define DON_NVM_READ_REQ_CMD_ID 0x44
#define DON_BLOCK_READ_REQ_CMD_ID 0x55
#define DON_MFW_MODE_TRACE_CONTINUOUS_ID 0x70
#if defined(MFW) || defined(DIAG) || defined(WINEDIAG)
#ifndef UEFI
#if defined(_MSC_VER)
#pragma pack(push,1)
#else
#pragma pack(1)
#endif
#endif
typedef struct {
u8 dst_addr[6];
u8 src_addr[6];
u16 ether_type;
/* DON Message data starts here, after L2 header */
/* Do not change alignment to keep backward compatibility */
u16 cmd_id; /* Op code and response code */
union {
struct { /* DON Commands */
u32 address;
u32 val;
u32 resp_status;
};
struct { /* DON Traces */
u16 mcp_clock; /* MCP Clock in MHz */
u16 trace_size; /* Trace size in bytes */
u32 seconds; /* Seconds since last reset */
u32 ticks; /* Timestamp (NOW) */
};
};
union {
u8 digest[32]; /* SHA256 */
u8 data[32];
/* u32 dword[8]; */
};
} don_packet_t;
#ifndef UEFI
#if defined(_MSC_VER)
#pragma pack(pop)
#else
#pragma pack(0)
#endif
#endif /* #ifndef UEFI */
#endif /* #if defined(MFW) || defined(DIAG) || defined(WINEDIAG) */
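/* Illustration only (not part of this file): since a DON response is the
 * request op code plus one, a register-read reply can be matched as below;
 * "pkt" is a hypothetical received don_packet_t.
 */
if (pkt->cmd_id == DON_REG_READ_REQ_CMD_ID + 1)
	printf("reg 0x%x = 0x%x (status 0x%x)\n",
	    pkt->address, pkt->val, pkt->resp_status);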
#endif /* MCP_PUBLIC_H */

View file

@ -36,20 +36,21 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
* Created: 3/15/2017
* Created: 12/4/2017
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
#define NVM_CFG_version 0x83000
#define NVM_CFG_new_option_seq 22
#define NVM_CFG_version 0x83306
#define NVM_CFG_removed_option_seq 1
#define NVM_CFG_new_option_seq 26
#define NVM_CFG_updated_value_seq 4
#define NVM_CFG_removed_option_seq 2
#define NVM_CFG_updated_value_seq 5
struct nvm_cfg_mac_address
{
@ -370,8 +371,8 @@ struct nvm_cfg1_glob
#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
/* Set caution temperature */
#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
#define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_MASK 0x00FF0000
#define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_OFFSET 16
/* Set external thermal sensor I2C address */
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK 0xFF000000
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
@ -539,6 +540,11 @@ struct nvm_cfg1_glob
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1
/* Enable/Disable PCIE Relaxed Ordering */
#define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_MASK 0x40000000
#define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_OFFSET 30
#define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_DISABLED 0x0
#define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_ENABLED 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@ -1068,7 +1074,35 @@ struct nvm_cfg1_glob
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
u32 preboot_debug_mode_std; /* 0x140 */
u32 preboot_debug_mode_ext; /* 0x144 */
u32 reserved[56]; /* 0x148 */
u32 ext_phy_cfg1; /* 0x148 */
/* Ext PHY MDI pair swap value */
#define NVM_CFG1_GLOB_RESERVED_244_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_RESERVED_244_OFFSET 0
u32 clocks; /* 0x14C */
/* Sets core clock frequency */
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MASK 0x000000FF
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_OFFSET 0
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_DEFAULT 0x0
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_375 0x1
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_350 0x2
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_325 0x3
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_300 0x4
#define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_280 0x5
/* Sets MAC clock frequency */
#define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MASK 0x0000FF00
#define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_OFFSET 8
#define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_DEFAULT 0x0
#define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_782 0x1
#define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_516 0x2
/* Sets storm clock frequency */
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_MASK 0x00FF0000
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_OFFSET 16
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_DEFAULT 0x0
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1200 0x1
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1000 0x2
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_900 0x3
#define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1100 0x4
u32 reserved[54]; /* 0x150 */
};
struct nvm_cfg1_path
@ -1167,6 +1201,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@ -1175,6 +1210,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@ -1185,6 +1221,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
@ -1199,6 +1236,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
@ -1279,6 +1317,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM5422X 0x2
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
/* EEE power saving mode */
@ -1312,6 +1351,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
@ -1351,6 +1391,13 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_50G 0x10
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_50G 0x20
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
/* UID LED Blink Mode Settings */
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_MASK 0x0F000000
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_OFFSET 24
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_ACTIVITY_LED 0x1
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED0 0x2
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED1 0x4
#define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED2 0x8
u32 transceiver_00; /* 0x40 */
/* Define for mapping of transceiver signal module absent */
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
@ -1453,6 +1500,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@ -1461,6 +1509,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@ -1471,6 +1520,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
@ -1480,6 +1530,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
@ -1522,6 +1573,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@ -1530,6 +1582,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@ -1540,6 +1593,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
@ -1549,6 +1603,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
@ -1591,6 +1646,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@ -1599,6 +1655,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@ -1609,6 +1666,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
@ -1618,6 +1676,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
@ -1660,6 +1719,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@ -1668,6 +1728,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@ -1678,6 +1739,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
@ -1687,6 +1749,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
@ -1729,6 +1792,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
@ -1737,6 +1801,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
@ -1747,6 +1812,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
@ -1756,6 +1822,7 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
@ -1798,7 +1865,11 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET 0
#define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK 0x0000FF00
#define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET 8
u32 reserved[115]; /* 0x8C */
u32 ext_phy_cfg1; /* 0x8C */
/* Ext PHY MDI pair swap value */
#define NVM_CFG1_PORT_EXT_PHY_MDI_PAIR_SWAP_MASK 0x0000FFFF
#define NVM_CFG1_PORT_EXT_PHY_MDI_PAIR_SWAP_OFFSET 0
u32 reserved[114]; /* 0x90 */
};
struct nvm_cfg1_func
@ -1937,6 +2008,17 @@ struct nvm_cfg1
/******************************************
* nvm_cfg structs
******************************************/
struct board_info
{
u16 vendor_id;
u16 eth_did_suffix;
u16 sub_vendor_id;
u16 sub_device_id;
char *board_name;
char *friendly_name;
};
enum nvm_cfg_sections
{
NVM_CFG_SECTION_NVM_CFG1,
@ -1951,4 +2033,3 @@ struct nvm_cfg
};
#endif /* NVM_CFG_H */

View file

@ -41,6 +41,8 @@
#define CRC_MAGIC_VALUE 0xDEBB20E3
#define CRC32_POLYNOMIAL 0xEDB88320
#define _KB(x) ((x) * 1024)
#define _MB(x) (_KB(x) * 1024)
#define NVM_CRC_SIZE (sizeof(u32))
enum nvm_sw_arbitrator {
NVM_SW_ARB_HOST,
@ -109,6 +111,12 @@ enum nvm_image_type {
NVM_TYPE_8485X_PHY_FW = 0x23,
NVM_TYPE_PUB_KEY = 0x24,
NVM_TYPE_RECOVERY = 0x25,
NVM_TYPE_PLDM = 0x26,
NVM_TYPE_UPK1 = 0x27,
NVM_TYPE_UPK2 = 0x28,
NVM_TYPE_MASTER_KC = 0x29,
NVM_TYPE_BACKUP_KC = 0x2a,
NVM_TYPE_ROM_TEST = 0xf0,
NVM_TYPE_MAX,
};
@ -154,7 +162,13 @@ struct image_map g_image_table[] = {
{"ETH_PHY_FW2", "-ethphy2", NVM_TYPE_ETH_PHY_FW2},
{"BDN", "-bdn", NVM_TYPE_BDN},
{"PK", "-pk", NVM_TYPE_PUB_KEY},
{"RECOVERY", "-recovery",NVM_TYPE_RECOVERY}
{"RECOVERY", "-recovery",NVM_TYPE_RECOVERY},
{"PLDM", "-pldm", NVM_TYPE_PLDM},
{"UPK1", "-upk1", NVM_TYPE_UPK1},
{"UPK2", "-upk2", NVM_TYPE_UPK2},
{"ROMTEST", "-romtest" ,NVM_TYPE_ROM_TEST},
{"MASTER_KC", "-kc" ,NVM_TYPE_MASTER_KC},
{"BACKUP_KC", "" ,NVM_TYPE_BACKUP_KC}
};
#define IMAGE_TABLE_SIZE (sizeof(g_image_table) / sizeof(struct image_map))
@ -163,7 +177,14 @@ struct image_map g_image_table[] = {
#define MAX_NVM_DIR_ENTRIES 150
/* Note: 150 entries are allowed since each file occupies at least one page. */
struct nvm_dir {
struct nvm_dir_meta {
u32 dir_id;
u32 nvm_dir_addr;
u32 num_images;
u32 next_mfw_to_run;
};
struct nvm_dir {
s32 seq; /* This dword indicates whether this dir is valid and whether it is newer than the other dir */
#define NVM_DIR_NEXT_MFW_MASK 0x00000001
#define NVM_DIR_SEQ_MASK 0xfffffffe
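/* Illustration only (not part of this file): the dual-directory
 * arbitration the seq dword implies - strip the next-MFW bit and prefer
 * the directory with the newer sequence. Treating an erased dword (all
 * ones) as invalid is an assumption here.
 */
static inline int
nvm_dir2_is_newer(s32 seq1, s32 seq2)
{
	if (seq1 == -1)
		return (1);
	if (seq2 == -1)
		return (0);
	return ((u32)(seq2 & NVM_DIR_SEQ_MASK) >
	    (u32)(seq1 & NVM_DIR_SEQ_MASK));
}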
@ -201,8 +222,10 @@ struct nvm_vpd_image {
#define FLASH_PAGE_SIZE 0x1000
#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */
#define ASIC_MIM_MAX_SIZE (300*FLASH_PAGE_SIZE) /* 1.2Mb */
#define FPGA_MIM_MAX_SIZE (62*FLASH_PAGE_SIZE) /* 250Kb */
#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200)) /* 1.2Mb - E4*/
#define NG_ASIC_MIM_MAX_SIZE (_MB(2)) /* 2Mb - E5 */
#define FPGA_MIM_MAX_SIZE (0x3E000) /* 250Kb */
/* Each image must start on its own page. Bootstrap and LIM are bound together, so they can share the same page.
* The LIM itself should be very small, so limit it to 8Kb, but in order to open a new page, we decrement the bootstrap size out of it.
@ -210,48 +233,53 @@ struct nvm_vpd_image {
#define LIM_MAX_SIZE ((2*FLASH_PAGE_SIZE) - sizeof(struct legacy_bootstrap_region) - NVM_RSV_SIZE)
#define LIM_OFFSET (NVM_OFFSET(lim_image))
#define NVM_RSV_SIZE (44)
#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : FPGA_MIM_MAX_SIZE )
#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + ((idx == NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + MIM_MAX_SIZE(is_asic)*2)
#define GET_MIM_MAX_SIZE(is_asic, is_e4) ((!is_asic) ? FPGA_MIM_MAX_SIZE : ((is_e4) ? LEGACY_ASIC_MIM_MAX_SIZE : NG_ASIC_MIM_MAX_SIZE))
#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + ((idx == NVM_TYPE_MIM2) ?GET_MIM_MAX_SIZE(is_asic, is_e4) : 0))
#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + GET_MIM_MAX_SIZE(is_asic, is_e4)*2)
#define EMUL_NVM_FIXED_AREA_SIZE() (sizeof(struct nvm_image) + GET_MIM_MAX_SIZE(0, 0))
#define E5_MASTER_KEY_CHAIN_ADDR 0x1000
#define E5_BACKUP_KEY_CHAIN_ADDR ((0x20000 << (REG_READ(0, MCP_REG_NVM_CFG4) & 0x7)) - 0x1000)
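/* Illustration only (not part of this file): for an E4 ASIC the macros
 * above place MIM1 right after the two directory pages (0x4000) and MIM2
 * one MIM region later, 0x4000 + _KB(1200) = 0x130000, matching the flash
 * maps drawn below.
 */
u32 mim1_off = GET_MIM_OFFSET(NVM_TYPE_MIM1, 1 /* asic */, 1 /* e4 */);
u32 mim2_off = GET_MIM_OFFSET(NVM_TYPE_MIM2, 1, 1);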
union nvm_dir_union {
struct nvm_dir dir;
u8 page[FLASH_PAGE_SIZE];
};
/* Address
* +-------------------+ 0x000000
* | Bootstrap: |
* | magic_number |
* | sram_start_addr |
* | code_len |
* | code_start_addr |
* | crc |
* +-------------------+ 0x000014
* | rsrv |
* +-------------------+ 0x000040
* | LIM |
* +-------------------+ 0x002000
* | Dir1 |
* +-------------------+ 0x003000
* | Dir2 |
* +-------------------+ 0x004000
* | MIM1 |
* +-------------------+ 0x130000
* | MIM2 |
* +-------------------+ 0x25C000
* | Rest Images: |
* | TIM1/2 |
* | MFW_TRACE1/2 |
* | Eagle/Falcon FW |
* | PCIE/AVS FW |
* | MBA/CCM/L2B |
* | VPD |
* | optic_modules |
* | ... |
* +-------------------+ 0x400000
*/
struct nvm_image {
/* E4 Address E5 Address
* +-------------------+ 0x000000 * +-------------------+ 0x000000
* | Bootstrap: | * | |
* | magic_number | * | |
* | sram_start_addr | * | |
* | code_len | * | |
* | code_start_addr | * | |
* | crc | * | |
* +-------------------+ 0x000014 * | |
* | rsrv | * | rsrv |
* +-------------------+ 0x000040 * +-------------------+ 0x001000
* | LIM | * | Master Key Chain |
* +-------------------+ 0x002000 * +-------------------+ 0x002000
* | Dir1 | * | Dir1 |
* +-------------------+ 0x003000 * +-------------------+ 0x003000
* | Dir2 | * | Dir2 |
* +-------------------+ 0x004000 * +-------------------+ 0x004000
* | MIM1 | * | MIM1 |
* +-------------------+ 0x130000 * +-------------------+ 0x130000
* | MIM2 | * | MIM2 |
* +-------------------+ 0x25C000 * +-------------------+ 0x25C000
* | Rest Images: | * | Rest Images: |
* | TIM1/2 | * | TIM1/2 |
* | MFW_TRACE1/2 | * | MFW_TRACE1/2 |
* | Eagle/Falcon FW | * | Eagle/Falcon FW |
* | PCIE/AVS FW | * | PCIE/AVS FW |
* | MBA/CCM/L2B | * | MBA/CCM/L2B |
* | VPD | * | VPD |
* | optic_modules | * +-------------------+ Flash end - 0x1000
* | ... | * | Backup Key Chain |
* +-------------------+ 0x400000 * +-------------------+ Flash end
*/
struct nvm_image {
/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
/* NVM Offset (size) */
struct legacy_bootstrap_region bootstrap; /* 0x000000 (0x000014) */
@ -265,6 +293,7 @@ struct nvm_image {
#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image*)0)->f))))
struct hw_set_info {
u32 reg_type;
#define GRC_REG_TYPE 1

View file

@ -51,6 +51,7 @@ struct qlnx_ivec {
typedef struct qlnx_ivec qlnx_ivec_t;
//#define QLNX_MAX_RSS 30
#define QLNX_MAX_VF_RSS 4
#define QLNX_MAX_RSS 36
#define QLNX_DEFAULT_RSS 16
#define QLNX_MAX_TC 1
@ -335,6 +336,24 @@ typedef struct _qlnx_mcast {
uint8_t addr[6];
} __packed qlnx_mcast_t;
typedef struct _qlnx_vf_attr {
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t num_rings;
} qlnx_vf_attr_t;
typedef struct _qlnx_sriov_task {
struct task pf_task;
struct taskqueue *pf_taskqueue;
#define QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG 0x01
#define QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE 0x02
#define QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE 0x04
volatile uint32_t flags;
} qlnx_sriov_task_t;
/*
* Adapter structure contains the hardware independent information of the
* pci function.
@ -350,6 +369,7 @@ struct qlnx_host {
/* some flags */
volatile struct {
volatile uint32_t
hw_init :1,
callout_init :1,
slowpath_start :1,
parent_tag :1,
@ -361,6 +381,7 @@ struct qlnx_host {
device_t pci_dev;
uint8_t pci_func;
uint8_t dev_unit;
uint16_t device_id;
struct ifnet *ifp;
int if_flags;
@ -460,6 +481,11 @@ struct qlnx_host {
uint64_t err_fp_null;
uint64_t err_get_proto_invalid_type;
/* error recovery related */
uint32_t error_recovery;
struct task err_task;
struct taskqueue *err_taskqueue;
/* grcdump related */
uint32_t err_inject;
uint32_t grcdump_taken;
@ -481,12 +507,22 @@ struct qlnx_host {
uint32_t storm_stats_gather;
uint32_t personality;
uint16_t sriov_initialized;
uint16_t num_vfs;
qlnx_vf_attr_t *vf_attr;
qlnx_sriov_task_t sriov_task[MAX_HWFNS_PER_DEVICE];
uint32_t curr_vf;
void *next;
void *qlnx_rdma;
volatile int qlnxr_debug;
};
typedef struct qlnx_host qlnx_host_t;
/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & (~((align) - 1)))
#define QL_MIN(x, y) ((x < y) ? x : y)
#define QL_RUNNING(ifp) \
@ -667,8 +703,14 @@ extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
extern void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
struct qlnx_link_output *if_link);
extern int qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs);
extern int qlnx_vf_device(qlnx_host_t *ha);
extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info);
extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
u16 sb_id);
/*
* Some OS specific stuff
@ -681,7 +723,7 @@ extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#endif
#endif /* #if (defined IFM_100G_SR4) */
#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
@ -689,7 +731,7 @@ extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif
#endif /* #if (defined IFM_25G_SR) */
#if __FreeBSD_version < 1100000
@ -731,6 +773,7 @@ extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
#ifndef QLNX_RDMA
#if defined(__i386__) || defined(__amd64__)
static __inline
@ -742,6 +785,7 @@ void prefetch(void *x)
#else
#define prefetch(x)
#endif
#endif
#endif /* #ifndef _QLNX_DEF_H_ */

View file

@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_dcbx_api.h"
#include "qlnx_ioctl.h"
#include "qlnx_def.h"
@ -574,7 +575,7 @@ qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
p_hwfn = &ha->cdev.hwfns[0];
qlnx_fill_link(p_hwfn, &if_link);
qlnx_fill_link(ha, p_hwfn, &if_link);
dev_info->supported = if_link.supported_caps;
dev_info->advertising = if_link.advertised_caps;
@ -771,6 +772,235 @@ qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
return;
}
#ifdef QLNX_USER_LLDP
static int
qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, uint32_t enable)
{
int ret = 0;
uint8_t lldp_mac[6] = {0};
struct ecore_lldp_config_params lldp_params;
struct ecore_lldp_sys_tlvs tlv_params;
ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: ecore_mcp_get_lldp_mac failed\n", __func__);
return (-1);
}
bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));
lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
lldp_params.tx_interval = 30; //Default value used as suggested by MFW
lldp_params.tx_hold = 4; //Default value used as suggested by MFW
lldp_params.tx_credit = 5; //Default value used as suggested by MFW
lldp_params.rx_enable = enable ? 1 : 0;
lldp_params.tx_enable = enable ? 1 : 0;
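	/*
	 * The next three dwords hold the chassis-ID TLV in wire order:
	 * byte 0 is the 7-bit TLV type shifted left by one (the low bit is
	 * the length MSB, zero here), byte 1 the TLV length (subtype octet
	 * plus 6 MAC bytes = 7), byte 2 the chassis-ID subtype and byte 3
	 * the first MAC byte; the remaining MAC bytes fill the next dwords.
	 * The port-ID TLV below is packed the same way.
	 */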
lldp_params.chassis_id_tlv[0] = 0;
lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
lldp_params.chassis_id_tlv[0] |=
((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
lldp_params.chassis_id_tlv[2] = lldp_mac[5];
lldp_params.port_id_tlv[0] = 0;
lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
lldp_params.port_id_tlv[0] |=
((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
lldp_params.port_id_tlv[2] = lldp_mac[5];
ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: ecore_lldp_set_params failed\n", __func__);
return (-1);
}
// If LLDP is disabled, also clear the discard_mandatory_tlv flag
if (!enable) {
tlv_params.discard_mandatory_tlv = false;
tlv_params.buf_size = 0;
ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
}
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: ecore_lldp_set_system_tlvs failed\n", __func__);
}
return (ret);
}
static int
qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
int ret = 0;
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register Port ID TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register TTL TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register Port Description TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register System Name TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register System Description TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register System Capabilities TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
//register Management Address TLV
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
goto qlnx_register_default_lldp_tlvs_exit;
}
// Register the Organizationally Specific TLVs
ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
if (ret != ECORE_SUCCESS) {
device_printf(ha->pci_dev,
"%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
}
qlnx_register_default_lldp_tlvs_exit:
return (ret);
}
int
qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
{
int ret = 0;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
struct ecore_lldp_sys_tlvs tlv_params;
p_hwfn = &ha->cdev.hwfns[0];
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
device_printf(ha->pci_dev,
"%s: ecore_ptt_acquire failed\n", __func__);
return (ENXIO);
}
ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);
if (ret) {
device_printf(ha->pci_dev,
"%s: qlnx_lldp_configure disable failed\n", __func__);
goto qlnx_set_lldp_tlvx_exit;
}
ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);
if (ret) {
device_printf(ha->pci_dev,
"%s: qlnx_register_default_lldp_tlvs failed\n",
__func__);
goto qlnx_set_lldp_tlvx_exit;
}
ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);
if (ret) {
device_printf(ha->pci_dev,
"%s: qlnx_lldp_configure enable failed\n", __func__);
goto qlnx_set_lldp_tlvx_exit;
}
if (lldp_tlvs != NULL) {
bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));
tlv_params.discard_mandatory_tlv =
(lldp_tlvs->discard_mandatory_tlv ? true: false);
tlv_params.buf_size = lldp_tlvs->buf_size;
memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);
ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
if (ret) {
device_printf(ha->pci_dev,
"%s: ecore_lldp_set_system_tlvs failed\n",
__func__);
}
}
qlnx_set_lldp_tlvx_exit:
ecore_ptt_release(p_hwfn, p_ptt);
return (ret);
}
#endif /* #ifdef QLNX_USER_LLDP */
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
@ -854,6 +1084,12 @@ qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
}
break;
#ifdef QLNX_USER_LLDP
case QLNX_SET_LLDP_TLVS:
rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
break;
#endif /* #ifdef QLNX_USER_LLDP */
default:
rval = EINVAL;
break;


@ -239,6 +239,36 @@ struct qlnx_storm_stats_dump {
typedef struct qlnx_storm_stats_dump qlnx_storm_stats_dump_t;
#define QLNX_LLDP_TYPE_END_OF_LLDPDU 0
#define QLNX_LLDP_TYPE_CHASSIS_ID 1
#define QLNX_LLDP_TYPE_PORT_ID 2
#define QLNX_LLDP_TYPE_TTL 3
#define QLNX_LLDP_TYPE_PORT_DESC 4
#define QLNX_LLDP_TYPE_SYS_NAME 5
#define QLNX_LLDP_TYPE_SYS_DESC 6
#define QLNX_LLDP_TYPE_SYS_CAPS 7
#define QLNX_LLDP_TYPE_MGMT_ADDR 8
#define QLNX_LLDP_TYPE_ORG_SPECIFIC 127
#define QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS 1 // Subtype is 1 byte
#define QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC 0x04 // MAC address
#define QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN 6 // MAC address is 6 bytes
#define QLNX_LLDP_CHASSIS_ID_SUBTYPE_IF_NAME 0x06 // Interface name
#define QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS 1 // Subtype is 1 byte
#define QLNX_LLDP_PORT_ID_SUBTYPE_MAC 0x03 // MAC address
#define QLNX_LLDP_PORT_ID_MAC_ADDR_LEN 6 // MAC address is 6 bytes
#define QLNX_LLDP_PORT_ID_SUBTYPE_IF_NAME 0x05 // Interface name
#define QLNX_LLDP_SYS_TLV_SIZE 256
struct qlnx_lldp_sys_tlvs {
int discard_mandatory_tlv;
uint8_t buf[QLNX_LLDP_SYS_TLV_SIZE];
uint16_t buf_size;
};
typedef struct qlnx_lldp_sys_tlvs qlnx_lldp_sys_tlvs_t;
/*
* Read grcdump size
*/
@ -309,8 +339,9 @@ typedef struct qlnx_storm_stats_dump qlnx_storm_stats_dump_t;
*/
#define QLNX_TRACE _IOWR('q', 14, qlnx_trace_t)
/*
* Set LLDP TLVS
*/
#define QLNX_SET_LLDP_TLVS _IOWR('q', 15, qlnx_lldp_sys_tlvs_t)
#endif /* #ifndef _QLNX_IOCTL_H_ */
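For reference, a minimal userland sketch of exercising the new ioctl; the /dev/qlnxe0 node name and the empty payload are assumptions for illustration, not part of this commit:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "qlnx_ioctl.h"

int
main(void)
{
	qlnx_lldp_sys_tlvs_t tlvs;
	int fd;

	if ((fd = open("/dev/qlnxe0", O_RDWR)) < 0) {	/* assumed node name */
		perror("open");
		return (1);
	}
	memset(&tlvs, 0, sizeof(tlvs));
	tlvs.discard_mandatory_tlv = 0;	/* keep the mandatory TLVs */
	tlvs.buf_size = 0;		/* no extra system TLVs in this sketch */
	if (ioctl(fd, QLNX_SET_LLDP_TLVS, &tlvs) < 0)
		perror("QLNX_SET_LLDP_TLVS");
	close(fd);
	return (0);
}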

File diff suppressed because it is too large


@ -94,6 +94,12 @@
#include <sys/smp.h>
#include <sys/sched.h>
#ifdef CONFIG_ECORE_SRIOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif /* #ifdef CONFIG_ECORE_SRIOV */
static __inline int qlnx_ms_to_hz(int ms)
{
int qlnx_hz;


@ -37,7 +37,7 @@
* version numbers
*/
#define QLNX_VERSION_MAJOR 1
#define QLNX_VERSION_MINOR 4
#define QLNX_VERSION_BUILD 7
#define QLNX_VERSION_MAJOR 2
#define QLNX_VERSION_MINOR 0
#define QLNX_VERSION_BUILD 112


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,43 +30,929 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************/
/* RDMA FW CONSTANTS */
/************************/
/************************************************************************/
/* Add include to common rdma target for both eCore and protocol rdma driver */
/************************************************************************/
#define RDMA_RESERVED_LKEY (0) //Reserved lkey
#define RDMA_RING_PAGE_SIZE (0x1000) //4KB pages
#define RDMA_RESERVED_LKEY (0) //Reserved lkey
#define RDMA_RING_PAGE_SIZE (0x1000) //4KB pages
#define RDMA_MAX_SGE_PER_SQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_SQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) //max size of data in single request
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) //max size of data in single request
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
#define RDMA_MAX_CQS (64*1024)
#define RDMA_MAX_TIDS (128*1024-1)
#define RDMA_MAX_PDS (64*1024)
#define RDMA_MAX_CQS (64*1024)
#define RDMA_MAX_TIDS (128*1024-1)
#define RDMA_MAX_PDS (64*1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32*1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
struct rdma_srq_id
{
__le16 srq_idx /* SRQ index */;
__le16 opaque_fid;
__le16 srq_idx /* SRQ index */;
__le16 opaque_fid;
};
struct rdma_srq_producers
{
__le32 sge_prod /* Current produced sge in SRQ */;
__le32 wqe_prod /* Current produced WQE to SRQ */;
__le32 sge_prod /* Current produced sge in SRQ */;
__le32 wqe_prod /* Current produced WQE to SRQ */;
};
/*
* rdma completion notification queue element
*/
struct rdma_cnqe
{
struct regpair cq_handle;
};
struct rdma_cqe_responder
{
struct regpair srq_wr_id;
struct regpair qp_handle;
__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
__le32 length;
__le32 imm_data_hi /* High bytes of immediate data in case imm_flg is set in iWARP only */;
__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1 /* r_key invalidated indicator */
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1 /* immediate data indicator */
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
u8 status;
};
struct rdma_cqe_requester
{
__le16 sq_cons;
__le16 reserved0;
__le32 reserved1;
struct regpair qp_handle;
struct regpair reserved2;
__le32 reserved3;
__le16 reserved4;
u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
u8 status;
};
struct rdma_cqe_common
{
struct regpair reserved0;
struct regpair qp_handle;
__le16 reserved1[7];
u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT 1
#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
u8 status;
};
/*
* rdma completion queue element
*/
union rdma_cqe
{
struct rdma_cqe_responder resp;
struct rdma_cqe_requester req;
struct rdma_cqe_common cmn;
};
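A consumer typically detects a freshly produced CQE via the toggle bit and then dispatches on the type field. A minimal sketch, assuming the GET_FIELD helper from the ecore common headers and a caller-tracked phase bit:

static inline int
qlnx_rdma_cqe_is_valid(union rdma_cqe *cqe, u8 phase)
{
	/* FW flips the toggle bit on every pass over the CQ ring. */
	return (GET_FIELD(cqe->cmn.flags, RDMA_CQE_COMMON_TOGGLE_BIT) == phase);
}

Once valid, GET_FIELD(cqe->cmn.flags, RDMA_CQE_COMMON_TYPE) selects which union member (req/resp/cmn) to interpret.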
/*
* CQE requester status enumeration
*/
enum rdma_cqe_requester_status_enum
{
RDMA_CQE_REQ_STS_OK,
RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
/*
* CQE responder status enumeration
*/
enum rdma_cqe_responder_status_enum
{
RDMA_CQE_RESP_STS_OK,
RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};
/*
* CQE type enumeration
*/
enum rdma_cqe_type
{
RDMA_CQE_TYPE_REQUESTER,
RDMA_CQE_TYPE_RESPONDER_RQ,
RDMA_CQE_TYPE_RESPONDER_SRQ,
RDMA_CQE_TYPE_INVALID,
MAX_RDMA_CQE_TYPE
};
/*
* DIF Block size options
*/
enum rdma_dif_block_size
{
RDMA_DIF_BLOCK_512=0,
RDMA_DIF_BLOCK_4096=1,
MAX_RDMA_DIF_BLOCK_SIZE
};
/*
* DIF CRC initial value
*/
enum rdma_dif_crc_seed
{
RDMA_DIF_CRC_SEED_0000=0,
RDMA_DIF_CRC_SEED_FFFF=1,
MAX_RDMA_DIF_CRC_SEED
};
/*
* RDMA DIF Error Result Structure
*/
struct rdma_dif_error_result
{
__le32 error_intervals /* Total number of error intervals in the IO. */;
__le32 dif_error_1st_interval /* Number of the first interval that contained error. Set to 0xFFFFFFFF if error occurred in the Runt Block. */;
u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1 /* CRC error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1 /* App Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1 /* Ref Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1 /* Used to indicate the structure is valid. Toggles each time an invalidate region is performed. */
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
};
/*
* DIF IO direction
*/
enum rdma_dif_io_direction_flg
{
RDMA_DIF_DIR_RX=0,
RDMA_DIF_DIR_TX=1,
MAX_RDMA_DIF_IO_DIRECTION_FLG
};
/*
* RDMA DIF Runt Result Structure
*/
struct rdma_dif_runt_result
{
__le16 guard_tag /* CRC result of received IO. */;
__le16 reserved[3];
};
/*
* memory window type enumeration
*/
enum rdma_mw_type
{
RDMA_MW_TYPE_1,
RDMA_MW_TYPE_2A,
MAX_RDMA_MW_TYPE
};
struct rdma_rq_sge
{
struct regpair addr;
__le32 length;
__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF /* key of memory relating to this RQ */
#define RDMA_RQ_SGE_L_KEY_SHIFT 0
#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 /* first SGE - number of SGEs in this RQ WQE. Other SGEs - should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
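These MASK/SHIFT pairs are meant to be driven through the SET_FIELD/GET_FIELD helpers in the common ecore headers; a hedged sketch of composing the flags dword (sge and l_key are illustrative locals, and the ring expects little-endian):

	u32 flags = 0;

	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, l_key);	/* lkey of the target MR */
	SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 2);	/* set on the first SGE only */
	sge->flags = htole32(flags);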
struct rdma_sq_atomic_wqe
{
__le32 reserved1;
__le32 length /* Total data length (8 bytes for Atomic) */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1 /* Dont care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
struct regpair remote_va /* remote virtual address */;
__le32 r_key /* Remote key */;
__le32 reserved2;
struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
struct regpair swap_data /* Swap or add data */;
};
/*
* First element (16 bytes) of atomic wqe
*/
struct rdma_sq_atomic_wqe_1st
{
__le32 reserved1;
__le32 length /* Total data length (8 bytes for Atomic) */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1 /* Dont care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of atomic wqe
*/
struct rdma_sq_atomic_wqe_2nd
{
struct regpair remote_va /* remote virtual address */;
__le32 r_key /* Remote key */;
__le32 reserved2;
};
/*
* Third element (16 bytes) of atomic wqe
*/
struct rdma_sq_atomic_wqe_3rd
{
struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
struct regpair swap_data /* Swap or add data */;
};
struct rdma_sq_bind_wqe
{
struct regpair addr;
__le32 l_key;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1 /* Dont care for bind wqe */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
u8 wqe_size /* Size of WQE in 16B chunks */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 /* zero based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
u8 reserved3;
u8 length_hi /* upper 8 bits of the registered MW length */;
__le32 length_lo /* lower 32 bits of the registered MW length */;
__le32 parent_l_key /* l_key of the parent MR */;
__le32 reserved4;
};
/*
* First element (16 bytes) of bind wqe
*/
struct rdma_sq_bind_wqe_1st
{
struct regpair addr;
__le32 l_key;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1 /* Dont care for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
u8 wqe_size /* Size of WQE in 16B chunks */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of bind wqe
*/
struct rdma_sq_bind_wqe_2nd
{
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 /* zero based indication */
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
u8 reserved3;
u8 length_hi /* upper 8 bits of the registered MW length */;
__le32 length_lo /* lower 32 bits of the registered MW length */;
__le32 parent_l_key /* l_key of the parent MR */;
__le32 reserved4;
};
/*
* Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
*/
struct rdma_sq_common_wqe
{
__le32 reserved1[3];
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1 /* If set, signal the responder to generate a solicited event on this WQE (only relevant in SENDs and RDMA write with Imm) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1 /* if set, indicates inline data is following this WQE instead of SGEs (only relevant in SENDs and RDMA writes) */
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_fmr_wqe
{
struct regpair addr;
__le32 l_key;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1 /* Dont care for FMR wqe */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
u8 wqe_size /* Size of WQE in 16B chunks */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1 /* zero based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
u8 reserved3;
u8 length_hi /* upper 8 bits of the registered MR length */;
__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
struct regpair pbl_addr /* Address of PBL */;
__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
__le16 dif_app_tag /* App tag of all DIF Blocks. */;
__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 /* In RX IO, Ref Tag will remain at constant value of dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
__le32 Reserved5;
};
/*
* First element (16 bytes) of fmr wqe
*/
struct rdma_sq_fmr_wqe_1st
{
struct regpair addr;
__le32 l_key;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1 /* Dont care for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
u8 wqe_size /* Size of WQE in 16B chunks */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of fmr wqe
*/
struct rdma_sq_fmr_wqe_2nd
{
u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1 /* zero based indication */
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
u8 reserved3;
u8 length_hi /* upper 8 bits of the registered MR length */;
__le32 length_lo /* lower 32 bits of the registered MR length. */;
struct regpair pbl_addr /* Address of PBL */;
};
/*
* Third element (16 bytes) of fmr wqe
*/
struct rdma_sq_fmr_wqe_3rd
{
__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
__le16 dif_app_tag /* App tag of all DIF Blocks. */;
__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on host buffer. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
__le32 Reserved5;
};
struct rdma_sq_local_inv_wqe
{
struct regpair reserved;
__le32 inv_l_key /* The invalidate local key */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1 /* Dont care for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
u8 wqe_size /* Size of WQE in 16B chunks */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_rdma_wqe
{
__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
__le32 length /* Total data length. If DIF on host is enabled, length does NOT include DIF guards. */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 /* if set, indicates inline data is following this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 /* If set, indicated read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
struct regpair remote_va /* Remote virtual address */;
__le32 r_key /* Remote key */;
u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes first RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes last RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
u8 reserved2[3];
};
/*
* First element (16 bytes) of rdma wqe
*/
struct rdma_sq_rdma_wqe_1st
{
__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
__le32 length /* Total data length */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1 /* if set, indicates inline data is following this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1 /* If set, indicated read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of rdma wqe
*/
struct rdma_sq_rdma_wqe_2nd
{
struct regpair remote_va /* Remote virtual address */;
__le32 r_key /* Remote key */;
u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes first DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes last DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
u8 reserved2[3];
};
/*
* SQ WQE req type enumeration
*/
enum rdma_sq_req_type
{
RDMA_SQ_REQ_TYPE_SEND,
RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
RDMA_SQ_REQ_TYPE_RDMA_WR,
RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
RDMA_SQ_REQ_TYPE_RDMA_RD,
RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
RDMA_SQ_REQ_TYPE_FAST_MR,
RDMA_SQ_REQ_TYPE_BIND,
RDMA_SQ_REQ_TYPE_INVALID,
MAX_RDMA_SQ_REQ_TYPE
};
struct rdma_sq_send_wqe
{
__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
__le32 length /* Total data length */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1 /* if set, indicates inline data is following this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* Should be 0 for send wqe */
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
__le32 reserved1[4];
};
struct rdma_sq_send_wqe_1st
{
__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
__le32 length /* Total data length */;
__le32 xrc_srq /* Valid only when XRC is set for the QP */;
u8 req_type /* Type of WQE */;
u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1 /* if set, indicates inline data is following this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_send_wqe_2st
{
__le32 reserved1[4];
};
struct rdma_sq_sge
{
__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
struct regpair addr;
__le32 l_key;
};
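As the wqe_size comments above spell out, SQ WQEs are sized in 16-byte elements: one header element plus one element per SGE (or per 16 bytes of inline data). A short illustration of sizing a two-SGE SEND; qlnx_sq_produce() is a hypothetical producer, not an ecore API:

	struct rdma_sq_send_wqe_1st *hdr;
	int nsges = 2;

	hdr = qlnx_sq_produce(qp);		/* hypothetical: next free 16B element */
	hdr->req_type = RDMA_SQ_REQ_TYPE_SEND;
	hdr->wqe_size = nsges + 1;		/* header element + two SGE elements */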
struct rdma_srq_wqe_header
{
struct regpair wr_id;
u8 num_sges /* number of SGEs in WQE */;
u8 reserved2[7];
};
struct rdma_srq_sge
{
struct regpair addr;
__le32 length;
__le32 l_key;
};
/*
 * rdma srq element (WQE header or SGE)
 */
union rdma_srq_elm
{
struct rdma_srq_wqe_header header;
struct rdma_srq_sge sge;
};
/*
* Rdma doorbell data for flags update
*/
struct rdma_pwm_flags_data
{
__le16 icid /* internal CID */;
u8 agg_flags /* aggregative flags */;
u8 reserved;
};
/*
* Rdma doorbell data for SQ and RQ
*/
struct rdma_pwm_val16_data
{
__le16 icid /* internal CID */;
__le16 value /* aggregated value to update */;
};
union rdma_pwm_val16_data_union
{
struct rdma_pwm_val16_data as_struct /* Parameters field */;
__le32 as_dword;
};
/*
* Rdma doorbell data for CQ
*/
struct rdma_pwm_val32_data
{
__le16 icid /* internal CID */;
u8 agg_flags /* bit for every DQ counter flags in CM context that DQ can increment */;
u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1 /* Connection type is iWARP */
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1 /* Flag indicating 16b variable should be updated. Should be used when conn_type_is_iwarp is used */
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
__le32 value /* aggregated value to update */;
};
union rdma_pwm_val32_data_union
{
struct rdma_pwm_val32_data as_struct /* Parameters field */;
struct regpair as_repair;
};
#endif /* __RDMA_COMMON__ */

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -64,6 +64,9 @@ enum roce_async_events_type
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
MAX_ROCE_ASYNC_EVENTS_TYPE
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -105,7 +105,7 @@ enum spad_sections {
SPAD_SECTION_NVM_CFG,
SPAD_SECTION_PUBLIC,
SPAD_SECTION_PRIVATE,
SPAD_SECTION_MAX
SPAD_SECTION_MAX /* Cannot be modified anymore since the ROM relies on this size! */
};
#ifndef MDUMP_PARSE_TOOL
@ -120,7 +120,7 @@ struct spad_layout {
#endif /* MDUMP_PARSE_TOOL */
#define MCP_TRACE_SIZE 2048 /* 2kb */
#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + __builtin_offsetof(struct static_init, f))
/* This section is located at a fixed location in the beginning of the scratchpad,
@ -130,12 +130,23 @@ struct spad_layout {
* Moreover, the spad_layout section is part of the MFW firmware, and is loaded with it
* from nvram in order to clear this portion.
*/
struct static_init {
u32 num_sections; /* 0xe20000 */
offsize_t sections[SPAD_SECTION_MAX]; /* 0xe20004 */
#define SECTION(_sec_) *((offsize_t*)(STRUCT_OFFSET(sections[_sec_])))
#ifdef SECURE_BOOT
u32 tim_sha256[8]; /* Used by E5 ROM. Do not relocate */
u32 rom_status_code; /* Used by E5 ROM. Do not relocate */
u32 secure_running_mfw; /* Instead of the one after the trace_buffer */ /* Used by E5 ROM. Do not relocate */
#define SECURE_RUNNING_MFW *((u32*)(STRUCT_OFFSET(secure_running_mfw)))
#endif
struct mcp_trace trace; /* 0xe20014 */
#ifdef MFW
#define MCP_TRACE_P ((struct mcp_trace*)(STRUCT_OFFSET(trace)))
u8 trace_buffer[MCP_TRACE_SIZE]; /* 0xe20030 */
#define MCP_TRACE_BUF ((u8*)(STRUCT_OFFSET(trace_buffer)))
@ -183,11 +194,15 @@ struct static_init {
#define FLAGS_PEND_SMBUS_VMAIN_TO_AUX (1 << 10)
#define FLAGS_NVM_CFG_EFUSE_FAILURE (1 << 11)
#define FLAGS_POWER_TRANSITION (1 << 12)
#define FLAGS_MCTP_CHECK_PUMA_TIMEOUT (1 << 13)
#define FLAGS_MCTP_TX_PLDM_UPDATE (1 << 14)
#define FLAGS_MCTP_SENSOR_EVENT (1 << 15)
#define FLAGS_PMBUS_ERROR (1 << 28)
#define FLAGS_OS_DRV_LOADED (1 << 29)
#define FLAGS_OVER_TEMP_OCCUR (1 << 30)
#define FLAGS_FAN_FAIL_OCCUR (1 << 31)
u32 rsrv_persist[4]; /* Persist reserved for MFW upgrades */ /* 0xe20854 */
#endif /* MFW */
};
#ifndef MDUMP_PARSE_TOOL


@ -35,7 +35,7 @@
/*********************/
#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
// Each Resource ID is mapped one-to-one by the driver to a BDQ Resource ID (for instance per port)
#define BDQ_NUM_RESOURCES (4)
@ -51,6 +51,7 @@
/* SCSI op codes */
#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
#define SCSI_OPCODE_READ_10 (0x28)
#define SCSI_OPCODE_WRITE_6 (0x0A)
#define SCSI_OPCODE_WRITE_10 (0x2A)
#define SCSI_OPCODE_WRITE_12 (0xAA)
@ -151,19 +152,19 @@ struct scsi_init_func_queues
__le16 cmdq_num_entries /* CMDQ num entries */;
u8 bdq_resource_id /* Each function-init Ramrod maps its function ID to a BDQ function ID, each BDQ function ID contains per-BDQ-ID BDQs */;
u8 q_validity;
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_TMWO_EN_MASK 0x1 /* This bit is valid if TQ is enabled for this function, tmwo option enabled/disabled */
#define SCSI_INIT_FUNC_QUEUES_TMWO_EN_SHIFT 4
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x7
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 5
__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS] /* CQ/CMDQ status block number array */;
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 /* This bit is valid if TQ is enabled for this function, SOC option enabled/disabled */
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 /* Relevant for TQe SOC option - num of blocks in SGE - log */
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS] /* CQ/CMDQ status block number array */;
u8 num_queues /* Number of continuous global queues used */;
u8 queue_relative_offset /* offset of continuous global queues used */;
u8 cq_sb_pi /* Protocol Index of CQ in status block (CQ consumer) */;


@ -69,7 +69,7 @@ struct tcp_init_params
{
__le32 two_msl_timer /* 2MSL (used for TIME_WAIT state) timeout value */;
__le16 tx_sws_timer /* Transmission silly window syndrome timeout value */;
u8 maxFinRT /* Minimum Fin RT */;
u8 max_fin_rt /* Minimum Fin RT */;
u8 reserved[9];
};
@ -97,24 +97,29 @@ struct tcp_offload_params
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
u8 flags;
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 /* timestamp enable */
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 /* delayed ack enabled */
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 /* keep alive enabled */
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 /* nagle algorithm enabled */
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 /* delayed ack counter enabled */
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 /* fin already sent to far end */
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 /* fin received */
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
u8 ip_version;
__le16 flags;
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 /* timestamp enable */
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 /* delayed ack enabled */
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 /* keep alive enabled */
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 /* ECN sender enabled */
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 /* ECN receiver enabled */
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 /* nagle algorithm enabled */
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 /* delayed ack counter enabled */
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 /* fin already sent to far end */
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 /* fin received */
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
u8 ip_version /* (use enum tcp_ip_version) */;
u8 reserved0[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@ -124,19 +129,23 @@ struct tcp_offload_params
__le16 local_port;
__le16 mss /* the mss derived from remote mss and local mtu, ipVersion options and tags */;
u8 rcv_wnd_scale;
u8 connect_mode /* TCP connect mode: use enum tcp_connect_mode */;
u8 connect_mode /* TCP connect mode (use enum tcp_connect_mode) */;
__le16 srtt /* in ms */;
__le32 cwnd /* absolute congestion window */;
__le32 ss_thresh;
__le16 reserved1;
__le32 rcv_wnd /* absolute receive window (not scaled) */;
__le32 cwnd /* absolute congestion window */;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
__le16 reserved1;
__le32 ka_timeout /* This member specifies, in ms, the timeout interval for inactivity before sending a keepalive probe */;
__le32 ka_interval /* This member specifies, in ms, the timeout after which to retransmit a keepalive frame if no response is received to a keepalive probe */;
__le32 max_rt_time /* This member specifies, in ms, the maximum time that the offload target should spend retransmitting a segment */;
__le32 initial_rcv_wnd /* Initial receive window */;
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd /* absolute send window (not scaled) */;
__le32 rcv_wnd /* absolute receive window (not scaled) */;
__le32 snd_wl1 /* the segment sequence number used for the last window update */;
__le32 ts_recent /* The timestamp value to send in the next ACK */;
__le32 ts_recent_age /* The length of time, in ms, since the most recent timestamp was received */;
@ -149,14 +158,10 @@ struct tcp_offload_params
u8 rt_cnt /* The number of retransmits that have been sent */;
__le16 rtt_var /* in ms */;
__le16 fw_internal /* fw internal use - initialize value = 0 */;
__le32 ka_timeout /* This member specifies, in ms, the timeout interval for inactivity before sending a keepalive probe */;
__le32 ka_interval /* This member specifies, in ms, the timeout after which to retransmit a keepalive frame if no response is received to a keepalive probe */;
__le32 max_rt_time /* This member specifies, in ms, the maximum time that the offload target should spend retransmitting a segment */;
__le32 initial_rcv_wnd /* Initial receive window */;
u8 snd_wnd_scale;
u8 ack_frequency /* delayed ack counter threshold */;
__le16 da_timeout_value /* delayed ack timeout value in ms */;
__le32 reserved3[2];
__le32 reserved3;
};
@ -172,16 +177,19 @@ struct tcp_offload_params_opt2
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
u8 flags;
__le16 flags;
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 /* timestamp enable */
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 /* delayed ack enabled */
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 /* keep alive enabled */
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
u8 ip_version;
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 /* ECN enabled */
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
u8 ip_version /* (use enum tcp_ip_version) */;
u8 reserved1[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@ -191,11 +199,17 @@ struct tcp_offload_params_opt2
__le16 local_port;
__le16 mss /* the mss derived from remote mss and local mtu, ipVersion options and tags */;
u8 rcv_wnd_scale;
u8 connect_mode /* TCP connect mode: use enum tcp_connect_mode */;
u8 connect_mode /* TCP connect mode (use enum tcp_connect_mode) */;
__le16 syn_ip_payload_length /* length of TCP header in SYN packet - relevant for passive mode */;
__le32 syn_phy_addr_lo /* physical address (low) of SYN buffer - relevant for passive mode */;
__le32 syn_phy_addr_hi /* physical address (high) of SYN buffer - relevant for passive mode */;
__le32 reserved1[22];
__le32 cwnd /* absolute congestion window */;
u8 ka_max_probe_cnt;
u8 reserved2[3];
__le32 ka_timeout /* This member specifies, in ms, the timeout interval for inactivity before sending a keepalive probe */;
__le32 ka_interval /* This member specifies, in ms, the timeout after which to retransmit a keepalive frame if no response is received to a keepalive probe */;
__le32 max_rt_time /* This member specifies, in ms, the maximum time that the offload target should spend retransmitting a segment */;
__le32 reserved3[16];
};


@ -36,5 +36,6 @@ SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
SUBDIR=qlnxe
SUBDIR+=qlnxev
.include <bsd.subdir.mk>


@ -42,6 +42,10 @@ SRCS=ecore_cxt.c ecore_dcbx.c ecore_dev.c ecore_hw.c
SRCS+=ecore_init_fw_funcs.c ecore_int.c ecore_mcp.c
SRCS+=ecore_sp_commands.c ecore_spq.c ecore_l2.c
SRCS+=ecore_init_ops.c ecore_dbg_fw_funcs.c
SRCS+=ecore_mng_tlv.c
SRCS+=ecore_sriov.c
SRCS+=ecore_vf.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
@ -49,6 +53,8 @@ SRCS+=qlnx_os.c
SRCS+= device_if.h
SRCS+= bus_if.h
SRCS+= pci_if.h
SRCS+= pci_iov_if.h
.include <bsd.kmod.mk>
@ -65,6 +71,8 @@ CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
#CFLAGS += -DQLNX_SOFT_LRO
#CFLAGS += -DQLNX_QSORT_LRO
#CFLAGS += -DQLNX_MAX_COALESCE
#CFLAGS += -DQLNX_RCV_IN_TASKQ
#CFLAGS += -DQLNX_USER_LLDP
CFLAGS += -DCONFIG_ECORE_SRIOV
CWARNFLAGS+= -Wno-cast-qual


@ -0,0 +1,79 @@
#/*
# * Copyright (c) 2017-2018 Cavium, Inc.
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# */
# /*
# * File : Makefile
# * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
# */
#
# $FreeBSD$
#
.PATH: ${SRCTOP}/sys/dev/qlnx/qlnxe
#.PATH: ${.CURDIR}/../qlnxe
KMOD=if_qlnxev
SRCS=ecore_cxt.c ecore_dcbx.c ecore_dev.c ecore_hw.c
SRCS+=ecore_init_fw_funcs.c ecore_int.c ecore_mcp.c
SRCS+=ecore_sp_commands.c ecore_spq.c ecore_l2.c
SRCS+=ecore_init_ops.c ecore_dbg_fw_funcs.c
SRCS+=ecore_mng_tlv.c
#SRIOV related
SRCS+=ecore_sriov.c
SRCS+=ecore_vf.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
SRCS+= device_if.h
SRCS+= bus_if.h
SRCS+= pci_if.h
SRCS+= pci_iov_if.h
.include <bsd.kmod.mk>
CFLAGS += -DQLNX_DEBUG
CFLAGS += -DECORE_PACKAGE
CFLAGS += -DCONFIG_ECORE_L2
CFLAGS += -DECORE_CONFIG_DIRECT_HWFN
CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
CFLAGS += -g
CFLAGS += -fno-inline
#CFLAGS += -DQLNX_SOFT_LRO
#CFLAGS += -DQLNX_QSORT_LRO
#CFLAGS += -DQLNX_MAX_COALESCE
#SRIOV related
CFLAGS += -DCONFIG_ECORE_SRIOV
CFLAGS += -DQLNX_VF
CWARNFLAGS+= -Wno-cast-qual
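Note that this new Makefile builds the VF-side module (if_qlnxev) from the same ecore and qlnx sources as the PF driver; the added -DQLNX_VF flag is what selects the VF code paths at compile time.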