RDMA/bnxt_re: Add bnxt_re RoCE driver

This patch introduces the RoCE driver for the
Broadcom NetXtreme-E 10/25/50/100/200G RoCE HCAs.

The RoCE driver is a two-part driver that relies
on the bnxt_en NIC driver to operate. The changes
needed in the bnxt_en driver are included in
another patch, "L2-RoCE driver communication
interface", in this set.

Presently there is no user space support, so the
krping kernel module is recommended for testing.
User space support will be added in subsequent
patch submissions.

Reviewed by:            imp
Approved by:            imp
Differential revision:  https://reviews.freebsd.org/D45011
Author: Sumit Saxena
Date:   2024-05-28 10:31:59 +00:00
parent 862af86f4b
commit acd884dec9
17 changed files with 22790 additions and 0 deletions


@@ -0,0 +1,177 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Uverbs ABI header file
*/
#ifndef __BNXT_RE_UVERBS_ABI_H__
#define __BNXT_RE_UVERBS_ABI_H__
#include <asm/types.h>
#include <linux/types.h>
#define BNXT_RE_ABI_VERSION 6
enum {
BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED = 0x01,
BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED = 0x02,
BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED = 0x04,
BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED = 0x08,
BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED = 0x10,
BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED = 0x20,
BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40
};
enum {
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE = 0x02
};
struct bnxt_re_uctx_req {
__aligned_u64 comp_mask;
};
#define BNXT_RE_CHIP_ID0_CHIP_NUM_SFT 0x00
#define BNXT_RE_CHIP_ID0_CHIP_REV_SFT 0x10
#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18
struct bnxt_re_uctx_resp {
__u32 dev_id;
__u32 max_qp;
__u32 pg_size;
__u32 cqe_sz;
__u32 max_cqd;
__u32 chip_id0;
__u32 chip_id1;
__u32 modes;
__aligned_u64 comp_mask;
} __attribute__((packed));
enum {
BNXT_RE_COMP_MASK_PD_HAS_WC_DPI = 0x01,
BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR = 0x02,
};
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
__u64 comp_mask;
__u32 wcdpi;
__u64 dbr_bar_addr;
} __attribute__((packed));
enum {
BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO = 0x01,
BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI = 0x02,
BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE = 0x04,
};
enum {
BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK = 0x1
};
enum {
BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY = 0x1,
BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_PACING_NOTIFY = 0x2
};
#define BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(_req) \
(_req.comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK && \
_req.cq_capability & BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_PACING_NOTIFY)
#define BNXT_RE_IS_DBR_RECOV_CQ(_req) \
(_req.comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK && \
_req.cq_capability & BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY)
struct bnxt_re_cq_req {
__u64 cq_va;
__u64 cq_handle;
__aligned_u64 comp_mask;
__u16 cq_capability;
} __attribute__((packed));
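/*
 * Illustrative sketch only: how the capability macros above are meant to be
 * applied once a create-CQ request has been copied in from udata. The helper
 * name below is hypothetical and not part of this ABI header.
 */
static inline void bnxt_re_example_cq_caps(struct bnxt_re_cq_req req,
					   bool *dbr_recov_cq,
					   bool *pacing_notify_cq)
{
	/* Both macros validate cq_capability via comp_mask before testing it. */
	*dbr_recov_cq = BNXT_RE_IS_DBR_RECOV_CQ(req);
	*pacing_notify_cq = BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(req);
}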
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
__aligned_u64 comp_mask;
__u32 dpi;
__u64 dbr;
__u32 wcdpi;
__u64 uctx_cq_page;
} __attribute__((packed));
struct bnxt_re_resize_cq_req {
__u64 cq_va;
} __attribute__((packed));
struct bnxt_re_qp_req {
__u64 qpsva;
__u64 qprva;
__u64 qp_handle;
} __attribute__((packed));
struct bnxt_re_qp_resp {
__u32 qpid;
} __attribute__((packed));
struct bnxt_re_srq_req {
__u64 srqva;
__u64 srq_handle;
} __attribute__((packed));
struct bnxt_re_srq_resp {
__u32 srqid;
} __attribute__((packed));
/* Modify QP */
enum {
BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK = 0x1,
BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN = 0x1,
BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK = 0x2
};
struct bnxt_re_modify_qp_ex_req {
__aligned_u64 comp_mask;
__u32 dpi;
__u32 rsvd;
} __packed;
struct bnxt_re_modify_qp_ex_resp {
__aligned_u64 comp_mask;
__u32 ppp_st_idx;
__u32 path_mtu;
} __packed;
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
BNXT_RE_AVID_SIZE = 0x04,
BNXT_RE_END_RESV_OFFT = 0xFF0
};
#endif

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,632 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: IB Verbs interpreter (header)
*/
#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__
#include <rdma/ib_addr.h>
#include "bnxt_re-abi.h"
#include "qplib_res.h"
#include "qplib_fp.h"
struct bnxt_re_dev;
#define BNXT_RE_ROCE_V2_UDP_SPORT 0x8CD1
#define BNXT_RE_QP_RANDOM_QKEY 0x81818181
#ifndef IB_MTU_8192
#define IB_MTU_8192 8192
#endif
#ifndef SPEED_1000
#define SPEED_1000 1000
#endif
#ifndef SPEED_10000
#define SPEED_10000 10000
#endif
#ifndef SPEED_20000
#define SPEED_20000 20000
#endif
#ifndef SPEED_25000
#define SPEED_25000 25000
#endif
#ifndef SPEED_40000
#define SPEED_40000 40000
#endif
#ifndef SPEED_50000
#define SPEED_50000 50000
#endif
#ifndef SPEED_100000
#define SPEED_100000 100000
#endif
#ifndef SPEED_200000
#define SPEED_200000 200000
#endif
#ifndef IB_SPEED_HDR
#define IB_SPEED_HDR 64
#endif
#define RDMA_NETWORK_IPV4 1
#define RDMA_NETWORK_IPV6 2
#define ROCE_DMAC(x) (x)->dmac
#define dma_rmb() rmb()
#define compat_ib_alloc_device(size) ib_alloc_device(size);
#define rdev_from_cq_in(cq_in) to_bnxt_re_dev(cq_in->device, ibdev)
#define GET_UVERBS_ABI_VERSION(ibdev) (ibdev->uverbs_abi_ver)
#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB 0x1cUL
#define IB_POLL_UNBOUND_WORKQUEUE IB_POLL_WORKQUEUE
#define BNXT_RE_LEGACY_FENCE_BYTES 64
#define BNXT_RE_LEGACY_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE)
static inline struct
bnxt_re_cq *__get_cq_from_cq_in(struct ib_cq *cq_in,
struct bnxt_re_dev *rdev);
static inline struct
bnxt_re_qp *__get_qp_from_qp_in(struct ib_pd *qp_in,
struct bnxt_re_dev *rdev);
static inline bool
bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, u16 vlan_id);
#define bnxt_re_compat_qfwstr(void) \
bnxt_re_query_fw_str(struct ib_device *ibdev, \
char *str, size_t str_len)
static inline
struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap);
struct bnxt_re_gid_ctx {
u32 idx;
u32 refcnt;
};
struct bnxt_re_legacy_fence_data {
u32 size;
void *va;
dma_addr_t dma_addr;
struct bnxt_re_mr *mr;
struct ib_mw *mw;
struct bnxt_qplib_swqe bind_wqe;
u32 bind_rkey;
};
struct bnxt_re_pd {
struct ib_pd ibpd;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_re_legacy_fence_data fence;
};
struct bnxt_re_ah {
struct ib_ah ibah;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_ah qplib_ah;
};
struct bnxt_re_srq {
struct ib_srq ibsrq;
struct bnxt_re_dev *rdev;
u32 srq_limit;
struct bnxt_qplib_srq qplib_srq;
struct ib_umem *umem;
spinlock_t lock;
};
union ip_addr {
u32 ipv4_addr;
u8 ipv6_addr[16];
};
struct bnxt_re_qp_info_entry {
union ib_gid sgid;
union ib_gid dgid;
union ip_addr s_ip;
union ip_addr d_ip;
u16 s_port;
#define BNXT_RE_QP_DEST_PORT 4791
u16 d_port;
};
struct bnxt_re_qp {
struct ib_qp ib_qp;
struct list_head list;
struct bnxt_re_dev *rdev;
spinlock_t sq_lock;
spinlock_t rq_lock;
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;
/* QP1 */
u32 send_psn;
struct ib_ud_header qp1_hdr;
struct bnxt_re_cq *scq;
struct bnxt_re_cq *rcq;
struct dentry *qp_info_pdev_dentry;
struct bnxt_re_qp_info_entry qp_info_entry;
void *qp_data;
};
struct bnxt_re_cq {
struct ib_cq ibcq;
struct list_head cq_list;
struct bnxt_re_dev *rdev;
struct bnxt_re_ucontext *uctx;
spinlock_t cq_lock;
u16 cq_count;
u16 cq_period;
struct bnxt_qplib_cq qplib_cq;
struct bnxt_qplib_cqe *cql;
#define MAX_CQL_PER_POLL 1024
u32 max_cql;
struct ib_umem *umem;
struct ib_umem *resize_umem;
struct ib_ucontext *context;
int resize_cqe;
/* list of cq per uctx. Used only for Thor-2 */
void *uctx_cq_page;
void *dbr_recov_cq_page;
bool is_dbr_soft_cq;
};
struct bnxt_re_mr {
struct bnxt_re_dev *rdev;
struct ib_mr ib_mr;
struct ib_umem *ib_umem;
struct bnxt_qplib_mrw qplib_mr;
u32 npages;
u64 *pages;
struct bnxt_qplib_frpl qplib_frpl;
bool is_invalcb_active;
};
struct bnxt_re_frpl {
struct bnxt_re_dev *rdev;
struct bnxt_qplib_frpl qplib_frpl;
u64 *page_list;
};
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
struct bnxt_qplib_mrw qplib_mw;
};
struct bnxt_re_ucontext {
struct ib_ucontext ibucontext;
struct bnxt_re_dev *rdev;
struct list_head cq_list;
struct bnxt_qplib_dpi dpi;
struct bnxt_qplib_dpi wcdpi;
void *shpg;
spinlock_t sh_lock;
uint64_t cmask;
struct mutex cq_lock; /* Protect cq list */
void *dbr_recov_cq_page;
struct bnxt_re_cq *dbr_recov_cq;
};
struct bnxt_re_ah_info {
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
u16 vlan_tag;
u8 nw_type;
};
struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
int device_modify_mask,
struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
void bnxt_re_compat_qfwstr(void);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd_in, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
u32 flags, struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq_in,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int bnxt_re_create_cq(struct ib_cq *cq_in,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int bnxt_re_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int bnxt_re_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
u32 max_num_sg, struct ib_udata *udata);
int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
int
bnxt_re_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
const struct ib_mad_hdr *in_mad, size_t in_mad_size,
struct ib_mad_hdr *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
void bnxt_re_disassociate_ucntx(struct ib_ucontext *ibcontext);
static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
struct vm_area_struct *vma);
void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev);
void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq);
static inline int
bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
struct ib_global_route *grh, struct ib_ah *ah);
static inline enum rdma_network_type
bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
union ib_gid *sgid);
static inline
struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
struct ib_ucontext *ucontext,
struct ib_udata *udata,
unsigned long addr,
size_t size, int access, int dmasync);
static inline
struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
struct ib_ucontext *ucontext,
struct ib_udata *udata,
unsigned long addr,
size_t size, int access, int dmasync);
static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem);
static inline void bnxt_re_peer_mem_release(struct ib_umem *umem);
void bnxt_re_resolve_dmac_task(struct work_struct *work);
static inline enum ib_qp_type __from_hw_to_ib_qp_type(u8 type)
{
switch (type) {
case CMDQ_CREATE_QP1_TYPE_GSI:
case CMDQ_CREATE_QP_TYPE_GSI:
return IB_QPT_GSI;
case CMDQ_CREATE_QP_TYPE_RC:
return IB_QPT_RC;
case CMDQ_CREATE_QP_TYPE_UD:
return IB_QPT_UD;
case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE:
return IB_QPT_RAW_ETHERTYPE;
default:
return IB_QPT_MAX;
}
}
static inline u8 __from_ib_qp_state(enum ib_qp_state state)
{
switch (state) {
case IB_QPS_RESET:
return CMDQ_MODIFY_QP_NEW_STATE_RESET;
case IB_QPS_INIT:
return CMDQ_MODIFY_QP_NEW_STATE_INIT;
case IB_QPS_RTR:
return CMDQ_MODIFY_QP_NEW_STATE_RTR;
case IB_QPS_RTS:
return CMDQ_MODIFY_QP_NEW_STATE_RTS;
case IB_QPS_SQD:
return CMDQ_MODIFY_QP_NEW_STATE_SQD;
case IB_QPS_SQE:
return CMDQ_MODIFY_QP_NEW_STATE_SQE;
case IB_QPS_ERR:
default:
return CMDQ_MODIFY_QP_NEW_STATE_ERR;
}
}
static inline u32 __from_ib_mtu(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
case IB_MTU_512:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
case IB_MTU_1024:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
case IB_MTU_2048:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
case IB_MTU_4096:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
default:
return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
}
static inline enum ib_mtu __to_ib_mtu(u32 mtu)
{
switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
return IB_MTU_256;
case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
return IB_MTU_512;
case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
return IB_MTU_1024;
case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
return IB_MTU_2048;
case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
return IB_MTU_4096;
case CMDQ_MODIFY_QP_PATH_MTU_MTU_8192:
return IB_MTU_8192;
default:
return IB_MTU_2048;
}
}
static inline enum ib_qp_state __to_ib_qp_state(u8 state)
{
switch (state) {
case CMDQ_MODIFY_QP_NEW_STATE_RESET:
return IB_QPS_RESET;
case CMDQ_MODIFY_QP_NEW_STATE_INIT:
return IB_QPS_INIT;
case CMDQ_MODIFY_QP_NEW_STATE_RTR:
return IB_QPS_RTR;
case CMDQ_MODIFY_QP_NEW_STATE_RTS:
return IB_QPS_RTS;
case CMDQ_MODIFY_QP_NEW_STATE_SQD:
return IB_QPS_SQD;
case CMDQ_MODIFY_QP_NEW_STATE_SQE:
return IB_QPS_SQE;
case CMDQ_MODIFY_QP_NEW_STATE_ERR:
default:
return IB_QPS_ERR;
}
}
static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req,
struct bnxt_re_uctx_resp *resp)
{
resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT)) {
resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
return -EINVAL;
}
return 0;
}
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
return uctx ? (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED) ?
ent : roundup_pow_of_two(ent) : ent;
}
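/*
 * Worked example for bnxt_re_init_depth(): kernel consumers (no uctx) get
 * the requested entry count as-is. A user context whose library left
 * BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED set also gets the count as-is;
 * otherwise the count is rounded up, e.g. 1000 entries become
 * roundup_pow_of_two(1000) = 1024.
 */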
static inline int bnxt_re_init_rsvd_wqe_flag(struct bnxt_re_uctx_req *req,
struct bnxt_re_uctx_resp *resp,
bool genp5)
{
resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE)) {
resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
return -EINVAL;
} else if (!genp5) {
resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
}
return 0;
}
static inline u32 bnxt_re_get_diff(struct bnxt_re_ucontext *uctx,
struct bnxt_qplib_chip_ctx *cctx)
{
if (!uctx) {
/* return res-wqe only for gen p4 for user resource */
return _is_chip_gen_p5_p7(cctx) ? 0 : BNXT_QPLIB_RESERVED_QP_WRS;
} else if (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED) {
return 0;
}
/* old lib */
return BNXT_QPLIB_RESERVED_QP_WRS;
}
static inline void bnxt_re_init_qpmtu(struct bnxt_re_qp *qp, int mtu,
int mask, struct ib_qp_attr *qp_attr,
bool *is_qpmtu_high)
{
int qpmtu, qpmtu_int;
int ifmtu, ifmtu_int;
ifmtu = iboe_get_mtu(mtu);
ifmtu_int = ib_mtu_enum_to_int(ifmtu);
qpmtu = ifmtu;
qpmtu_int = ifmtu_int;
if (mask & IB_QP_PATH_MTU) {
qpmtu = qp_attr->path_mtu;
qpmtu_int = ib_mtu_enum_to_int(qpmtu);
if (qpmtu_int > ifmtu_int) {
/* Trim the QP path mtu to interface mtu and update
* the new mtu to user qp for retransmission psn
* calculations.
*/
qpmtu = ifmtu;
qpmtu_int = ifmtu_int;
*is_qpmtu_high = true;
}
}
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
qp->qplib_qp.mtu = qpmtu_int;
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
}
inline unsigned long compare_ether_header(void *a, void *b)
{
u32 *a32 = (u32 *)((u8 *)a + 2);
u32 *b32 = (u32 *)((u8 *)b + 2);
return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
(a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
inline uint16_t
crc16(uint16_t crc, const void *buffer, unsigned int len)
{
const unsigned char *cp = buffer;
/* CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1). */
static uint16_t const crc16_table[256] = {
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
while (len--)
crc = (((crc >> 8) & 0xffU) ^
crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU;
return crc;
}
static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
struct vm_area_struct *vma)
{
return 0;
}
static inline bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
u16 vlan_id)
{
bool ret = true;
/*
* Check if the vlan is configured in the host.
* If not configured, it can be a transparent
* VLAN. So don't report the VLAN ID.
*/
return ret;
}
#endif

sys/dev/bnxt/bnxt_re/main.c (new file, 4467 lines)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,638 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Fast Path Operators (header)
*/
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
/* Temp header structures for SQ */
struct sq_ud_ext_hdr {
__le32 dst_qp;
__le32 avid;
__le64 rsvd;
};
struct sq_raw_ext_hdr {
__le32 cfa_meta;
__le32 rsvd0;
__le64 rsvd1;
};
struct sq_rdma_ext_hdr {
__le64 remote_va;
__le32 remote_key;
__le32 rsvd;
};
struct sq_atomic_ext_hdr {
__le64 swap_data;
__le64 cmp_data;
};
struct sq_fr_pmr_ext_hdr {
__le64 pblptr;
__le64 va;
};
struct sq_bind_ext_hdr {
__le64 va;
__le32 length_lo;
__le32 length_hi;
};
struct rq_ext_hdr {
__le64 rsvd1;
__le64 rsvd2;
};
#define BNXT_QPLIB_ETHTYPE_ROCEV1 0x8915
struct bnxt_qplib_srq {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_cq *cq;
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sginfo;
u64 srq_handle;
u32 id;
u16 wqe_size;
u32 max_wqe;
u32 max_sge;
u32 threshold;
bool arm_req;
int start_idx;
int last_idx;
u16 eventq_hw_ring_id;
bool is_user;
spinlock_t lock;
};
struct bnxt_qplib_sge {
u64 addr;
u32 size;
u32 lkey;
};
/*
* Buffer space for ETH(14), IP or GRH(40), UDP header(8)
* and ib_bth + ib_deth (20).
* Max required is 82 when RoCE V2 is enabled
*/
/*
* RoCE V1 (38 bytes needed)
* +------------+----------+--------+--------+-------+
* |Eth-hdr(14B)| GRH (40B)|bth+deth| Mad | iCRC |
* | | supplied | 20B |payload | 4B |
* | | by user |supplied| 256B | |
* | | mad | |by user | |
* | | | | | |
* | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
* +------------+----------+--------+--------+-------+
*/
/*
* RoCE V2-IPv4 (46 Bytes needed)
* +------------+----------+--------+--------+-------+
* |Eth-hdr(14B)| IP-hdr |UDP-hdr | Mad | iCRC |
* | | supplied | 8B |payload | 4B |
* | | by user |bth+deth| 256B | |
* | | mad lower| 20B |supplied| |
* | | 20B out | (sge 3)|by user | |
* | | of 40B | | | |
* | | grh space| | | |
* | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
* +------------+----------+--------+--------+-------+
*/
/*
* RoCE V2-IPv6 (46 Bytes needed)
* +------------+----------+--------+--------+-------+
* |Eth-hdr(14B)| IPv6 |UDP-hdr | Mad | iCRC |
* | | supplied | 8B |payload | 4B |
* | | by user |bth+deth| 256B | |
* | | mad lower| 20B |supplied| |
* | | 40 bytes | |by user | |
* | | grh space| | | |
* | | | | | |
* | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
* +------------+----------+--------+--------+-------+
*/
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE 74
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE 46
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
#define BNXT_QPLIB_MAX_SQSZ 0xFFFF
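/*
 * How the SQ header sizes above follow from the layouts sketched earlier:
 * the 74-byte RoCE v1 QP1 SQ header is Eth (14) + GRH (40) + BTH/DETH (20),
 * and the 86-byte RoCE v2 value adds the 8-byte UDP header plus 4 more
 * bytes, which lines up with the iCRC shown in the diagrams
 * (14 + 40 + 8 + 20 + 4 = 86).
 */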
struct bnxt_qplib_hdrbuf {
dma_addr_t dma_map;
void *va;
u32 len;
u32 step;
};
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
u8 type;
u8 flags;
u32 start_psn;
u32 next_psn;
u32 slot_idx;
u8 slots;
/* WIP: make it void * to handle legacy also */
struct sq_psn_search *psn_search;
void *inline_data;
};
struct bnxt_qplib_swqe {
/* General */
#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
#define BNXT_QPLIB_QP1_DUMMY_WRID 0x44554D59 /* "DUMY" */
u64 wr_id;
u8 reqs_type;
u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND 0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
#define BNXT_QPLIB_SWQE_TYPE_RECV 128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP (1 << 0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE (1 << 1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE (1 << 2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT (1 << 3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE (1 << 4)
struct bnxt_qplib_sge *sg_list;
int num_sge;
union {
/* Send, with imm, inval key */
struct {
union {
__be32 imm_data;
u32 inv_key;
};
u32 q_key;
u32 dst_qp;
u16 avid;
} send;
/* Send Raw Ethernet and QP1 */
struct {
u16 lflags;
u16 cfa_action;
u32 cfa_meta;
} rawqp1;
/* RDMA write, with imm, read */
struct {
union {
__be32 imm_data;
u32 inv_key;
};
u64 remote_va;
u32 r_key;
} rdma;
/* Atomic cmp/swap, fetch/add */
struct {
u64 remote_va;
u32 r_key;
u64 swap_data;
u64 cmp_data;
} atomic;
/* Local Invalidate */
struct {
u32 inv_l_key;
} local_inv;
/* FR-PMR */
struct {
u8 access_cntl;
u8 pg_sz_log;
bool zero_based;
u32 l_key;
u32 length;
u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
u8 levels;
#define PAGE_SHIFT_4K 12
__le64 *pbl_ptr;
dma_addr_t pbl_dma_ptr;
u64 *page_list;
u16 page_list_len;
u64 va;
} frmr;
/* Bind */
struct {
u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE (1 << 0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ (1 << 1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE (1 << 2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC (1 << 3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND (1 << 4)
bool zero_based;
u8 mw_type;
u32 parent_l_key;
u32 r_key;
u64 va;
u32 length;
} bind;
};
};
struct bnxt_qplib_q {
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sginfo;
struct bnxt_qplib_hwq hwq;
u32 max_wqe;
u16 max_sge;
u16 wqe_size;
u16 q_full_delta;
u32 psn;
bool condition;
bool single;
bool legacy_send_phantom;
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
bool flushed;
u32 swq_start;
u32 swq_last;
};
#define BNXT_QPLIB_PPP_REQ 0x1
#define BNXT_QPLIB_PPP_ST_IDX_SHIFT 0x1
struct bnxt_qplib_ppp {
u32 dpi;
u8 req;
u8 st_idx_en;
};
struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
u8 wqe_mode;
u8 state;
u8 cur_qp_state;
u8 is_user;
u64 modify_flags;
u32 max_inline_data;
u32 mtu;
u32 path_mtu;
bool en_sqd_async_notify;
u16 pkey_index;
u32 qkey;
u32 dest_qp_id;
u8 access;
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
u64 wqe_cnt;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
u8 nw_type;
u16 port_id;
struct bnxt_qplib_ah ah;
struct bnxt_qplib_ppp ppp;
#define BTH_PSN_MASK ((1 << 24) - 1)
/* SQ */
struct bnxt_qplib_q sq;
/* RQ */
struct bnxt_qplib_q rq;
/* SRQ */
struct bnxt_qplib_srq *srq;
/* CQ */
struct bnxt_qplib_cq *scq;
struct bnxt_qplib_cq *rcq;
/* IRRQ and ORRQ */
struct bnxt_qplib_hwq irrq;
struct bnxt_qplib_hwq orrq;
/* Header buffer for QP1 */
struct bnxt_qplib_hdrbuf *sq_hdr_buf;
struct bnxt_qplib_hdrbuf *rq_hdr_buf;
/* ToS */
u8 tos_ecn;
u8 tos_dscp;
/* To track the SQ and RQ flush list */
struct list_head sq_flush;
struct list_head rq_flush;
/* 4 bytes of the QP's scrambled MAC received from FW */
u32 lag_src_mac;
u32 msn;
u32 msn_tbl_sz;
/* get devflags in PI code */
u16 dev_cap_flags;
};
#define CQE_CMP_VALID(hdr, pass) \
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
int cons, prod, avail;
/* A false queue-full indication is possible; retrying the post-send makes sense */
cons = hwq->cons;
prod = hwq->prod;
avail = cons - prod;
if (cons <= prod)
avail += hwq->depth;
return avail;
}
static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_hwq *hwq, u8 slots)
{
return __bnxt_qplib_get_avail(hwq) <= slots;
}
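/*
 * Worked example for the availability math above: with depth = 1024,
 * cons = 10 and prod = 1000, avail = 10 - 1000 + 1024 = 34 slots, so a
 * 4-slot WQE still fits. bnxt_qplib_queue_full() reports full once
 * avail <= slots, preferring a false "full" (see the comment in
 * __bnxt_qplib_get_avail()) over overrunning the ring.
 */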
struct bnxt_qplib_cqe {
u8 status;
u8 type;
u8 opcode;
u32 length;
/* Lower 16 is cfa_metadata0, Upper 16 is cfa_metadata1 */
u32 cfa_meta;
#define BNXT_QPLIB_META1_SHIFT 16
#define BNXT_QPLIB_CQE_CFA_META1_VALID 0x80000UL
u64 wr_id;
union {
__be32 immdata;
u32 invrkey;
};
u64 qp_handle;
u64 mr_handle;
u16 flags;
u8 smac[6];
u32 src_qp;
u16 raweth_qp1_flags;
u16 raweth_qp1_errors;
u16 raweth_qp1_cfa_code;
u32 raweth_qp1_flags2;
u32 raweth_qp1_metadata;
u8 raweth_qp1_payload_offset;
u16 pkey_index;
};
#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
struct bnxt_qplib_cq {
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sginfo;
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_hwq resize_hwq;
struct list_head sqf_head;
struct list_head rqf_head;
u32 max_wqe;
u32 id;
u16 count;
u16 period;
u32 cnq_hw_ring_id;
u64 cq_handle;
atomic_t arm_state;
#define CQ_RESIZE_WAIT_TIME_MS 500
unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq;
spinlock_t flush_lock; /* lock flush queue list */
spinlock_t compl_lock; /* synch CQ handlers */
u16 cnq_events;
bool is_cq_err_event;
bool destroyed;
u8 toggle;
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * x + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s) ((s >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x) (x + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s) (s - 1)
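/*
 * Worked example for the IRD/ORD conversions above: an IRD limit of 8 needs
 * IRD_LIMIT_TO_IRRQ_SLOTS(8) = 2 * 8 + 2 = 18 IRRQ slots, and
 * IRRQ_SLOTS_TO_IRD_LIMIT(18) = (18 >> 1) - 1 = 8 recovers the limit.
 * Likewise ORD_LIMIT_TO_ORRQ_SLOTS(8) = 9 and ORRQ_SLOTS_TO_ORD_LIMIT(9) = 8.
 */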
#define NQE_CMP_VALID(hdr, pass) \
(!!(le32_to_cpu((hdr)->info63_v & 0xffffffff) & NQ_BASE_V) == \
!(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
/* MSN table print macros for debugging */
#define BNXT_RE_MSN_IDX(m) (((m) & SQ_MSN_SEARCH_START_IDX_MASK) >> \
SQ_MSN_SEARCH_START_IDX_SFT)
#define BNXT_RE_MSN_NPSN(m) (((m) & SQ_MSN_SEARCH_NEXT_PSN_MASK) >> \
SQ_MSN_SEARCH_NEXT_PSN_SFT)
#define BNXT_RE_MSN_SPSN(m) (((m) & SQ_MSN_SEARCH_START_PSN_MASK) >> \
SQ_MSN_SEARCH_START_PSN_SFT)
#define BNXT_MSN_TBLE_SGE 6
struct bnxt_qplib_nq_stats {
u64 num_dbqne_processed;
u64 num_srqne_processed;
u64 num_cqne_processed;
u64 num_tasklet_resched;
u64 num_nq_rearm;
};
struct bnxt_qplib_nq_db {
struct bnxt_qplib_reg_desc reg;
void __iomem *db;
struct bnxt_qplib_db_info dbinfo;
};
typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_srq *srq, u8 event);
struct bnxt_qplib_nq {
struct bnxt_qplib_res *res;
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_nq_db nq_db;
char *name;
u16 ring_id;
int msix_vec;
bool requested;
int budget;
u32 load;
struct mutex lock;
cqn_handler_t cqn_handler;
srqn_handler_t srqn_handler;
struct workqueue_struct *cqn_wq;
struct bnxt_qplib_nq_stats stats;
};
struct bnxt_qplib_nq_work {
struct work_struct work;
struct bnxt_qplib_nq *nq;
struct bnxt_qplib_cq *cq;
};
static inline dma_addr_t
bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
struct bnxt_qplib_hdrbuf *buf;
buf = qp->rq_hdr_buf;
return (buf->dma_map + index * buf->step);
}
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx,
int msix_vector, int bar_reg_offset,
cqn_handler_t cqn_handler,
srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res,
struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe,
int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp);
int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp, u32 slen, u32 rlen);
static inline bool __can_request_ppp(struct bnxt_qplib_qp *qp)
{
bool can_request = false;
if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
qp->ppp.req &&
!(qp->ppp.st_idx_en &
CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED))
can_request = true;
return can_request;
}
/* MSN table update inline */
static inline uint64_t bnxt_re_update_msn_tbl(uint32_t st_idx, uint32_t npsn, uint32_t start_psn)
{
return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
SQ_MSN_SEARCH_START_IDX_MASK) |
(((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
SQ_MSN_SEARCH_NEXT_PSN_MASK) |
(((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
SQ_MSN_SEARCH_START_PSN_MASK));
}
void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res);
#endif

File diff suppressed because it is too large.


@@ -0,0 +1,354 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: RDMA Controller HW interface (header)
*/
#ifndef __BNXT_QPLIB_RCFW_H__
#define __BNXT_QPLIB_RCFW_H__
#include <linux/semaphore.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include "qplib_tlv.h"
#define RCFW_CMDQ_TRIG_VAL 1
#define RCFW_COMM_PCI_BAR_REGION 0
#define RCFW_COMM_CONS_PCI_BAR_REGION 2
#define RCFW_COMM_BASE_OFFSET 0x600
#define RCFW_PF_VF_COMM_PROD_OFFSET 0xc
#define RCFW_COMM_TRIG_OFFSET 0x100
#define RCFW_COMM_SIZE 0x104
#define RCFW_DBR_PCI_BAR_REGION 2
#define RCFW_DBR_BASE_PAGE_SHIFT 12
#define RCFW_MAX_LATENCY_SEC_SLAB_INDEX 128
#define RCFW_MAX_LATENCY_MSEC_SLAB_INDEX 3000
#define RCFW_MAX_STAT_INDEX 0xFFFF
#define RCFW_FW_STALL_MAX_TIMEOUT 40
#define GET_OPCODE_TYPE(x) \
((x) == 0x1 ? "CREATE_QP": \
((x) == 0x2 ? "DESTROY_QP": \
((x) == 0x3 ? "MODIFY_QP": \
((x) == 0x4 ? "QUERY_QP": \
((x) == 0x5 ? "CREATE_SRQ": \
((x) == 0x6 ? "DESTROY_SRQ": \
((x) == 0x8 ? "QUERY_SRQ": \
((x) == 0x9 ? "CREATE_CQ": \
((x) == 0xa ? "DESTROY_CQ": \
((x) == 0xc ? "RESIZE_CQ": \
((x) == 0xd ? "ALLOCATE_MRW": \
((x) == 0xe ? "DEALLOCATE_KEY": \
((x) == 0xf ? "REGISTER_MR": \
((x) == 0x10 ? "DEREGISTER_MR": \
((x) == 0x11 ? "ADD_GID": \
((x) == 0x12 ? "DELETE_GID": \
((x) == 0x17 ? "MODIFY_GID": \
((x) == 0x18 ? "QUERY_GID": \
((x) == 0x13 ? "CREATE_QP1": \
((x) == 0x14 ? "DESTROY_QP1": \
((x) == 0x15 ? "CREATE_AH": \
((x) == 0x16 ? "DESTROY_AH": \
((x) == 0x80 ? "INITIALIZE_FW": \
((x) == 0x81 ? "DEINITIALIZE_FW": \
((x) == 0x82 ? "STOP_FUNC": \
((x) == 0x83 ? "QUERY_FUNC": \
((x) == 0x84 ? "SET_FUNC_RESOURCES": \
((x) == 0x85 ? "READ_CONTEXT": \
((x) == 0x86 ? "VF_BACKCHANNEL_REQUEST": \
((x) == 0x87 ? "READ_VF_MEMORY": \
((x) == 0x88 ? "COMPLETE_VF_REQUEST": \
((x) == 0x89 ? "EXTEND_CONTEXT_ARRRAY": \
((x) == 0x8a ? "MAP_TC_TO_COS": \
((x) == 0x8b ? "QUERY_VERSION": \
((x) == 0x8c ? "MODIFY_ROCE_CC": \
((x) == 0x8d ? "QUERY_ROCE_CC": \
((x) == 0x8e ? "QUERY_ROCE_STATS": \
((x) == 0x8f ? "SET_LINK_AGGR_MODE": \
((x) == 0x90 ? "MODIFY_CQ": \
((x) == 0x91 ? "QUERY_QP_EXTEND": \
((x) == 0x92 ? "QUERY_ROCE_STATS_EXT": \
"Unknown OPCODE" \
)))))))))))))))))))))))))))))))))))))))))
extern unsigned int cmdq_shadow_qd;
/* The cmdq contains a fixed number of 16-byte slots */
struct bnxt_qplib_cmdqe {
u8 data[16];
};
#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
static inline void bnxt_qplib_rcfw_cmd_prep(void *r, u8 opcode, u8 cmd_size)
{
struct cmdq_base *req = r;
req->opcode = opcode;
req->cmd_size = cmd_size;
}
/* Shadow queue depth for non-blocking commands */
#define RCFW_CMD_NON_BLOCKING_SHADOW_QD 64
#define RCFW_CMD_DEV_ERR_CHECK_TIME_MS 1000 /* 1 second timeout */
#define RCFW_ERR_RETRY_COUNT (RCFW_CMD_WAIT_TIME_MS / RCFW_CMD_DEV_ERR_CHECK_TIME_MS)
/* CMDQ elements */
#define BNXT_QPLIB_CMDQE_MAX_CNT 8192
#define BNXT_QPLIB_CMDQE_BYTES (BNXT_QPLIB_CMDQE_MAX_CNT * \
BNXT_QPLIB_CMDQE_UNITS)
#define BNXT_QPLIB_CMDQE_NPAGES ((BNXT_QPLIB_CMDQE_BYTES % \
PAGE_SIZE) ? \
((BNXT_QPLIB_CMDQE_BYTES / \
PAGE_SIZE) + 1) : \
(BNXT_QPLIB_CMDQE_BYTES / \
PAGE_SIZE))
#define BNXT_QPLIB_CMDQE_PAGE_SIZE (BNXT_QPLIB_CMDQE_NPAGES * \
PAGE_SIZE)
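/*
 * Sizing example, assuming 4 KiB pages: BNXT_QPLIB_CMDQE_BYTES is
 * 8192 * 16 = 131072 bytes, which is an exact multiple of PAGE_SIZE, so
 * BNXT_QPLIB_CMDQE_NPAGES = 32 and BNXT_QPLIB_CMDQE_PAGE_SIZE = 131072.
 */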
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
#define RCFW_CMD_IS_BLOCKING 0x8000
#define RCFW_NO_FW_ACCESS(rcfw) \
(test_bit(ERR_DEVICE_DETACHED, &(rcfw)->cmdq.flags) || \
pci_channel_offline((rcfw)->pdev))
/* Each CRSQ buffer is 1024 bytes */
struct bnxt_qplib_crsbe {
u8 data[1024];
};
/* Get the number of command units required for the req. The
 * function returns the correct value only if called before the
 * size is adjusted by bnxt_qplib_set_cmd_slots
 */
static inline u32 bnxt_qplib_get_cmd_slots(struct cmdq_base *req)
{
u32 cmd_units = 0;
if (HAS_TLV_HEADER(req)) {
struct roce_tlv *tlv_req = (struct roce_tlv *)req;
cmd_units = tlv_req->total_size;
} else {
cmd_units = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
BNXT_QPLIB_CMDQE_UNITS;
}
return cmd_units;
}
/* Convert req->cmd_size to CMDQE units; returns the request size in bytes */
static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
u32 cmd_byte = 0;
if (HAS_TLV_HEADER(req)) {
struct roce_tlv *tlv_req = (struct roce_tlv *)req;
cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS;
} else {
cmd_byte = req->cmd_size;
req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
BNXT_QPLIB_CMDQE_UNITS;
}
return cmd_byte;
}
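/*
 * Worked example for the two helpers above, for a non-TLV request of
 * 64 bytes: bnxt_qplib_get_cmd_slots() returns (64 + 16 - 1) / 16 = 4
 * CMDQE units, while bnxt_qplib_set_cmd_slots() rewrites req->cmd_size
 * from 64 bytes to 4 units and returns the original 64-byte size.
 */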
/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
#define CREQ_CMP_VALID(hdr, pass) \
(!!((hdr)->v & CREQ_BASE_V) == \
!(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
#define CREQ_ENTRY_POLL_BUDGET 8
typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
struct bnxt_qplib_crsqe {
struct creq_qp_event *resp;
u32 req_size;
bool is_waiter_alive;
bool is_internal_cmd;
bool is_in_used;
/* Free slots at the time of submission */
u32 free_slots;
unsigned long send_timestamp;
u8 opcode;
u8 requested_qp_state;
};
struct bnxt_qplib_rcfw_sbuf {
void *sb;
dma_addr_t dma_addr;
u32 size;
};
#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
#define FIRMWARE_INITIALIZED_FLAG (0)
#define FIRMWARE_FIRST_FLAG (31)
#define FIRMWARE_STALL_DETECTED (3)
#define ERR_DEVICE_DETACHED (4)
struct bnxt_qplib_cmdq_mbox {
struct bnxt_qplib_reg_desc reg;
void __iomem *prod;
void __iomem *db;
};
struct bnxt_qplib_cmdq_ctx {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_cmdq_mbox cmdq_mbox;
wait_queue_head_t waitq;
unsigned long flags;
unsigned long last_seen;
u32 seq_num;
};
struct bnxt_qplib_creq_db {
struct bnxt_qplib_reg_desc reg;
void __iomem *db;
struct bnxt_qplib_db_info dbinfo;
};
struct bnxt_qplib_creq_stat {
u64 creq_arm_count;
u64 creq_tasklet_schedule_count;
u64 creq_qp_event_processed;
u64 creq_func_event_processed;
};
struct bnxt_qplib_creq_ctx {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_creq_db creq_db;
struct bnxt_qplib_creq_stat stats;
aeq_handler_t aeq_handler;
u16 ring_id;
int msix_vec;
bool requested;
char *irq_name;
};
/* RCFW Communication Channels */
#define BNXT_QPLIB_RCFW_SEND_RETRY_COUNT 4000
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
struct bnxt_qplib_res *res;
struct bnxt_qplib_cmdq_ctx cmdq;
struct bnxt_qplib_creq_ctx creq;
struct bnxt_qplib_crsqe *crsqe_tbl;
u32 rcfw_lat_slab_sec[RCFW_MAX_LATENCY_SEC_SLAB_INDEX];
/* Slow path Perf Stats */
bool sp_perf_stats_enabled;
u32 *rcfw_lat_slab_msec;
u64 *qp_create_stats;
u64 *qp_destroy_stats;
u64 *qp_modify_stats;
u64 *mr_create_stats;
u64 *mr_destroy_stats;
u32 qp_create_stats_id;
u32 qp_destroy_stats_id;
u32 qp_modify_stats_id;
u32 mr_create_stats_id;
u32 mr_destroy_stats_id;
bool init_oos_stats;
u64 oos_prev;
u32 num_irq_stopped;
u32 num_irq_started;
u32 poll_in_intr_en;
u32 poll_in_intr_dis;
atomic_t rcfw_intr_enabled;
u32 cmdq_full_dbg;
struct semaphore rcfw_inflight;
unsigned int curr_shadow_qd;
atomic_t timeout_send;
/* cached from chip cctx for quick reference in slow path */
u16 max_timeout;
};
struct bnxt_qplib_cmdqmsg {
struct cmdq_base *req;
struct creq_base *resp;
void *sb;
u32 req_sz;
u32 res_sz;
u8 block;
u8 qp_state;
};
static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void *req, void *resp, void *sb,
u32 req_sz, u32 res_sz, u8 block)
{
msg->req = req;
msg->resp = resp;
msg->sb = sb;
msg->req_sz = req_sz;
msg->res_sz = res_sz;
msg->block = block;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init);
int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off,
aeq_handler_t aeq_handler);
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
u32 size);
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_rcfw_sbuf *sbuf);
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_cmdqmsg *msg);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw,
u32 *cur_prod, u32 *cur_cons);
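/*
 * Illustrative flow only: a caller typically preps a firmware command,
 * wraps it in a bnxt_qplib_cmdqmsg and hands it to
 * bnxt_qplib_rcfw_send_message(). The helper name below is hypothetical,
 * and the sketch assumes the cmdq_base/creq_base definitions from the HSI
 * header are visible at this point.
 */
static inline int bnxt_qplib_example_send_cmd(struct bnxt_qplib_rcfw *rcfw,
					      u8 opcode)
{
	struct bnxt_qplib_cmdqmsg msg = {};
	struct creq_base resp = {};
	struct cmdq_base req = {};

	bnxt_qplib_rcfw_cmd_prep(&req, opcode, sizeof(req));
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	return bnxt_qplib_rcfw_send_message(rcfw, &msg);
}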
#endif

File diff suppressed because it is too large.


@@ -0,0 +1,840 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: QPLib resource manager (header)
*/
#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__
#include "hsi_struct_def.h"
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
#define BNXT_QPLIB_MAX_QPC_COUNT (64 * 1024)
#define BNXT_QPLIB_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_QPLIB_MAX_CQ_COUNT (64 * 1024)
#define BNXT_QPLIB_MAX_CQ_COUNT_P5 (128 * 1024)
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000
#define BNXT_QPLIB_DBR_KEY_INVALID -1
/* chip gen type */
#define BNXT_RE_DEFAULT 0xf
enum bnxt_qplib_wqe_mode {
BNXT_QPLIB_WQE_MODE_STATIC = 0x00,
BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01,
BNXT_QPLIB_WQE_MODE_INVALID = 0x02
};
#define BNXT_RE_PUSH_MODE_NONE 0
#define BNXT_RE_PUSH_MODE_WCB 1
#define BNXT_RE_PUSH_MODE_PPP 2
#define BNXT_RE_PUSH_ENABLED(mode) ((mode) == BNXT_RE_PUSH_MODE_WCB ||\
(mode) == BNXT_RE_PUSH_MODE_PPP)
#define BNXT_RE_PPP_ENABLED(cctx) ((cctx)->modes.db_push_mode ==\
BNXT_RE_PUSH_MODE_PPP)
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
u8 te_bypass;
u8 db_push;
/* To control advanced cc params display in configfs */
u8 cc_pr_mode;
/* Other modes to follow here e.g. GSI QP mode */
u8 dbr_pacing;
u8 dbr_pacing_ext;
u8 dbr_drop_recov;
u8 dbr_primary_pf;
u8 dbr_pacing_v0;
};
struct bnxt_qplib_chip_ctx {
u16 chip_num;
u8 chip_rev;
u8 chip_metal;
u64 hwrm_intf_ver;
struct bnxt_qplib_drv_modes modes;
u32 dbr_stat_db_fifo;
u32 dbr_aeq_arm_reg;
u32 dbr_throttling_reg;
u16 hw_stats_size;
u16 hwrm_cmd_max_timeout;
};
static inline bool _is_chip_num_p7(u16 chip_num)
{
return (chip_num == CHIP_NUM_58818 ||
chip_num == CHIP_NUM_57608);
}
static inline bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx)
{
return _is_chip_num_p7(cctx->chip_num);
}
/* SR2 is Gen P5 */
static inline bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
return (cctx->chip_num == CHIP_NUM_57508 ||
cctx->chip_num == CHIP_NUM_57504 ||
cctx->chip_num == CHIP_NUM_57502);
}
static inline bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
{
return (_is_chip_gen_p5(cctx) || _is_chip_p7(cctx));
}
static inline bool _is_wqe_mode_variable(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE;
}
struct bnxt_qplib_db_pacing_data {
u32 do_pacing;
u32 pacing_th;
u32 dev_err_state;
u32 alarm_th;
u32 grc_reg_offset;
u32 fifo_max_depth;
u32 fifo_room_mask;
u8 fifo_room_shift;
};
static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_pacing;
}
static inline u8 bnxt_qplib_dbr_pacing_ext_en(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_pacing_ext;
}
static inline u8 bnxt_qplib_dbr_pacing_is_primary_pf(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_primary_pf;
}
static inline void bnxt_qplib_dbr_pacing_set_primary_pf
(struct bnxt_qplib_chip_ctx *cctx, u8 val)
{
cctx->modes.dbr_primary_pf = val;
}
/* Defines for handling the HWRM version check */
#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000D
#define HWRM_VERSION_ROCE_STATS_FN_ID 0x1000A00000045
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
((HWQ_CMP(hwq->prod, hwq)\
- HWQ_CMP(hwq->cons, hwq))\
& (hwq->max_elements - 1)))
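/*
 * Worked example, assuming 4 KiB pages and 8-byte pointers: PTR_CNT_PER_PG
 * is 512, so logical index 1000 maps to page PTR_PG(1000) = 1 at offset
 * PTR_IDX(1000) = 1000 & 511 = 488. HWQ_CMP() wraps an index into a
 * power-of-two queue, e.g. HWQ_CMP(1030, hwq) = 6 when max_elements = 1024.
 */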
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
HWQ_TYPE_L2_CMPL,
HWQ_TYPE_MR
};
#define MAX_PBL_LVL_0_PGS 1
#define MAX_PBL_LVL_1_PGS 512
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PDL_LVL_SHIFT 9
enum bnxt_qplib_pbl_lvl {
PBL_LVL_0,
PBL_LVL_1,
PBL_LVL_2,
PBL_LVL_MAX
};
#define ROCE_PG_SIZE_4K (4 * 1024)
#define ROCE_PG_SIZE_8K (8 * 1024)
#define ROCE_PG_SIZE_64K (64 * 1024)
#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
enum bnxt_qplib_hwrm_pg_size {
BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
};
struct bnxt_qplib_reg_desc {
u8 bar_id;
resource_size_t bar_base;
unsigned long offset;
void __iomem *bar_reg;
size_t len;
};
struct bnxt_qplib_pbl {
u32 pg_count;
u32 pg_size;
void **pg_arr;
dma_addr_t *pg_map_arr;
};
struct bnxt_qplib_sg_info {
struct scatterlist *sghead;
u32 nmap;
u32 npages;
u32 pgshft;
u32 pgsize;
bool nopte;
};
struct bnxt_qplib_hwq_attr {
struct bnxt_qplib_res *res;
struct bnxt_qplib_sg_info *sginfo;
enum bnxt_qplib_hwq_type type;
u32 depth;
u32 stride;
u32 aux_stride;
u32 aux_depth;
};
struct bnxt_qplib_hwq {
struct pci_dev *pdev;
spinlock_t lock;
struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
void **pbl_ptr; /* ptr for easy access
to the PBL entries */
dma_addr_t *pbl_dma_ptr; /* ptr for easy access
to the dma_addr */
u32 max_elements;
u32 depth; /* original requested depth */
u16 element_size; /* Size of each entry */
u16 qe_ppg; /* queue entry per page */
u32 prod; /* raw */
u32 cons; /* raw */
u8 cp_bit;
u8 is_user;
u64 *pad_pg;
u32 pad_stride;
u32 pad_pgofft;
};
struct bnxt_qplib_db_info {
void __iomem *db;
void __iomem *priv_db;
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_res *res;
u32 xid;
u32 max_slot;
u32 flags;
u8 toggle;
spinlock_t lock;
u64 shadow_key;
u64 shadow_key_arm_ena;
u32 seed; /* For DB pacing */
};
enum bnxt_qplib_db_info_flags_mask {
BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
};
enum bnxt_qplib_db_epoch_flag_shift {
BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT,
BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1)
};
/* Tables */
struct bnxt_qplib_pd_tbl {
unsigned long *tbl;
u32 max;
};
struct bnxt_qplib_sgid_tbl {
struct bnxt_qplib_gid_info *tbl;
u16 *hw_id;
u16 max;
u16 active;
void *ctx;
bool *vlan;
};
enum {
BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
BNXT_QPLIB_DPI_TYPE_UC = 1,
BNXT_QPLIB_DPI_TYPE_WC = 2
};
struct bnxt_qplib_dpi {
u32 dpi;
u32 bit;
void __iomem *dbr;
u64 umdbr;
u8 type;
};
#define BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES 512
struct bnxt_qplib_dpi_tbl {
void **app_tbl;
unsigned long *tbl;
u16 max;
u16 avail_ppp;
struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */
struct bnxt_qplib_reg_desc wcreg;
void __iomem *priv_db;
};
struct bnxt_qplib_stats {
dma_addr_t dma_map;
void *dma;
u32 size;
u32 fw_id;
};
struct bnxt_qplib_vf_res {
u32 max_qp;
u32 max_mrw;
u32 max_srq;
u32 max_cq;
u32 max_gid;
};
#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
#define MAX_TQM_ALLOC_REQ 48
#define MAX_TQM_ALLOC_BLK_SIZE 8
struct bnxt_qplib_tqm_ctx {
struct bnxt_qplib_hwq pde;
enum bnxt_qplib_pbl_lvl pde_level; /* Original level */
struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
u8 qcount[MAX_TQM_ALLOC_REQ];
};
struct bnxt_qplib_hctx {
struct bnxt_qplib_hwq hwq;
u32 max;
};
struct bnxt_qplib_refrec {
void *handle;
u32 xid;
};
struct bnxt_qplib_reftbl {
struct bnxt_qplib_refrec *rec;
u32 max;
spinlock_t lock; /* reftbl lock */
};
struct bnxt_qplib_reftbls {
struct bnxt_qplib_reftbl qpref;
struct bnxt_qplib_reftbl cqref;
struct bnxt_qplib_reftbl srqref;
};
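/*
* A resource id is mapped into its reference table by taking it modulo
* (max - 1); id 1 is special-cased to the dedicated slot at index max so
* it can never collide with the modulo-mapped ids.
*/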
#define GET_TBL_INDEX(id, tbl) ((id) % (((tbl)->max) - 1))
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_reftbl *tbl)
{
return (qid == 1) ? tbl->max : GET_TBL_INDEX(qid, tbl);
}
/*
* Sizes of the various RoCE resource tables actually allocated by the
* driver. These may be smaller than the firmware maximums when the driver
* imposes lower limits of its own.
*/
struct bnxt_qplib_ctx {
struct bnxt_qplib_hctx qp_ctx;
struct bnxt_qplib_hctx mrw_ctx;
struct bnxt_qplib_hctx srq_ctx;
struct bnxt_qplib_hctx cq_ctx;
struct bnxt_qplib_hctx tim_ctx;
struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
struct bnxt_qplib_stats stats2;
struct bnxt_qplib_vf_res vf_res;
};
struct bnxt_qplib_res {
struct pci_dev *pdev;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_dev_attr *dattr;
struct bnxt_qplib_ctx *hctx;
struct ifnet *netdev;
struct bnxt_en_dev *en_dev;
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct mutex pd_tbl_lock;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
struct mutex dpi_tbl_lock;
struct bnxt_qplib_reftbls reftbl;
bool prio;
bool is_vf;
struct bnxt_qplib_db_pacing_data *pacing_data;
};
struct bnxt_qplib_query_stats_info {
u32 function_id;
u8 collection_id;
bool vf_valid;
};
struct bnxt_qplib_query_qp_info {
u32 function_id;
u32 num_qps;
u32 start_index;
bool vf_valid;
};
struct bnxt_qplib_query_fn_info {
bool vf_valid;
u32 host;
u32 filter;
};
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;
bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_a0(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx);
bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr);
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_hwq_attr *hwq_attr);
void bnxt_qplib_get_guid(const u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi,
void *app, u8 type);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi);
int bnxt_qplib_stop_res(struct bnxt_qplib_res *res);
void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res);
int bnxt_qplib_init_tbls(struct bnxt_qplib_res *res);
void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res, u8 pppp_factor);
void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev,
struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats);
void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res,
struct bnxt_qplib_stats *stats);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev);
u8 _get_chip_gen_p5_type(struct bnxt_qplib_chip_ctx *cctx);
static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
u32 indx, u64 *pg)
{
u32 pg_num, pg_idx;
pg_num = (indx / hwq->qe_ppg);
pg_idx = (indx % hwq->qe_ppg);
if (pg)
*pg = (u64)&hwq->pbl_ptr[pg_num];
return (void *)((u8 *)hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}
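/*
* The epoch flag is flipped each time the producer/consumer index wraps;
* it is folded into the doorbell index (see BNXT_QPLIB_DB_EPOCH_*_SHIFT)
* so consecutive doorbells stay distinguishable across wrap-arounds.
*/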
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
struct bnxt_qplib_hwq *hwq, u32 cnt)
{
/* Advance prod and flip the epoch bit on wrap-around */
hwq->prod += cnt;
if (hwq->prod >= hwq->depth) {
hwq->prod %= hwq->depth;
dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
}
}
static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons,
u32 cnt, u32 *dbinfo_flags)
{
/* Advance cons and flip the epoch bit on wrap-around */
*cons += cnt;
if (*cons >= max_elements) {
*cons %= max_elements;
*dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
}
}
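/*
* Map the page size of the leaf PBL level (hwq->level) to the HWRM
* page-size encoding; unrecognized sizes fall back to 4K.
*/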
static inline u8 _get_pte_pg_size(struct bnxt_qplib_hwq *hwq)
{
u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
struct bnxt_qplib_pbl *pbl;
pbl = &hwq->pbl[hwq->level];
switch (pbl->pg_size) {
case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
break;
case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
break;
case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
break;
case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
break;
case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
break;
case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
break;
default:
break;
}
return pg_size;
}
static inline u64 _get_base_addr(struct bnxt_qplib_hwq *hwq)
{
return hwq->pbl[PBL_LVL_0].pg_map_arr[0];
}
static inline u8 _get_base_pg_size(struct bnxt_qplib_hwq *hwq)
{
u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
struct bnxt_qplib_pbl *pbl;
pbl = &hwq->pbl[PBL_LVL_0];
switch (pbl->pg_size) {
case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
break;
case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
break;
case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
break;
case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
break;
case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
break;
case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
break;
default:
break;
}
return pg_size;
}
static inline enum bnxt_qplib_hwq_type _get_hwq_type(struct bnxt_qplib_res *res)
{
return _is_chip_gen_p5_p7(res->cctx) ? HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
}
static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
u16 flags, bool virtfn)
{
return (_is_ext_stats_supported(flags) &&
((virtfn && _is_chip_p7(ctx)) || (!virtfn)));
}
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
(CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
}
/* Disable HW_RETX */
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
static inline bool _is_cqe_v2_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
CREQ_QUERY_FUNC_RESP_SB_CQE_V2;
}
#define BNXT_DB_FIFO_ROOM_MASK 0x1fff8000
#define BNXT_DB_FIFO_ROOM_SHIFT 15
#define BNXT_MAX_FIFO_DEPTH 0x2c00
#define BNXT_DB_PACING_ALGO_THRESHOLD 250
#define BNXT_DEFAULT_PACING_PROBABILITY 0xFFFF
#define BNXT_DBR_PACING_WIN_BASE 0x2000
#define BNXT_DBR_PACING_WIN_MAP_OFF 4
#define BNXT_DBR_PACING_WIN_OFF(reg) (BNXT_DBR_PACING_WIN_BASE + \
static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
bool arm)
{
u32 key = 0;
key = info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
(CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
if (!arm)
key |= CMPL_DOORBELL_MASK;
/* memory barrier */
wmb();
writel(key, info->db);
}
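/*
* Compose the 64-bit doorbell value: the high 32 bits carry the xid, the
* RoCE path, the doorbell type and the valid bit; the low 32 bits carry
* the ring index and, where used, the toggle bits above
* BNXT_QPLIB_DBR_TOGGLE_SHIFT.
*/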
#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
(type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
((toggle) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
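/*
* Each doorbell write is recorded in a shadow key under the db lock so
* the most recent value can be re-issued later by bnxt_qplib_replay_db().
*/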
static inline void bnxt_qplib_write_db(struct bnxt_qplib_db_info *info,
u64 key, void __iomem *db,
u64 *shadow_key)
{
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
*shadow_key = key;
writeq(key, db);
spin_unlock_irqrestore(&info->lock, flags);
}
static inline void __replay_writeq(u64 key, void __iomem *db)
{
/* No need to replay uninitialised shadow_keys */
if (key != BNXT_QPLIB_DBR_KEY_INVALID)
writeq(key, db);
}
static inline void bnxt_qplib_replay_db(struct bnxt_qplib_db_info *info,
bool is_arm_ena)
{
if (!spin_trylock_irq(&info->lock))
return;
if (is_arm_ena)
__replay_writeq(info->shadow_key_arm_ena, info->priv_db);
else
__replay_writeq(info->shadow_key, info->db);
spin_unlock_irq(&info->lock);
}
static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
u32 indx;
u8 toggle = 0;
if (type == DBC_DBC_TYPE_CQ_ARMALL ||
type == DBC_DBC_TYPE_CQ_ARMSE)
toggle = info->toggle;
indx = ((info->hwq->cons & DBC_DBC_INDEX_MASK) |
((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
BNXT_QPLIB_DB_EPOCH_CONS_SHIFT));
key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
}
static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
u32 indx;
indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
}
static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
u8 toggle = 0;
if (type == DBC_DBC_TYPE_CQ_ARMENA)
toggle = info->toggle;
/* Index always at 0 */
key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
bnxt_qplib_write_db(info, key, info->priv_db,
&info->shadow_key_arm_ena);
}
static inline void bnxt_qplib_cq_coffack_db(struct bnxt_qplib_db_info *info)
{
u64 key = 0;
/* Index always at 0 */
key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_CQ_CUTOFF_ACK, 0, 0);
bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
}
static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info)
{
u64 key = 0;
/* Index always at 0 */
key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, 0);
bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
}
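/*
* Pre Gen-P5 chips ring the NQ through the legacy 32-bit completion
* doorbell; Gen P5 and later chips use the 64-bit DBC format.
*/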
static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
struct bnxt_qplib_chip_ctx *cctx,
bool arm)
{
u32 type;
type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
if (_is_chip_gen_p5_p7(cctx))
bnxt_qplib_ring_db(info, type);
else
bnxt_qplib_ring_db32(info, arm);
}
struct bnxt_qplib_max_res {
u32 max_qp;
u32 max_mr;
u32 max_cq;
u32 max_srq;
u32 max_ah;
u32 max_pd;
};
/*
* Maximum resources supported per chip revision.
* The maximum number of PDs is restricted to the maximum number of QPs.
* GENP4   - Wh+
* DEFAULT - Thor
*/
#define BNXT_QPLIB_GENP4_PF_MAX_QP (16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_MRW (16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_CQ (16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_SRQ (1 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_AH (16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_PD BNXT_QPLIB_GENP4_PF_MAX_QP
#define BNXT_QPLIB_DEFAULT_PF_MAX_QP (64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_MRW (256 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_CQ (64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_SRQ (4 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_AH (64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_PD BNXT_QPLIB_DEFAULT_PF_MAX_QP
#define BNXT_QPLIB_DEFAULT_VF_MAX_QP (6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_MRW (6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_CQ (6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_SRQ (4 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_AH (6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_PD BNXT_QPLIB_DEFAULT_VF_MAX_QP
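/*
* Select the per-function resource ceilings: the Gen P5/P7 chip numbers
* use the DEFAULT PF or VF limits above, while all other chips fall back
* to the GENP4 (Wh+) limits.
*/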
static inline void bnxt_qplib_max_res_supported(struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_res *qpl_res,
struct bnxt_qplib_max_res *max_res,
bool vf_res_limit)
{
switch (cctx->chip_num) {
case CHIP_NUM_57608:
case CHIP_NUM_58818:
case CHIP_NUM_57504:
case CHIP_NUM_57502:
case CHIP_NUM_57508:
if (!qpl_res->is_vf) {
max_res->max_qp = BNXT_QPLIB_DEFAULT_PF_MAX_QP;
max_res->max_mr = BNXT_QPLIB_DEFAULT_PF_MAX_MRW;
max_res->max_cq = BNXT_QPLIB_DEFAULT_PF_MAX_CQ;
max_res->max_srq = BNXT_QPLIB_DEFAULT_PF_MAX_SRQ;
max_res->max_ah = BNXT_QPLIB_DEFAULT_PF_MAX_AH;
max_res->max_pd = BNXT_QPLIB_DEFAULT_PF_MAX_PD;
} else {
max_res->max_qp = BNXT_QPLIB_DEFAULT_VF_MAX_QP;
max_res->max_mr = BNXT_QPLIB_DEFAULT_VF_MAX_MRW;
max_res->max_cq = BNXT_QPLIB_DEFAULT_VF_MAX_CQ;
max_res->max_srq = BNXT_QPLIB_DEFAULT_VF_MAX_SRQ;
max_res->max_ah = BNXT_QPLIB_DEFAULT_VF_MAX_AH;
max_res->max_pd = BNXT_QPLIB_DEFAULT_VF_MAX_PD;
}
break;
default:
/* Wh+/Stratus max resources */
max_res->max_qp = BNXT_QPLIB_GENP4_PF_MAX_QP;
max_res->max_mr = BNXT_QPLIB_GENP4_PF_MAX_MRW;
max_res->max_cq = BNXT_QPLIB_GENP4_PF_MAX_CQ;
max_res->max_srq = BNXT_QPLIB_GENP4_PF_MAX_SRQ;
max_res->max_ah = BNXT_QPLIB_GENP4_PF_MAX_AH;
max_res->max_pd = BNXT_QPLIB_GENP4_PF_MAX_PD;
break;
}
}
#endif

File diff suppressed because it is too large

View file

@ -0,0 +1,432 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Slow Path Operators (header)
*/
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
#include <rdma/ib_verbs.h>
#define BNXT_QPLIB_RESERVED_QP_WRS 128
/* Resource maximums reported by the firmware */
struct bnxt_qplib_dev_attr {
#define FW_VER_ARR_LEN 4
u8 fw_ver[FW_VER_ARR_LEN];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126
u32 max_qp_rd_atom;
u32 max_qp_init_rd_atom;
u32 max_qp_wqes;
u32 max_qp_sges;
u32 max_cq;
/* HW supports only 8K entries in PBL.
* So max CQEs that can be supported per CQ is 1M.
*/
#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
u32 max_cq_wqes;
u32 max_cq_sges;
u32 max_mr;
u64 max_mr_size;
#define BNXT_QPLIB_MAX_PD (64 * 1024)
u32 max_pd;
u32 max_mw;
u32 max_raw_ethy_qp;
u32 max_ah;
u32 max_fmr;
u32 max_map_per_fmr;
u32 max_srq;
u32 max_srq_wqes;
u32 max_srq_sges;
u32 max_pkey;
u32 max_inline_data;
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
u8 is_atomic;
u16 dev_cap_flags;
u64 page_size_cap;
u32 max_dpi;
};
struct bnxt_qplib_pd {
u32 id;
};
struct bnxt_qplib_gid {
u8 data[16];
};
struct bnxt_qplib_gid_info {
struct bnxt_qplib_gid gid;
u16 vlan_id;
};
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
u32 id;
u8 sgid_index;
u8 host_sgid_index; /* For Query AH if the hw table and SW table are different */
u8 traffic_class;
u32 flow_label;
u8 hop_limit;
u8 sl;
u8 dmac[6];
u16 vlan_id;
u8 nw_type;
u8 enable_cc;
};
struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
u32 flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF
u64 va;
u64 total_size;
u32 npages;
u64 mr_handle;
struct bnxt_qplib_hwq hwq;
};
struct bnxt_qplib_mrinfo {
struct bnxt_qplib_mrw *mrw;
struct bnxt_qplib_sg_info sg;
u64 *ptes;
bool is_dma;
};
struct bnxt_qplib_frpl {
int max_pg_ptrs;
struct bnxt_qplib_hwq hwq;
};
struct bnxt_qplib_cc_param_ext {
u64 ext_mask;
u16 inact_th_hi;
u16 min_delta_cnp;
u16 init_cp;
u8 tr_update_mode;
u8 tr_update_cyls;
u8 fr_rtt;
u8 ai_rate_incr;
u16 rr_rtt_th;
u16 ar_cr_th;
u16 cr_min_th;
u8 bw_avg_weight;
u8 cr_factor;
u16 cr_th_max_cp;
u8 cp_bias_en;
u8 cp_bias;
u8 cnp_ecn;
u8 rtt_jitter_en;
u16 bytes_per_usec;
u16 cc_cr_reset_th;
u8 cr_width;
u8 min_quota;
u8 max_quota;
u8 abs_max_quota;
u16 tr_lb;
u8 cr_prob_fac;
u8 tr_prob_fac;
u16 fair_cr_th;
u8 red_div;
u8 cnp_ratio_th;
u16 ai_ext_rtt;
u8 exp_crcp_ratio;
u8 low_rate_en;
u16 cpcr_update_th;
u16 ai_rtt_th1;
u16 ai_rtt_th2;
u16 cf_rtt_th;
u16 sc_cr_th1; /* severe congestion cr threshold 1 */
u16 sc_cr_th2; /* severe congestion cr threshold 2 */
u32 l64B_per_rtt;
u8 cc_ack_bytes;
u16 reduce_cf_rtt_th;
};
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
u16 alt_tos_dscp;
#define BNXT_QPLIB_USER_DSCP_VALID 0x80
u8 cnp_dscp_user;
u8 roce_dscp_user;
u8 cc_mode;
u8 enable;
u16 inact_th;
u16 init_cr;
u16 init_tr;
u16 rtt;
u8 g;
u8 nph_per_state;
u8 time_pph;
u8 pkts_pph;
u8 tos_ecn;
u8 tos_dscp;
u8 qp1_tos_dscp;
u16 tcp_cp;
struct bnxt_qplib_cc_param_ext cc_ext;
u8 disable_prio_vlan_tx;
/* Mask used while programming the configfs values */
u32 mask;
/* Mask used while displaying the configfs values */
u32 cur_mask;
u8 roce_pri;
#define BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE 0x40000
#define BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI 0x80000
/* prev value to clear dscp table */
u8 prev_roce_pri;
u8 prev_alt_vlan_pcp;
u8 prev_tos_dscp;
u16 prev_alt_tos_dscp;
/* To track if admin has enabled ECN explicitly */
u8 admin_enable;
};
struct bnxt_qplib_roce_stats {
u64 to_retransmits;
u64 seq_err_naks_rcvd;
/* seq_err_naks_rcvd is 64 b */
u64 max_retry_exceeded;
/* max_retry_exceeded is 64 b */
u64 rnr_naks_rcvd;
/* rnr_naks_rcvd is 64 b */
u64 missing_resp;
u64 unrecoverable_err;
/* unrecoverable_err is 64 b */
u64 bad_resp_err;
/* bad_resp_err is 64 b */
u64 local_qp_op_err;
/* local_qp_op_err is 64 b */
u64 local_protection_err;
/* local_protection_err is 64 b */
u64 mem_mgmt_op_err;
/* mem_mgmt_op_err is 64 b */
u64 remote_invalid_req_err;
/* remote_invalid_req_err is 64 b */
u64 remote_access_err;
/* remote_access_err is 64 b */
u64 remote_op_err;
/* remote_op_err is 64 b */
u64 dup_req;
/* dup_req is 64 b */
u64 res_exceed_max;
/* res_exceed_max is 64 b */
u64 res_length_mismatch;
/* res_length_mismatch is 64 b */
u64 res_exceeds_wqe;
/* res_exceeds_wqe is 64 b */
u64 res_opcode_err;
/* res_opcode_err is 64 b */
u64 res_rx_invalid_rkey;
/* res_rx_invalid_rkey is 64 b */
u64 res_rx_domain_err;
/* res_rx_domain_err is 64 b */
u64 res_rx_no_perm;
/* res_rx_no_perm is 64 b */
u64 res_rx_range_err;
/* res_rx_range_err is 64 b */
u64 res_tx_invalid_rkey;
/* res_tx_invalid_rkey is 64 b */
u64 res_tx_domain_err;
/* res_tx_domain_err is 64 b */
u64 res_tx_no_perm;
/* res_tx_no_perm is 64 b */
u64 res_tx_range_err;
/* res_tx_range_err is 64 b */
u64 res_irrq_oflow;
/* res_irrq_oflow is 64 b */
u64 res_unsup_opcode;
/* res_unsup_opcode is 64 b */
u64 res_unaligned_atomic;
/* res_unaligned_atomic is 64 b */
u64 res_rem_inv_err;
/* res_rem_inv_err is 64 b */
u64 res_mem_error;
/* res_mem_error is 64 b */
u64 res_srq_err;
/* res_srq_err is 64 b */
u64 res_cmp_err;
/* res_cmp_err is 64 b */
u64 res_invalid_dup_rkey;
/* res_invalid_dup_rkey is 64 b */
u64 res_wqe_format_err;
/* res_wqe_format_err is 64 b */
u64 res_cq_load_err;
/* res_cq_load_err is 64 b */
u64 res_srq_load_err;
/* res_srq_load_err is 64 b */
u64 res_tx_pci_err;
/* res_tx_pci_err is 64 b */
u64 res_rx_pci_err;
/* res_rx_pci_err is 64 b */
u64 res_oos_drop_count;
/* res_oos_drop_count */
u64 active_qp_count_p0;
/* port 0 active qps */
u64 active_qp_count_p1;
/* port 1 active qps */
u64 active_qp_count_p2;
/* port 2 active qps */
u64 active_qp_count_p3;
/* port 3 active qps */
};
struct bnxt_qplib_ext_stat {
u64 tx_atomic_req;
u64 tx_read_req;
u64 tx_read_res;
u64 tx_write_req;
u64 tx_send_req;
u64 tx_roce_pkts;
u64 tx_roce_bytes;
u64 rx_atomic_req;
u64 rx_read_req;
u64 rx_read_res;
u64 rx_write_req;
u64 rx_send_req;
u64 rx_roce_pkts;
u64 rx_roce_bytes;
u64 rx_roce_good_pkts;
u64 rx_roce_good_bytes;
u64 rx_out_of_buffer;
u64 rx_out_of_sequence;
u64 tx_cnp;
u64 rx_cnp;
u64 rx_ecn_marked;
u64 seq_err_naks_rcvd;
u64 rnr_naks_rcvd;
u64 missing_resp;
u64 to_retransmits;
u64 dup_req;
u64 rx_dcn_payload_cut;
u64 te_bypassed;
};
#define BNXT_QPLIB_ACCESS_LOCAL_WRITE (1 << 0)
#define BNXT_QPLIB_ACCESS_REMOTE_READ (1 << 1)
#define BNXT_QPLIB_ACCESS_REMOTE_WRITE (1 << 2)
#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC (1 << 3)
#define BNXT_QPLIB_ACCESS_MW_BIND (1 << 4)
#define BNXT_QPLIB_ACCESS_ZERO_BASED (1 << 5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND (1 << 6)
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
const union ib_gid *gid, const u8 *mac, u16 vlan_id,
bool update, u32 *index);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw);
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrinfo *mrinfo, bool block);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl, int max);
void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res,
u8 aggr_mode, u8 member_port_map,
u8 active_port_map, bool aggr_en,
u32 stats_fw_id);
int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_roce_stats *stats,
struct bnxt_qplib_query_stats_info *sinfo);
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
struct bnxt_qplib_ext_stat *estat,
struct bnxt_qplib_query_stats_info *sinfo);
static inline void bnxt_re_set_max_gid(u16 *max_sgid);
bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state, enum ib_qp_state next_state,
enum ib_qp_type type, enum ib_qp_attr_mask mask);
#define BNXT_MAX_SQ_SIZE 0xFFFF
#define BNXT_MAX_VAR_WQE_SIZE 512
#define BNXT_SGE_SIZE 16
/* PF defines */
#define BNXT_RE_MAX_QP_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (64 * 1024) : 0
#define BNXT_RE_MAX_MRW_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (256 * 1024) : 0
#define BNXT_RE_MAX_CQ_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (64 * 1024) : 0
#define BNXT_RE_MAX_SRQ_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (4 * 1024) : 0
#define BNXT_RE_MAX_AH_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (64 * 1024) : 0
/* VF defines */
#define BNXT_RE_VF_MAX_QP_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (6 * 1024) : 0
#define BNXT_RE_VF_MAX_MRW_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (6 * 1024) : 0
#define BNXT_RE_VF_MAX_CQ_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (6 * 1024) : 0
#define BNXT_RE_VF_MAX_SRQ_SUPPORTED(chip_gen) \
chip_gen == BNXT_RE_DEFAULT ? (4 * 1024) : 0
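/* Clamp the advertised SGID table size to exactly 256 entries. */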
static inline void bnxt_re_set_max_gid(u16 *max_sgid)
{
*max_sgid = max_t(u32, 256, *max_sgid);
*max_sgid = min_t(u32, 256, *max_sgid);
}
#endif

View file

@ -0,0 +1,187 @@
/*
* Copyright (c) 2017 - 2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __QPLIB_TLV_H__
#define __QPLIB_TLV_H__
struct roce_tlv {
struct tlv tlv;
u8 total_size;
u8 unused[7];
};
#define CHUNK_SIZE 16
#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE)
#define ROCE_1ST_TLV_PREP(rtlv, tot_chunks, content_bytes, more) \
do { \
(rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \
(rtlv)->tlv.tlv_type = TLV_TYPE_ROCE_SP_COMMAND; \
(rtlv)->tlv.length = (content_bytes); \
(rtlv)->tlv.flags = TLV_FLAGS_REQUIRED; \
(rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \
(rtlv)->total_size = (tot_chunks); \
} while (0)
#define ROCE_EXT_TLV_PREP(rtlv, ext_type, content_bytes, more, reqd) \
do { \
(rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \
(rtlv)->tlv.tlv_type = (ext_type); \
(rtlv)->tlv.length = (content_bytes); \
(rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \
(rtlv)->tlv.flags |= (reqd) ? TLV_FLAGS_REQUIRED : 0; \
} while (0)
/*
* TLV size in units of 16 byte chunks
*/
#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
/*
* TLV length in bytes
*/
#define TLV_BYTES (TLV_SIZE * 16)
#define HAS_TLV_HEADER(msg) (((struct tlv *)(msg))->cmd_discr == CMD_DISCR_TLV_ENCAP)
#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
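/*
* When a command carries a TLV header, the first 16-byte chunk is the
* roce_tlv header and the real cmdq_base starts at GET_TLV_DATA(); the
* __get/__set helpers below hide that indirection from callers.
*/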
static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
else
return req->opcode;
}
static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
else
req->opcode = val;
}
static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
else
return req->cookie;
}
static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
u32 size, __le16 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
else
req->cookie = val;
}
static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
else
return req->resp_addr;
}
static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
u32 size, __le64 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
else
req->resp_addr = val;
}
static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
else
return req->resp_size;
}
static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
else
req->resp_size = val;
}
static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct roce_tlv *)(req))->total_size;
else
return req->cmd_size;
}
static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
u32 size, u8 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
else
req->cmd_size = val;
}
static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
else
return req->flags;
}
static inline void __set_cmdq_base_flags(struct cmdq_base *req,
u32 size, __le16 val)
{
if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
else
req->flags = val;
}
struct bnxt_qplib_tlv_modify_cc_req {
struct roce_tlv tlv_hdr;
struct cmdq_modify_roce_cc base_req;
__le64 tlvpad;
struct cmdq_modify_roce_cc_gen1_tlv ext_req;
};
struct bnxt_qplib_tlv_query_rcc_sb {
struct roce_tlv tlv_hdr;
struct creq_query_roce_cc_resp_sb base_sb;
struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb;
};
#endif

View file

@ -0,0 +1,773 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: statistics related functions
*/
#include "bnxt_re.h"
#include "bnxt.h"
int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev,
struct bnxt_re_flow_counters *stats,
struct bnxt_qplib_query_stats_info *sinfo)
{
struct hwrm_cfa_flow_stats_output resp = {};
struct hwrm_cfa_flow_stats_input req = {};
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct bnxt_fw_msg fw_msg = {};
u16 target_id;
int rc = 0;
if (sinfo->function_id == 0xFFFFFFFF)
target_id = -1;
else
target_id = sinfo->function_id + 1;
/* Issue HWRM cmd to read the CNP and RoCE (v1/v2) flow counters, tx and rx */
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, target_id);
req.num_flows = cpu_to_le16(6);
req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp,
sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to get CFA Flow stats : rc = 0x%x\n", rc);
return rc;
}
stats->cnp_stats.cnp_tx_pkts = le64_to_cpu(resp.packet_0);
stats->cnp_stats.cnp_tx_bytes = le64_to_cpu(resp.byte_0);
stats->cnp_stats.cnp_rx_pkts = le64_to_cpu(resp.packet_1);
stats->cnp_stats.cnp_rx_bytes = le64_to_cpu(resp.byte_1);
stats->ro_stats.tx_pkts = le64_to_cpu(resp.packet_2) +
le64_to_cpu(resp.packet_4);
stats->ro_stats.tx_bytes = le64_to_cpu(resp.byte_2) +
le64_to_cpu(resp.byte_4);
stats->ro_stats.rx_pkts = le64_to_cpu(resp.packet_3) +
le64_to_cpu(resp.packet_5);
stats->ro_stats.rx_bytes = le64_to_cpu(resp.byte_3) +
le64_to_cpu(resp.byte_5);
return 0;
}
int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ro_counters roce_only_tmp[2] = {{}, {}};
struct bnxt_re_cnp_counters tmp_counters[2] = {{}, {}};
struct hwrm_cfa_flow_stats_output resp = {};
struct hwrm_cfa_flow_stats_input req = {};
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct bnxt_fw_msg fw_msg = {};
struct bnxt_re_cc_stat *cnps;
struct bnxt_re_rstat *dstat;
int rc = 0;
u64 bytes;
u64 pkts;
/* Issue HWRM cmd to read the CNP and RoCE (v1/v2) flow counters, tx and rx */
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, -1);
req.num_flows = cpu_to_le16(6);
req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to get CFA Flow stats : rc = 0x%x\n", rc);
goto done;
}
tmp_counters[0].cnp_tx_pkts = le64_to_cpu(resp.packet_0);
tmp_counters[0].cnp_tx_bytes = le64_to_cpu(resp.byte_0);
tmp_counters[0].cnp_rx_pkts = le64_to_cpu(resp.packet_1);
tmp_counters[0].cnp_rx_bytes = le64_to_cpu(resp.byte_1);
roce_only_tmp[0].tx_pkts = le64_to_cpu(resp.packet_2) +
le64_to_cpu(resp.packet_4);
roce_only_tmp[0].tx_bytes = le64_to_cpu(resp.byte_2) +
le64_to_cpu(resp.byte_4);
roce_only_tmp[0].rx_pkts = le64_to_cpu(resp.packet_3) +
le64_to_cpu(resp.packet_5);
roce_only_tmp[0].rx_bytes = le64_to_cpu(resp.byte_3) +
le64_to_cpu(resp.byte_5);
cnps = &rdev->stats.cnps;
dstat = &rdev->stats.dstat;
if (!cnps->is_first) {
/* First query: record the initial snapshot as the baseline */
cnps->is_first = true;
cnps->prev[0].cnp_tx_pkts = tmp_counters[0].cnp_tx_pkts;
cnps->prev[0].cnp_tx_bytes = tmp_counters[0].cnp_tx_bytes;
cnps->prev[0].cnp_rx_pkts = tmp_counters[0].cnp_rx_pkts;
cnps->prev[0].cnp_rx_bytes = tmp_counters[0].cnp_rx_bytes;
cnps->prev[1].cnp_tx_pkts = tmp_counters[1].cnp_tx_pkts;
cnps->prev[1].cnp_tx_bytes = tmp_counters[1].cnp_tx_bytes;
cnps->prev[1].cnp_rx_pkts = tmp_counters[1].cnp_rx_pkts;
cnps->prev[1].cnp_rx_bytes = tmp_counters[1].cnp_rx_bytes;
dstat->prev[0].tx_pkts = roce_only_tmp[0].tx_pkts;
dstat->prev[0].tx_bytes = roce_only_tmp[0].tx_bytes;
dstat->prev[0].rx_pkts = roce_only_tmp[0].rx_pkts;
dstat->prev[0].rx_bytes = roce_only_tmp[0].rx_bytes;
dstat->prev[1].tx_pkts = roce_only_tmp[1].tx_pkts;
dstat->prev[1].tx_bytes = roce_only_tmp[1].tx_bytes;
dstat->prev[1].rx_pkts = roce_only_tmp[1].rx_pkts;
dstat->prev[1].rx_bytes = roce_only_tmp[1].rx_bytes;
} else {
u64 byte_mask, pkts_mask;
u64 diff;
byte_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
BYTE_MASK);
pkts_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
PKTS_MASK);
/*
* Calculate the number of CNP packets and use it to account for the CRC
* bytes: multiply the packet count by 4 and add it to the total bytes.
*/
pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_pkts,
&cnps->prev[0].cnp_tx_pkts,
pkts_mask);
cnps->cur[0].cnp_tx_pkts += pkts;
diff = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_bytes,
&cnps->prev[0].cnp_tx_bytes,
byte_mask);
bytes = diff + pkts * 4;
cnps->cur[0].cnp_tx_bytes += bytes;
pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_pkts,
&cnps->prev[0].cnp_rx_pkts,
pkts_mask);
cnps->cur[0].cnp_rx_pkts += pkts;
bytes = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_bytes,
&cnps->prev[0].cnp_rx_bytes,
byte_mask);
cnps->cur[0].cnp_rx_bytes += bytes;
/*
* Calculate the number of CNP packets and use it to account for the CRC
* bytes: multiply the packet count by 4 and add it to the total bytes.
*/
pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_pkts,
&cnps->prev[1].cnp_tx_pkts,
pkts_mask);
cnps->cur[1].cnp_tx_pkts += pkts;
diff = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_bytes,
&cnps->prev[1].cnp_tx_bytes,
byte_mask);
cnps->cur[1].cnp_tx_bytes += diff + pkts * 4;
pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_pkts,
&cnps->prev[1].cnp_rx_pkts,
pkts_mask);
cnps->cur[1].cnp_rx_pkts += pkts;
bytes = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_bytes,
&cnps->prev[1].cnp_rx_bytes,
byte_mask);
cnps->cur[1].cnp_rx_bytes += bytes;
pkts = bnxt_re_stat_diff(roce_only_tmp[0].tx_pkts,
&dstat->prev[0].tx_pkts,
pkts_mask);
dstat->cur[0].tx_pkts += pkts;
diff = bnxt_re_stat_diff(roce_only_tmp[0].tx_bytes,
&dstat->prev[0].tx_bytes,
byte_mask);
dstat->cur[0].tx_bytes += diff + pkts * 4;
pkts = bnxt_re_stat_diff(roce_only_tmp[0].rx_pkts,
&dstat->prev[0].rx_pkts,
pkts_mask);
dstat->cur[0].rx_pkts += pkts;
bytes = bnxt_re_stat_diff(roce_only_tmp[0].rx_bytes,
&dstat->prev[0].rx_bytes,
byte_mask);
dstat->cur[0].rx_bytes += bytes;
pkts = bnxt_re_stat_diff(roce_only_tmp[1].tx_pkts,
&dstat->prev[1].tx_pkts,
pkts_mask);
dstat->cur[1].tx_pkts += pkts;
diff = bnxt_re_stat_diff(roce_only_tmp[1].tx_bytes,
&dstat->prev[1].tx_bytes,
byte_mask);
dstat->cur[1].tx_bytes += diff + pkts * 4;
pkts = bnxt_re_stat_diff(roce_only_tmp[1].rx_pkts,
&dstat->prev[1].rx_pkts,
pkts_mask);
dstat->cur[1].rx_pkts += pkts;
bytes = bnxt_re_stat_diff(roce_only_tmp[1].rx_bytes,
&dstat->prev[1].rx_bytes,
byte_mask);
dstat->cur[1].rx_bytes += bytes;
}
done:
return rc;
}
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
u8 indx, struct bnxt_qplib_ext_stat *s)
{
struct bnxt_re_ext_roce_stats *e_errs;
struct bnxt_re_cnp_counters *cnp;
struct bnxt_re_ext_rstat *ext_d;
struct bnxt_re_ro_counters *ro;
cnp = &rdev->stats.cnps.cur[indx];
ro = &rdev->stats.dstat.cur[indx];
ext_d = &rdev->stats.dstat.ext_rstat[indx];
e_errs = &rdev->stats.dstat.e_errs;
cnp->cnp_tx_pkts = s->tx_cnp;
cnp->cnp_rx_pkts = s->rx_cnp;
/* In bonding mode do not duplicate other stats */
if (indx)
return;
cnp->ecn_marked = s->rx_ecn_marked;
ro->tx_pkts = s->tx_roce_pkts;
ro->tx_bytes = s->tx_roce_bytes;
ro->rx_pkts = s->rx_roce_pkts;
ro->rx_bytes = s->rx_roce_bytes;
ext_d->tx.atomic_req = s->tx_atomic_req;
ext_d->tx.read_req = s->tx_read_req;
ext_d->tx.read_resp = s->tx_read_res;
ext_d->tx.write_req = s->tx_write_req;
ext_d->tx.send_req = s->tx_send_req;
ext_d->rx.atomic_req = s->rx_atomic_req;
ext_d->rx.read_req = s->rx_read_req;
ext_d->rx.read_resp = s->rx_read_res;
ext_d->rx.write_req = s->rx_write_req;
ext_d->rx.send_req = s->rx_send_req;
ext_d->grx.rx_pkts = s->rx_roce_good_pkts;
ext_d->grx.rx_bytes = s->rx_roce_good_bytes;
ext_d->rx_dcn_payload_cut = s->rx_dcn_payload_cut;
ext_d->te_bypassed = s->te_bypassed;
e_errs->oob = s->rx_out_of_buffer;
e_errs->oos = s->rx_out_of_sequence;
e_errs->seq_err_naks_rcvd = s->seq_err_naks_rcvd;
e_errs->rnr_naks_rcvd = s->rnr_naks_rcvd;
e_errs->missing_resp = s->missing_resp;
e_errs->to_retransmits = s->to_retransmits;
e_errs->dup_req = s->dup_req;
}
static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_ext_stat estat[2] = {{}, {}};
struct bnxt_qplib_query_stats_info sinfo;
u32 fid;
int rc;
fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
/* Set default values for sinfo */
sinfo.function_id = 0xFFFFFFFF;
sinfo.collection_id = 0xFF;
sinfo.vf_valid = false;
rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat[0], &sinfo);
if (rc)
goto done;
bnxt_re_copy_ext_stats(rdev, 0, &estat[0]);
done:
return rc;
}
static void bnxt_re_copy_rstat(struct bnxt_re_rdata_counters *d,
struct ctx_hw_stats_ext *s,
bool is_thor)
{
d->tx_ucast_pkts = le64_to_cpu(s->tx_ucast_pkts);
d->tx_mcast_pkts = le64_to_cpu(s->tx_mcast_pkts);
d->tx_bcast_pkts = le64_to_cpu(s->tx_bcast_pkts);
d->tx_discard_pkts = le64_to_cpu(s->tx_discard_pkts);
d->tx_error_pkts = le64_to_cpu(s->tx_error_pkts);
d->tx_ucast_bytes = le64_to_cpu(s->tx_ucast_bytes);
/* Add four CRC bytes per packet */
d->tx_ucast_bytes += d->tx_ucast_pkts * 4;
d->tx_mcast_bytes = le64_to_cpu(s->tx_mcast_bytes);
d->tx_bcast_bytes = le64_to_cpu(s->tx_bcast_bytes);
d->rx_ucast_pkts = le64_to_cpu(s->rx_ucast_pkts);
d->rx_mcast_pkts = le64_to_cpu(s->rx_mcast_pkts);
d->rx_bcast_pkts = le64_to_cpu(s->rx_bcast_pkts);
d->rx_discard_pkts = le64_to_cpu(s->rx_discard_pkts);
d->rx_error_pkts = le64_to_cpu(s->rx_error_pkts);
d->rx_ucast_bytes = le64_to_cpu(s->rx_ucast_bytes);
d->rx_mcast_bytes = le64_to_cpu(s->rx_mcast_bytes);
d->rx_bcast_bytes = le64_to_cpu(s->rx_bcast_bytes);
if (is_thor) {
d->rx_agg_pkts = le64_to_cpu(s->rx_tpa_pkt);
d->rx_agg_bytes = le64_to_cpu(s->rx_tpa_bytes);
d->rx_agg_events = le64_to_cpu(s->rx_tpa_events);
d->rx_agg_aborts = le64_to_cpu(s->rx_tpa_errors);
}
}
static void bnxt_re_get_roce_data_stats(struct bnxt_re_dev *rdev)
{
bool is_thor = _is_chip_gen_p5_p7(rdev->chip_ctx);
struct bnxt_re_rdata_counters *rstat;
rstat = &rdev->stats.dstat.rstat[0];
bnxt_re_copy_rstat(rstat, rdev->qplib_res.hctx->stats.dma, is_thor);
}
int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_query_stats_info sinfo;
int rc = 0;
/* Stats are in 1s cadence */
if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags)) {
if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
rdev->is_virtfn))
rc = bnxt_re_get_ext_stat(rdev);
else
rc = bnxt_re_get_qos_stats(rdev);
if (rc && rc != -ENOMEM)
clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS,
&rdev->flags);
}
if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
bnxt_re_get_roce_data_stats(rdev);
/* Set default values for sinfo */
sinfo.function_id = 0xFFFFFFFF;
sinfo.collection_id = 0xFF;
sinfo.vf_valid = false;
rc = bnxt_qplib_get_roce_error_stats(&rdev->rcfw,
&rdev->stats.dstat.errs,
&sinfo);
if (rc && rc != -ENOMEM)
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
&rdev->flags);
}
return rc;
}
static const char * const bnxt_re_stat_descs[] = {
"link_state",
"max_qp",
"max_srq",
"max_cq",
"max_mr",
"max_mw",
"max_ah",
"max_pd",
"active_qp",
"active_rc_qp",
"active_ud_qp",
"active_srq",
"active_cq",
"active_mr",
"active_mw",
"active_ah",
"active_pd",
"qp_watermark",
"rc_qp_watermark",
"ud_qp_watermark",
"srq_watermark",
"cq_watermark",
"mr_watermark",
"mw_watermark",
"ah_watermark",
"pd_watermark",
"resize_cq_count",
"hw_retransmission",
"recoverable_errors",
"rx_pkts",
"rx_bytes",
"tx_pkts",
"tx_bytes",
"cnp_tx_pkts",
"cnp_tx_bytes",
"cnp_rx_pkts",
"cnp_rx_bytes",
"roce_only_rx_pkts",
"roce_only_rx_bytes",
"roce_only_tx_pkts",
"roce_only_tx_bytes",
"rx_roce_error_pkts",
"rx_roce_discard_pkts",
"tx_roce_error_pkts",
"tx_roce_discards_pkts",
"res_oob_drop_count",
"tx_atomic_req",
"rx_atomic_req",
"tx_read_req",
"tx_read_resp",
"rx_read_req",
"rx_read_resp",
"tx_write_req",
"rx_write_req",
"tx_send_req",
"rx_send_req",
"rx_good_pkts",
"rx_good_bytes",
"rx_dcn_payload_cut",
"te_bypassed",
"rx_ecn_marked_pkts",
"max_retry_exceeded",
"to_retransmits",
"seq_err_naks_rcvd",
"rnr_naks_rcvd",
"missing_resp",
"dup_reqs",
"unrecoverable_err",
"bad_resp_err",
"local_qp_op_err",
"local_protection_err",
"mem_mgmt_op_err",
"remote_invalid_req_err",
"remote_access_err",
"remote_op_err",
"res_exceed_max",
"res_length_mismatch",
"res_exceeds_wqe",
"res_opcode_err",
"res_rx_invalid_rkey",
"res_rx_domain_err",
"res_rx_no_perm",
"res_rx_range_err",
"res_tx_invalid_rkey",
"res_tx_domain_err",
"res_tx_no_perm",
"res_tx_range_err",
"res_irrq_oflow",
"res_unsup_opcode",
"res_unaligned_atomic",
"res_rem_inv_err",
"res_mem_error64",
"res_srq_err",
"res_cmp_err",
"res_invalid_dup_rkey",
"res_wqe_format_err",
"res_cq_load_err",
"res_srq_load_err",
"res_tx_pci_err",
"res_rx_pci_err",
"res_oos_drop_count",
"num_irq_started",
"num_irq_stopped",
"poll_in_intr_en",
"poll_in_intr_dis",
"cmdq_full_dbg_cnt",
"fw_service_prof_type_sup",
"dbq_int_recv",
"dbq_int_en",
"dbq_pacing_resched",
"dbq_pacing_complete",
"dbq_pacing_alerts",
"dbq_dbr_fifo_reg"
};
static void bnxt_re_print_ext_stat(struct bnxt_re_dev *rdev,
struct rdma_hw_stats *stats)
{
struct bnxt_re_cnp_counters *cnp;
struct bnxt_re_ext_rstat *ext_s;
ext_s = &rdev->stats.dstat.ext_rstat[0];
cnp = &rdev->stats.cnps.cur[0];
stats->value[BNXT_RE_TX_ATOMIC_REQ] = ext_s->tx.atomic_req;
stats->value[BNXT_RE_RX_ATOMIC_REQ] = ext_s->rx.atomic_req;
stats->value[BNXT_RE_TX_READ_REQ] = ext_s->tx.read_req;
stats->value[BNXT_RE_TX_READ_RESP] = ext_s->tx.read_resp;
stats->value[BNXT_RE_RX_READ_REQ] = ext_s->rx.read_req;
stats->value[BNXT_RE_RX_READ_RESP] = ext_s->rx.read_resp;
stats->value[BNXT_RE_TX_WRITE_REQ] = ext_s->tx.write_req;
stats->value[BNXT_RE_RX_WRITE_REQ] = ext_s->rx.write_req;
stats->value[BNXT_RE_TX_SEND_REQ] = ext_s->tx.send_req;
stats->value[BNXT_RE_RX_SEND_REQ] = ext_s->rx.send_req;
stats->value[BNXT_RE_RX_GOOD_PKTS] = ext_s->grx.rx_pkts;
stats->value[BNXT_RE_RX_GOOD_BYTES] = ext_s->grx.rx_bytes;
if (_is_chip_p7(rdev->chip_ctx)) {
stats->value[BNXT_RE_RX_DCN_PAYLOAD_CUT] = ext_s->rx_dcn_payload_cut;
stats->value[BNXT_RE_TE_BYPASSED] = ext_s->te_bypassed;
}
stats->value[BNXT_RE_RX_ECN_MARKED_PKTS] = cnp->ecn_marked;
}
static void bnxt_re_print_roce_only_counters(struct bnxt_re_dev *rdev,
struct rdma_hw_stats *stats)
{
struct bnxt_re_ro_counters *roce_only = &rdev->stats.dstat.cur[0];
stats->value[BNXT_RE_ROCE_ONLY_RX_PKTS] = roce_only->rx_pkts;
stats->value[BNXT_RE_ROCE_ONLY_RX_BYTES] = roce_only->rx_bytes;
stats->value[BNXT_RE_ROCE_ONLY_TX_PKTS] = roce_only->tx_pkts;
stats->value[BNXT_RE_ROCE_ONLY_TX_BYTES] = roce_only->tx_bytes;
}
static void bnxt_re_print_normal_total_counters(struct bnxt_re_dev *rdev,
struct rdma_hw_stats *stats)
{
struct bnxt_re_ro_counters *roce_only;
struct bnxt_re_cc_stat *cnps;
cnps = &rdev->stats.cnps;
roce_only = &rdev->stats.dstat.cur[0];
stats->value[BNXT_RE_RX_PKTS] = cnps->cur[0].cnp_rx_pkts + roce_only->rx_pkts;
stats->value[BNXT_RE_RX_BYTES] = cnps->cur[0].cnp_rx_bytes + roce_only->rx_bytes;
stats->value[BNXT_RE_TX_PKTS] = cnps->cur[0].cnp_tx_pkts + roce_only->tx_pkts;
stats->value[BNXT_RE_TX_BYTES] = cnps->cur[0].cnp_tx_bytes + roce_only->tx_bytes;
}
static void bnxt_re_print_normal_counters(struct bnxt_re_dev *rdev,
struct rdma_hw_stats *rstats)
{
struct bnxt_re_rdata_counters *stats;
struct bnxt_re_cc_stat *cnps;
bool en_disp;
stats = &rdev->stats.dstat.rstat[0];
cnps = &rdev->stats.cnps;
en_disp = !_is_chip_gen_p5_p7(rdev->chip_ctx);
bnxt_re_print_normal_total_counters(rdev, rstats);
if (!rdev->is_virtfn) {
rstats->value[BNXT_RE_CNP_TX_PKTS] = cnps->cur[0].cnp_tx_pkts;
if (en_disp)
rstats->value[BNXT_RE_CNP_TX_BYTES] = cnps->cur[0].cnp_tx_bytes;
rstats->value[BNXT_RE_CNP_RX_PKTS] = cnps->cur[0].cnp_rx_pkts;
if (en_disp)
rstats->value[BNXT_RE_CNP_RX_BYTES] = cnps->cur[0].cnp_rx_bytes;
}
/* Print RoCE-only bytes; CNP counters include RoCE packets also */
bnxt_re_print_roce_only_counters(rdev, rstats);
rstats->value[BNXT_RE_RX_ROCE_ERROR_PKTS] = stats ? stats->rx_error_pkts : 0;
rstats->value[BNXT_RE_RX_ROCE_DISCARD_PKTS] = stats ? stats->rx_discard_pkts : 0;
if (!en_disp) {
rstats->value[BNXT_RE_TX_ROCE_ERROR_PKTS] = stats ? stats->tx_error_pkts : 0;
rstats->value[BNXT_RE_TX_ROCE_DISCARDS_PKTS] = stats ? stats->tx_discard_pkts : 0;
}
if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
rdev->is_virtfn)) {
rstats->value[BNXT_RE_RES_OOB_DROP_COUNT] = rdev->stats.dstat.e_errs.oob;
bnxt_re_print_ext_stat(rdev, rstats);
}
}
static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
struct rdma_hw_stats *stats)
{
struct bnxt_re_dbr_sw_stats *dbr_sw_stats = rdev->dbr_sw_stats;
stats->value[BNXT_RE_DBQ_PACING_RESCHED] = dbr_sw_stats->dbq_pacing_resched;
stats->value[BNXT_RE_DBQ_PACING_CMPL] = dbr_sw_stats->dbq_pacing_complete;
stats->value[BNXT_RE_DBQ_PACING_ALERT] = dbr_sw_stats->dbq_pacing_alerts;
stats->value[BNXT_RE_DBQ_DBR_FIFO_REG] = readl_fbsd(rdev->en_dev->softc,
rdev->dbr_db_fifo_reg_off, 0);
}
int bnxt_re_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_re_ext_roce_stats *e_errs;
struct bnxt_re_rdata_counters *rstat;
struct bnxt_qplib_roce_stats *errs;
unsigned long tstamp_diff;
struct pci_dev *pdev;
int sched_msec;
int rc = 0;
if (!port || !stats)
return -EINVAL;
if (!rdev)
return -ENODEV;
if (!__bnxt_re_is_rdev_valid(rdev)) {
return -ENODEV;
}
pdev = rdev->en_dev->pdev;
errs = &rdev->stats.dstat.errs;
rstat = &rdev->stats.dstat.rstat[0];
e_errs = &rdev->stats.dstat.e_errs;
#define BNXT_RE_STATS_CTX_UPDATE_TIMER 250
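/* Throttle firmware stat queries to one per BNXT_RE_STATS_CTX_UPDATE_TIMER ms. */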
sched_msec = BNXT_RE_STATS_CTX_UPDATE_TIMER;
tstamp_diff = jiffies - rdev->stats.read_tstamp;
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
if (/* restrict_stats && */ tstamp_diff < msecs_to_jiffies(sched_msec))
goto skip_query;
rc = bnxt_re_get_device_stats(rdev);
if (rc)
dev_err(rdev_to_dev(rdev),
"Failed to query device stats\n");
rdev->stats.read_tstamp = jiffies;
}
if (rdev->dbr_pacing)
bnxt_re_copy_db_pacing_stats(rdev, stats);
skip_query:
if (rdev->netdev)
stats->value[BNXT_RE_LINK_STATE] = bnxt_re_link_state(rdev);
stats->value[BNXT_RE_MAX_QP] = rdev->dev_attr->max_qp;
stats->value[BNXT_RE_MAX_SRQ] = rdev->dev_attr->max_srq;
stats->value[BNXT_RE_MAX_CQ] = rdev->dev_attr->max_cq;
stats->value[BNXT_RE_MAX_MR] = rdev->dev_attr->max_mr;
stats->value[BNXT_RE_MAX_MW] = rdev->dev_attr->max_mw;
stats->value[BNXT_RE_MAX_AH] = rdev->dev_attr->max_ah;
stats->value[BNXT_RE_MAX_PD] = rdev->dev_attr->max_pd;
stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->stats.rsors.qp_count);
stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&rdev->stats.rsors.rc_qp_count);
stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&rdev->stats.rsors.ud_qp_count);
stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->stats.rsors.srq_count);
stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->stats.rsors.cq_count);
stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->stats.rsors.mr_count);
stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->stats.rsors.mw_count);
stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->stats.rsors.ah_count);
stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->stats.rsors.pd_count);
stats->value[BNXT_RE_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_qp_count);
stats->value[BNXT_RE_RC_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_rc_qp_count);
stats->value[BNXT_RE_UD_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ud_qp_count);
stats->value[BNXT_RE_SRQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_srq_count);
stats->value[BNXT_RE_CQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_cq_count);
stats->value[BNXT_RE_MR_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mr_count);
stats->value[BNXT_RE_MW_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mw_count);
stats->value[BNXT_RE_AH_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ah_count);
stats->value[BNXT_RE_PD_WATERMARK] = atomic_read(&rdev->stats.rsors.max_pd_count);
stats->value[BNXT_RE_RESIZE_CQ_COUNT] = atomic_read(&rdev->stats.rsors.resize_count);
stats->value[BNXT_RE_HW_RETRANSMISSION] = BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags) ? 1 : 0;
stats->value[BNXT_RE_RECOVERABLE_ERRORS] = rstat ? rstat->tx_bcast_pkts : 0;
bnxt_re_print_normal_counters(rdev, stats);
stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = errs->max_retry_exceeded;
if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
rdev->is_virtfn) &&
_is_hw_retx_supported(rdev->dev_attr->dev_cap_flags)) {
stats->value[BNXT_RE_TO_RETRANSMITS] = e_errs->to_retransmits;
stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = e_errs->seq_err_naks_rcvd;
stats->value[BNXT_RE_RNR_NAKS_RCVD] = e_errs->rnr_naks_rcvd;
stats->value[BNXT_RE_MISSING_RESP] = e_errs->missing_resp;
stats->value[BNXT_RE_DUP_REQS] = e_errs->dup_req;
} else {
stats->value[BNXT_RE_TO_RETRANSMITS] = errs->to_retransmits;
stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = errs->seq_err_naks_rcvd;
stats->value[BNXT_RE_RNR_NAKS_RCVD] = errs->rnr_naks_rcvd;
stats->value[BNXT_RE_MISSING_RESP] = errs->missing_resp;
stats->value[BNXT_RE_DUP_REQS] = errs->dup_req;
}
stats->value[BNXT_RE_UNRECOVERABLE_ERR] = errs->unrecoverable_err;
stats->value[BNXT_RE_BAD_RESP_ERR] = errs->bad_resp_err;
stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = errs->local_qp_op_err;
stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = errs->local_protection_err;
stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = errs->mem_mgmt_op_err;
stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = errs->remote_invalid_req_err;
stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = errs->remote_access_err;
stats->value[BNXT_RE_REMOTE_OP_ERR] = errs->remote_op_err;
stats->value[BNXT_RE_RES_EXCEED_MAX] = errs->res_exceed_max;
stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = errs->res_length_mismatch;
stats->value[BNXT_RE_RES_EXCEEDS_WQE] = errs->res_exceeds_wqe;
stats->value[BNXT_RE_RES_OPCODE_ERR] = errs->res_opcode_err;
stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = errs->res_rx_invalid_rkey;
stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = errs->res_rx_domain_err;
stats->value[BNXT_RE_RES_RX_NO_PERM] = errs->res_rx_no_perm;
stats->value[BNXT_RE_RES_RX_RANGE_ERR] = errs->res_rx_range_err;
stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = errs->res_tx_invalid_rkey;
stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = errs->res_tx_domain_err;
stats->value[BNXT_RE_RES_TX_NO_PERM] = errs->res_tx_no_perm;
stats->value[BNXT_RE_RES_TX_RANGE_ERR] = errs->res_tx_range_err;
stats->value[BNXT_RE_RES_IRRQ_OFLOW] = errs->res_irrq_oflow;
stats->value[BNXT_RE_RES_UNSUP_OPCODE] = errs->res_unsup_opcode;
stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = errs->res_unaligned_atomic;
stats->value[BNXT_RE_RES_REM_INV_ERR] = errs->res_rem_inv_err;
stats->value[BNXT_RE_RES_MEM_ERROR64] = errs->res_mem_error;
stats->value[BNXT_RE_RES_SRQ_ERR] = errs->res_srq_err;
stats->value[BNXT_RE_RES_CMP_ERR] = errs->res_cmp_err;
stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = errs->res_invalid_dup_rkey;
stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = errs->res_wqe_format_err;
stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = errs->res_cq_load_err;
stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = errs->res_srq_load_err;
stats->value[BNXT_RE_RES_TX_PCI_ERR] = errs->res_tx_pci_err;
stats->value[BNXT_RE_RES_RX_PCI_ERR] = errs->res_rx_pci_err;
if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
rdev->is_virtfn)) {
stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = e_errs->oos;
} else {
/* Display on function 0 as OOS counters are chip-wide */
if (PCI_FUNC(pdev->devfn) == 0)
stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = errs->res_oos_drop_count;
}
stats->value[BNXT_RE_NUM_IRQ_STARTED] = rdev->rcfw.num_irq_started;
stats->value[BNXT_RE_NUM_IRQ_STOPPED] = rdev->rcfw.num_irq_stopped;
stats->value[BNXT_RE_POLL_IN_INTR_EN] = rdev->rcfw.poll_in_intr_en;
stats->value[BNXT_RE_POLL_IN_INTR_DIS] = rdev->rcfw.poll_in_intr_dis;
stats->value[BNXT_RE_CMDQ_FULL_DBG_CNT] = rdev->rcfw.cmdq_full_dbg;
if (!rdev->is_virtfn)
stats->value[BNXT_RE_FW_SERVICE_PROF_TYPE_SUP] = is_qport_service_type_supported(rdev);
return ARRAY_SIZE(bnxt_re_stat_descs);
}
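/*
* Allocate the per-port rdma_hw_stats structure from the
* bnxt_re_stat_descs counter table, using the default lifespan.
*/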
struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
u8 port_num)
{
return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs,
ARRAY_SIZE(bnxt_re_stat_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}


@ -0,0 +1,353 @@
/*
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: statistics-related data structures
*/
#ifndef __STATS_H__
#define __STATS_H__
#define BNXT_RE_CFA_STAT_BYTES_MASK 0xFFFFFFFFF
#define BNXT_RE_CFA_STAT_PKTS_MASK 0xFFFFFFF
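/* Counter-width selector passed to bnxt_re_get_cfa_stat_mask(). */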
enum {
BYTE_MASK = 0,
PKTS_MASK = 1
};
struct bnxt_re_cnp_counters {
u64 cnp_tx_pkts;
u64 cnp_tx_bytes;
u64 cnp_rx_pkts;
u64 cnp_rx_bytes;
u64 ecn_marked;
};
struct bnxt_re_ro_counters {
u64 tx_pkts;
u64 tx_bytes;
u64 rx_pkts;
u64 rx_bytes;
};
struct bnxt_re_flow_counters {
struct bnxt_re_ro_counters ro_stats;
struct bnxt_re_cnp_counters cnp_stats;
};
struct bnxt_re_ext_cntr {
u64 atomic_req;
u64 read_req;
u64 read_resp;
u64 write_req;
u64 send_req;
};
struct bnxt_re_ext_good {
u64 rx_pkts;
u64 rx_bytes;
};
struct bnxt_re_ext_rstat {
struct bnxt_re_ext_cntr tx;
struct bnxt_re_ext_cntr rx;
struct bnxt_re_ext_good grx;
u64 rx_dcn_payload_cut;
u64 te_bypassed;
};
struct bnxt_re_rdata_counters {
u64 tx_ucast_pkts;
u64 tx_mcast_pkts;
u64 tx_bcast_pkts;
u64 tx_discard_pkts;
u64 tx_error_pkts;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
u64 rx_ucast_pkts;
u64 rx_mcast_pkts;
u64 rx_bcast_pkts;
u64 rx_discard_pkts;
u64 rx_error_pkts;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
u64 rx_agg_pkts;
u64 rx_agg_bytes;
u64 rx_agg_events;
u64 rx_agg_aborts;
};
struct bnxt_re_cc_stat {
struct bnxt_re_cnp_counters prev[2];
struct bnxt_re_cnp_counters cur[2];
bool is_first;
};
struct bnxt_re_ext_roce_stats {
u64 oob;
u64 oos;
u64 seq_err_naks_rcvd;
u64 rnr_naks_rcvd;
u64 missing_resp;
u64 to_retransmits;
u64 dup_req;
};
struct bnxt_re_rstat {
struct bnxt_re_ro_counters prev[2];
struct bnxt_re_ro_counters cur[2];
struct bnxt_re_rdata_counters rstat[2];
struct bnxt_re_ext_rstat ext_rstat[2];
struct bnxt_re_ext_roce_stats e_errs;
struct bnxt_qplib_roce_stats errs;
unsigned long long prev_oob;
};
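/*
* Per-device resource accounting: the *_count fields track currently
* active objects (reported as the ACTIVE_* counters), while the
* max_*_count fields hold high-water marks (reported as the
* *_WATERMARK counters).
*/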
struct bnxt_re_res_cntrs {
atomic_t qp_count;
atomic_t rc_qp_count;
atomic_t ud_qp_count;
atomic_t cq_count;
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
atomic_t ah_count;
atomic_t pd_count;
atomic_t resize_count;
atomic_t max_qp_count;
atomic_t max_rc_qp_count;
atomic_t max_ud_qp_count;
atomic_t max_cq_count;
atomic_t max_srq_count;
atomic_t max_mr_count;
atomic_t max_mw_count;
atomic_t max_ah_count;
atomic_t max_pd_count;
};
struct bnxt_re_device_stats {
struct bnxt_re_rstat dstat;
struct bnxt_re_res_cntrs rsors;
struct bnxt_re_cc_stat cnps;
unsigned long read_tstamp;
/* Used to disable the stats query from the worker or to change the
* query interval. 0 means the stats query is disabled.
*/
u32 stats_query_sec;
/* A free-running counter used along with stats_query_sec to decide
* whether to issue the stats command to the FW.
*/
u32 stats_query_counter;
};
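/*
* Return a mask matching the hardware width of the CFA byte or packet
* counters for the given chip generation; the mask is intended to be
* applied when computing counter deltas so that wrap is handled.
*/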
static inline u64 bnxt_re_get_cfa_stat_mask(struct bnxt_qplib_chip_ctx *cctx,
bool type)
{
u64 mask;
if (type == BYTE_MASK) {
mask = BNXT_RE_CFA_STAT_BYTES_MASK; /* 36 bits */
if (_is_chip_gen_p5_p7(cctx))
mask >>= 0x01; /* 35 bits */
} else {
mask = BNXT_RE_CFA_STAT_PKTS_MASK; /* 28 bits */
if (_is_chip_gen_p5_p7(cctx))
mask |= (0x10000000ULL); /* 29 bits */
}
return mask;
}
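/*
* Return the masked difference between the current and the previously
* sampled counter value; the mask makes the subtraction wrap correctly
* for counters narrower than 64 bits. A current value of zero is
* treated as "counter not available" and leaves the previous sample
* unchanged.
*/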
static inline u64 bnxt_re_stat_diff(u64 cur, u64 *prev, u64 mask)
{
u64 diff;
if (!cur)
return 0;
diff = (cur - *prev) & mask;
if (diff)
*prev = cur;
return diff;
}
static inline void bnxt_re_clear_rsors_stat(struct bnxt_re_res_cntrs *rsors)
{
atomic_set(&rsors->qp_count, 0);
atomic_set(&rsors->cq_count, 0);
atomic_set(&rsors->srq_count, 0);
atomic_set(&rsors->mr_count, 0);
atomic_set(&rsors->mw_count, 0);
atomic_set(&rsors->ah_count, 0);
atomic_set(&rsors->pd_count, 0);
atomic_set(&rsors->resize_count, 0);
atomic_set(&rsors->max_qp_count, 0);
atomic_set(&rsors->max_cq_count, 0);
atomic_set(&rsors->max_srq_count, 0);
atomic_set(&rsors->max_mr_count, 0);
atomic_set(&rsors->max_mw_count, 0);
atomic_set(&rsors->max_ah_count, 0);
atomic_set(&rsors->max_pd_count, 0);
atomic_set(&rsors->max_rc_qp_count, 0);
atomic_set(&rsors->max_ud_qp_count, 0);
}
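/*
* Indices into rdma_hw_stats->value[] as filled in by
* bnxt_re_get_hw_stats(). The ordering is assumed to match the
* bnxt_re_stat_descs name table; new counters should be added to both
* in the same position.
*/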
enum bnxt_re_hw_stats {
BNXT_RE_LINK_STATE,
BNXT_RE_MAX_QP,
BNXT_RE_MAX_SRQ,
BNXT_RE_MAX_CQ,
BNXT_RE_MAX_MR,
BNXT_RE_MAX_MW,
BNXT_RE_MAX_AH,
BNXT_RE_MAX_PD,
BNXT_RE_ACTIVE_QP,
BNXT_RE_ACTIVE_RC_QP,
BNXT_RE_ACTIVE_UD_QP,
BNXT_RE_ACTIVE_SRQ,
BNXT_RE_ACTIVE_CQ,
BNXT_RE_ACTIVE_MR,
BNXT_RE_ACTIVE_MW,
BNXT_RE_ACTIVE_AH,
BNXT_RE_ACTIVE_PD,
BNXT_RE_QP_WATERMARK,
BNXT_RE_RC_QP_WATERMARK,
BNXT_RE_UD_QP_WATERMARK,
BNXT_RE_SRQ_WATERMARK,
BNXT_RE_CQ_WATERMARK,
BNXT_RE_MR_WATERMARK,
BNXT_RE_MW_WATERMARK,
BNXT_RE_AH_WATERMARK,
BNXT_RE_PD_WATERMARK,
BNXT_RE_RESIZE_CQ_COUNT,
BNXT_RE_HW_RETRANSMISSION,
BNXT_RE_RECOVERABLE_ERRORS,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_CNP_TX_PKTS,
BNXT_RE_CNP_TX_BYTES,
BNXT_RE_CNP_RX_PKTS,
BNXT_RE_CNP_RX_BYTES,
BNXT_RE_ROCE_ONLY_RX_PKTS,
BNXT_RE_ROCE_ONLY_RX_BYTES,
BNXT_RE_ROCE_ONLY_TX_PKTS,
BNXT_RE_ROCE_ONLY_TX_BYTES,
BNXT_RE_RX_ROCE_ERROR_PKTS,
BNXT_RE_RX_ROCE_DISCARD_PKTS,
BNXT_RE_TX_ROCE_ERROR_PKTS,
BNXT_RE_TX_ROCE_DISCARDS_PKTS,
BNXT_RE_RES_OOB_DROP_COUNT,
BNXT_RE_TX_ATOMIC_REQ,
BNXT_RE_RX_ATOMIC_REQ,
BNXT_RE_TX_READ_REQ,
BNXT_RE_TX_READ_RESP,
BNXT_RE_RX_READ_REQ,
BNXT_RE_RX_READ_RESP,
BNXT_RE_TX_WRITE_REQ,
BNXT_RE_RX_WRITE_REQ,
BNXT_RE_TX_SEND_REQ,
BNXT_RE_RX_SEND_REQ,
BNXT_RE_RX_GOOD_PKTS,
BNXT_RE_RX_GOOD_BYTES,
BNXT_RE_RX_DCN_PAYLOAD_CUT,
BNXT_RE_TE_BYPASSED,
BNXT_RE_RX_ECN_MARKED_PKTS,
BNXT_RE_MAX_RETRY_EXCEEDED,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
BNXT_RE_RNR_NAKS_RCVD,
BNXT_RE_MISSING_RESP,
BNXT_RE_DUP_REQS,
BNXT_RE_UNRECOVERABLE_ERR,
BNXT_RE_BAD_RESP_ERR,
BNXT_RE_LOCAL_QP_OP_ERR,
BNXT_RE_LOCAL_PROTECTION_ERR,
BNXT_RE_MEM_MGMT_OP_ERR,
BNXT_RE_REMOTE_INVALID_REQ_ERR,
BNXT_RE_REMOTE_ACCESS_ERR,
BNXT_RE_REMOTE_OP_ERR,
BNXT_RE_RES_EXCEED_MAX,
BNXT_RE_RES_LENGTH_MISMATCH,
BNXT_RE_RES_EXCEEDS_WQE,
BNXT_RE_RES_OPCODE_ERR,
BNXT_RE_RES_RX_INVALID_RKEY,
BNXT_RE_RES_RX_DOMAIN_ERR,
BNXT_RE_RES_RX_NO_PERM,
BNXT_RE_RES_RX_RANGE_ERR,
BNXT_RE_RES_TX_INVALID_RKEY,
BNXT_RE_RES_TX_DOMAIN_ERR,
BNXT_RE_RES_TX_NO_PERM,
BNXT_RE_RES_TX_RANGE_ERR,
BNXT_RE_RES_IRRQ_OFLOW,
BNXT_RE_RES_UNSUP_OPCODE,
BNXT_RE_RES_UNALIGNED_ATOMIC,
BNXT_RE_RES_REM_INV_ERR,
BNXT_RE_RES_MEM_ERROR64,
BNXT_RE_RES_SRQ_ERR,
BNXT_RE_RES_CMP_ERR,
BNXT_RE_RES_INVALID_DUP_RKEY,
BNXT_RE_RES_WQE_FORMAT_ERR,
BNXT_RE_RES_CQ_LOAD_ERR,
BNXT_RE_RES_SRQ_LOAD_ERR,
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_RES_OOS_DROP_COUNT,
BNXT_RE_NUM_IRQ_STARTED,
BNXT_RE_NUM_IRQ_STOPPED,
BNXT_RE_POLL_IN_INTR_EN,
BNXT_RE_POLL_IN_INTR_DIS,
BNXT_RE_CMDQ_FULL_DBG_CNT,
BNXT_RE_FW_SERVICE_PROF_TYPE_SUP,
BNXT_RE_DBQ_INT_RECV,
BNXT_RE_DBQ_INT_EN,
BNXT_RE_DBQ_PACING_RESCHED,
BNXT_RE_DBQ_PACING_CMPL,
BNXT_RE_DBQ_PACING_ALERT,
BNXT_RE_DBQ_DBR_FIFO_REG,
BNXT_RE_DBQ_NUM_EXT_COUNTERS
};
#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
struct bnxt_re_stats {
struct bnxt_qplib_roce_stats errs;
struct bnxt_qplib_ext_stat ext_stat;
};
struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port, int index);
int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev,
struct bnxt_re_flow_counters *stats,
struct bnxt_qplib_query_stats_info *sinfo);
int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev);
#endif /* __STATS_H__ */


@ -0,0 +1,22 @@
.PATH: ${SRCTOP}/sys/dev/bnxt/bnxt_re

KMOD=bnxt_re
SRCS += ib_verbs.c ib_verbs.h
SRCS += qplib_fp.c qplib_fp.h
SRCS += qplib_sp.c qplib_sp.h
SRCS += qplib_res.c qplib_res.h
SRCS += qplib_rcfw.c qplib_rcfw.h
SRCS += stats.c stats.h
SRCS += main.c bnxt_re.h
SRCS += opt_inet.h opt_inet6.h opt_ratelimit.h
SRCS += ${LINUXKPI_GENSRCS}

CFLAGS+= -I${SRCTOP}/sys/dev/bnxt/bnxt_en
CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
CFLAGS+= ${LINUXKPI_INCLUDES}
CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM

.include <bsd.kmod.mk>

CFLAGS+= -Wno-cast-qual -Wno-pointer-arith ${GCC_MS_EXTENSIONS}