bnxt_en: L2-RoCE driver communication interface

- Added auxiliary bus support for RoCE.
- Implemented the ULP ops required by the RoCE driver.
- Restructured the context memory data structures.
- Added doorbell (DBR) pacing support.

Reviewed by:            imp
Approved by:            imp
Differential revision:  https://reviews.freebsd.org/D45006
Author:                 Chandrakanth patil
Date:                   2024-04-28 17:59:34 +05:30
Committed by:           Sumit Saxena
Parent:                 4354163e5f
Commit:                 050d28e13c

11 changed files with 1926 additions and 338 deletions
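
To make the new interface concrete, here is an illustrative, non-authoritative sketch of how a RoCE driver would be expected to bind to the L2 driver through the pieces added below (the auxiliary bus compat layer, bnxt_en_dev/bnxt_ulp_ops, and the bnxt_en_ops table). All my_* names are hypothetical; the bnxt_* identifiers are the ones introduced by this commit.

/* Hypothetical RoCE-side attach sketch; my_* names are illustrative only. */
#include "bnxt.h"
#include "bnxt_auxbus_compat.h"
#include "bnxt_ulp.h"

static void my_ulp_stop(void *handle)  { /* quiesce RoCE activity */ }
static void my_ulp_start(void *handle) { /* resume after FW reset */ }

static struct bnxt_ulp_ops my_ulp_ops = {
	.ulp_stop  = my_ulp_stop,
	.ulp_start = my_ulp_start,
};

static int
my_roce_probe(struct auxiliary_device *adev,
    const struct auxiliary_device_id *id)
{
	struct bnxt_aux_dev *bnxt_adev =
	    container_of(adev, struct bnxt_aux_dev, aux_dev);
	struct bnxt_en_dev *edev = bnxt_adev->edev;
	struct bnxt_msix_entry ent[BNXT_ROCE_IRQ_COUNT];
	int rc;

	/* Claim the RoCE ULP slot, then request the MSI-X vectors. */
	rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
	    &my_ulp_ops, NULL /* ULP handle */);
	if (rc)
		return rc;
	rc = edev->en_ops->bnxt_request_msix(edev, BNXT_ROCE_ULP, ent,
	    BNXT_ROCE_IRQ_COUNT);
	return rc < 0 ? rc : 0;
}

/* Matched against "<KBUILD_MODNAME>.<name>" with the trailing ".<id>" stripped. */
static const struct auxiliary_device_id my_roce_id_table[] = {
	{ .name = "if_bnxt.rdma" },
	{}
};

static struct auxiliary_driver my_roce_driver = {
	.name     = "my_roce",
	.probe    = my_roce_probe,
	.id_table = my_roce_id_table,
};
/* Module load would call: auxiliary_driver_register(&my_roce_driver); */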


@@ -42,9 +42,13 @@
#include <net/if.h>
#include <net/if_var.h>
#include <net/iflib.h>
#include <linux/types.h>
#include "hsi_struct_def.h"
#include "bnxt_dcb.h"
#include "bnxt_auxbus_compat.h"
#define DFLT_HWRM_CMD_TIMEOUT 500
/* PCI IDs */
#define BROADCOM_VENDOR_ID 0x14E4
@@ -90,6 +94,58 @@
#define NETXTREME_E_VF2 0x16d3
#define NETXTREME_E_VF3 0x16dc
#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
(((data1) & \
HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \
(((data1) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >> \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \
(((data2) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >> \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
#define BNXT_EVENT_DBR_EPOCH(data) \
(((data) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK) >> \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT)
#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
(((data2) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >> \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
#define EVENT_DATA2_NVM_ERR_ADDR(data2) \
(((data2) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK) >> \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT)
#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
(((data1) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) == \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
#define EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1) \
(((data1) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE)
#define EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1) \
(((data1) & \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE)
#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
((data1) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
((data2) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
#define INVALID_STATS_CTX_ID -1
/* Maximum numbers of RX and TX descriptors. iflib requires this to be a power
* of two. The hardware has no particular limitation. */
#define BNXT_MAX_RXD ((INT32_MAX >> 1) + 1)
@@ -139,16 +195,20 @@
#define DBR_TYPE_PUSH_END (0xdULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
#define BNXT_MAX_NUM_QUEUES 32
#define BNXT_MAX_L2_QUEUES 128
#define BNXT_ROCE_IRQ_COUNT 9
#define BNXT_MAX_NUM_QUEUES (BNXT_MAX_L2_QUEUES + BNXT_ROCE_IRQ_COUNT)
/* Completion related defines */
#define CMP_VALID(cmp, v_bit) \
((!!(((struct cmpl_base *)(cmp))->info3_v & htole32(CMPL_BASE_V))) == !!(v_bit) )
/* Chip class phase 5 */
#define BNXT_CHIP_P5(sc) ((softc->flags & BNXT_FLAG_CHIP_P5))
#define BNXT_CHIP_P5(sc) ((sc->flags & BNXT_FLAG_CHIP_P5))
#define DB_PF_OFFSET_P5 0x10000
#define DB_VF_OFFSET_P5 0x4000
#define NQ_VALID(cmp, v_bit) \
((!!(((nq_cn_t *)(cmp))->v & htole32(NQ_CN_V))) == !!(v_bit) )
@@ -509,10 +569,9 @@ struct bnxt_ver_info {
uint8_t hwrm_if_update;
char hwrm_if_ver[BNXT_VERSTR_SIZE];
char driver_hwrm_if_ver[BNXT_VERSTR_SIZE];
char hwrm_fw_ver[BNXT_VERSTR_SIZE];
char mgmt_fw_ver[BNXT_VERSTR_SIZE];
char netctrl_fw_ver[BNXT_VERSTR_SIZE];
char roce_fw_ver[BNXT_VERSTR_SIZE];
char mgmt_fw_ver[FW_VER_STR_LEN];
char netctrl_fw_ver[FW_VER_STR_LEN];
char roce_fw_ver[FW_VER_STR_LEN];
char fw_ver_str[FW_VER_STR_LEN];
char phy_ver[BNXT_VERSTR_SIZE];
char pkg_ver[64];
@@ -589,19 +648,22 @@ struct bnxt_hw_lro {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
uint16_t flags;
int nr_pages;
int page_size;
uint16_t flags;
#define BNXT_RMEM_VALID_PTE_FLAG 1
#define BNXT_RMEM_RING_PTE_FLAG 2
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
uint16_t depth;
uint8_t init_val;
struct iflib_dma_info *pg_arr;
struct iflib_dma_info pg_tbl;
int vmem_size;
void **vmem;
uint16_t depth;
struct bnxt_ctx_mem_type *ctx_mem;
struct iflib_dma_info *pg_arr;
struct iflib_dma_info pg_tbl;
int vmem_size;
void **vmem;
};
struct bnxt_ctx_pg_info {
@@ -612,43 +674,85 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
#define BNXT_MAX_TQM_SP_RINGS 1
#define BNXT_MAX_TQM_FP_LEGACY_RINGS 8
#define BNXT_MAX_TQM_FP_RINGS 9
#define BNXT_MAX_TQM_LEGACY_RINGS \
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_LEGACY_RINGS)
#define BNXT_MAX_TQM_RINGS \
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
#define BNXT_BACKING_STORE_CFG_LEN \
sizeof(struct hwrm_func_backing_store_cfg_input)
#define BNXT_SET_CTX_PAGE_ATTR(attr) \
do { \
if (BNXT_PAGE_SIZE == 0x2000) \
attr = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_SRQ_PG_SIZE_PG_8K; \
else if (BNXT_PAGE_SIZE == 0x10000) \
attr = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_QPC_PG_SIZE_PG_64K; \
else \
attr = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_QPC_PG_SIZE_PG_4K; \
} while (0)
struct bnxt_ctx_mem_type {
u16 type;
u16 entry_size;
u32 flags;
#define BNXT_CTX_MEM_TYPE_VALID HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID
u32 instance_bmap;
u8 init_value;
u8 entry_multiple;
u16 init_offset;
#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
u32 max_entries;
u32 min_entries;
u8 split_entry_cnt;
#define BNXT_MAX_SPLIT_ENTRY 4
union {
struct {
u32 qp_l2_entries;
u32 qp_qp1_entries;
};
u32 srq_l2_entries;
u32 cq_l2_entries;
u32 vnic_entries;
struct {
u32 mrav_av_entries;
u32 mrav_num_entries_units;
};
u32 split[BNXT_MAX_SPLIT_ENTRY];
};
struct bnxt_ctx_pg_info *pg_info;
};
#define BNXT_CTX_QP HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QP
#define BNXT_CTX_SRQ HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ
#define BNXT_CTX_CQ HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ
#define BNXT_CTX_VNIC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_VNIC
#define BNXT_CTX_STAT HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_STAT
#define BNXT_CTX_STQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SP_TQM_RING
#define BNXT_CTX_FTQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_FP_TQM_RING
#define BNXT_CTX_MRAV HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV
#define BNXT_CTX_TIM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM
#define BNXT_CTX_TKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TKC
#define BNXT_CTX_RKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RKC
#define BNXT_CTX_MTQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MP_TQM_RING
#define BNXT_CTX_SQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SQ_DB_SHADOW
#define BNXT_CTX_RQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RQ_DB_SHADOW
#define BNXT_CTX_SRQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ_DB_SHADOW
#define BNXT_CTX_CQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ_DB_SHADOW
#define BNXT_CTX_QTKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_TKC
#define BNXT_CTX_QRKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_RKC
#define BNXT_CTX_MAX (BNXT_CTX_QRKC + 1)
struct bnxt_ctx_mem_info {
uint32_t qp_max_entries;
uint16_t qp_min_qp1_entries;
uint16_t qp_max_l2_entries;
uint16_t qp_entry_size;
uint16_t srq_max_l2_entries;
uint32_t srq_max_entries;
uint16_t srq_entry_size;
uint16_t cq_max_l2_entries;
uint32_t cq_max_entries;
uint16_t cq_entry_size;
uint16_t vnic_max_vnic_entries;
uint16_t vnic_max_ring_table_entries;
uint16_t vnic_entry_size;
uint32_t stat_max_entries;
uint16_t stat_entry_size;
uint16_t tqm_entry_size;
uint32_t tqm_min_entries_per_ring;
uint32_t tqm_max_entries_per_ring;
uint32_t mrav_max_entries;
uint16_t mrav_entry_size;
uint16_t tim_entry_size;
uint32_t tim_max_entries;
uint8_t tqm_entries_multiple;
uint8_t ctx_kind_initializer;
u8 tqm_fp_rings_count;
uint32_t flags;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
struct bnxt_ctx_pg_info qp_mem;
struct bnxt_ctx_pg_info srq_mem;
struct bnxt_ctx_pg_info cq_mem;
struct bnxt_ctx_pg_info vnic_mem;
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9];
struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_MAX];
};
struct bnxt_hw_resc {
@@ -678,7 +782,7 @@ struct bnxt_hw_resc {
uint16_t max_nqs;
uint16_t max_irqs;
uint16_t resv_irqs;
}
};
enum bnxt_type_ets {
BNXT_TYPE_ETS_TSA = 0,
@@ -710,11 +814,23 @@ struct bnxt_softc_list {
#define BIT_ULL(nr) (1ULL << (nr))
#endif
struct bnxt_aux_dev {
struct auxiliary_device aux_dev;
struct bnxt_en_dev *edev;
int id;
};
struct bnxt_msix_tbl {
uint32_t entry;
uint32_t vector;
};
struct bnxt_softc {
device_t dev;
if_ctx_t ctx;
if_softc_ctx_t scctx;
if_shared_ctx_t sctx;
if_t ifp;
uint32_t domain;
uint32_t bus;
uint32_t slot;
@@ -738,11 +854,16 @@ struct bnxt_softc {
#define BNXT_FLAG_FW_CAP_EXT_STATS 0x0080
#define BNXT_FLAG_MULTI_HOST 0x0100
#define BNXT_FLAG_MULTI_ROOT 0x0200
#define BNXT_FLAG_ROCEV1_CAP 0x0400
#define BNXT_FLAG_ROCEV2_CAP 0x0800
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | BNXT_FLAG_ROCEV2_CAP)
uint32_t flags;
#define BNXT_STATE_LINK_CHANGE (0)
#define BNXT_STATE_MAX (BNXT_STATE_LINK_CHANGE + 1)
bitstr_t *state_bv;
uint32_t total_msix;
uint32_t total_irqs;
struct bnxt_msix_tbl *irq_tbl;
struct bnxt_func_info func;
struct bnxt_func_qcfg fn_qcfg;
@@ -812,6 +933,8 @@ struct bnxt_softc {
struct iflib_dma_info def_cp_ring_mem;
struct iflib_dma_info def_nq_ring_mem;
struct grouptask def_cp_task;
int db_size;
int legacy_db_size;
struct bnxt_doorbell_ops db_ops;
struct sysctl_ctx_list hw_stats;
@@ -908,6 +1031,33 @@ struct bnxt_softc {
#define BNXT_PHY_FL_NO_PAUSE (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_BANK_ADDR_SUPPORTED << 8)
struct bnxt_aux_dev *aux_dev;
struct net_device *net_dev;
struct mtx en_ops_lock;
uint8_t port_partition_type;
struct bnxt_en_dev *edev;
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
#define BNXT_STATE_FW_RESET_DET 3
#define BNXT_STATE_IN_FW_RESET 4
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_STATE_L2_FILTER_RETRY 10
#define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
#define BNXT_STATE_HALF_OPEN 15
#define BNXT_NO_FW_ACCESS(bp) \
test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state)
struct pci_dev *pdev;
int fw_reset_state;
};
struct bnxt_filter_info {


@@ -0,0 +1,194 @@
/*-
* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/delay.h>
#include "bnxt_auxbus_compat.h"
static struct list_head bnxt_aux_bus_dev_list = LINUX_LIST_HEAD_INIT(bnxt_aux_bus_dev_list);
static struct list_head bnxt_aux_bus_drv_list = LINUX_LIST_HEAD_INIT(bnxt_aux_bus_drv_list);
static DEFINE_MUTEX(bnxt_auxbus_lock);
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
for (; id->name[0]; id++) {
const char *p = strrchr(dev_name(&auxdev->dev), '.');
int match_size;
if (!p)
continue;
match_size = p - dev_name(&auxdev->dev);
if (strlen(id->name) == match_size &&
!strncmp(dev_name(&auxdev->dev), id->name, match_size))
return id;
}
return NULL;
}
int auxiliary_device_init(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
char *modname = KBUILD_MODNAME;
int ret;
if (!dev->parent) {
pr_err("auxiliary_device has a NULL dev->parent\n");
return -EINVAL;
}
if (!auxdev->name) {
pr_err("auxiliary_device has a NULL name\n");
return -EINVAL;
}
ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
if (ret) {
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret);
return ret;
}
return 0;
}
int auxiliary_device_add(struct auxiliary_device *auxdev)
{
const struct auxiliary_device_id *id;
struct auxiliary_driver *auxdrv = NULL;
bool found = true;
int ret = 0;
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
if (auxdrv) {
msleep(2 * 1000);
id = auxiliary_match_id(auxdrv->id_table, auxdev);
if (id) {
ret = auxdrv->probe(auxdev, id);
if (!ret)
auxdev->dev.driver = &auxdrv->driver;
else
found = false;
break;
}
}
}
if (found)
list_add_tail(&auxdev->list, &bnxt_aux_bus_dev_list);
mutex_unlock(&bnxt_auxbus_lock);
return ret;
}
void auxiliary_device_uninit(struct auxiliary_device *auxdev)
{
return;
}
void auxiliary_device_delete(struct auxiliary_device *auxdev)
{
struct auxiliary_driver *auxdrv;
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
if (auxdev->dev.driver != &auxdrv->driver)
continue;
if (auxdrv->remove)
auxdrv->remove(auxdev);
auxdev->dev.driver = NULL;
}
list_del(&auxdev->list);
mutex_unlock(&bnxt_auxbus_lock);
}
int auxiliary_driver_register(struct auxiliary_driver *auxdrv)
{
const struct auxiliary_device_id *id;
struct auxiliary_device *auxdev;
int ret = 0;
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
if (auxdrv->name)
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", KBUILD_MODNAME,
auxdrv->name);
else
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", KBUILD_MODNAME);
if (!auxdrv->driver.name)
return -ENOMEM;
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdev, &bnxt_aux_bus_dev_list, list) {
if (auxdev->dev.driver)
continue;
id = auxiliary_match_id(auxdrv->id_table, auxdev);
if (id) {
ret = auxdrv->probe(auxdev, id);
if (ret)
continue;
auxdev->dev.driver = &auxdrv->driver;
}
}
list_add_tail(&auxdrv->list, &bnxt_aux_bus_drv_list);
mutex_unlock(&bnxt_auxbus_lock);
return 0;
}
EXPORT_SYMBOL(auxiliary_driver_register);
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
{
struct auxiliary_device *auxdev;
/* PF auxiliary devices are added to the list first and then VF devices.
* If we remove the PF aux device driver first, it causes failures while
* removing the VF driver.
* We need to remove the VF auxiliary drivers first, so walk backwards.
*/
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry_reverse(auxdev, &bnxt_aux_bus_dev_list, list) {
if (auxdev->dev.driver != &auxdrv->driver)
continue;
if (auxdrv->remove)
auxdrv->remove(auxdev);
auxdev->dev.driver = NULL;
}
kfree(auxdrv->driver.name);
list_del(&auxdrv->list);
mutex_unlock(&bnxt_auxbus_lock);
}
EXPORT_SYMBOL(auxiliary_driver_unregister);


@@ -0,0 +1,75 @@
/*-
* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _BNXT_AUXILIARY_COMPAT_H_
#define _BNXT_AUXILIARY_COMPAT_H_
#include <linux/device.h>
#include <linux/idr.h>
#define KBUILD_MODNAME "if_bnxt"
#define AUXILIARY_NAME_SIZE 32
struct auxiliary_device_id {
char name[AUXILIARY_NAME_SIZE];
uint64_t driver_data;
};
struct auxiliary_device {
struct device dev;
const char *name;
uint32_t id;
struct list_head list;
};
struct auxiliary_driver {
int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
void (*remove)(struct auxiliary_device *auxdev);
const char *name;
struct device_driver driver;
const struct auxiliary_device_id *id_table;
struct list_head list;
};
int auxiliary_device_init(struct auxiliary_device *auxdev);
int auxiliary_device_add(struct auxiliary_device *auxdev);
void auxiliary_device_uninit(struct auxiliary_device *auxdev);
void auxiliary_device_delete(struct auxiliary_device *auxdev);
int auxiliary_driver_register(struct auxiliary_driver *auxdrv);
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
{
return dev_get_drvdata(&auxdev->dev);
}
static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
{
dev_set_drvdata(&auxdev->dev, data);
}
#endif /* _BNXT_AUXILIARY_COMPAT_H_ */
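
For reference, a minimal device-side lifecycle sketch of what this compat layer expects (example_* names are hypothetical and error handling is pared down; bnxt_rdma_aux_device_add()/_del() later in this commit follow the same pattern):

#include "bnxt_auxbus_compat.h"

static struct auxiliary_device example_adev;		/* hypothetical */

static void example_release(struct device *dev) { /* free per-device state */ }

static int
example_aux_device_create(struct device *parent)
{
	struct auxiliary_device *adev = &example_adev;

	adev->name = "rdma";		/* dev name becomes "if_bnxt.rdma.0" */
	adev->id = 0;
	adev->dev.parent = parent;
	adev->dev.release = example_release;

	if (auxiliary_device_init(adev))	/* validates parent/name, sets dev name */
		return -EINVAL;
	if (auxiliary_device_add(adev)) {	/* probes any matching registered driver */
		auxiliary_device_uninit(adev);
		return -ENODEV;
	}
	return 0;
}
/* Teardown is the mirror image:
 * auxiliary_device_delete(&example_adev);
 * auxiliary_device_uninit(&example_adev);
 */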


@@ -28,6 +28,7 @@
#include <sys/cdefs.h>
#include <sys/endian.h>
#include <linux/pci.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -122,6 +123,10 @@ _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
req->seq_id = htole16(softc->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
if (BNXT_NO_FW_ACCESS(softc) &&
(req->req_type != HWRM_FUNC_RESET && req->req_type != HWRM_VER_GET))
return -EINVAL;
if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
@@ -313,6 +318,39 @@ bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir)
return rc;
}
static int bnxt_alloc_all_ctx_pg_info(struct bnxt_softc *softc)
{
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
u16 type;
for (type = 0; type < BNXT_CTX_MAX; type++) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
int n = 1;
if (!ctxm->max_entries || ctxm->pg_info)
continue;
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
if (!ctxm->pg_info)
return -ENOMEM;
}
return 0;
}
static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
u8 init_val, u8 init_offset,
bool init_mask_set)
{
ctxm->init_value = init_val;
ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
if (init_mask_set)
ctxm->init_offset = init_offset * 4;
else
ctxm->init_value = 0;
}
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -320,63 +358,110 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
(void *)softc->hwrm_cmd_resp.idi_vaddr;
int rc;
if (softc->hwrm_spec_code < 0x10902 || BNXT_VF(softc) || softc->ctx_mem)
if (softc->hwrm_spec_code < 0x10902 || softc->ctx_mem)
return 0;
if (BNXT_VF(softc))
return 0;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
int i;
u8 init_val, init_idx = 0;
u16 init_mask;
ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
ctx = softc->ctx_mem;
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
softc->ctx_mem = ctx;
}
ctx_pg = malloc(sizeof(*ctx_pg) * (softc->rx_max_q + 1),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!ctx_pg) {
free(ctx, M_DEVBUF);
rc = -ENOMEM;
goto ctx_err;
}
for (i = 0; i < softc->rx_max_q + 1; i++, ctx_pg++)
ctx->tqm_mem[i] = ctx_pg;
init_val = resp->ctx_kind_initializer;
init_mask = le16_to_cpu(resp->ctx_init_mask);
softc->ctx_mem = ctx;
ctx->qp_max_entries = le32toh(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16toh(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16toh(resp->qp_max_l2_entries);
ctx->qp_entry_size = le16toh(resp->qp_entry_size);
ctx->srq_max_l2_entries = le16toh(resp->srq_max_l2_entries);
ctx->srq_max_entries = le32toh(resp->srq_max_entries);
ctx->srq_entry_size = le16toh(resp->srq_entry_size);
ctx->cq_max_l2_entries = le16toh(resp->cq_max_l2_entries);
ctx->cq_max_entries = le32toh(resp->cq_max_entries);
ctx->cq_entry_size = le16toh(resp->cq_entry_size);
ctx->vnic_max_vnic_entries =
le16toh(resp->vnic_max_vnic_entries);
ctx->vnic_max_ring_table_entries =
le16toh(resp->vnic_max_ring_table_entries);
ctx->vnic_entry_size = le16toh(resp->vnic_entry_size);
ctx->stat_max_entries = le32toh(resp->stat_max_entries);
ctx->stat_entry_size = le16toh(resp->stat_entry_size);
ctx->tqm_entry_size = le16toh(resp->tqm_entry_size);
ctx->tqm_min_entries_per_ring =
le32toh(resp->tqm_min_entries_per_ring);
ctx->tqm_max_entries_per_ring =
le32toh(resp->tqm_max_entries_per_ring);
ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
if (!ctx->tqm_entries_multiple)
ctx->tqm_entries_multiple = 1;
ctx->mrav_max_entries = le32toh(resp->mrav_max_entries);
ctx->mrav_entry_size = le16toh(resp->mrav_entry_size);
ctx->tim_entry_size = le16toh(resp->tim_entry_size);
ctx->tim_max_entries = le32toh(resp->tim_max_entries);
ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries);
ctxm->max_entries = ctxm->vnic_entries +
le16_to_cpu(resp->vnic_max_ring_table_entries);
ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
bnxt_init_ctx_initializer(ctxm, init_val,
resp->vnic_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
bnxt_init_ctx_initializer(ctxm, init_val,
resp->stat_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
ctxm->entry_multiple = resp->tqm_entries_multiple;
if (!ctxm->entry_multiple)
ctxm->entry_multiple = 1;
memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
ctxm->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
bnxt_init_ctx_initializer(ctxm, init_val,
resp->mrav_init_offset,
(init_mask & (1 << init_idx++)) != 0);
ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = softc->tx_max_q;
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS;
if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
softc->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
}
ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
rc = bnxt_alloc_all_ctx_pg_info(softc);
} else {
rc = 0;
}
@@ -419,150 +504,213 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
struct bnxt_ctx_pg_info *ctx_pg;
uint32_t *num_entries, req_len = sizeof(req);
uint64_t *pg_dir;
uint8_t *pg_attr;
int i, rc;
uint32_t ena;
struct bnxt_ctx_mem_type *ctxm;
u32 req_len = sizeof(req);
__le32 *num_entries;
u32 ena, flags = 0;
__le64 *pg_dir;
u8 *pg_attr;
int i;
if (!ctx)
return 0;
if (req_len > softc->hwrm_max_ext_req_len)
req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
req.enables = htole32(enables);
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
ctx_pg = &ctx->qp_mem;
req.qp_num_entries = htole32(ctx_pg->entries);
req.qp_num_qp1_entries = htole16(ctx->qp_min_qp1_entries);
req.qp_num_l2_entries = htole16(ctx->qp_max_l2_entries);
req.qp_entry_size = htole16(ctx->qp_entry_size);
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
ctx_pg = ctxm->pg_info;
req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
req.qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
req.qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
req.qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.qpc_pg_size_qpc_lvl,
&req.qpc_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
ctx_pg = &ctx->srq_mem;
req.srq_num_entries = htole32(ctx_pg->entries);
req.srq_num_l2_entries = htole16(ctx->srq_max_l2_entries);
req.srq_entry_size = htole16(ctx->srq_entry_size);
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
ctx_pg = ctxm->pg_info;
req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
req.srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
req.srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.srq_pg_size_srq_lvl,
&req.srq_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
ctx_pg = &ctx->cq_mem;
req.cq_num_entries = htole32(ctx_pg->entries);
req.cq_num_l2_entries = htole16(ctx->cq_max_l2_entries);
req.cq_entry_size = htole16(ctx->cq_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
ctx_pg = ctxm->pg_info;
req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
req.cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
req.cq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.cq_pg_size_cq_lvl,
&req.cq_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = htole32(ctx_pg->entries);
req.mrav_entry_size = htole16(ctx->mrav_entry_size);
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
ctx_pg = ctxm->pg_info;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
if (ctxm->mrav_num_entries_units)
flags |=
HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT;
req.mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl,
&req.mrav_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
ctx_pg = &ctx->tim_mem;
req.tim_num_entries = htole32(ctx_pg->entries);
req.tim_entry_size = htole16(ctx->tim_entry_size);
ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
ctx_pg = ctxm->pg_info;
req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
req.tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.tim_pg_size_tim_lvl,
&req.tim_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
ctx_pg = &ctx->vnic_mem;
req.vnic_num_vnic_entries =
htole16(ctx->vnic_max_vnic_entries);
ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
ctx_pg = ctxm->pg_info;
req.vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req.vnic_num_ring_table_entries =
htole16(ctx->vnic_max_ring_table_entries);
req.vnic_entry_size = htole16(ctx->vnic_entry_size);
cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
req.vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.vnic_pg_size_vnic_lvl,
&req.vnic_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
ctx_pg = &ctx->stat_mem;
req.stat_num_entries = htole32(ctx->stat_max_entries);
req.stat_entry_size = htole16(ctx->stat_entry_size);
ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
ctx_pg = ctxm->pg_info;
req.stat_num_entries = cpu_to_le32(ctxm->max_entries);
req.stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.stat_pg_size_stat_lvl,
&req.stat_page_dir);
}
ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req.tqm_sp_num_entries,
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP,
ctx_pg = ctxm->pg_info;
i < BNXT_MAX_TQM_LEGACY_RINGS;
ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
req.tqm_entry_size = htole16(ctx->tqm_entry_size);
ctx_pg = ctx->tqm_mem[i];
*num_entries = htole32(ctx_pg->entries);
req.tqm_entry_size = cpu_to_le16(ctxm->entry_size);
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
if (req_len > softc->hwrm_max_ext_req_len)
req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
rc = hwrm_send_message(softc, &req, req_len);
if (rc)
rc = -EIO;
return rc;
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
pg_attr = &req.tqm_ring8_pg_size_tqm_ring_lvl;
pg_dir = &req.tqm_ring8_page_dir;
ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8];
req.tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
return hwrm_send_message(softc, &req, sizeof(req));
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
{
struct hwrm_func_resource_qcaps_output *resp =
struct hwrm_func_resource_qcaps_output *resp =
(void *)softc->hwrm_cmd_resp.idi_vaddr;
struct hwrm_func_resource_qcaps_input req = {0};
struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
int rc;
struct hwrm_func_resource_qcaps_input req = {0};
struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
int rc;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
req.fid = htole16(0xffff);
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
req.fid = htole16(0xffff);
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (rc) {
rc = -EIO;
goto hwrm_func_resc_qcaps_exit;
}
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (rc) {
rc = -EIO;
goto hwrm_func_resc_qcaps_exit;
}
hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
if (!all)
goto hwrm_func_resc_qcaps_exit;
hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
if (!all)
goto hwrm_func_resc_qcaps_exit;
hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
hw_resc->min_vnics = le16toh(resp->min_vnics);
hw_resc->max_vnics = le16toh(resp->max_vnics);
hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
hw_resc->min_vnics = le16toh(resp->min_vnics);
hw_resc->max_vnics = le16toh(resp->max_vnics);
hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
if (BNXT_CHIP_P5(softc)) {
hw_resc->max_nqs = le16toh(resp->max_msix);
hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
}
hw_resc->max_nqs = le16toh(resp->max_msix);
hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
}
hwrm_func_resc_qcaps_exit:
BNXT_HWRM_UNLOCK(softc);
return rc;
return rc;
}
int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
struct bnxt_ctx_mem_type *ctxm,
bool last)
{
struct hwrm_func_backing_store_cfg_v2_input req = {0};
u32 instance_bmap = ctxm->instance_bmap;
int i, j, rc = 0, n = 1;
__le32 *p;
if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
return 0;
if (instance_bmap)
n = hweight32(ctxm->instance_bmap);
else
instance_bmap = 1;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG_V2);
req.type = cpu_to_le16(ctxm->type);
req.entry_size = cpu_to_le16(ctxm->entry_size);
for (i = 0, p = &req.split_entry_0; i < ctxm->split_entry_cnt; i++)
p[i] = cpu_to_le32(ctxm->split[i]);
for (i = 0, j = 0; j < n && !rc; i++) {
struct bnxt_ctx_pg_info *ctx_pg;
if (!(instance_bmap & (1 << i)))
continue;
req.instance = cpu_to_le16(i);
ctx_pg = &ctxm->pg_info[j++];
if (!ctx_pg->entries)
continue;
req.num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.page_size_pbl_level,
&req.page_dir);
if (last && j == (n - 1))
req.flags =
cpu_to_le32(HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE);
rc = hwrm_send_message(softc, &req, sizeof(req));
}
return rc;
}
int
@@ -626,8 +774,6 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
resp->hwrm_fw_major, resp->hwrm_fw_minor, resp->hwrm_fw_build);
strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
BNXT_VERSTR_SIZE);
strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
@@ -649,9 +795,9 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
}
else {
snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
"%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
resp->mgmt_fw_build);
snprintf(softc->ver_info->mgmt_fw_ver, FW_VER_STR_LEN,
"%d.%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
resp->mgmt_fw_build, resp->mgmt_fw_patch);
strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
BNXT_NAME_SIZE);
}
@@ -663,9 +809,9 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
BNXT_NAME_SIZE);
}
else {
snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
"%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
resp->netctrl_fw_build);
snprintf(softc->ver_info->netctrl_fw_ver, FW_VER_STR_LEN,
"%d.%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
resp->netctrl_fw_build, resp->netctrl_fw_patch);
strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
BNXT_NAME_SIZE);
}
@@ -676,8 +822,8 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
}
else {
snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
"%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
resp->roce_fw_build);
"%d.%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
resp->roce_fw_build, resp->roce_fw_patch);
strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
BNXT_NAME_SIZE);
}
@@ -719,7 +865,6 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
}
#define DFLT_HWRM_CMD_TIMEOUT 500
softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
if (!softc->hwrm_cmd_timeo)
softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
@@ -767,9 +912,9 @@ bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
req.ver_maj = __FreeBSD_version / 100000;
req.ver_min = (__FreeBSD_version / 1000) % 100;
req.ver_upd = (__FreeBSD_version / 100) % 10;
req.ver_maj = HWRM_VERSION_MAJOR;
req.ver_min = HWRM_VERSION_MINOR;
req.ver_upd = HWRM_VERSION_UPDATE;
return hwrm_send_message(softc, &req, sizeof(req));
}
@@ -832,6 +977,14 @@ bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
softc->flags |= BNXT_FLAG_FW_CAP_EXT_STATS;
/* Enable RoCE only on Thor devices */
if (BNXT_CHIP_P5(softc)) {
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED)
softc->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED)
softc->flags |= BNXT_FLAG_ROCEV2_CAP;
}
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
softc->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED)
@@ -932,6 +1085,7 @@ bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
struct hwrm_func_qcfg_output *resp =
(void *)softc->hwrm_cmd_resp.idi_vaddr;
struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
uint32_t min_db_offset = 0;
uint16_t flags;
int rc;
@@ -940,13 +1094,22 @@ bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (rc)
goto fail;
goto end;
fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_2:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
softc->port_partition_type = resp->port_partition_type;
break;
}
flags = le16toh(resp->flags);
if (flags & (HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED |
HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED)) {
@@ -964,7 +1127,26 @@ bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
softc->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV)
softc->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
fail:
if (softc->db_size)
goto end;
softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
if (BNXT_CHIP_P5(softc)) {
if (BNXT_PF(softc))
min_db_offset = DB_PF_OFFSET_P5;
else
min_db_offset = DB_VF_OFFSET_P5;
}
softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1024, PAGE_SIZE);
if (!softc->db_size || softc->db_size > pci_resource_len(softc->pdev, 2) ||
softc->db_size <= min_db_offset)
softc->db_size = pci_resource_len(softc->pdev, 2);
end:
BNXT_HWRM_UNLOCK(softc);
return rc;
}
@@ -1127,21 +1309,14 @@ bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
int
bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_plcmodes_cfg_input req = {0};
struct hwrm_vnic_plcmodes_cfg_input req = {0};
if (!BNXT_CHIP_P5(softc))
return 0;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
/*
* TBD -- Explore these flags
* 1. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4
* 2. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6
* 3. req.jumbo_thresh
* 4. req.hds_threshold
*/
req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
req.vnic_id = htole16(vnic->id);
return hwrm_send_message(softc, &req, sizeof(req));
@@ -2661,13 +2836,6 @@ int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
for (i = 0; i < softc->nrxqsets; i++) {
req = &req_rx;
/*
* TBD:
* Check if Tx also needs to be done
* So far, Tx processing has been done in softirq context
*
* req = &req_tx;
*/
req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
rc = hwrm_send_message(softc, req, sizeof(*req));


@@ -38,6 +38,8 @@
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
/* HWRM Function Prototypes */
int
hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len);
int bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc);
void bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc);
int bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
@@ -122,9 +124,12 @@ int bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc);
int bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc);
int bnxt_hwrm_set_coal(struct bnxt_softc *softc);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
int bmap_size);
int bmap_size);
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc);
int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t);
int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
struct bnxt_ctx_mem_type *ctxm,
bool last);
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all);
int bnxt_hwrm_reserve_pf_rings (struct bnxt_softc *softc);
void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,


@@ -1069,9 +1069,6 @@ bnxt_create_ver_sysctls(struct bnxt_softc *softc)
SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"driver_hwrm_if", CTLFLAG_RD, vi->driver_hwrm_if_ver, 0,
"HWRM firmware version");
SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"hwrm_fw", CTLFLAG_RD, vi->hwrm_fw_ver, 0,
"HWRM firmware version");
SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"mgmt_fw", CTLFLAG_RD, vi->mgmt_fw_ver, 0,
"management firmware version");


@@ -0,0 +1,524 @@
/*-
* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include "hsi_struct_def.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct bnxt_softc *bp = edev->softc;
struct bnxt_ulp *ulp;
int rc = 0;
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
mtx_lock(&bp->en_ops_lock);
ulp = &edev->ulp_tbl[ulp_id];
if (rcu_access_pointer(ulp->ulp_ops)) {
device_printf(bp->dev, "ulp id %d already registered\n", ulp_id);
rc = -EBUSY;
goto exit;
}
edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
atomic_set(&ulp->ref_count, 0);
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (ulp_id == BNXT_ROCE_ULP) {
if (test_bit(BNXT_STATE_OPEN, &bp->state) && bp->is_dev_init)
bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info);
}
exit:
mtx_unlock(&bp->en_ops_lock);
return rc;
}
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
struct bnxt_softc *bp = edev->softc;
struct bnxt_ulp *ulp;
int i = 0;
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
ulp = &edev->ulp_tbl[ulp_id];
if (!rcu_access_pointer(ulp->ulp_ops)) {
device_printf(bp->dev, "ulp id %d not registered\n", ulp_id);
return -EINVAL;
}
if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
edev->en_ops->bnxt_free_msix(edev, ulp_id);
mtx_lock(&bp->en_ops_lock);
RCU_INIT_POINTER(ulp->ulp_ops, NULL);
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
msleep(100);
i++;
}
mtx_unlock(&bp->en_ops_lock);
return 0;
}
static void bnxt_fill_msix_vecs(struct bnxt_softc *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
if (BNXT_CHIP_P5(bp))
ent[i].db_offset = DB_PF_OFFSET_P5;
else
ent[i].db_offset = (idx + i) * 0x80;
}
}
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct bnxt_softc *bp = edev->softc;
int avail_msix, idx;
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
if (edev->ulp_tbl[ulp_id].msix_requested)
return -EAGAIN;
idx = bp->total_irqs - BNXT_ROCE_IRQ_COUNT;
avail_msix = BNXT_ROCE_IRQ_COUNT;
mtx_lock(&bp->en_ops_lock);
edev->ulp_tbl[ulp_id].msix_base = idx;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
bnxt_fill_msix_vecs(bp, ent);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
mtx_unlock(&bp->en_ops_lock);
return avail_msix;
}
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
struct bnxt_softc *bp = edev->softc;
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return 0;
mtx_lock(&bp->en_ops_lock);
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
goto stopped;
stopped:
mtx_unlock(&bp->en_ops_lock);
return 0;
}
int bnxt_get_ulp_msix_num(struct bnxt_softc *bp)
{
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_en_dev *edev = bp->edev;
return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
}
return 0;
}
int bnxt_get_ulp_msix_base(struct bnxt_softc *bp)
{
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_en_dev *edev = bp->edev;
if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
}
return 0;
}
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
struct bnxt_softc *softc = edev->softc;
int rc;
if ((ulp_id != BNXT_ROCE_ULP) && softc->fw_reset_state)
return -EBUSY;
rc = bnxt_hwrm_passthrough(softc, fw_msg->msg, fw_msg->msg_len, fw_msg->resp,
fw_msg->resp_max_len, fw_msg->timeout);
return rc;
}
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
atomic_inc(&ulp->ref_count);
}
static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
atomic_dec(&ulp->ref_count);
}
void bnxt_ulp_stop(struct bnxt_softc *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
edev->en_state = bp->state;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = ulp->ulp_ops;
if (!ops || !ops->ulp_stop)
continue;
ops->ulp_stop(ulp->handle);
}
}
void bnxt_ulp_start(struct bnxt_softc *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
edev->en_state = bp->state;
if (err)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = ulp->ulp_ops;
if (!ops || !ops->ulp_start)
continue;
ops->ulp_start(ulp->handle);
}
}
void bnxt_ulp_sriov_cfg(struct bnxt_softc *bp, int num_vfs)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
rcu_read_lock();
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_sriov_config) {
rcu_read_unlock();
continue;
}
bnxt_ulp_get(ulp);
rcu_read_unlock();
ops->ulp_sriov_config(ulp->handle, num_vfs);
bnxt_ulp_put(ulp);
}
}
void bnxt_ulp_shutdown(struct bnxt_softc *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = ulp->ulp_ops;
if (!ops || !ops->ulp_shutdown)
continue;
ops->ulp_shutdown(ulp->handle);
}
}
void bnxt_ulp_irq_stop(struct bnxt_softc *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
if (!ulp->msix_requested)
return;
ops = ulp->ulp_ops;
if (!ops || !ops->ulp_irq_stop)
return;
ops->ulp_irq_stop(ulp->handle);
}
}
void bnxt_ulp_async_events(struct bnxt_softc *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
rcu_read_lock();
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_async_notifier)
continue;
if (!ulp->async_events_bmap ||
event_id > ulp->max_async_event_id)
continue;
/* Read max_async_event_id first before testing the bitmap. */
rmb();
if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
continue;
if (test_bit(event_id, ulp->async_events_bmap))
ops->ulp_async_notifier(ulp->handle, cmpl);
}
rcu_read_unlock();
}
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
unsigned long *events_bmap, u16 max_id)
{
struct bnxt_softc *bp = edev->softc;
struct bnxt_ulp *ulp;
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
mtx_lock(&bp->en_ops_lock);
ulp = &edev->ulp_tbl[ulp_id];
ulp->async_events_bmap = events_bmap;
wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp);
mtx_unlock(&bp->en_ops_lock);
return 0;
}
static void bnxt_destroy_irq(struct bnxt_softc *softc)
{
kfree(softc->irq_tbl);
}
static int bnxt_populate_irq(struct bnxt_softc *softc)
{
struct resource_list *rl = NULL;
struct resource_list_entry *rle = NULL;
struct bnxt_msix_tbl *irq_tbl = NULL;
struct pci_devinfo *dinfo = NULL;
int i;
softc->total_irqs = softc->scctx->isc_nrxqsets + BNXT_ROCE_IRQ_COUNT;
irq_tbl = kzalloc(softc->total_irqs * sizeof(*softc->irq_tbl), GFP_KERNEL);
if (!irq_tbl) {
device_printf(softc->dev, "Failed to allocate IRQ table\n");
return -1;
}
dinfo = device_get_ivars(softc->pdev->dev.bsddev);
rl = &dinfo->resources;
rle = resource_list_find(rl, SYS_RES_IRQ, 1);
softc->pdev->dev.irq_start = rle->start;
softc->pdev->dev.irq_end = rle->start + softc->total_irqs;
for (i = 0; i < softc->total_irqs; i++) {
irq_tbl[i].entry = i;
irq_tbl[i].vector = softc->pdev->dev.irq_start + i;
}
softc->irq_tbl = irq_tbl;
return 0;
}
static const struct bnxt_en_ops bnxt_en_ops_tbl = {
.bnxt_register_device = bnxt_register_dev,
.bnxt_unregister_device = bnxt_unregister_dev,
.bnxt_request_msix = bnxt_req_msix_vecs,
.bnxt_free_msix = bnxt_free_msix_vecs,
.bnxt_send_fw_msg = bnxt_send_msg,
.bnxt_register_fw_async_events = bnxt_register_async_events,
};
void bnxt_aux_dev_release(struct device *dev)
{
struct bnxt_aux_dev *bnxt_adev =
container_of(dev, struct bnxt_aux_dev, aux_dev.dev);
struct bnxt_softc *bp = bnxt_adev->edev->softc;
kfree(bnxt_adev->edev);
bnxt_adev->edev = NULL;
bp->edev = NULL;
}
static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt_softc *bp)
{
edev->en_ops = &bnxt_en_ops_tbl;
edev->net = bp->ifp;
edev->pdev = bp->pdev;
edev->softc = bp;
edev->l2_db_size = bp->db_size;
mtx_init(&bp->en_ops_lock, "Ethernet ops lock", NULL, MTX_DEF);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->is_asym_q)
edev->flags |= BNXT_EN_FLAG_ASYM_Q;
edev->hwrm_bar = bp->hwrm_bar;
edev->port_partition_type = bp->port_partition_type;
edev->ulp_version = BNXT_ULP_VERSION;
}
int bnxt_rdma_aux_device_del(struct bnxt_softc *softc)
{
struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
struct auxiliary_device *adev;
adev = &bnxt_adev->aux_dev;
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
bnxt_destroy_irq(softc);
return 0;
}
int bnxt_rdma_aux_device_add(struct bnxt_softc *bp)
{
struct bnxt_aux_dev *bnxt_adev = bp->aux_dev;
struct bnxt_en_dev *edev = bnxt_adev->edev;
struct auxiliary_device *aux_dev;
int ret = -1;
if (bnxt_populate_irq(bp))
return ret;
device_printf(bp->dev, "V:D:SV:SD %x:%x:%x:%x, irq 0x%x, "
"devfn 0x%x, cla 0x%x, rev 0x%x, msi_en 0x%x\n",
bp->pdev->vendor, bp->pdev->device, bp->pdev->subsystem_vendor,
bp->pdev->subsystem_device, bp->pdev->irq, bp->pdev->devfn,
bp->pdev->class, bp->pdev->revision, bp->pdev->msi_enabled);
aux_dev = &bnxt_adev->aux_dev;
aux_dev->id = bnxt_adev->id;
aux_dev->name = "rdma";
aux_dev->dev.parent = &bp->pdev->dev;
aux_dev->dev.release = bnxt_aux_dev_release;
if (!edev) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
}
bnxt_set_edev_info(edev, bp);
bnxt_adev->edev = edev;
bp->edev = edev;
ret = auxiliary_device_init(aux_dev);
if (ret)
goto err_free_edev;
ret = auxiliary_device_add(aux_dev);
if (ret)
goto err_dev_uninit;
return 0;
err_dev_uninit:
auxiliary_device_uninit(aux_dev);
err_free_edev:
kfree(edev);
bnxt_adev->edev = NULL;
bp->edev = NULL;
return ret;
}
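
The consumer of the "rdma" device created above is the RoCE driver, which
binds through the auxiliary bus. The sketch below shows what that side could
look like; the bnxt_re_* names, the probe/remove bodies, and the
"if_bnxt.rdma" match string (assumed to follow the Linux
"<parent module>.<device name>" convention via the compat layer) are
illustrative assumptions, not the actual bnxt_re sources.

#include "bnxt_ulp.h"
#include "bnxt_auxbus_compat.h"

static int
bnxt_re_probe(struct auxiliary_device *adev,
    const struct auxiliary_device_id *id)
{
	struct bnxt_aux_dev *bnxt_adev =
	    container_of(adev, struct bnxt_aux_dev, aux_dev);

	/* bnxt_adev->edev exposes the bnxt_en_ops call table set up above. */
	return (0);
}

static void
bnxt_re_remove(struct auxiliary_device *adev)
{
	/* Undo whatever probe set up. */
}

static const struct auxiliary_device_id bnxt_re_id_table[] = {
	{ .name = "if_bnxt.rdma" },	/* assumed match string */
	{},
};

static struct auxiliary_driver bnxt_re_driver = {
	.name = "rdma",
	.probe = bnxt_re_probe,
	.remove = bnxt_re_remove,
	.id_table = bnxt_re_id_table,
};

Registering this with auxiliary_driver_register(&bnxt_re_driver) at module
load is what pairs the two drivers.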

@@ -0,0 +1,161 @@
/*-
* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
#include <linux/rcupdate.h>
#include "bnxt.h"
#define BNXT_ROCE_ULP 0
#define BNXT_OTHER_ULP 1
#define BNXT_MAX_ULP 2
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
struct hwrm_async_event_cmpl;
struct bnxt_softc;
struct bnxt_bar_info;
struct bnxt_msix_entry {
uint32_t vector;
uint32_t ring_idx;
uint32_t db_offset;
};
struct bnxt_ulp_ops {
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
void (*ulp_stop)(void *);
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
void (*ulp_shutdown)(void *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
struct bnxt_fw_msg {
void *msg;
int msg_len;
void *resp;
int resp_max_len;
int timeout;
};
struct bnxt_ulp {
void *handle;
struct bnxt_ulp_ops __rcu *ulp_ops;
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
u16 msix_base;
atomic_t ref_count;
};
struct bnxt_en_dev {
struct ifnet *net;
struct pci_dev *pdev;
struct bnxt_softc *softc;
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_ASYM_Q 0x10
#define BNXT_EN_FLAG_MULTI_HOST 0x20
#define BNXT_EN_ASYM_Q(edev) ((edev)->flags & BNXT_EN_FLAG_ASYM_Q)
#define BNXT_EN_MH(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_HOST)
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
*/
int l2_db_size_nc; /* Doorbell BAR size in
* bytes mapped as non-
* cacheable.
*/
u32 ulp_version; /* bnxt_re checks the
* ulp_version is correct
* to ensure compatibility
* with bnxt_en.
*/
#define BNXT_ULP_VERSION 0x695a0008 /* Change this when any interface
* structure or API changes
* between bnxt_en and bnxt_re.
*/
unsigned long en_state;
void __iomem *bar0;
u16 hw_ring_stats_size;
u16 pf_port_id;
u8 port_partition_type;
#define BNXT_EN_NPAR(edev) ((edev)->port_partition_type)
u8 port_count;
struct bnxt_dbr *en_dbr;
struct bnxt_bar_info hwrm_bar;
u32 espeed;
};
struct bnxt_en_ops {
int (*bnxt_register_device)(struct bnxt_en_dev *, int,
struct bnxt_ulp_ops *, void *);
int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
struct bnxt_msix_entry *, int);
int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
struct bnxt_fw_msg *);
int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
unsigned long *, u16);
int (*bnxt_dbr_complete)(struct bnxt_en_dev *, int, u32);
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
{
if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt_softc *bp);
int bnxt_get_ulp_msix_base(struct bnxt_softc *bp);
int bnxt_get_ulp_stat_ctxs(struct bnxt_softc *bp);
void bnxt_ulp_stop(struct bnxt_softc *bp);
void bnxt_ulp_start(struct bnxt_softc *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt_softc *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt_softc *bp);
void bnxt_ulp_irq_stop(struct bnxt_softc *bp);
void bnxt_ulp_irq_restart(struct bnxt_softc *bp, int err);
void bnxt_ulp_async_events(struct bnxt_softc *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
void bnxt_aux_dev_release(struct device *dev);
int bnxt_rdma_aux_device_add(struct bnxt_softc *bp);
int bnxt_rdma_aux_device_del(struct bnxt_softc *bp);
#endif
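
Taken together, this header is the whole L2-to-RoCE contract. The fragment
below sketches how a ULP could drive it; the my_ulp_* names, the 256-bit
bitmap size, and the choice of event are assumptions for illustration only.

#include "bnxt_ulp.h"

static void
my_ulp_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
	/* Invoked for every async event whose bit is set in the bitmap. */
}

static struct bnxt_ulp_ops my_ulp_ops = {
	.ulp_async_notifier = my_ulp_async_notifier,
};

static DECLARE_BITMAP(my_async_events, 256);	/* size is an assumption */

static int
my_ulp_attach(struct bnxt_en_dev *edev, void *handle)
{
	struct bnxt_msix_entry entries[BNXT_MIN_ROCE_CP_RINGS];
	int rc;

	rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
	    &my_ulp_ops, handle);
	if (rc)
		return (rc);

	/* Ask to be notified of firmware error reports. */
	set_bit(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, my_async_events);
	edev->en_ops->bnxt_register_fw_async_events(edev, BNXT_ROCE_ULP,
	    my_async_events, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT);

	return (edev->en_ops->bnxt_request_msix(edev, BNXT_ROCE_ULP,
	    entries, BNXT_MIN_ROCE_CP_RINGS));
}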

@@ -12069,7 +12069,12 @@ typedef struct hwrm_async_event_cmpl_error_report_base {
* thresholds.
*/
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD UINT32_C(0x5)
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
/*
* Speed change not supported with dual rate transceivers
* on this board.
*/
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED UINT32_C(0x6)
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
} hwrm_async_event_cmpl_error_report_base_t, *phwrm_async_event_cmpl_error_report_base_t;
#define GET_ERROR_REPORT_TYPE(x) \

@@ -40,7 +40,6 @@
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -49,6 +48,14 @@
#include <net/ethernet.h>
#include <net/iflib.h>
#include <linux/pci.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
@@ -61,6 +68,8 @@
#include "bnxt_sysctl.h"
#include "hsi_struct_def.h"
#include "bnxt_mgmt.h"
#include "bnxt_ulp.h"
#include "bnxt_auxbus_compat.h"
/*
* PCI Device ID Table
@@ -225,6 +234,8 @@ static int bnxt_wol_config(if_ctx_t ctx);
static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
static void bnxt_get_port_module_status(struct bnxt_softc *softc);
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
/*
* Device Interface Declaration
@@ -248,12 +259,17 @@ static driver_t bnxt_driver = {
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);
MODULE_DEPEND(bnxt, pci, 1, 1, 1);
MODULE_DEPEND(bnxt, ether, 1, 1, 1);
MODULE_DEPEND(bnxt, iflib, 1, 1, 1);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
static DEFINE_IDA(bnxt_aux_dev_ids);
static device_method_t bnxt_iflib_methods[] = {
DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
@@ -331,10 +347,11 @@ static struct if_shared_ctx bnxt_sctx_init = {
.isc_ntxd_min = {16, 16, 16},
.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
PAGE_SIZE / sizeof(struct tx_bd_short),
PAGE_SIZE / sizeof(struct cmpl_base) * 2},
/* NQ depth 4096 */
PAGE_SIZE / sizeof(struct cmpl_base) * 16},
.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},
.isc_admin_intrcnt = 1,
.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
.isc_vendor_info = bnxt_vendor_info_array,
.isc_driver_version = bnxt_driver_version,
};
@@ -770,26 +787,43 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
return rc;
}
static void bnxt_free_ring(struct bnxt_softc *bp, struct bnxt_ring_mem_info *rmem)
static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
int i;
int i;
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i].idi_vaddr)
continue;
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i].idi_vaddr)
continue;
iflib_dma_free(&rmem->pg_arr[i]);
rmem->pg_arr[i].idi_vaddr = NULL;
}
if (rmem->pg_tbl.idi_vaddr) {
rmem->pg_arr[i].idi_vaddr = NULL;
}
if (rmem->pg_tbl.idi_vaddr) {
iflib_dma_free(&rmem->pg_tbl);
rmem->pg_tbl.idi_vaddr = NULL;
rmem->pg_tbl.idi_vaddr = NULL;
}
if (rmem->vmem_size && *rmem->vmem) {
free(*rmem->vmem, M_DEVBUF);
*rmem->vmem = NULL;
}
}
if (rmem->vmem_size && *rmem->vmem) {
free(*rmem->vmem, M_DEVBUF);
*rmem->vmem = NULL;
}
}
static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
u8 init_val = ctxm->init_value;
u16 offset = ctxm->init_offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val;
}
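
/*
 * Example with hypothetical numbers: entry_size = 64, init_offset = 8 and
 * init_value = 0xff makes the loop above write 0xff at byte 8 of every
 * 64-byte entry; init_offset == BNXT_CTX_INIT_INVALID_OFFSET fills the
 * whole block with 0xff instead.
 */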
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
@@ -820,8 +854,9 @@ static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *
if (rc)
return -ENOMEM;
if (rmem->init_val)
memset(rmem->pg_arr[i].idi_vaddr, rmem->init_val, rmem->page_size);
if (rmem->ctx_mem)
bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -844,11 +879,12 @@ static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *
return 0;
}
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
@@ -866,14 +902,14 @@
}
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
struct bnxt_ctx_pg_info *ctx_pg, uint32_t mem_size,
uint8_t depth, bool use_init_val)
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
if (!mem_size)
return 0;
return -EINVAL;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -884,8 +920,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
int nr_tbls, i;
rmem->depth = 2;
ctx_pg->ctx_pg_tbl = malloc(MAX_CTX_PAGES * sizeof(ctx_pg),
M_DEVBUF, M_NOWAIT | M_ZERO);
ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
GFP_KERNEL);
if (!ctx_pg->ctx_pg_tbl)
return -ENOMEM;
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
@@ -896,7 +932,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
for (i = 0; i < nr_tbls; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
pg_tbl = malloc(sizeof(*pg_tbl), M_DEVBUF, M_NOWAIT | M_ZERO);
pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
if (!pg_tbl)
return -ENOMEM;
ctx_pg->ctx_pg_tbl[i] = pg_tbl;
@@ -904,8 +940,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
if (use_init_val)
rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -920,8 +955,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
if (use_init_val)
rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
}
return rc;
@@ -949,48 +983,78 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
free(pg_tbl , M_DEVBUF);
ctx_pg->ctx_pg_tbl[i] = NULL;
}
free(ctx_pg->ctx_pg_tbl , M_DEVBUF);
kfree(ctx_pg->ctx_pg_tbl);
ctx_pg->ctx_pg_tbl = NULL;
}
bnxt_free_ring(softc, rmem);
ctx_pg->nr_pages = 0;
}
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
struct bnxt_ctx_mem_type *ctxm, u32 entries,
u8 pg_lvl)
{
struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
int i, rc = 0, n = 1;
u32 mem_size;
if (!ctxm->entry_size || !ctx_pg)
return -EINVAL;
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
if (ctxm->entry_multiple)
entries = roundup(entries, ctxm->entry_multiple);
entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
mem_size = entries * ctxm->entry_size;
for (i = 0; i < n && !rc; i++) {
ctx_pg[i].entries = entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
}
return rc;
}
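
/*
 * Sizing example with hypothetical numbers: entries = 1000 and
 * entry_multiple = 16 round up to 1008; a 32/65536 min/max clamp leaves
 * 1008, so mem_size = 1008 * entry_size is what bnxt_alloc_ctx_pg_tbls()
 * backs with pages, once per bit set in instance_bmap.
 */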
static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
{
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
int i;
u16 type;
if (!ctx)
return;
if (ctx->tqm_mem[0]) {
for (i = 0; i < softc->rx_max_q + 1; i++) {
if (!ctx->tqm_mem[i])
continue;
bnxt_free_ctx_pg_tbls(softc, ctx->tqm_mem[i]);
}
free(ctx->tqm_mem[0] , M_DEVBUF);
ctx->tqm_mem[0] = NULL;
for (type = 0; type < BNXT_CTX_MAX; type++) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
int i, n = 1;
if (!ctx_pg)
continue;
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (i = 0; i < n; i++)
bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
kfree(ctx_pg);
ctxm->pg_info = NULL;
}
bnxt_free_ctx_pg_tbls(softc, &ctx->tim_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->mrav_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->stat_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->vnic_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->cq_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->srq_mem);
bnxt_free_ctx_pg_tbls(softc, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
free(softc->ctx_mem, M_DEVBUF);
softc->ctx_mem = NULL;
kfree(ctx);
softc->ctx = NULL;
}
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
uint32_t mem_size, ena, entries;
u32 l2_qps, qp1_qps, max_qps;
u32 ena, entries_sp, entries;
u32 srqs, max_srqs, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
u8 pg_lvl = 1;
int i, rc;
if (!BNXT_CHIP_P5(softc))
@@ -1006,97 +1070,106 @@ static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
(1024 * 64); /* FIXME: Enable 64K QPs */
mem_size = ctx->qp_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
l2_qps = ctxm->qp_l2_entries;
qp1_qps = ctxm->qp_qp1_entries;
max_qps = ctxm->max_entries;
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
srqs = ctxm->srq_l2_entries;
max_srqs = ctxm->max_entries;
if (softc->flags & BNXT_FLAG_ROCE_CAP) {
pg_lvl = 2;
extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
extra_srqs = min_t(u32, 8192, max_srqs - srqs);
}
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
/* FIXME: Temporarily enable 8K RoCE SRQs */
ctx_pg->entries = ctx->srq_max_l2_entries + (1024 * 8);
mem_size = ctx->srq_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
/* FIXME: Temporarily enable 64K RoCE CQ */
ctx_pg->entries = ctx->cq_max_l2_entries + (1024 * 64 * 2);
mem_size = ctx->cq_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
extra_qps * 2, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
if (rc)
return rc;
ctx_pg = &ctx->mrav_mem;
/* FIXME: Temporarily enable 256K RoCE MRs */
ctx_pg->entries = 1024 * 256;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
ena = 0;
if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
ctx_pg = ctxm->pg_info;
	/* 128K extra is needed to accommodate static AH context
* allocation by f/w.
*/
num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
num_ah = min_t(u32, num_mr, 1024 * 128);
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
if (rc)
return rc;
ctx_pg->entries = num_mr + num_ah;
ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
if (ctxm->mrav_num_entries_units)
ctx_pg->entries =
((num_mr / ctxm->mrav_num_entries_units) << 16) |
(num_ah / ctxm->mrav_num_entries_units);
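	/* In units mode the value packs the MR count (high 16 bits) and the
	 * AH count (low 16 bits), both expressed in mrav_num_entries_units.
	 */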
ctx_pg = &ctx->tim_mem;
/* Firmware needs number of TIM entries equal to
* number of Total QP contexts enabled, including
* L2 QPs.
*/
ctx_pg->entries = ctx->qp_min_qp1_entries +
ctx->qp_max_l2_entries + 1024 * 64;
/* FIXME: L2 driver is not able to create queue depth
* worth of 1M 32bit timers. Need a fix when l2-roce
* interface is well designed.
*/
mem_size = ctx->tim_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
if (rc)
return rc;
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
/* FIXME: Temporarily increase the TQM queue depth
* by 1K for 1K RoCE QPs.
*/
entries = ctx->qp_max_l2_entries + 1024 * 64;
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
for (i = 0; i < softc->rx_max_q + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
skip_rdma:
ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
min = ctxm->min_entries;
entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
2 * (extra_qps + qp1_qps) + min;
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
if (rc)
return rc;
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
entries = l2_qps + 2 * (extra_qps + qp1_qps);
rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
if (rc)
return rc;
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
if (i < BNXT_MAX_TQM_LEGACY_RINGS)
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
else
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
}
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
if (rc)
if (rc) {
device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
rc);
else
ctx->flags |= BNXT_CTX_FLAG_INITED;
return rc;
}
ctx->flags |= BNXT_CTX_FLAG_INITED;
return 0;
}
/*
* If we update the index, a write barrier is needed after the write to ensure
* the completion ring has space before the RX/TX ring does. Since we can't
@@ -1317,7 +1390,6 @@ bnxt_attach_pre(if_ctx_t ctx)
break;
}
#define PCI_DEVFN(device, func) ((((device) & 0x1f) << 3) | ((func) & 0x07))
softc->domain = pci_get_domain(softc->dev);
softc->bus = pci_get_bus(softc->dev);
softc->slot = pci_get_slot(softc->dev);
@@ -1332,8 +1404,24 @@ bnxt_attach_pre(if_ctx_t ctx)
pci_enable_busmaster(softc->dev);
if (bnxt_pci_mapping(softc))
return (ENXIO);
if (bnxt_pci_mapping(softc)) {
device_printf(softc->dev, "PCI mapping failed\n");
rc = ENXIO;
goto pci_map_fail;
}
softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
if (!softc->pdev) {
device_printf(softc->dev, "pdev alloc failed\n");
rc = -ENOMEM;
goto free_pci_map;
}
rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
if (rc) {
device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
goto pci_attach_fail;
}
/* HWRM setup/init */
BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
@@ -1448,6 +1536,11 @@ bnxt_attach_pre(if_ctx_t ctx)
memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
}
/* Get the HW capabilities */
rc = bnxt_hwrm_func_qcaps(softc);
if (rc)
goto failed;
if (softc->hwrm_spec_code >= 0x10803) {
rc = bnxt_alloc_ctx_mem(softc);
if (rc) {
@@ -1459,11 +1552,6 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
}
/* Get the HW capabilities */
rc = bnxt_hwrm_func_qcaps(softc);
if (rc)
goto failed;
/* Get the current configuration of this function */
rc = bnxt_hwrm_func_qcfg(softc);
if (rc) {
@@ -1637,7 +1725,14 @@ bnxt_attach_pre(if_ctx_t ctx)
bnxt_free_hwrm_dma_mem(softc);
dma_fail:
BNXT_HWRM_LOCK_DESTROY(softc);
if (softc->pdev)
linux_pci_detach_device(softc->pdev);
pci_attach_fail:
kfree(softc->pdev);
softc->pdev = NULL;
free_pci_map:
bnxt_pci_mapping_free(softc);
pci_map_fail:
pci_disable_busmaster(softc->dev);
return (rc);
}
@@ -1649,6 +1744,7 @@ bnxt_attach_post(if_ctx_t ctx)
if_t ifp = iflib_get_ifp(ctx);
int rc;
softc->ifp = ifp;
bnxt_create_config_sysctls_post(softc);
/* Update link state etc... */
@@ -1658,6 +1754,7 @@ bnxt_attach_post(if_ctx_t ctx)
/* Needs to be done after probing the phy */
bnxt_create_ver_sysctls(softc);
ifmedia_removeall(softc->media);
bnxt_add_media_types(softc);
ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
@@ -1665,6 +1762,8 @@ bnxt_attach_post(if_ctx_t ctx)
ETHER_CRC_LEN;
softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
bnxt_dcb_init(softc);
bnxt_rdma_aux_device_init(softc);
failed:
return rc;
@@ -1678,6 +1777,8 @@ bnxt_detach(if_ctx_t ctx)
struct bnxt_vlan_tag *tmp;
int i;
bnxt_rdma_aux_device_uninit(softc);
bnxt_dcb_free(softc);
SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
bnxt_num_pfs--;
bnxt_wol_config(ctx);
@@ -1715,6 +1816,8 @@ bnxt_detach(if_ctx_t ctx)
bnxt_free_hwrm_short_cmd_req(softc);
BNXT_HWRM_LOCK_DESTROY(softc);
if (softc->pdev)
linux_pci_detach_device(softc->pdev);
free(softc->state_bv, M_DEVBUF);
pci_disable_busmaster(softc->dev);
bnxt_pci_mapping_free(softc);
@@ -1868,6 +1971,78 @@ static void bnxt_get_port_module_status(struct bnxt_softc *softc)
}
}
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
{
kfree(softc->aux_dev);
softc->aux_dev = NULL;
}
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
{
struct bnxt_aux_dev *bnxt_adev;
msleep(1000 * 2);
bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
if (!bnxt_adev)
return ERR_PTR(-ENOMEM);
return bnxt_adev;
}
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
{
struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
/* Skip if no auxiliary device init was done. */
if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
return;
if (IS_ERR_OR_NULL(bnxt_adev))
return;
bnxt_rdma_aux_device_del(softc);
if (bnxt_adev->id >= 0)
ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);
bnxt_aux_dev_free(softc);
}
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
{
int rc;
if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
return;
softc->aux_dev = bnxt_aux_dev_init(softc);
if (IS_ERR_OR_NULL(softc->aux_dev)) {
device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
goto skip_aux_init;
}
softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
if (softc->aux_dev->id < 0) {
device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
bnxt_aux_dev_free(softc);
goto skip_aux_init;
}
msleep(1000 * 2);
/* If aux bus init fails, continue with netdev init. */
rc = bnxt_rdma_aux_device_add(softc);
	if (rc) {
		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
		msleep(1000 * 2);
		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
		bnxt_aux_dev_free(softc);
		goto skip_aux_init;
	}
	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE\n",
	    __func__, __LINE__, softc->aux_dev->id);
skip_aux_init:
return;
}
/* Device configuration */
static void
bnxt_init(if_ctx_t ctx)
@@ -2032,7 +2207,6 @@ bnxt_init(if_ctx_t ctx)
bnxt_get_port_module_status(softc);
bnxt_media_status(softc->ctx, &ifmr);
bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
bnxt_dcb_init(softc);
return;
fail:
@@ -2047,7 +2221,6 @@ bnxt_stop(if_ctx_t ctx)
struct bnxt_softc *softc = iflib_get_softc(ctx);
softc->is_dev_init = false;
bnxt_dcb_free(softc);
bnxt_do_disable_intr(&softc->def_cp_ring);
bnxt_func_reset(softc);
bnxt_clear_ids(softc);
@@ -3320,6 +3493,41 @@ bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
return rc;
}
#define ETHTOOL_SPEED_1000 1000
#define ETHTOOL_SPEED_10000 10000
#define ETHTOOL_SPEED_20000 20000
#define ETHTOOL_SPEED_25000 25000
#define ETHTOOL_SPEED_40000 40000
#define ETHTOOL_SPEED_50000 50000
#define ETHTOOL_SPEED_100000 100000
#define ETHTOOL_SPEED_200000 200000
#define ETHTOOL_SPEED_UNKNOWN -1
static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
switch (fw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
return ETHTOOL_SPEED_1000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
return ETHTOOL_SPEED_10000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
return ETHTOOL_SPEED_20000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
return ETHTOOL_SPEED_25000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
return ETHTOOL_SPEED_40000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
return ETHTOOL_SPEED_50000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
return ETHTOOL_SPEED_100000;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
return ETHTOOL_SPEED_200000;
default:
return ETHTOOL_SPEED_UNKNOWN;
}
}
void
bnxt_report_link(struct bnxt_softc *softc)
{
@@ -3327,6 +3535,10 @@ bnxt_report_link(struct bnxt_softc *softc)
const char *duplex = NULL, *flow_ctrl = NULL;
const char *signal_mode = "";
	if (softc->edev)
softc->edev->espeed =
bnxt_fw_to_ethtool_speed(link_info->link_speed);
if (link_info->link_up == link_info->last_link_up) {
if (!link_info->link_up)
return;
@@ -3459,12 +3671,96 @@ bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
cmp[i].info3_v = !cpr->v_bit;
}
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
switch (err_type) {
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
device_printf(softc->dev,
"1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
device_printf(softc->dev,
"Pause Storm detected!\n");
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
device_printf(softc->dev,
"One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
BNXT_EVENT_DBR_EPOCH(data1));
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
const char *nvm_err_str;
if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
nvm_err_str = "nvm write error";
else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
nvm_err_str = "nvm erase error";
else
nvm_err_str = "unrecognized nvm error";
device_printf(softc->dev,
"%s reported at address 0x%x\n", nvm_err_str,
(u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
break;
}
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
char *threshold_type;
char *dir_str;
switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
threshold_type = "warning";
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
threshold_type = "critical";
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
threshold_type = "fatal";
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
threshold_type = "shutdown";
break;
default:
device_printf(softc->dev,
"Unknown Thermal threshold type event\n");
return;
}
if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
dir_str = "above";
else
dir_str = "below";
device_printf(softc->dev,
"Chip temperature has gone %s the %s thermal threshold!\n",
dir_str, threshold_type);
device_printf(softc->dev,
"Temperature (In Celsius), Current: %u, threshold: %u\n",
BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
break;
}
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
device_printf(softc->dev,
"Speed change is not supported with dual rate transceivers on this board\n");
break;
default:
device_printf(softc->dev,
"FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
err_type, data1, data2);
break;
}
}
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
struct hwrm_async_event_cmpl *ae = (void *)cmpl;
uint16_t async_id = le16toh(ae->event_id);
struct ifmediareq ifmr;
u32 data1 = le32toh(ae->event_data1);
u32 data2 = le32toh(ae->event_data2);
switch (async_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -3475,6 +3771,13 @@ bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
else
bnxt_media_status(softc->ctx, &ifmr);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
bnxt_event_error_report(softc, data1, data2);
goto async_event_process_exit;
}
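	/* No L2-side action for DBR pacing events; they still reach the
	 * RoCE driver via the bnxt_ulp_async_events() fan-out below.
	 */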
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
@@ -3496,6 +3799,8 @@ bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
"Unknown async completion type %u\n", async_id);
break;
}
async_event_process_exit:
bnxt_ulp_async_events(softc, ae);
}
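
On the ULP side, the bnxt_ulp_async_events() call above hands the same
completion to the registered notifier. A minimal sketch of such a callback
follows; the bnxt_re_async_notifier name and the per-event actions are
assumptions, and the decoding simply mirrors the L2 handler.

static void
bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
	uint16_t event_id = le16toh(cmpl->event_id);
	uint32_t data1 = le32toh(cmpl->event_data1);

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
		/* e.g. tighten the doorbell pacing rate based on data1 */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT:
		/* e.g. log the error type carried in data1 */
		break;
	default:
		break;
	}
	(void)data1;
}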
static void

@@ -3,14 +3,18 @@
KMOD = if_bnxt
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h
SRCS += bnxt_txrx.c if_bnxt.c
SRCS += bnxt_hwrm.c
SRCS += bnxt_sysctl.c
SRCS += bnxt_mgmt.c
SRCS += bnxt_dcb.c bnxt_dcb.h
SRCS += bnxt_auxbus_compat.c bnxt_auxbus_compat.h
SRCS += bnxt_ulp.c bnxt_ulp.h
SRCS += ${LINUXKPI_GENSRCS}
CFLAGS+= -DIFLIB
CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
.include <bsd.kmod.mk>