Merge branch 'bnxt_en-updates-for-net-next'

Michael Chan says:

====================
bnxt_en: Updates for net-next

The first patch prevents a driver crash when RSS contexts are
configured in ifdown state.  Patches 2 to 6 are improvements for
managing MSIX for the aux device (for RoCE).  The existing
scheme statically carves out the MSIX vectors for RoCE even if
the RoCE driver is not loaded.  The new scheme adds flexibility
and allows the L2 driver to use the RoCE MSIX vectors when the
RoCE driver is not using them.  The last patch updates
the MODULE_DESCRIPTION().
====================

Link: https://lore.kernel.org/r/20240409215431.41424-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by: Jakub Kicinski <kuba@kernel.org>, 2024-04-10 19:55:11 -07:00
commit 872c00cc2b
5 changed files with 186 additions and 84 deletions
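
The core of the new MSI-X scheme is the ring-to-vector mapping change in
bnxt_alloc_cp_rings() in the first file below.  As a reading aid only (this is
not driver code), the index math before and after the series can be sketched
as a small stand-alone C program; the values passed in main() are arbitrary.

```c
#include <stdio.h>

/* Old scheme: L2 rings at or above the RoCE carve-out (ulp_base_vec) are
 * pushed up by ulp_msix, leaving a static hole for RoCE vectors even when
 * no RoCE driver is loaded.
 */
int old_map_idx(int i, int ulp_msix, int ulp_base_vec)
{
	if (ulp_msix && i >= ulp_base_vec)
		return i + ulp_msix;
	return i;
}

/* New scheme: RoCE vectors, if any, sit at the front of the IRQ table, so
 * every L2 ring maps to ulp_msix + i.  When the RoCE driver is not
 * registered, ulp_msix can shrink and the L2 driver reuses those vectors.
 */
int new_map_idx(int i, int ulp_msix)
{
	return ulp_msix + i;
}

int main(void)
{
	int i;

	/* Example: 2 RoCE vectors, old carve-out starting at slot 4. */
	for (i = 0; i < 6; i++)
		printf("ring %d: old map_idx=%d new map_idx=%d\n",
		       i, old_map_idx(i, 2, 4), new_map_idx(i, 2));
	return 0;
}
```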

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -76,7 +76,7 @@
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -3905,13 +3905,12 @@ static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, j, rc, ulp_base_vec, ulp_msix;
int i, j, rc, ulp_msix;
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr, *cpr2;
@@ -3930,10 +3929,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
if (ulp_msix && i >= ulp_base_vec)
ring->map_idx = i + ulp_msix;
else
ring->map_idx = i;
ring->map_idx = ulp_msix + i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
@@ -7347,17 +7343,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
int cp = bp->cp_nr_rings;
int ulp_msix, ulp_base;
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (ulp_msix) {
ulp_base = bnxt_get_ulp_msix_base(bp);
cp += ulp_msix;
if ((ulp_base + ulp_msix) > cp)
cp = ulp_base + ulp_msix;
}
return cp;
return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
@@ -7373,16 +7359,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
int cp = bp->cp_nr_rings;
if (!ulp_stat)
return cp;
if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
return bnxt_get_ulp_msix_base(bp) + ulp_stat;
return cp + ulp_stat;
return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
}
static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
@@ -7493,14 +7470,27 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
int cp = bp->cp_nr_rings;
int rx_rings, rc;
int ulp_msix = 0;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
hwr.cp = bnxt_nq_rings_in_use(bp);
if (!bnxt_ulp_registered(bp->edev)) {
ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
if (!ulp_msix)
bnxt_set_ulp_stat_ctxs(bp, 0);
if (ulp_msix > bp->ulp_num_msix_want)
ulp_msix = bp->ulp_num_msix_want;
hwr.cp = cp + ulp_msix;
} else {
hwr.cp = bnxt_nq_rings_in_use(bp);
}
hwr.tx = bp->tx_nr_rings;
hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -7572,6 +7562,19 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!netif_is_rxfh_configured(bp->dev))
bnxt_set_dflt_rss_indir_tbl(bp, NULL);
if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
int resv_msix, resv_ctx, ulp_ctxs;
struct bnxt_hw_resc *hw_resc;
hw_resc = &bp->hw_resc;
resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
ulp_msix = min_t(int, resv_msix, ulp_msix);
bnxt_set_ulp_msix_num(bp, ulp_msix);
resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
}
return rc;
}
@@ -10618,13 +10621,23 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
int tcs = bp->num_tc;
int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
if (irq_re_init && BNXT_NEW_RM(bp) &&
bnxt_get_num_msix(bp) != bp->total_irqs) {
if (!bnxt_ulp_registered(bp->edev)) {
int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
if (ulp_msix > bp->ulp_num_msix_want)
ulp_msix = bp->ulp_num_msix_want;
irqs_required = ulp_msix + bp->cp_nr_rings;
} else {
irqs_required = bnxt_get_num_msix(bp);
}
if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
irq_cleared = true;
@@ -13634,8 +13647,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
hwr.cp += bnxt_get_ulp_msix_num(bp);
hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
hwr.grp = rx;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
@@ -14809,10 +14822,13 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
bnxt_rdma_aux_device_uninit(bp);
bnxt_rdma_aux_device_del(bp);
bnxt_ptp_clear(bp);
unregister_netdev(dev);
bnxt_rdma_aux_device_uninit(bp);
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -14905,8 +14921,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
bnxt_get_ulp_msix_num(bp),
hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
bnxt_get_ulp_msix_num_in_use(bp),
hw_resc->max_stat_ctxs -
bnxt_get_ulp_stat_ctxs_in_use(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -15002,6 +15019,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -15029,6 +15047,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
bnxt_set_ulp_msix_num(bp, ulp_num_msix);
bnxt_set_dflt_ulp_stat_ctxs(bp);
}
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
@@ -15378,6 +15404,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV) {
@@ -15431,13 +15458,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bnxt_init_multi_rss_ctx(bp);
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
bnxt_dl_fw_reporters_create(bp);
bnxt_rdma_aux_device_init(bp);
bnxt_rdma_aux_device_add(bp);
bnxt_print_device_info(bp);
@@ -15445,6 +15473,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_cleanup:
bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);

drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -2303,6 +2303,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB

drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c

@@ -1876,6 +1876,11 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
return -EOPNOTSUPP;
}
if (!netif_running(bp->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
return -EAGAIN;
}
if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
if (!rss_ctx) {

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

@@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
int num_msix, i;
if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
ent[i].vector = bp->irq_tbl[i].vector;
ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
ent[i].db_offset = (idx + i) * 0x80;
ent[i].db_offset = i * 0x80;
}
}
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
if (bp->edev)
return bp->edev->ulp_num_msix_vec;
return 0;
}
void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
if (bp->edev)
bp->edev->ulp_num_msix_vec = num;
}
int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev))
return bp->edev->ulp_num_msix_vec;
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
if (bp->edev)
return bp->edev->ulp_num_ctxs;
return 0;
}
void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
{
if (bp->edev)
bp->edev->ulp_num_ctxs = num_ulp_ctx;
}
int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev))
return bp->edev->ulp_num_ctxs;
return 0;
}
void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
if (bp->edev) {
bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
/* Reserve one additional stat_ctx for PF0 (except
* on 1-port NICs) as it also creates one stat_ctx
* for PF1 in case of RoCE bonding.
*/
if (BNXT_PF(bp) && !bp->pf.port_id &&
bp->port_count > 1)
bp->edev->ulp_num_ctxs++;
}
}
@@ -57,25 +110,34 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
int rc = 0;
rtnl_lock();
if (!bp->irq_tbl) {
rc = -ENODEV;
goto exit;
}
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
bp->cp_nr_rings == max_stat_ctxs)
return -ENOMEM;
bp->cp_nr_rings == max_stat_ctxs) {
rc = -ENOMEM;
goto exit;
}
ulp = edev->ulp_tbl;
if (!ulp)
return -ENOMEM;
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return 0;
exit:
rtnl_unlock();
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -87,8 +149,10 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
rtnl_lock();
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -101,11 +165,12 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
rtnl_unlock();
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
int bnxt_get_ulp_msix_num(struct bnxt *bp)
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
u32 roce_msix = BNXT_VF(bp) ?
BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
@@ -114,29 +179,6 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp)
min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
if (edev->ulp_tbl->msix_requested)
return edev->ulp_tbl->msix_base;
}
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
if (edev->ulp_tbl->msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
@@ -306,7 +348,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
@@ -324,6 +365,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}
void bnxt_rdma_aux_device_del(struct bnxt *bp)
{
if (!bp->edev)
return;
auxiliary_device_delete(&bp->aux_priv->aux_dev);
}
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -344,7 +393,23 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
}
void bnxt_rdma_aux_device_add(struct bnxt *bp)
{
struct auxiliary_device *aux_dev;
int rc;
if (!bp->edev)
return;
aux_dev = &bp->aux_priv->aux_dev;
rc = auxiliary_device_add(aux_dev);
if (rc) {
netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
auxiliary_device_uninit(aux_dev);
bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -400,13 +465,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
rc = auxiliary_device_add(aux_dev);
if (rc) {
netdev_warn(bp->dev,
"Failed to add auxiliary device for ROCE\n");
goto aux_dev_uninit;
}
bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
return;

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h

@@ -46,7 +46,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
u16 msix_base;
atomic_t ref_count;
};
@@ -86,18 +85,25 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
u16 ulp_num_msix_vec;
u16 ulp_num_ctxs;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
if (edev && edev->ulp_tbl)
if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,6 +111,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
void bnxt_rdma_aux_device_del(struct bnxt *bp);
void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);