Merge branch 'bnxt_en-error-recovery'

Michael Chan says:

====================
bnxt_en: Error recovery fixes.

This series adds some fixes and enhancements to the error recovery
logic.  The health register logic is improved and we also add missing
code to free and re-create VF representors in the firmware after
error recovery.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>  2021-04-12 13:20:38 -07:00
commit 5711ffd313
3 changed files with 115 additions and 27 deletions
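
Before the diffs, here is a brief, self-contained sketch of the ordering the series establishes around a firmware reset: the VF representors' firmware-side state is freed before the reset, then re-created, and only then re-opened. This is ordinary userspace C, not driver code; every name in it (vf_rep_model, reps_free(), reps_alloc(), reps_open(), NUM_VF_REPS) is an illustrative stand-in rather than a bnxt_en symbol, and only the CFA_HANDLE_INVALID guard mirrors the check added to bnxt_vf_reps_open() below.

/* Simplified model of the recovery ordering; not bnxt_en code. */
#include <stdbool.h>
#include <stdio.h>

#define CFA_HANDLE_INVALID 0xffff	/* same "not allocated in FW" sentinel idea */
#define NUM_VF_REPS 2

struct vf_rep_model {
	unsigned int tx_cfa_action;	/* firmware handle, or the invalid sentinel */
	bool open;
};

static struct vf_rep_model reps[NUM_VF_REPS];

/* Drop the firmware-side state; the "netdevs" themselves stay registered. */
static void reps_free(void)
{
	for (int i = 0; i < NUM_VF_REPS; i++) {
		reps[i].tx_cfa_action = CFA_HANDLE_INVALID;
		reps[i].open = false;
	}
}

/* Re-create the firmware-side state after the reset. */
static void reps_alloc(void)
{
	for (int i = 0; i < NUM_VF_REPS; i++)
		reps[i].tx_cfa_action = (unsigned int)i;	/* pretend FW handle */
}

/* Re-open only the reps that were actually re-created, mirroring the
 * tx_cfa_action != CFA_HANDLE_INVALID guard added in bnxt_vf_reps_open().
 */
static void reps_open(void)
{
	for (int i = 0; i < NUM_VF_REPS; i++)
		if (reps[i].tx_cfa_action != CFA_HANDLE_INVALID)
			reps[i].open = true;
}

int main(void)
{
	reps_alloc();		/* normal bring-up */
	reps_open();

	reps_free();		/* error recovery: free before the firmware reset */
	/* ... firmware reset would happen here ... */
	reps_alloc();		/* then re-create ... */
	reps_open();		/* ... and re-open */

	for (int i = 0; i < NUM_VF_REPS; i++)
		printf("vf_rep %d: handle=%#x open=%d\n",
		       i, reps[i].tx_cfa_action, (int)reps[i].open);
	return 0;
}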

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9532,8 +9532,8 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
                 do {
                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
                         rc = __bnxt_hwrm_ver_get(bp, true);
-                        if (!sts || (!BNXT_FW_IS_BOOTING(sts) &&
-                                     !BNXT_FW_IS_RECOVERING(sts)))
+                        if (!BNXT_FW_IS_BOOTING(sts) &&
+                            !BNXT_FW_IS_RECOVERING(sts))
                                 break;
                         retry++;
                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
@@ -11081,6 +11081,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
                 pci_disable_device(bp->pdev);
         }
         __bnxt_close_nic(bp, true, false);
+        bnxt_vf_reps_free(bp);
         bnxt_clear_int_mode(bp);
         bnxt_hwrm_func_drv_unrgtr(bp);
         if (pci_is_enabled(bp->pdev))
@@ -11825,6 +11826,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                 bnxt_ulp_start(bp, rc);
                 if (!rc)
                         bnxt_reenable_sriov(bp);
+                bnxt_vf_reps_alloc(bp);
+                bnxt_vf_reps_open(bp);
                 bnxt_dl_health_recovery_done(bp);
                 bnxt_dl_health_status_update(bp, true);
                 rtnl_unlock();
@@ -12972,6 +12975,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                    rc);
         }
 
+        bnxt_inv_fw_health_reg(bp);
         bnxt_dl_register(bp);
 
         rc = register_netdev(dev);

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -284,8 +284,26 @@ void bnxt_vf_reps_open(struct bnxt *bp)
         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                 return;
 
-        for (i = 0; i < pci_num_vf(bp->pdev); i++)
-                bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+        for (i = 0; i < pci_num_vf(bp->pdev); i++) {
+                /* Open the VF-Rep only if it is allocated in the FW */
+                if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID)
+                        bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+        }
+}
+
+static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep)
+{
+        if (!vf_rep)
+                return;
+
+        if (vf_rep->dst) {
+                dst_release((struct dst_entry *)vf_rep->dst);
+                vf_rep->dst = NULL;
+        }
+        if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) {
+                hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
+                vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
+        }
 }
 
 static void __bnxt_vf_reps_destroy(struct bnxt *bp)
@@ -297,11 +315,7 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp)
         for (i = 0; i < num_vfs; i++) {
                 vf_rep = bp->vf_reps[i];
                 if (vf_rep) {
-                        dst_release((struct dst_entry *)vf_rep->dst);
-
-                        if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
-                                hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
-
+                        __bnxt_free_one_vf_rep(bp, vf_rep);
                         if (vf_rep->dev) {
                                 /* if register_netdev failed, then netdev_ops
                                  * would have been set to NULL
@@ -350,6 +364,80 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
         __bnxt_vf_reps_destroy(bp);
 }
 
+/* Free the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+void bnxt_vf_reps_free(struct bnxt *bp)
+{
+        u16 num_vfs = pci_num_vf(bp->pdev);
+        int i;
+
+        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+                return;
+
+        for (i = 0; i < num_vfs; i++)
+                __bnxt_free_one_vf_rep(bp, bp->vf_reps[i]);
+}
+
+static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+                             u16 *cfa_code_map)
+{
+        /* get cfa handles from FW */
+        if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action,
+                               &vf_rep->rx_cfa_code))
+                return -ENOLINK;
+
+        cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
+        vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+        if (!vf_rep->dst)
+                return -ENOMEM;
+
+        /* only cfa_action is needed to mux a packet while TXing */
+        vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
+        vf_rep->dst->u.port_info.lower_dev = bp->dev;
+
+        return 0;
+}
+
+/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+        u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev);
+        struct bnxt_vf_rep *vf_rep;
+        int rc, i;
+
+        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+                return 0;
+
+        if (!cfa_code_map)
+                return -EINVAL;
+
+        for (i = 0; i < MAX_CFA_CODE; i++)
+                cfa_code_map[i] = VF_IDX_INVALID;
+
+        for (i = 0; i < num_vfs; i++) {
+                vf_rep = bp->vf_reps[i];
+                vf_rep->vf_idx = i;
+
+                rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+                if (rc)
+                        goto err;
+        }
+
+        return 0;
+
+err:
+        netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
+        bnxt_vf_reps_free(bp);
+        return rc;
+}
+
 /* Use the OUI of the PF's perm addr and report the same mac addr
  * for the same VF-rep each time
  */
@@ -428,25 +516,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
                 vf_rep->vf_idx = i;
                 vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
 
-                /* get cfa handles from FW */
-                rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
-                                        &vf_rep->tx_cfa_action,
-                                        &vf_rep->rx_cfa_code);
-                if (rc) {
-                        rc = -ENOLINK;
+                rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+                if (rc)
                         goto err;
-                }
-                cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
-
-                vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
-                                                 GFP_KERNEL);
-                if (!vf_rep->dst) {
-                        rc = -ENOMEM;
-                        goto err;
-                }
-                /* only cfa_action is needed to mux a packet while TXing */
-                vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
-                vf_rep->dst->u.port_info.lower_dev = bp->dev;
 
                 bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
                 rc = register_netdev(dev);

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -19,6 +19,8 @@ void bnxt_vf_reps_close(struct bnxt *bp);
 void bnxt_vf_reps_open(struct bnxt *bp);
 void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
 struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
+int bnxt_vf_reps_alloc(struct bnxt *bp);
+void bnxt_vf_reps_free(struct bnxt *bp);
 
 static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 {
@@ -61,5 +63,15 @@ static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
 {
         return false;
 }
+
+static inline int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+        return 0;
+}
+
+static inline void bnxt_vf_reps_free(struct bnxt *bp)
+{
+}
+
 #endif /* CONFIG_BNXT_SRIOV */
 #endif /* BNXT_VFR_H */