ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors()

ice_pf_dcb_recfg() re-maps queues to vectors with
ice_vsi_map_rings_to_vectors(), which does not restore the previous
state for XDP queues. This leads to no AF_XDP traffic after rebuild.

Map XDP queues to vectors in ice_vsi_map_rings_to_vectors().
Also, move the code around, so XDP queues are mapped independently only
through .ndo_bpf().
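
For reference, ice_map_xdp_rings() follows the same balanced split that
ice_vsi_map_rings_to_vectors() uses for regular rings: each remaining
vector takes DIV_ROUND_UP(rings_rem, vectors_left) rings, so ring counts
per vector never differ by more than one. A minimal standalone sketch of
that rule (map_rings() and the sample counts are illustrative only, not
driver code):

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  static void map_rings(int num_rings, int num_vectors)
  {
          int rings_rem = num_rings;
          int base = 0;
          int v;

          for (v = 0; v < num_vectors; v++) {
                  /* each vector takes an even share of what is left */
                  int per_v = DIV_ROUND_UP(rings_rem, num_vectors - v);

                  printf("vector %d <- rings [%d..%d)\n",
                         v, base, base + per_v);
                  base += per_v;
                  rings_rem -= per_v;
          }
  }

  int main(void)
  {
          map_rings(10, 4); /* e.g. 10 XDP Tx rings over 4 vectors */
          return 0;
  }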

Fixes: 6624e780a5 ("ice: split ice_vsi_setup into smaller functions")
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 4 files changed, 84 insertions(+), 62 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -940,6 +940,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
		}
		rx_rings_rem -= rx_rings_per_v;
	}
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_map_xdp_rings(vsi);
 }
 
 /**

diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2274,13 +2274,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
	if (ret)
		goto unroll_vector_base;
 
-	ice_vsi_map_rings_to_vectors(vsi);
-
-	/* Associate q_vector rings to napi */
-	ice_vsi_set_napi_queues(vsi);
-
-	vsi->stat_offsets_loaded = false;
-
	if (ice_is_xdp_ena_vsi(vsi)) {
		ret = ice_vsi_determine_xdp_res(vsi);
		if (ret)
@@ -2291,6 +2284,13 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
			goto unroll_vector_base;
	}
 
+	ice_vsi_map_rings_to_vectors(vsi);
+
+	/* Associate q_vector rings to napi */
+	ice_vsi_set_napi_queues(vsi);
+
+	vsi->stat_offsets_loaded = false;
+
	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
	if (vsi->type != ICE_VSI_CTRL)
		/* Do not exit if configuring RSS had an issue, at

diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2707,50 +2707,33 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
		bpf_prog_put(old_prog);
 }
 
-/**
- * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
- * @vsi: VSI to bring up Tx rings used by XDP
- * @prog: bpf program that will be assigned to VSI
- * @cfg_type: create from scratch or restore the existing configuration
- *
- * Return 0 on success and negative value on error
- */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
-			  enum ice_xdp_cfg cfg_type)
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
 {
-	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
-	struct ice_pf *pf = vsi->back;
-	struct ice_qs_cfg xdp_qs_cfg = {
-		.qs_mutex = &pf->avail_q_mutex,
-		.pf_map = pf->avail_txqs,
-		.pf_map_size = pf->max_pf_txqs,
-		.q_count = vsi->num_xdp_txq,
-		.scatter_count = ICE_MAX_SCATTER_TXQS,
-		.vsi_map = vsi->txq_map,
-		.vsi_map_offset = vsi->alloc_txq,
-		.mapping_mode = ICE_VSI_MAP_CONTIG
-	};
-	struct device *dev;
-	int i, v_idx;
-	int status;
-
-	dev = ice_pf_to_dev(pf);
-	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
-				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
-	if (!vsi->xdp_rings)
-		return -ENOMEM;
-
-	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
-	if (__ice_vsi_get_qs(&xdp_qs_cfg))
-		goto err_map_xdp;
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
 
	if (static_key_enabled(&ice_xdp_locking_key))
-		netdev_warn(vsi->netdev,
-			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
 
-	if (ice_xdp_alloc_setup_rings(vsi))
-		goto clear_xdp_rings;
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
 
	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
@@ -2771,22 +2754,55 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
		xdp_rings_rem -= xdp_rings_per_v;
	}
 
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
	}
+}
+
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
+ *
+ * Return 0 on success and negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type)
+{
+	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf = vsi->back;
+	struct ice_qs_cfg xdp_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_txqs,
+		.pf_map_size = pf->max_pf_txqs,
+		.q_count = vsi->num_xdp_txq,
+		.scatter_count = ICE_MAX_SCATTER_TXQS,
+		.vsi_map = vsi->txq_map,
+		.vsi_map_offset = vsi->alloc_txq,
+		.mapping_mode = ICE_VSI_MAP_CONTIG
+	};
+	struct device *dev;
+	int status, i;
+
+	dev = ice_pf_to_dev(pf);
+	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
+				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
+	if (!vsi->xdp_rings)
+		return -ENOMEM;
+
+	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+	if (__ice_vsi_get_qs(&xdp_qs_cfg))
+		goto err_map_xdp;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		netdev_warn(vsi->netdev,
+			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
+	if (ice_xdp_alloc_setup_rings(vsi))
+		goto clear_xdp_rings;
 
	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
@@ -2795,6 +2811,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
	if (cfg_type == ICE_XDP_CFG_PART)
		return 0;
 
+	ice_map_xdp_rings(vsi);
+
	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */