RDMA v6.10 first rc

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Small bug fixes:

   - Prevent a crash in bnxt if the en and rdma drivers disagree on the
     MSI vectors

   - Have rxe memcpy inline data from the correct address

   - Fix rxe's validation of UD packets

   - Several mlx5 MR cache issues: bad lock balancing on error, missing
     propagation of the ATS property to the HW, wrong bucketing of freed
     MRs in some cases

   - Incorrect goto error unwind in mlx5 driver probe

   - Missed userspace input validation in mlx5 SRQ create

   - Incorrect uABI in MANA rejecting valid optional MR creation flags"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mana_ib: Ignore optional access flags for MRs
  RDMA/mlx5: Add check for srq max_sge attribute
  RDMA/mlx5: Fix unwind flow as part of mlx5_ib_stage_init_init
  RDMA/mlx5: Ensure created mkeys always have a populated rb_key
  RDMA/mlx5: Follow rb_key.ats when creating new mkeys
  RDMA/mlx5: Remove extra unlock on error path
  RDMA/rxe: Fix responder length checking for UD request packets
  RDMA/rxe: Fix data copy for IB_SEND_INLINE
  RDMA/bnxt_re: Fix the max msix vectors macro
Commit ffdf504cab by Linus Torvalds, 2024-06-21 13:55:38 -07:00
7 changed files with 30 additions and 15 deletions

drivers/infiniband/hw/bnxt_re/bnxt_re.h

@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
         struct bnxt_re_sqp_entries *sqp_tbl;
 };
 
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
 #define BNXT_RE_AEQ_IDX 0
 #define BNXT_RE_NQ_IDX 1
 #define BNXT_RE_GEN_P5_MAX_VF 64

@@ -168,7 +166,7 @@ struct bnxt_re_dev {
         struct bnxt_qplib_rcfw rcfw;
 
         /* NQ */
-        struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+        struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
 
         /* Device Resources */
         struct bnxt_qplib_dev_attr dev_attr;
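
The crash pattern here is an array sized by the RDMA driver's private macro but indexed with a vector count owned by the shared en driver; the fix simply reuses the en driver's BNXT_MAX_ROCE_MSIX on both sides. A minimal standalone sketch of the mismatch, with hypothetical names and values rather than the real driver's:

#include <stdio.h>

#define RDMA_SIDE_MAX_MSIX 9            /* stale private copy of the limit */

struct rdma_dev {
        int nq[RDMA_SIDE_MAX_MSIX];     /* table sized by the private macro */
};

/* msix_count is handed over by the en driver; if its limit exceeds
 * RDMA_SIDE_MAX_MSIX, this loop writes past the end of nq[]. */
static void setup_nqs(struct rdma_dev *d, int msix_count)
{
        int i;

        for (i = 0; i < msix_count; i++)
                d->nq[i] = i;
}

int main(void)
{
        struct rdma_dev d;

        setup_nqs(&d, RDMA_SIDE_MAX_MSIX);      /* in bounds */
        /* setup_nqs(&d, 16) would corrupt memory: both drivers must
         * agree on a single limit macro. */
        printf("nq[%d] = %d\n", RDMA_SIDE_MAX_MSIX - 1,
               d.nq[RDMA_SIDE_MAX_MSIX - 1]);
        return 0;
}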

drivers/infiniband/hw/mana/mr.c

@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
                   "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
                   start, iova, length, access_flags);
 
+        access_flags &= ~IB_ACCESS_OPTIONAL;
         if (access_flags & ~VALID_MR_FLAGS)
                 return ERR_PTR(-EINVAL);
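
The uABI point is that IB_ACCESS_OPTIONAL bits are hints a driver may ignore but must not reject, so they get masked off before the strict supported-flags check. A standalone sketch of that validation order, with illustrative flag values standing in for the real ib_verbs constants:

#include <errno.h>

#define VALID_FLAGS     0x0000000fu     /* flags this driver implements */
#define OPTIONAL_FLAGS  0x000f0000u     /* stand-in for IB_ACCESS_OPTIONAL */

static int check_access_flags(unsigned int access_flags)
{
        access_flags &= ~OPTIONAL_FLAGS;        /* optional: ignore, not reject */
        if (access_flags & ~VALID_FLAGS)        /* unknown mandatory bit */
                return -EINVAL;
        return 0;
}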

drivers/infiniband/hw/mlx5/main.c

@@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
         spin_lock_init(&dev->dm.lock);
         dev->dm.dev = mdev;
         return 0;
 
-err:
-        mlx5r_macsec_dealloc_gids(dev);
 err_mp:
         mlx5_ib_cleanup_multiport_master(dev);
+err:
+        mlx5r_macsec_dealloc_gids(dev);
         return err;
 }
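
The bug class is a goto-unwind ladder whose labels are out of order, so a jump runs cleanup for a step that never completed (or skips one that did). A generic sketch of the convention the fix restores; setup_*/undo_* are hypothetical stand-ins, not mlx5 calls:

static int setup_a(void) { return 0; }
static void undo_a(void) { }
static int setup_b(void) { return 0; }
static void undo_b(void) { }
static int setup_c(void) { return 0; }

static int probe(void)
{
        int err;

        err = setup_a();
        if (err)
                return err;
        err = setup_b();
        if (err)
                goto err_a;     /* only A has to be undone */
        err = setup_c();
        if (err)
                goto err_b;     /* B, then A, in reverse setup order */
        return 0;

err_b:
        undo_b();               /* labels fall through: B then A */
err_a:
        undo_a();
        return err;
}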

drivers/infiniband/hw/mlx5/mr.c

@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
         MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
         MLX5_SET(mkc, mkc, access_mode_4_2,
                  (ent->rb_key.access_mode >> 2) & 0x7);
+        MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
 
         MLX5_SET(mkc, mkc, translations_octword_size,
                  get_mkc_octo_size(ent->rb_key.access_mode,

@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
                         new = &((*new)->rb_left);
                 if (cmp < 0)
                         new = &((*new)->rb_right);
-                if (cmp == 0) {
-                        mutex_unlock(&cache->rb_lock);
+                if (cmp == 0)
                         return -EEXIST;
-                }
         }
 
         /* Add new node and rebalance tree. */

@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
         }
         mr->mmkey.cache_ent = ent;
         mr->mmkey.type = MLX5_MKEY_MR;
+        mr->mmkey.rb_key = ent->rb_key;
+        mr->mmkey.cacheable = true;
         init_waitqueue_head(&mr->mmkey.wait);
         return mr;
 }

@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
         mr->ibmr.pd = pd;
         mr->umem = umem;
         mr->page_shift = order_base_2(page_size);
-        mr->mmkey.cacheable = true;
         set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
         return mr;
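
Several of these hunks feed one mechanism: cache buckets live in an rb-tree ordered by rb_key, and a freed mkey is re-filed under the bucket found from the key stored on the mkey itself, so a key left unpopulated at creation mis-buckets the mkey and its entry can never be reused. A simplified sketch of that keying, assuming a hypothetical three-field key rather than the driver's real one:

#include <string.h>

struct cache_key {
        unsigned int access_mode;
        unsigned int ats;       /* must also reach the HW mkc (first hunk) */
        unsigned int ndescs;
};

struct cached_mkey {
        struct cache_key rb_key;        /* populated at creation (third hunk) */
        int cacheable;
};

/* Total order used to walk the bucket rb-tree on insert and on free;
 * a zeroed rb_key compares into a bucket that never matches the
 * mkey's real shape. (No padding here: all members are unsigned int.) */
static int key_cmp(const struct cache_key *a, const struct cache_key *b)
{
        return memcmp(a, b, sizeof(*a));
}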

drivers/infiniband/hw/mlx5/srq.c

@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
         int err;
         struct mlx5_srq_attr in = {};
         __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+        __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+                           sizeof(struct mlx5_wqe_data_seg);
 
         if (init_attr->srq_type != IB_SRQT_BASIC &&
             init_attr->srq_type != IB_SRQT_XRC &&
             init_attr->srq_type != IB_SRQT_TM)
                 return -EOPNOTSUPP;
 
-        /* Sanity check SRQ size before proceeding */
-        if (init_attr->attr.max_wr >= max_srq_wqes) {
-                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
-                            init_attr->attr.max_wr,
-                            max_srq_wqes);
+        /* Sanity check SRQ and sge size before proceeding */
+        if (init_attr->attr.max_wr >= max_srq_wqes ||
+            init_attr->attr.max_sge > max_sge_sz) {
+                mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+                            init_attr->attr.max_wr, max_srq_wqes,
+                            init_attr->attr.max_sge, max_sge_sz);
                 return -EINVAL;
         }
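
The new bound follows from WQE geometry: each scatter entry consumes one struct mlx5_wqe_data_seg (16 bytes) in the receive WQE, so the device's maximum receive WQE size caps the SGE count. A worked example with an illustrative capability value:

#include <assert.h>

static unsigned int srq_max_sge(unsigned int max_wqe_sz_rq,
                                unsigned int data_seg_sz)
{
        /* one data segment per SGE, so WQE bytes / segment bytes */
        return max_wqe_sz_rq / data_seg_sz;
}

int main(void)
{
        /* e.g. a 512-byte receive WQE cap with 16-byte data segments */
        assert(srq_max_sge(512, 16) == 32);     /* max_sge 33 -> -EINVAL */
        return 0;
}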

drivers/infiniband/sw/rxe/rxe_resp.c

@@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
          * receive buffer later. For rmda operations additional
          * length checks are performed in check_rkey.
          */
+        if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+                unsigned int payload = payload_size(pkt);
+                unsigned int recv_buffer_len = 0;
+                int i;
+
+                for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+                        recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+                if (payload + 40 > recv_buffer_len) {
+                        rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+                        return RESPST_ERR_LENGTH;
+                }
+        }
+
         if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
                                              (qp_type(qp) == IB_QPT_UC))) {
                 unsigned int mtu = qp->mtu;
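
The bare 40 in the new check is the InfiniBand GRH size: a UD receive buffer must reserve 40 bytes ahead of the payload for the Global Routing Header, whether or not one actually arrives. A standalone sketch of the resulting capacity rule:

#include <stdbool.h>

#define IB_GRH_BYTES 40u        /* fixed Global Routing Header size */

/* recv_buffer_len is the sum of all SGE lengths in the receive WQE */
static bool ud_payload_fits(unsigned int payload_len,
                            unsigned int recv_buffer_len)
{
        return payload_len + IB_GRH_BYTES <= recv_buffer_len;
}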

drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
         int i;
 
         for (i = 0; i < ibwr->num_sge; i++, sge++) {
-                memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+                memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
                 p += sge->length;
         }
 }
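
The one-word change matters because rxe's "DMA addresses" are really kernel virtual addresses: ib_virt_dma_to_ptr() recovers the data pointer, while ib_virt_dma_to_page() resolves the containing struct page, whose address is metadata rather than the bytes to copy. A userspace sketch of the pointer round-trip; illustrative only, not the in-kernel helpers:

#include <stdint.h>
#include <string.h>

/* recover the data pointer encoded in a u64 "virtual DMA" address */
static void *virt_dma_to_ptr(uint64_t dma_addr)
{
        return (void *)(uintptr_t)dma_addr;
}

static void copy_inline(char *dst, uint64_t sge_addr, size_t len)
{
        memcpy(dst, virt_dma_to_ptr(sge_addr), len);    /* the data itself */
}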