Clean up pthread locks in ofed RDMA verbs

On FreeBSD, initializing a pthread mutex, condition variable, or
spinlock allocates memory.  On Linux-based systems, these init calls do
not allocate, so it was safe there to assume that the ofed RDMA verbs
code never needs to explicitly destroy its pthread locks.  That
assumption is false on FreeBSD, so rearrange the code to destroy the
pthread locks on every error and teardown path.

Reviewed by:	delphij
MFC after:	2 weeks
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D41105
commit a687910fc4
parent 75556db552
Author:	Sean Lim	2023-09-18 18:56:30 -05:00
Committed by:	Eric van Gyzen
16 changed files with 453 additions and 109 deletions
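
The change applies a single pattern throughout: every pthread_spin_init(),
pthread_mutex_init(), and pthread_cond_init() call is checked for failure,
error paths unwind through goto labels in reverse initialization order, and
every teardown path calls the matching destroy function.  A minimal sketch
of the idiom, using a hypothetical example_dev type (not code from the
diff):

#include <pthread.h>
#include <stdlib.h>

struct example_dev {
	pthread_spinlock_t lock;
	pthread_mutex_t mut;
	pthread_cond_t cond;
};

static struct example_dev *
example_alloc(void)
{
	struct example_dev *dev;

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return NULL;
	/* On FreeBSD each init call may allocate, so each may fail. */
	if (pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;
	if (pthread_mutex_init(&dev->mut, NULL))
		goto err_spl;
	if (pthread_cond_init(&dev->cond, NULL))
		goto err_mutex;
	return dev;

err_mutex:
	pthread_mutex_destroy(&dev->mut);
err_spl:
	pthread_spin_destroy(&dev->lock);
err:
	free(dev);
	return NULL;
}

static void
example_free(struct example_dev *dev)
{
	pthread_cond_destroy(&dev->cond);
	pthread_mutex_destroy(&dev->mut);
	pthread_spin_destroy(&dev->lock);
	free(dev);
}

Note also that the destroy paths below (c4iw_destroy_cq, mlx5_destroy_cq,
and friends) destroy the locks only after the kernel destroy command has
succeeded, since a failed command leaves the object live.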


@@ -519,7 +519,9 @@ static struct verbs_device *cxgb4_driver_init(const char *uverbs_sys_path,
return NULL;
}
pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE);
if (pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE))
goto err;
dev->ibv_dev.ops = &c4iw_dev_ops;
dev->chip_version = CHELSIO_CHIP_VERSION(hca_table[i].device >> 8);
dev->abi_version = abi_version;
@@ -554,6 +556,11 @@ static struct verbs_device *cxgb4_driver_init(const char *uverbs_sys_path,
}
return &dev->ibv_dev;
err:
free(dev);
return NULL;
}
static __attribute__((constructor)) void cxgb4_register_driver(void)


@@ -190,7 +190,9 @@ struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
PDBG("%s c4iw_create_cq_resp reserved field modified by kernel\n",
__FUNCTION__);
pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
ret = pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
if (ret)
goto err2;
#ifdef STALL_DETECTION
gettimeofday(&chp->time, NULL);
#endif
@@ -203,12 +205,12 @@ struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE,
MAP_SHARED, context->cmd_fd, resp.key);
if (chp->cq.queue == MAP_FAILED)
goto err2;
goto err3;
chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
context->cmd_fd, resp.gts_key);
if (chp->cq.ugts == MAP_FAILED)
goto err3;
goto err4;
if (dev_is_t4(chp->rhp))
chp->cq.ugts += 1;
@@ -216,7 +218,7 @@ struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
chp->cq.ugts += 5;
chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue);
if (!chp->cq.sw_queue)
goto err4;
goto err5;
PDBG("%s cqid 0x%x key %" PRIx64 " va %p memsize %lu gts_key %"
PRIx64 " va %p qid_mask 0x%x\n",
@@ -228,10 +230,12 @@ struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
pthread_spin_unlock(&dev->lock);
INC_STAT(cq);
return &chp->ibv_cq;
err4:
err5:
munmap(MASKED(chp->cq.ugts), c4iw_page_size);
err3:
err4:
munmap(chp->cq.queue, chp->cq.memsize);
err3:
pthread_spin_destroy(&chp->lock);
err2:
(void)ibv_cmd_destroy_cq(&chp->ibv_cq);
err1:
@@ -265,6 +269,7 @@ int c4iw_destroy_cq(struct ibv_cq *ibcq)
if (ret) {
return ret;
}
verbs_cleanup_cq(ibcq);
munmap(MASKED(chp->cq.ugts), c4iw_page_size);
munmap(chp->cq.queue, chp->cq.memsize);
@@ -273,6 +278,7 @@ int c4iw_destroy_cq(struct ibv_cq *ibcq)
pthread_spin_unlock(&dev->lock);
free(chp->cq.sw_queue);
pthread_spin_destroy(&chp->lock);
free(chp);
return 0;
}
@@ -337,38 +343,40 @@ static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
qhp->wq.rq.qid = resp.rqid;
qhp->wq.rq.size = resp.rq_size;
qhp->wq.rq.memsize = resp.rq_memsize;
pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
if (ret)
goto err3;
dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.sq_db_gts_key);
if (dbva == MAP_FAILED)
goto err3;
goto err4;
qhp->wq.sq.udb = dbva;
qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.sq_key);
if (qhp->wq.sq.queue == MAP_FAILED)
goto err4;
goto err5;
dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.rq_db_gts_key);
if (dbva == MAP_FAILED)
goto err5;
goto err6;
qhp->wq.rq.udb = dbva;
qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.rq_key);
if (qhp->wq.rq.queue == MAP_FAILED)
goto err6;
goto err7;
qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
if (!qhp->wq.sq.sw_sq)
goto err7;
goto err8;
qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
if (!qhp->wq.rq.sw_rq)
goto err8;
goto err9;
PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
" rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
@@ -385,16 +393,18 @@ static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
pthread_spin_unlock(&dev->lock);
INC_STAT(qp);
return &qhp->ibv_qp;
err8:
err9:
free(qhp->wq.sq.sw_sq);
err7:
err8:
munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
err7:
munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
err6:
munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
err5:
munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err4:
pthread_spin_destroy(&qhp->lock);
err3:
(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
@@ -448,12 +458,14 @@ static struct ibv_qp *create_qp(struct ibv_pd *pd,
fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. "
"MA workaround disabled.\n");
}
pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
if (ret)
goto err3;
dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.sq_db_gts_key);
if (dbva == MAP_FAILED)
goto err3;
goto err4;
qhp->wq.sq.udb = dbva;
if (!dev_is_t4(qhp->rhp)) {
unsigned long segment_offset = 128 * (qhp->wq.sq.qid &
@@ -471,12 +483,12 @@ static struct ibv_qp *create_qp(struct ibv_pd *pd,
PROT_READ|PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.sq_key);
if (qhp->wq.sq.queue == MAP_FAILED)
goto err4;
goto err5;
dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.rq_db_gts_key);
if (dbva == MAP_FAILED)
goto err5;
goto err6;
qhp->wq.rq.udb = dbva;
if (!dev_is_t4(qhp->rhp)) {
unsigned long segment_offset = 128 * (qhp->wq.rq.qid &
@@ -493,22 +505,22 @@ static struct ibv_qp *create_qp(struct ibv_pd *pd,
PROT_READ|PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.rq_key);
if (qhp->wq.rq.queue == MAP_FAILED)
goto err6;
goto err7;
qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
if (!qhp->wq.sq.sw_sq)
goto err7;
goto err8;
qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
if (!qhp->wq.rq.sw_rq)
goto err8;
goto err9;
if (t4_sq_onchip(&qhp->wq)) {
qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
MAP_SHARED, pd->context->cmd_fd,
resp.ma_sync_key);
if (qhp->wq.sq.ma_sync == MAP_FAILED)
goto err9;
goto err10;
qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
}
@@ -534,18 +546,20 @@ static struct ibv_qp *create_qp(struct ibv_pd *pd,
pthread_spin_unlock(&dev->lock);
INC_STAT(qp);
return &qhp->ibv_qp;
err9:
err10:
free(qhp->wq.rq.sw_rq);
err8:
err9:
free(qhp->wq.sq.sw_sq);
err7:
err8:
munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
err7:
munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
err6:
munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
err5:
munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err4:
pthread_spin_destroy(&qhp->lock);
err3:
(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
@@ -625,6 +639,7 @@ int c4iw_destroy_qp(struct ibv_qp *ibqp)
free(qhp->wq.rq.sw_rq);
free(qhp->wq.sq.sw_sq);
pthread_spin_destroy(&qhp->lock);
free(qhp);
return 0;
}


@@ -232,7 +232,8 @@ static struct cm_id_private *ib_cm_alloc_id(struct ib_cm_device *device,
memset(cm_id_priv, 0, sizeof *cm_id_priv);
cm_id_priv->id.device = device;
cm_id_priv->id.context = context;
pthread_mutex_init(&cm_id_priv->mut, NULL);
if (pthread_mutex_init(&cm_id_priv->mut, NULL))
goto err;
if (pthread_cond_init(&cm_id_priv->cond, NULL))
goto err;


@@ -679,6 +679,7 @@ int ibv_cmd_create_srq_ex(struct ibv_context *context,
struct ibv_create_srq_resp *resp, size_t resp_size)
{
struct verbs_xrcd *vxrcd = NULL;
int ret = 0;
IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_XSRQ, resp, resp_size);
@@ -705,8 +706,17 @@ int ibv_cmd_create_srq_ex(struct ibv_context *context,
cmd->cq_handle = attr_ex->cq->handle;
}
if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
return errno;
ret = pthread_mutex_init(&srq->srq.mutex, NULL);
if (ret)
goto err;
ret = pthread_cond_init(&srq->srq.cond, NULL);
if (ret)
goto err_mutex;
if (write(context->cmd_fd, cmd, cmd_size) != cmd_size) {
ret = errno;
goto err_cond;
}
(void) VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
@@ -715,8 +725,6 @@ int ibv_cmd_create_srq_ex(struct ibv_context *context,
srq->srq.srq_context = attr_ex->srq_context;
srq->srq.pd = attr_ex->pd;
srq->srq.events_completed = 0;
pthread_mutex_init(&srq->srq.mutex, NULL);
pthread_cond_init(&srq->srq.cond, NULL);
/*
* check that the last field is available.
@@ -744,6 +752,12 @@ int ibv_cmd_create_srq_ex(struct ibv_context *context,
attr_ex->attr.max_sge = resp->max_sge;
return 0;
err_cond:
pthread_cond_destroy(&srq->srq.cond);
err_mutex:
pthread_mutex_destroy(&srq->srq.mutex);
err:
return ret;
}
@@ -837,6 +851,9 @@ int ibv_cmd_destroy_srq(struct ibv_srq *srq)
pthread_cond_wait(&srq->cond, &srq->mutex);
pthread_mutex_unlock(&srq->mutex);
pthread_cond_destroy(&srq->cond);
pthread_mutex_destroy(&srq->mutex);
return 0;
}
@@ -887,6 +904,31 @@ static int create_qp_ex_common(struct verbs_qp *qp,
return 0;
}
static void create_qp_handle_resp_common_cleanup(struct verbs_qp *qp)
{
pthread_cond_destroy(&qp->qp.cond);
pthread_mutex_destroy(&qp->qp.mutex);
}
static int create_qp_handle_resp_common_init(struct verbs_qp *qp)
{
int ret = 0;
ret = pthread_mutex_init(&qp->qp.mutex, NULL);
if (ret)
return ret;
ret = pthread_cond_init(&qp->qp.cond, NULL);
if (ret)
goto err;
return ret;
err:
pthread_mutex_destroy(&qp->qp.mutex);
return ret;
}
static void create_qp_handle_resp_common(struct ibv_context *context,
struct verbs_qp *qp,
struct ibv_qp_init_attr_ex *qp_attr,
@@ -913,8 +955,6 @@ static void create_qp_handle_resp_common(struct ibv_context *context,
qp->qp.qp_type = qp_attr->qp_type;
qp->qp.state = IBV_QPS_RESET;
qp->qp.events_completed = 0;
pthread_mutex_init(&qp->qp.mutex, NULL);
pthread_cond_init(&qp->qp.cond, NULL);
qp->comp_mask = 0;
if (vext_field_avail(struct verbs_qp, xrcd, vqp_sz) &&
@@ -977,9 +1017,15 @@ int ibv_cmd_create_qp_ex2(struct ibv_context *context,
cmd->comp_mask = IBV_CREATE_QP_EX_KERNEL_MASK_IND_TABLE;
}
err = create_qp_handle_resp_common_init(qp);
if (err)
return err;
err = write(context->cmd_fd, cmd, cmd_size);
if (err != cmd_size)
return errno;
if (err != cmd_size) {
err = errno;
goto err;
}
(void)VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
@@ -987,6 +1033,11 @@ int ibv_cmd_create_qp_ex2(struct ibv_context *context,
vqp_sz);
return 0;
err:
create_qp_handle_resp_common_cleanup(qp);
return err;
}
int ibv_cmd_create_qp_ex(struct ibv_context *context,
@@ -1008,8 +1059,15 @@ int ibv_cmd_create_qp_ex(struct ibv_context *context,
if (err)
return err;
if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
return errno;
err = create_qp_handle_resp_common_init(qp);
if (err)
return err;
err = write(context->cmd_fd, cmd, cmd_size);
if (err != cmd_size) {
err = errno;
goto err;
}
(void)VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
@@ -1032,6 +1090,11 @@ int ibv_cmd_create_qp_ex(struct ibv_context *context,
create_qp_handle_resp_common(context, qp, attr_ex, resp, vxrcd, vqp_sz);
return 0;
err:
create_qp_handle_resp_common_cleanup(qp);
return err;
}
int ibv_cmd_create_qp(struct ibv_pd *pd,
@@ -1098,6 +1161,7 @@ int ibv_cmd_open_qp(struct ibv_context *context, struct verbs_qp *qp,
struct ibv_open_qp *cmd, size_t cmd_size,
struct ibv_create_qp_resp *resp, size_t resp_size)
{
int err = 0;
struct verbs_xrcd *xrcd;
IBV_INIT_CMD_RESP(cmd, cmd_size, OPEN_QP, resp, resp_size);
@@ -1115,8 +1179,18 @@ int ibv_cmd_open_qp(struct ibv_context *context, struct verbs_qp *qp,
cmd->qpn = attr->qp_num;
cmd->qp_type = attr->qp_type;
if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
return errno;
err = pthread_mutex_init(&qp->qp.mutex, NULL);
if (err)
return err;
err = pthread_cond_init(&qp->qp.cond, NULL);
if (err)
goto err_mutex;
err = write(context->cmd_fd, cmd, cmd_size);
if (err != cmd_size) {
err = errno;
goto err_cond;
}
(void) VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
@@ -1131,8 +1205,6 @@ int ibv_cmd_open_qp(struct ibv_context *context, struct verbs_qp *qp,
qp->qp.qp_type = attr->qp_type;
qp->qp.state = IBV_QPS_UNKNOWN;
qp->qp.events_completed = 0;
pthread_mutex_init(&qp->qp.mutex, NULL);
pthread_cond_init(&qp->qp.cond, NULL);
qp->comp_mask = 0;
if (vext_field_avail(struct verbs_qp, xrcd, vqp_sz)) {
qp->comp_mask = VERBS_QP_XRCD;
@@ -1140,6 +1212,13 @@ int ibv_cmd_open_qp(struct ibv_context *context, struct verbs_qp *qp,
}
return 0;
err_cond:
pthread_cond_destroy(&qp->qp.cond);
err_mutex:
pthread_mutex_destroy(&qp->qp.mutex);
return err;
}
int ibv_cmd_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
@@ -1644,6 +1723,9 @@ int ibv_cmd_destroy_qp(struct ibv_qp *qp)
pthread_cond_wait(&qp->cond, &qp->mutex);
pthread_mutex_unlock(&qp->mutex);
pthread_cond_destroy(&qp->cond);
pthread_mutex_destroy(&qp->mutex);
return 0;
}


@@ -132,13 +132,22 @@ __be64 __ibv_get_device_guid(struct ibv_device *device)
}
default_symver(__ibv_get_device_guid, ibv_get_device_guid);
void verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
int verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
struct ibv_comp_channel *channel,
void *cq_context)
{
int err = 0;
cq->context = context;
cq->channel = channel;
err = pthread_mutex_init(&cq->mutex, NULL);
if (err)
return err;
err = pthread_cond_init(&cq->cond, NULL);
if (err)
goto err;
if (cq->channel) {
pthread_mutex_lock(&context->mutex);
++cq->channel->refcnt;
@@ -148,8 +157,19 @@ void verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
cq->cq_context = cq_context;
cq->comp_events_completed = 0;
cq->async_events_completed = 0;
pthread_mutex_init(&cq->mutex, NULL);
pthread_cond_init(&cq->cond, NULL);
return err;
err:
pthread_mutex_destroy(&cq->mutex);
return err;
}
void verbs_cleanup_cq(struct ibv_cq *cq)
{
pthread_cond_destroy(&cq->cond);
pthread_mutex_destroy(&cq->mutex);
}
static struct ibv_cq_ex *
@@ -158,6 +178,7 @@ __lib_ibv_create_cq_ex(struct ibv_context *context,
{
struct verbs_context *vctx = verbs_get_ctx(context);
struct ibv_cq_ex *cq;
int err = 0;
if (cq_attr->wc_flags & ~IBV_CREATE_CQ_SUP_WC_FLAGS) {
errno = EOPNOTSUPP;
@@ -165,12 +186,20 @@ __lib_ibv_create_cq_ex(struct ibv_context *context,
}
cq = vctx->priv->create_cq_ex(context, cq_attr);
if (!cq)
return NULL;
if (cq)
verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
cq_attr->channel, cq_attr->cq_context);
err = verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
cq_attr->channel, cq_attr->cq_context);
if (err)
goto err;
return cq;
err:
context->ops.destroy_cq(ibv_cq_ex_to_cq(cq));
return NULL;
}
struct ibv_context *__ibv_open_device(struct ibv_device *device)
@@ -198,6 +227,11 @@ struct ibv_context *__ibv_open_device(struct ibv_device *device)
context = verbs_device->ops->alloc_context(device, cmd_fd);
if (!context)
goto err;
if (pthread_mutex_init(&context->mutex, NULL)) {
verbs_device->ops->free_context(context);
goto err;
}
} else {
struct verbs_ex_private *priv;
@@ -212,8 +246,7 @@ struct ibv_context *__ibv_open_device(struct ibv_device *device)
priv = calloc(1, sizeof(*priv));
if (!priv) {
errno = ENOMEM;
free(context_ex);
goto err;
goto err_context;
}
context_ex->priv = priv;
@@ -221,9 +254,12 @@ struct ibv_context *__ibv_open_device(struct ibv_device *device)
context_ex->sz = sizeof(*context_ex);
context = &context_ex->context;
if (pthread_mutex_init(&context->mutex, NULL))
goto verbs_err;
ret = verbs_device->ops->init_context(verbs_device, context, cmd_fd);
if (ret)
goto verbs_err;
goto err_mutex;
/*
* In order to maintain backward/forward binary compatibility
* with apps compiled against libibverbs-1.1.8 that use the
@@ -247,12 +283,14 @@ struct ibv_context *__ibv_open_device(struct ibv_device *device)
context->device = device;
context->cmd_fd = cmd_fd;
pthread_mutex_init(&context->mutex, NULL);
return context;
err_mutex:
pthread_mutex_destroy(&context->mutex);
verbs_err:
free(context_ex->priv);
err_context:
free(context_ex);
err:
close(cmd_fd);
@@ -267,6 +305,7 @@ int __ibv_close_device(struct ibv_context *context)
struct verbs_context *context_ex;
struct verbs_device *verbs_device = verbs_get_device(context->device);
pthread_mutex_destroy(&context->mutex);
context_ex = verbs_get_ctx(context);
if (context_ex) {
verbs_device->ops->uninit_context(verbs_device, context);
@@ -393,3 +432,31 @@ void __ibv_ack_async_event(struct ibv_async_event *event)
}
}
default_symver(__ibv_ack_async_event, ibv_ack_async_event);
int __ibv_init_wq(struct ibv_wq *wq)
{
int err = 0;
wq->events_completed = 0;
err = pthread_mutex_init(&wq->mutex, NULL);
if (err)
return err;
err = pthread_cond_init(&wq->cond, NULL);
if (err)
goto err;
return err;
err:
pthread_mutex_destroy(&wq->mutex);
return err;
}
default_symver(__ibv_init_wq, ibv_init_wq);
void __ibv_cleanup_wq(struct ibv_wq *wq)
{
pthread_cond_destroy(&wq->cond);
pthread_mutex_destroy(&wq->mutex);
}
default_symver(__ibv_cleanup_wq, ibv_cleanup_wq);


@@ -130,9 +130,12 @@ verbs_get_device(const struct ibv_device *dev)
typedef struct verbs_device *(*verbs_driver_init_func)(const char *uverbs_sys_path,
int abi_version);
void verbs_register_driver(const char *name, verbs_driver_init_func init_func);
void verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
int verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
struct ibv_comp_channel *channel,
void *cq_context);
void verbs_cleanup_cq(struct ibv_cq *cq);
int ibv_init_wq(struct ibv_wq *wq);
void ibv_cleanup_wq(struct ibv_wq *wq);
int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
size_t cmd_size, struct ibv_get_context_resp *resp,


@@ -136,4 +136,7 @@ IBVERBS_PRIVATE_14 {
ibv_query_gid_type;
verbs_register_driver;
verbs_init_cq;
verbs_cleanup_cq;
ibv_init_wq;
ibv_cleanup_wq;
};
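
The new private symbols come in init/cleanup pairs.  A minimal sketch of
how a driver might pair ibv_init_wq()/ibv_cleanup_wq(), modeled on the
mlx5_create_wq()/mlx5_destroy_wq() hunks later in the diff
(driver_create_wq() and driver_destroy_wq() are hypothetical names):

#include <stdlib.h>
#include <infiniband/driver.h>	/* ibv_init_wq(), ibv_cleanup_wq() */

struct ibv_wq *driver_create_wq(struct ibv_context *ctx)
{
	struct ibv_wq *wq;

	wq = calloc(1, sizeof(*wq));
	if (!wq)
		return NULL;
	/*
	 * ibv_init_wq() returns 0 on success or an error number;
	 * on FreeBSD the mutex/cond init inside it can fail.
	 */
	if (ibv_init_wq(wq))
		goto err_free;
	wq->context = ctx;
	return wq;

err_free:
	free(wq);
	return NULL;
}

int driver_destroy_wq(struct ibv_wq *wq)
{
	ibv_cleanup_wq(wq);	/* destroys wq->cond and wq->mutex */
	free(wq);
	return 0;
}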


@@ -455,13 +455,23 @@ struct ibv_cq *__ibv_create_cq(struct ibv_context *context, int cqe, void *cq_co
struct ibv_comp_channel *channel, int comp_vector)
{
struct ibv_cq *cq;
int err = 0;
cq = context->ops.create_cq(context, cqe, channel, comp_vector);
if (cq)
verbs_init_cq(cq, context, channel, cq_context);
if (!cq)
return NULL;
err = verbs_init_cq(cq, context, channel, cq_context);
if (err)
goto err;
return cq;
err:
context->ops.destroy_cq(cq);
return NULL;
}
default_symver(__ibv_create_cq, ibv_create_cq);
@@ -529,16 +539,26 @@ struct ibv_srq *__ibv_create_srq(struct ibv_pd *pd,
return NULL;
srq = pd->context->ops.create_srq(pd, srq_init_attr);
if (srq) {
srq->context = pd->context;
srq->srq_context = srq_init_attr->srq_context;
srq->pd = pd;
srq->events_completed = 0;
pthread_mutex_init(&srq->mutex, NULL);
pthread_cond_init(&srq->cond, NULL);
}
if (!srq)
return NULL;
srq->context = pd->context;
srq->srq_context = srq_init_attr->srq_context;
srq->pd = pd;
srq->events_completed = 0;
if (pthread_mutex_init(&srq->mutex, NULL))
goto err;
if (pthread_cond_init(&srq->cond, NULL))
goto err_mutex;
return srq;
err_mutex:
pthread_mutex_destroy(&srq->mutex);
err:
pd->context->ops.destroy_srq(srq);
return NULL;
}
default_symver(__ibv_create_srq, ibv_create_srq);
@@ -558,6 +578,8 @@ default_symver(__ibv_query_srq, ibv_query_srq);
int __ibv_destroy_srq(struct ibv_srq *srq)
{
pthread_cond_destroy(&srq->cond);
pthread_mutex_destroy(&srq->mutex);
return srq->context->ops.destroy_srq(srq);
}
default_symver(__ibv_destroy_srq, ibv_destroy_srq);


@@ -2166,11 +2166,8 @@ static inline struct ibv_wq *ibv_create_wq(struct ibv_context *context,
}
wq = vctx->create_wq(context, wq_init_attr);
if (wq) {
if (wq)
wq->events_completed = 0;
pthread_mutex_init(&wq->mutex, NULL);
pthread_cond_init(&wq->cond, NULL);
}
return wq;
}


@@ -147,6 +147,7 @@ static int mlx4_init_context(struct verbs_device *v_device,
struct ibv_get_context cmd;
struct mlx4_alloc_ucontext_resp resp;
int i;
int ret;
struct mlx4_alloc_ucontext_resp_v3 resp_v3;
__u16 bf_reg_size;
struct mlx4_device *dev = to_mdev(&v_device->device);
@@ -185,15 +186,22 @@ static int mlx4_init_context(struct verbs_device *v_device,
for (i = 0; i < MLX4_PORTS_NUM; ++i)
context->port_query_cache[i].valid = 0;
pthread_mutex_init(&context->qp_table_mutex, NULL);
ret = pthread_mutex_init(&context->qp_table_mutex, NULL);
if (ret)
return ret;
for (i = 0; i < MLX4_QP_TABLE_SIZE; ++i)
context->qp_table[i].refcnt = 0;
for (i = 0; i < MLX4_NUM_DB_TYPE; ++i)
context->db_list[i] = NULL;
mlx4_init_xsrq_table(&context->xsrq_table, context->num_qps);
pthread_mutex_init(&context->db_list_mutex, NULL);
ret = mlx4_init_xsrq_table(&context->xsrq_table, context->num_qps);
if (ret)
goto err;
ret = pthread_mutex_init(&context->db_list_mutex, NULL);
if (ret)
goto err_xsrq;
context->uar = mmap(NULL, dev->page_size, PROT_WRITE,
MAP_SHARED, cmd_fd, 0);
@@ -212,14 +220,18 @@ static int mlx4_init_context(struct verbs_device *v_device,
} else {
context->bf_buf_size = bf_reg_size / 2;
context->bf_offset = 0;
pthread_spin_init(&context->bf_lock, PTHREAD_PROCESS_PRIVATE);
ret = pthread_spin_init(&context->bf_lock, PTHREAD_PROCESS_PRIVATE);
if (ret)
goto err_db_list;
}
} else {
context->bf_page = NULL;
context->bf_buf_size = 0;
}
pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
ret = pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
if (ret)
goto err_bf_lock;
ibv_ctx->ops = mlx4_ctx_ops;
context->hca_core_clock = NULL;
@@ -248,6 +260,17 @@ static int mlx4_init_context(struct verbs_device *v_device,
return 0;
err_bf_lock:
if (context->bf_buf_size)
pthread_spin_destroy(&context->bf_lock);
err_db_list:
pthread_mutex_destroy(&context->db_list_mutex);
err_xsrq:
mlx4_cleanup_xsrq_table(&context->xsrq_table);
err:
pthread_mutex_destroy(&context->qp_table_mutex);
return ret;
}
static void mlx4_uninit_context(struct verbs_device *v_device,
@@ -255,6 +278,12 @@ static void mlx4_uninit_context(struct verbs_device *v_device,
{
struct mlx4_context *context = to_mctx(ibv_ctx);
pthread_mutex_destroy(&context->qp_table_mutex);
mlx4_cleanup_xsrq_table(&context->xsrq_table);
pthread_mutex_destroy(&context->db_list_mutex);
pthread_spin_destroy(&context->bf_lock);
pthread_spin_destroy(&context->uar_lock);
munmap(context->uar, to_mdev(&v_device->device)->page_size);
if (context->bf_page)
munmap(context->bf_page, to_mdev(&v_device->device)->page_size);


@@ -414,7 +414,8 @@ int mlx4_destroy_srq(struct ibv_srq *srq);
int mlx4_destroy_xrc_srq(struct ibv_srq *srq);
int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
struct mlx4_srq *srq);
void mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size);
void mlx4_cleanup_xsrq_table(struct mlx4_xsrq_table *xsrq_table);
int mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size);
struct mlx4_srq *mlx4_find_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn);
int mlx4_store_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn,
struct mlx4_srq *srq);


@@ -172,14 +172,20 @@ int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
return 0;
}
void mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size)
void mlx4_cleanup_xsrq_table(struct mlx4_xsrq_table *xsrq_table)
{
pthread_mutex_destroy(&xsrq_table->mutex);
}
int mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size)
{
int ret;
memset(xsrq_table, 0, sizeof *xsrq_table);
xsrq_table->num_xsrq = size;
xsrq_table->shift = ffs(size) - 1 - MLX4_XSRQ_TABLE_BITS;
xsrq_table->mask = (1 << xsrq_table->shift) - 1;
pthread_mutex_init(&xsrq_table->mutex, NULL);
return pthread_mutex_init(&xsrq_table->mutex, NULL);
}
struct mlx4_srq *mlx4_find_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn)
@@ -257,7 +263,7 @@ struct ibv_srq *mlx4_create_xrc_srq(struct ibv_context *context,
srq->ext_srq = 1;
if (mlx4_alloc_srq_buf(attr_ex->pd, &attr_ex->attr, srq))
goto err;
goto err_spl;
srq->db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_RQ);
if (!srq->db)
@@ -290,6 +296,8 @@ struct ibv_srq *mlx4_create_xrc_srq(struct ibv_context *context,
err_free:
free(srq->wrid);
mlx4_free_buf(&srq->buf);
err_spl:
pthread_spin_destroy(&srq->lock);
err:
free(srq);
return NULL;
@@ -319,6 +327,7 @@ int mlx4_destroy_xrc_srq(struct ibv_srq *srq)
mlx4_free_db(mctx, MLX4_DB_TYPE_RQ, msrq->db);
mlx4_free_buf(&msrq->buf);
free(msrq->wrid);
pthread_spin_destroy(&msrq->lock);
free(msrq);
return 0;


@@ -497,7 +497,7 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
cq_attr->cqe = align_queue_size(cq_attr->cqe + 1);
if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cq_attr->cqe, mctx->cqe_size))
goto err;
goto err_spl;
cq->cqe_size = mctx->cqe_size;
cq->set_ci_db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
@@ -535,6 +535,9 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
err_buf:
mlx4_free_buf(&cq->buf);
err_spl:
pthread_spin_destroy(&cq->lock);
err:
free(cq);
@@ -631,6 +634,8 @@ int mlx4_destroy_cq(struct ibv_cq *cq)
if (ret)
return ret;
verbs_cleanup_cq(cq);
pthread_spin_destroy(&to_mcq(cq)->lock);
mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
mlx4_free_buf(&to_mcq(cq)->buf);
free(to_mcq(cq));
@@ -663,7 +668,7 @@ struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
srq->ext_srq = 0;
if (mlx4_alloc_srq_buf(pd, &attr->attr, srq))
goto err;
goto err_spl;
srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
if (!srq->db)
@@ -689,6 +694,9 @@ struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
free(srq->wrid);
mlx4_free_buf(&srq->buf);
err_spl:
pthread_spin_destroy(&srq->lock);
err:
free(srq);
@@ -738,6 +746,7 @@ int mlx4_destroy_srq(struct ibv_srq *srq)
mlx4_free_db(to_mctx(srq->context), MLX4_DB_TYPE_RQ, to_msrq(srq)->db);
mlx4_free_buf(&to_msrq(srq)->buf);
free(to_msrq(srq)->wrid);
pthread_spin_destroy(&to_msrq(srq)->lock);
free(to_msrq(srq));
return 0;
@@ -841,14 +850,15 @@ struct ibv_qp *mlx4_create_qp_ex(struct ibv_context *context,
mlx4_init_qp_indices(qp);
if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE))
goto err_free;
if (pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
goto err_sq_spl;
if (attr->cap.max_recv_sge) {
qp->db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_RQ);
if (!qp->db)
goto err_free;
goto err_rq_spl;
*qp->db = 0;
cmd.db_addr = (uintptr_t) qp->db;
@@ -903,7 +913,10 @@ struct ibv_qp *mlx4_create_qp_ex(struct ibv_context *context,
pthread_mutex_unlock(&to_mctx(context)->qp_table_mutex);
if (attr->cap.max_recv_sge)
mlx4_free_db(to_mctx(context), MLX4_DB_TYPE_RQ, qp->db);
err_rq_spl:
pthread_spin_destroy(&qp->rq.lock);
err_sq_spl:
pthread_spin_destroy(&qp->sq.lock);
err_free:
free(qp->sq.wrid);
if (qp->rq.wqe_cnt)
@@ -1108,6 +1121,9 @@ int mlx4_destroy_qp(struct ibv_qp *ibqp)
mlx4_unlock_cqs(ibqp);
pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
pthread_spin_destroy(&qp->rq.lock);
pthread_spin_destroy(&qp->sq.lock);
if (qp->rq.wqe_cnt) {
mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);
free(qp->rq.wrid);


@@ -850,9 +850,12 @@ static int mlx5_init_context(struct verbs_device *vdev,
context->cmds_supp_uhw = resp.cmds_supp_uhw;
context->vendor_cap_flags = 0;
pthread_mutex_init(&context->qp_table_mutex, NULL);
pthread_mutex_init(&context->srq_table_mutex, NULL);
pthread_mutex_init(&context->uidx_table_mutex, NULL);
if (pthread_mutex_init(&context->qp_table_mutex, NULL))
goto err_free_bf;
if (pthread_mutex_init(&context->srq_table_mutex, NULL))
goto err_qp_table_mutex;
if (pthread_mutex_init(&context->uidx_table_mutex, NULL))
goto err_srq_table_mutex;
for (i = 0; i < MLX5_QP_TABLE_SIZE; ++i)
context->qp_table[i].refcnt = 0;
@@ -861,7 +864,8 @@ static int mlx5_init_context(struct verbs_device *vdev,
context->db_list = NULL;
pthread_mutex_init(&context->db_list_mutex, NULL);
if (pthread_mutex_init(&context->db_list_mutex, NULL))
goto err_uidx_table_mutex;
num_sys_page_map = context->tot_uuars / (context->num_uars_per_page * MLX5_NUM_NON_FP_BFREGS_PER_UAR);
for (i = 0; i < num_sys_page_map; ++i) {
@@ -872,7 +876,7 @@ static int mlx5_init_context(struct verbs_device *vdev,
cmd_fd, page_size * offset);
if (context->uar[i] == MAP_FAILED) {
context->uar[i] = NULL;
goto err_free_bf;
goto err_db_list_mutex;
}
}
@@ -883,7 +887,8 @@ static int mlx5_init_context(struct verbs_device *vdev,
context->bfs[bfi].reg = context->uar[i] + MLX5_ADAPTER_PAGE_SIZE * j +
MLX5_BF_OFFSET + k * context->bf_reg_size;
context->bfs[bfi].need_lock = need_uuar_lock(context, bfi);
mlx5_spinlock_init(&context->bfs[bfi].lock);
if (mlx5_spinlock_init(&context->bfs[bfi].lock))
goto err_bfs_spl;
context->bfs[bfi].offset = 0;
if (bfi)
context->bfs[bfi].buf_size = context->bf_reg_size / 2;
@@ -900,13 +905,15 @@ static int mlx5_init_context(struct verbs_device *vdev,
mlx5_map_internal_clock(mdev, ctx);
}
mlx5_spinlock_init(&context->lock32);
if (mlx5_spinlock_init(&context->lock32))
goto err_bfs_spl;
context->prefer_bf = get_always_bf();
context->shut_up_bf = get_shut_up_bf();
mlx5_read_env(&vdev->device, context);
mlx5_spinlock_init(&context->hugetlb_lock);
if (mlx5_spinlock_init(&context->hugetlb_lock))
goto err_32_spl;
TAILQ_INIT(&context->hugetlb_list);
context->ibv_ctx.ops = mlx5_ctx_ops;
@@ -944,6 +951,31 @@ static int mlx5_init_context(struct verbs_device *vdev,
return 0;
err_32_spl:
mlx5_spinlock_destroy(&context->lock32);
err_bfs_spl:
for (i = 0; i < num_sys_page_map; i++) {
for (j = 0; j < context->num_uars_per_page; j++) {
for (k = 0; k < NUM_BFREGS_PER_UAR; k++) {
bfi = (i * context->num_uars_per_page + j) * NUM_BFREGS_PER_UAR + k;
mlx5_spinlock_destroy(&context->bfs[bfi].lock);
}
}
}
err_db_list_mutex:
pthread_mutex_destroy(&context->db_list_mutex);
err_uidx_table_mutex:
pthread_mutex_destroy(&context->uidx_table_mutex);
err_srq_table_mutex:
pthread_mutex_destroy(&context->srq_table_mutex);
err_qp_table_mutex:
pthread_mutex_destroy(&context->qp_table_mutex);
err_free_bf:
free(context->bfs);
@@ -962,6 +994,26 @@ static void mlx5_cleanup_context(struct verbs_device *device,
struct mlx5_context *context = to_mctx(ibctx);
int page_size = to_mdev(ibctx->device)->page_size;
int i;
int j;
int k;
int bfi;
int num_sys_page_map;
num_sys_page_map = context->tot_uuars / (context->num_uars_per_page * MLX5_NUM_NON_FP_BFREGS_PER_UAR);
for (i = 0; i < num_sys_page_map; i++) {
for (j = 0; j < context->num_uars_per_page; j++) {
for (k = 0; k < NUM_BFREGS_PER_UAR; k++) {
bfi = (i * context->num_uars_per_page + j) * NUM_BFREGS_PER_UAR + k;
mlx5_spinlock_destroy(&context->bfs[bfi].lock);
}
}
}
mlx5_spinlock_destroy(&context->hugetlb_lock);
mlx5_spinlock_destroy(&context->lock32);
pthread_mutex_destroy(&context->db_list_mutex);
pthread_mutex_destroy(&context->uidx_table_mutex);
pthread_mutex_destroy(&context->srq_table_mutex);
pthread_mutex_destroy(&context->qp_table_mutex);
free(context->bfs);
for (i = 0; i < MLX5_MAX_UARS; ++i) {


@@ -517,14 +517,23 @@ struct ibv_cq_ex *mlx5dv_create_cq(struct ibv_context *context,
struct mlx5dv_cq_init_attr *mlx5_cq_attr)
{
struct ibv_cq_ex *cq;
int err = 0;
cq = create_cq(context, cq_attr, MLX5_CQ_FLAGS_EXTENDED, mlx5_cq_attr);
if (!cq)
return NULL;
verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
err = verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
cq_attr->channel, cq_attr->cq_context);
if (err)
goto err;
return cq;
err:
context->ops.destroy_cq(ibv_cq_ex_to_cq(cq));
return NULL;
}
int mlx5_resize_cq(struct ibv_cq *ibcq, int cqe)
@@ -598,13 +607,16 @@ int mlx5_resize_cq(struct ibv_cq *ibcq, int cqe)
int mlx5_destroy_cq(struct ibv_cq *cq)
{
int ret;
struct mlx5_cq *mcq = to_mcq(cq);
ret = ibv_cmd_destroy_cq(cq);
if (ret)
return ret;
verbs_cleanup_cq(cq);
mlx5_free_db(to_mctx(cq->context), to_mcq(cq)->dbrec);
mlx5_free_cq_buf(to_mctx(cq->context), to_mcq(cq)->active_buf);
mlx5_spinlock_destroy(&mcq->lock);
free(to_mcq(cq));
return 0;
@@ -639,7 +651,7 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", __func__, __LINE__,
attr->attr.max_wr, ctx->max_srq_recv_wr);
errno = EINVAL;
goto err;
goto err_spl;
}
/*
@@ -652,7 +664,7 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", __func__, __LINE__,
attr->attr.max_wr, ctx->max_srq_recv_wr);
errno = EINVAL;
goto err;
goto err_spl;
}
srq->max = align_queue_size(attr->attr.max_wr + 1);
@@ -661,7 +673,7 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
if (mlx5_alloc_srq_buf(pd->context, srq)) {
fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
goto err;
goto err_spl;
}
srq->db = mlx5_alloc_dbrec(to_mctx(pd->context));
@@ -708,6 +720,9 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
free(srq->wrid);
mlx5_free_buf(&srq->buf);
err_spl:
mlx5_spinlock_destroy(&srq->lock);
err:
free(srq);
@@ -749,6 +764,7 @@ int mlx5_destroy_srq(struct ibv_srq *srq)
mlx5_free_db(ctx, msrq->db);
mlx5_free_buf(&msrq->buf);
free(msrq->wrid);
mlx5_spinlock_destroy(&msrq->lock);
free(msrq);
return 0;
@@ -1306,14 +1322,16 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
mlx5_init_qp_indices(qp);
if (mlx5_spinlock_init(&qp->sq.lock) ||
mlx5_spinlock_init(&qp->rq.lock))
if (mlx5_spinlock_init(&qp->sq.lock))
goto err_free_qp_buf;
if (mlx5_spinlock_init(&qp->rq.lock))
goto err_sq_spl;
qp->db = mlx5_alloc_dbrec(ctx);
if (!qp->db) {
mlx5_dbg(fp, MLX5_DBG_QP, "\n");
goto err_free_qp_buf;
goto err_rq_spl;
}
qp->db[MLX5_RCV_DBR] = 0;
@@ -1398,6 +1416,12 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
err_rq_db:
mlx5_free_db(to_mctx(context), qp->db);
err_rq_spl:
mlx5_spinlock_destroy(&qp->rq.lock);
err_sq_spl:
mlx5_spinlock_destroy(&qp->sq.lock);
err_free_qp_buf:
mlx5_free_qp_buf(qp);
@@ -1510,6 +1534,8 @@ int mlx5_destroy_qp(struct ibv_qp *ibqp)
mlx5_clear_uidx(ctx, qp->rsc.rsn);
mlx5_free_db(ctx, qp->db);
mlx5_spinlock_destroy(&qp->rq.lock);
mlx5_spinlock_destroy(&qp->sq.lock);
mlx5_free_qp_buf(qp);
free:
free(qp);
@@ -1823,7 +1849,7 @@ mlx5_create_xrc_srq(struct ibv_context *context,
__func__, __LINE__, attr->attr.max_wr,
ctx->max_srq_recv_wr);
errno = EINVAL;
goto err;
goto err_spl;
}
/*
@@ -1837,7 +1863,7 @@ mlx5_create_xrc_srq(struct ibv_context *context,
__func__, __LINE__, attr->attr.max_wr,
ctx->max_srq_recv_wr);
errno = EINVAL;
goto err;
goto err_spl;
}
msrq->max = align_queue_size(attr->attr.max_wr + 1);
@@ -1846,7 +1872,7 @@ mlx5_create_xrc_srq(struct ibv_context *context,
if (mlx5_alloc_srq_buf(context, msrq)) {
fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
goto err;
goto err_spl;
}
msrq->db = mlx5_alloc_dbrec(ctx);
@@ -1912,6 +1938,9 @@ mlx5_create_xrc_srq(struct ibv_context *context,
free(msrq->wrid);
mlx5_free_buf(&msrq->buf);
err_spl:
mlx5_spinlock_destroy(&msrq->lock);
err:
free(msrq);
@@ -2058,9 +2087,13 @@ struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
goto err;
}
ret = ibv_init_wq(&rwq->wq);
if (ret < 0)
goto err;
rwq->buf_size = ret;
if (mlx5_alloc_rwq_buf(context, rwq, ret))
goto err;
goto err_cleanup_wq;
mlx5_init_rwq_indices(rwq);
@@ -2069,7 +2102,7 @@ struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
rwq->db = mlx5_alloc_dbrec(ctx);
if (!rwq->db)
goto err_free_rwq_buf;
goto err_spl;
rwq->db[MLX5_RCV_DBR] = 0;
rwq->db[MLX5_SND_DBR] = 0;
@@ -2104,8 +2137,12 @@ struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
mlx5_clear_uidx(ctx, cmd.drv.user_index);
err_free_db_rec:
mlx5_free_db(to_mctx(context), rwq->db);
err_spl:
mlx5_spinlock_destroy(&rwq->rq.lock);
err_free_rwq_buf:
mlx5_free_rwq_buf(rwq, context);
err_cleanup_wq:
ibv_cleanup_wq(&rwq->wq);
err:
free(rwq);
return NULL;
@@ -2150,7 +2187,9 @@ int mlx5_destroy_wq(struct ibv_wq *wq)
mlx5_spin_unlock(&to_mcq(wq->cq)->lock);
mlx5_clear_uidx(to_mctx(wq->context), rwq->rsc.rsn);
mlx5_free_db(to_mctx(wq->context), rwq->db);
mlx5_spinlock_destroy(&rwq->rq.lock);
mlx5_free_rwq_buf(rwq, wq->context);
ibv_cleanup_wq(&rwq->wq);
free(rwq);
return 0;


@@ -502,7 +502,8 @@ static struct cma_id_private *ucma_alloc_id(struct rdma_event_channel *channel,
id_priv->id.channel = channel;
}
pthread_mutex_init(&id_priv->mut, NULL);
if (pthread_mutex_init(&id_priv->mut, NULL))
goto err;
if (pthread_cond_init(&id_priv->cond, NULL))
goto err;