mirror of
https://github.com/freebsd/freebsd-src
synced 2024-10-15 12:54:27 +00:00
Userspace library for Chelsio's Terminator 5 (T5) based iWARP RNICs (pretty
much every T5 card that does _not_ have "-SO" in its name is RDMA capable).
This plugs into the OFED verbs framework and allows userspace RDMA
applications to run over T5 RNICs. Tested with rping.

Obtained from:	Chelsio Communications
Relnotes:	Yes
Sponsored by:	Chelsio Communications
This commit is contained in:
parent
9aa9738938
commit
bd4d7b4fcb
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=273806
1
contrib/ofed/libcxgb4/AUTHORS
Normal file
1
contrib/ofed/libcxgb4/AUTHORS
Normal file
|
@ -0,0 +1 @@
|
|||
Chelsio Communications, Inc.
|
29
contrib/ofed/libcxgb4/COPYING
Normal file
29
contrib/ofed/libcxgb4/COPYING
Normal file
|
@ -0,0 +1,29 @@
|
|||
Copyright (c) 2010-2013. Chelsio Communications. All rights reserved.
|
||||
|
||||
This software is available to you under a choice of one of two
|
||||
licenses. You may choose to be licensed under the terms of the GNU
|
||||
General Public License (GPL) Version 2, available from the file
|
||||
COPYING in the main directory of this source tree, or the
|
||||
OpenIB.org BSD license below:
|
||||
|
||||
Redistribution and use in source and binary forms, with or
|
||||
without modification, are permitted provided that the following
|
||||
conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials
|
||||
provided with the distribution.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
1054
contrib/ofed/libcxgb4/ChangeLog
Normal file
1054
contrib/ofed/libcxgb4/ChangeLog
Normal file
File diff suppressed because it is too large
Load diff
19
contrib/ofed/libcxgb4/README
Normal file
19
contrib/ofed/libcxgb4/README
Normal file
|
@ -0,0 +1,19 @@
|
|||
Introduction
|
||||
============
|
||||
|
||||
libcxgb4 is a userspace driver for the Chelsio T5 iWARP RNICs. It works
|
||||
as a plug-in module for libibverbs that allows programs to use Chelsio
|
||||
RNICs directly from userspace. See the libibverbs package for more
|
||||
information.
|
||||
|
||||
Using libcxgb4
|
||||
===================
|
||||
|
||||
libcxgb4 will be loaded and used automatically by programs linked with
|
||||
libibverbs. The iw_cxgbe kernel module must be loaded for Chelsio RNIC
|
||||
devices to be detected and used.
|
||||
|
||||
Supported Hardware
|
||||
==================
|
||||
|
||||
libcxgb4 supports the Chelsio T5 family of adapters.
|
1
contrib/ofed/libcxgb4/cxgb4.driver
Normal file
1
contrib/ofed/libcxgb4/cxgb4.driver
Normal file
|
@ -0,0 +1 @@
|
|||
driver cxgb4
|
753
contrib/ofed/libcxgb4/src/cq.c
Normal file
753
contrib/ofed/libcxgb4/src/cq.c
Normal file
|
@ -0,0 +1,753 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#if HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif /* HAVE_CONFIG_H */
|
||||
|
||||
#include <stdio.h>
|
||||
#include <syslog.h>
|
||||
#include <pthread.h>
|
||||
#include <sys/errno.h>
|
||||
#include <netinet/in.h>
|
||||
#include <infiniband/opcode.h>
|
||||
#include "libcxgb4.h"
|
||||
#include "cxgb4-abi.h"
|
||||
|
||||
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
|
||||
{
|
||||
struct t4_cqe cqe;
|
||||
|
||||
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
|
||||
wq, cq, cq->sw_cidx, cq->sw_pidx);
|
||||
memset(&cqe, 0, sizeof(cqe));
|
||||
cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
|
||||
V_CQE_OPCODE(FW_RI_SEND) |
|
||||
V_CQE_TYPE(0) |
|
||||
V_CQE_SWCQE(1) |
|
||||
V_CQE_QPID(wq->sq.qid));
|
||||
cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
|
||||
cq->sw_queue[cq->sw_pidx] = cqe;
|
||||
t4_swcq_produce(cq);
|
||||
}
|
||||
|
||||
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
|
||||
{
|
||||
int flushed = 0;
|
||||
int in_use = wq->rq.in_use - count;
|
||||
|
||||
BUG_ON(in_use < 0);
|
||||
PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
|
||||
wq, cq, wq->rq.in_use, count);
|
||||
while (in_use--) {
|
||||
insert_recv_cqe(wq, cq);
|
||||
flushed++;
|
||||
}
|
||||
return flushed;
|
||||
}
|
||||
|
||||
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
|
||||
struct t4_swsqe *swcqe)
|
||||
{
|
||||
struct t4_cqe cqe;
|
||||
|
||||
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
|
||||
wq, cq, cq->sw_cidx, cq->sw_pidx);
|
||||
memset(&cqe, 0, sizeof(cqe));
|
||||
cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
|
||||
V_CQE_OPCODE(swcqe->opcode) |
|
||||
V_CQE_TYPE(1) |
|
||||
V_CQE_SWCQE(1) |
|
||||
V_CQE_QPID(wq->sq.qid));
|
||||
CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
|
||||
cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
|
||||
cq->sw_queue[cq->sw_pidx] = cqe;
|
||||
t4_swcq_produce(cq);
|
||||
}
|
||||
|
||||
/* Forward declaration: defined later in this file, but needed by the
 * SQ flush path when the flushed WR is the oldest outstanding read. */
static void advance_oldest_read(struct t4_wq *wq);

/*
 * Flush the send queue: generate a software flush CQE for every SQ WR
 * from sq.flush_cidx up to (but not including) sq.pidx, marking each
 * software SQE as flushed.  Used when the QP moves to an error state
 * and pending SQ work must be completed with a flush status.
 */
void c4iw_flush_sq(struct c4iw_qp *qhp)
{
	unsigned short flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibv_qp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	/* flush_cidx == -1 means "not yet initialized": start flushing
	 * at the current consumer index. */
	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	/* Walk every outstanding SQE up to the producer index. */
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		/* A WR must never be flushed twice. */
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		/* If this WR was the oldest outstanding read request,
		 * advance the oldest-read pointer so read-response
		 * matching stays consistent. */
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		/* Wrap around the ring. */
		if (++idx == wq->sq.size)
			idx = 0;
	}
	/* Advance flush_cidx by the number flushed, handling wrap. */
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
}
|
||||
|
||||
/*
 * Walk the software SQ from flush_cidx and move any signaled WRs whose
 * completions have already arrived (swsqe->complete set) into the
 * software CQ, preserving in-order delivery.  Unsignaled WRs are
 * skipped (they produce no CQE); the walk stops at the first signaled
 * WR that has not yet completed.
 */
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	unsigned short cidx;

	/* flush_cidx == -1 means "not yet initialized". */
	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx >= wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			/* Unsignaled WR: nothing to deliver, skip it. */
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			/* Completed WRs must not already be flushed. */
			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);

			/* Tag the CQE as software-generated before
			 * handing it to the SW CQ. */
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			/* Record progress so a later flush resumes here. */
			wq->sq.flush_cidx = cidx;
		} else
			/* Signaled but not yet completed: stop here so
			 * completions are never delivered out of order. */
			break;
	}
}
|
||||
|
||||
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
|
||||
struct t4_cqe *read_cqe)
|
||||
{
|
||||
read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
|
||||
read_cqe->len = ntohl(wq->sq.oldest_read->read_len);
|
||||
read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
|
||||
V_CQE_SWCQE(SW_CQE(hw_cqe)) |
|
||||
V_CQE_OPCODE(FW_RI_READ_REQ) |
|
||||
V_CQE_TYPE(1));
|
||||
read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
|
||||
}
|
||||
|
||||
static void advance_oldest_read(struct t4_wq *wq)
|
||||
{
|
||||
|
||||
u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
|
||||
|
||||
if (rptr == wq->sq.size)
|
||||
rptr = 0;
|
||||
while (rptr != wq->sq.pidx) {
|
||||
wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
|
||||
|
||||
if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
|
||||
return;
|
||||
if (++rptr == wq->sq.size)
|
||||
rptr = 0;
|
||||
}
|
||||
wq->sq.oldest_read = NULL;
|
||||
}
|
||||
|
||||
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		/* TERMINATE CQEs are never delivered to the consumer. */
		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/*
			 * If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1) {
				syslog(LOG_CRIT, "%s: got egress error in \
read-response, dropping!\n", __func__);
				goto next_cqe;
			}

			/*
			 * drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if its a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			int idx = CQE_WRID_SQ_IDX(hw_cqe);

			BUG_ON(idx >= qhp->wq.sq.size);
			/* Park the completion on its software SQE; the
			 * in-order flush below delivers it. */
			swsqe = &qhp->wq.sq.sw_sq[idx];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			/* RQ completion: copy straight into the SW CQ,
			 * tagging it as software-generated. */
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
	next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}
|
||||
|
||||
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
|
||||
{
|
||||
if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
|
||||
return 0;
|
||||
|
||||
if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
|
||||
return 0;
|
||||
|
||||
if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
|
||||
return 0;
|
||||
|
||||
if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
|
||||
{
|
||||
struct t4_cqe *cqe;
|
||||
u32 ptr;
|
||||
|
||||
*count = 0;
|
||||
ptr = cq->sw_cidx;
|
||||
BUG_ON(ptr >= cq->size);
|
||||
while (ptr != cq->sw_pidx) {
|
||||
cqe = &cq->sw_queue[ptr];
|
||||
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
|
||||
(CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
|
||||
(*count)++;
|
||||
if (++ptr == cq->size)
|
||||
ptr = 0;
|
||||
}
|
||||
PDBG("%s cq %p count %d\n", __func__, cq, *count);
|
||||
}
|
||||
|
||||
static void dump_cqe(void *arg)
|
||||
{
|
||||
u64 *p = arg;
|
||||
syslog(LOG_NOTICE, "cxgb4 err cqe %016lx %016lx %016lx %016lx\n",
|
||||
be64_to_cpu(p[0]),
|
||||
be64_to_cpu(p[1]),
|
||||
be64_to_cpu(p[2]),
|
||||
be64_to_cpu(p[3]));
|
||||
}
|
||||
|
||||
/*
|
||||
* poll_cq
|
||||
*
|
||||
* Caller must:
|
||||
* check the validity of the first CQE,
|
||||
* supply the wq assicated with the qpid.
|
||||
*
|
||||
* credit: cq credit to return to sge.
|
||||
* cqe_flushed: 1 iff the CQE is flushed.
|
||||
* cqe: copy of the polled CQE.
|
||||
*
|
||||
* return value:
|
||||
* 0 CQE returned ok.
|
||||
* -EAGAIN CQE skipped, try again.
|
||||
* -EOVERFLOW CQ overflow detected.
|
||||
*/
|
||||
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
u8 *cqe_flushed, u64 *cookie, u32 *credit)
|
||||
{
|
||||
int ret = 0;
|
||||
struct t4_cqe *hw_cqe, read_cqe;
|
||||
|
||||
*cqe_flushed = 0;
|
||||
*credit = 0;
|
||||
|
||||
ret = t4_next_cqe(cq, &hw_cqe);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
|
||||
" opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
|
||||
__func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
|
||||
CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
|
||||
CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
|
||||
CQE_WRID_LOW(hw_cqe));
|
||||
|
||||
/*
|
||||
* skip cqe's not affiliated with a QP.
|
||||
*/
|
||||
if (wq == NULL) {
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* Gotta tweak READ completions:
|
||||
* 1) the cqe doesn't contain the sq_wptr from the wr.
|
||||
* 2) opcode not reflected from the wr.
|
||||
* 3) read_len not reflected from the wr.
|
||||
* 4) T4 HW (for now) inserts target read response failures which
|
||||
* need to be skipped.
|
||||
*/
|
||||
if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
|
||||
|
||||
/*
|
||||
* If we have reached here because of async
|
||||
* event or other error, and have egress error
|
||||
* then drop
|
||||
*/
|
||||
if (CQE_TYPE(hw_cqe) == 1) {
|
||||
syslog(LOG_CRIT, "%s: got egress error in \
|
||||
read-response, dropping!\n", __func__);
|
||||
if (CQE_STATUS(hw_cqe))
|
||||
t4_set_wq_in_error(wq);
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is an unsolicited read response, then the read
|
||||
* was generated by the kernel driver as part of peer-2-peer
|
||||
* connection setup, or a target read response failure.
|
||||
* So skip the completion.
|
||||
*/
|
||||
if (CQE_WRID_STAG(hw_cqe) == 1) {
|
||||
if (CQE_STATUS(hw_cqe))
|
||||
t4_set_wq_in_error(wq);
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* Eat completions for unsignaled read WRs.
|
||||
*/
|
||||
if (!wq->sq.oldest_read->signaled) {
|
||||
advance_oldest_read(wq);
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't write to the HWCQ, so create a new read req CQE
|
||||
* in local memory.
|
||||
*/
|
||||
create_read_req_cqe(wq, hw_cqe, &read_cqe);
|
||||
hw_cqe = &read_cqe;
|
||||
advance_oldest_read(wq);
|
||||
}
|
||||
|
||||
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
|
||||
*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
|
||||
wq->error = 1;
|
||||
|
||||
if (!*cqe_flushed && CQE_STATUS(hw_cqe))
|
||||
dump_cqe(hw_cqe);
|
||||
|
||||
BUG_ON((cqe_flushed == 0) && !SW_CQE(hw_cqe));
|
||||
goto proc_cqe;
|
||||
}
|
||||
|
||||
if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* RECV completion.
|
||||
*/
|
||||
if (RQ_TYPE(hw_cqe)) {
|
||||
|
||||
/*
|
||||
* HW only validates 4 bits of MSN. So we must validate that
|
||||
* the MSN in the SEND is the next expected MSN. If its not,
|
||||
* then we complete this with T4_ERR_MSN and mark the wq in
|
||||
* error.
|
||||
*/
|
||||
|
||||
if (t4_rq_empty(wq)) {
|
||||
t4_set_wq_in_error(wq);
|
||||
ret = -EAGAIN;
|
||||
goto skip_cqe;
|
||||
}
|
||||
if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
|
||||
t4_set_wq_in_error(wq);
|
||||
hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
|
||||
goto proc_cqe;
|
||||
}
|
||||
goto proc_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we get here its a send completion.
|
||||
*
|
||||
* Handle out of order completion. These get stuffed
|
||||
* in the SW SQ. Then the SW SQ is walked to move any
|
||||
* now in-order completions into the SW CQ. This handles
|
||||
* 2 cases:
|
||||
* 1) reaping unsignaled WRs when the first subsequent
|
||||
* signaled WR is completed.
|
||||
* 2) out of order read completions.
|
||||
*/
|
||||
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
|
||||
struct t4_swsqe *swsqe;
|
||||
int idx = CQE_WRID_SQ_IDX(hw_cqe);
|
||||
|
||||
PDBG("%s out of order completion going in sw_sq at idx %u\n",
|
||||
__func__, idx);
|
||||
BUG_ON(idx >= wq->sq.size);
|
||||
swsqe = &wq->sq.sw_sq[idx];
|
||||
swsqe->cqe = *hw_cqe;
|
||||
swsqe->complete = 1;
|
||||
ret = -EAGAIN;
|
||||
goto flush_wq;
|
||||
}
|
||||
|
||||
proc_cqe:
|
||||
*cqe = *hw_cqe;
|
||||
|
||||
/*
|
||||
* Reap the associated WR(s) that are freed up with this
|
||||
* completion.
|
||||
*/
|
||||
if (SQ_TYPE(hw_cqe)) {
|
||||
int idx = CQE_WRID_SQ_IDX(hw_cqe);
|
||||
BUG_ON(idx >= wq->sq.size);
|
||||
|
||||
/*
|
||||
* Account for any unsignaled completions completed by
|
||||
* this signaled completion. In this case, cidx points
|
||||
* to the first unsignaled one, and idx points to the
|
||||
* signaled one. So adjust in_use based on this delta.
|
||||
* if this is not completing any unsigned wrs, then the
|
||||
* delta will be 0. Handle wrapping also!
|
||||
*/
|
||||
if (idx < wq->sq.cidx)
|
||||
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
|
||||
else
|
||||
wq->sq.in_use -= idx - wq->sq.cidx;
|
||||
BUG_ON(wq->sq.in_use <= 0 || wq->sq.in_use >= wq->sq.size);
|
||||
|
||||
wq->sq.cidx = (u16)idx;
|
||||
PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
|
||||
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
|
||||
t4_sq_consume(wq);
|
||||
} else {
|
||||
PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
|
||||
BUG_ON(wq->rq.cidx >= wq->rq.size);
|
||||
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
|
||||
BUG_ON(t4_rq_empty(wq));
|
||||
t4_rq_consume(wq);
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
flush_wq:
|
||||
/*
|
||||
* Flush any completed cqes that are now in-order.
|
||||
*/
|
||||
flush_completed_wrs(wq, cq);
|
||||
|
||||
skip_cqe:
|
||||
if (SW_CQE(hw_cqe)) {
|
||||
PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
|
||||
__func__, cq, cq->cqid, cq->sw_cidx);
|
||||
t4_swcq_consume(cq);
|
||||
} else {
|
||||
PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
|
||||
__func__, cq, cq->cqid, cq->cidx);
|
||||
t4_hwcq_consume(cq);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ibv_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	/* Peek at the next CQE (software queue first, then hardware). */
	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret) {
#ifdef STALL_DETECTION
		/* Debug aid: if the CQ has stayed empty longer than
		 * stall_to seconds, dump driver state once per CQ. */
		if (ret == -ENODATA && stall_to && !chp->dumped) {
			struct timeval t;

			gettimeofday(&t, NULL);
			if ((t.tv_sec - chp->time.tv_sec) > stall_to) {
				dump_state();
				chp->dumped = 1;
			}
		}
#endif
		return ret;
	}

#ifdef STALL_DETECTION
	/* Record the time of the last successful poll. */
	gettimeofday(&chp->time, NULL);
#endif

	/* Look up the QP owning this CQE and lock it across poll_cq()
	 * so the WQ state cannot change underneath us.  CQEs without
	 * an associated QP are passed with wq == NULL and skipped. */
	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		pthread_spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	INC_STAT(cqe);
	/* Translate the raw T4 CQE into the generic ibv_wc. */
	wc->wr_id = cookie;
	wc->qp_num = qhp->wq.sq.qid;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__,
	     CQE_QPID(&cqe), CQE_TYPE(&cqe),
	     CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_WRID_HI(&cqe),
	     CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		/* RQ completion: byte_len only valid on success. */
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IBV_WC_RECV;
	} else {
		/* SQ completion: map the firmware opcode to verbs. */
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IBV_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IBV_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IBV_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IBV_WC_BIND_MW;
			break;
		default:
			PDBG("Unexpected opcode %d "
			     "in the CQE received for QPID=0x%0x\n",
			     CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	/* Map the T4 hardware status to a verbs completion status. */
	if (cqe_flushed)
		wc->status = IBV_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IBV_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IBV_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IBV_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IBV_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IBV_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IBV_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IBV_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IBV_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IBV_WC_WR_FLUSH_ERR;
			break;
		default:
			PDBG("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			     CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
	/* Log real application errors; flushes are expected noise. */
	if (wc->status && wc->status != IBV_WC_WR_FLUSH_ERR)
		syslog(LOG_NOTICE, "cxgb4 app err cqid %u qpid %u "
		       "type %u opcode %u status 0x%x\n",
		       chp->cq.cqid, CQE_QPID(&cqe), CQE_TYPE(&cqe),
		       CQE_OPCODE(&cqe), CQE_STATUS(&cqe));
out:
	/* wq is non-NULL iff we took qhp->lock above. */
	if (wq)
		pthread_spin_unlock(&qhp->lock);
	return ret;
}
|
||||
|
||||
int c4iw_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
|
||||
{
|
||||
struct c4iw_cq *chp;
|
||||
int npolled;
|
||||
int err = 0;
|
||||
|
||||
chp = to_c4iw_cq(ibcq);
|
||||
|
||||
if (t4_cq_in_error(&chp->cq)) {
|
||||
t4_reset_cq_in_error(&chp->cq);
|
||||
c4iw_flush_qps(chp->rhp);
|
||||
}
|
||||
|
||||
if (!num_entries)
|
||||
return t4_cq_notempty(&chp->cq);
|
||||
|
||||
pthread_spin_lock(&chp->lock);
|
||||
for (npolled = 0; npolled < num_entries; ++npolled) {
|
||||
do {
|
||||
err = c4iw_poll_cq_one(chp, wc + npolled);
|
||||
} while (err == -EAGAIN);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
pthread_spin_unlock(&chp->lock);
|
||||
return !err || err == -ENODATA ? npolled : err;
|
||||
}
|
||||
|
||||
int c4iw_arm_cq(struct ibv_cq *ibcq, int solicited)
|
||||
{
|
||||
struct c4iw_cq *chp;
|
||||
int ret;
|
||||
|
||||
INC_STAT(arm);
|
||||
chp = to_c4iw_cq(ibcq);
|
||||
pthread_spin_lock(&chp->lock);
|
||||
ret = t4_arm_cq(&chp->cq, solicited);
|
||||
pthread_spin_unlock(&chp->lock);
|
||||
return ret;
|
||||
}
|
95
contrib/ofed/libcxgb4/src/cxgb4-abi.h
Normal file
95
contrib/ofed/libcxgb4/src/cxgb4-abi.h
Normal file
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef IWCH_ABI_H
#define IWCH_ABI_H

#include <infiniband/kern-abi.h>

/*
 * Userspace/kernel ABI for the cxgb4 iWARP provider.  Each struct
 * extends a generic libibverbs response with provider-specific fields
 * returned by the iw_cxgbe kernel driver.  Field order and sizes must
 * match the kernel side exactly; do not reorder or repack.
 */

/* Response to context allocation.  NOTE(review): the *_key fields
 * appear to be mmap() offsets/keys for the shared status page —
 * confirm against iw_cxgbe. */
struct c4iw_alloc_ucontext_resp {
	struct ibv_get_context_resp ibv_resp;
	__u64 status_page_key;
	__u32 status_page_size;
	__u32 reserved;		/* explicit pad: keeps struct 8-byte aligned */
};

/* Response to PD allocation: the hardware protection-domain id. */
struct c4iw_alloc_pd_resp {
	struct ibv_alloc_pd_resp ibv_resp;
	uint32_t pdid;
};

/* Response to CQ creation: queue id, sizes, and mmap keys for the CQ
 * memory and its GTS (doorbell) region. */
struct c4iw_create_cq_resp {
	struct ibv_create_cq_resp ibv_resp;
	__u64 key;
	__u64 gts_key;
	__u64 memsize;
	__u32 cqid;
	__u32 size;
	__u32 qid_mask;
	__u32 reserved;		/* explicit pad: keeps struct 8-byte aligned */
};

/* Flags for c4iw_create_qp_resp.flags. */
enum {
	C4IW_QPF_ONCHIP = (1<<0),	/* SQ resides in on-chip memory */
};

/* Legacy (v0) QP-creation response, without ma_sync_key/flags.
 * Retained for compatibility with older kernels. */
struct c4iw_create_qp_resp_v0 {
	struct ibv_create_qp_resp ibv_resp;
	__u64 sq_key;
	__u64 rq_key;
	__u64 sq_db_gts_key;
	__u64 rq_db_gts_key;
	__u64 sq_memsize;
	__u64 rq_memsize;
	__u32 sqid;
	__u32 rqid;
	__u32 sq_size;
	__u32 rq_size;
	__u32 qid_mask;
};

/* Current QP-creation response: mmap keys for SQ/RQ rings and their
 * doorbell/GTS regions, queue ids and sizes, plus feature flags. */
struct c4iw_create_qp_resp {
	struct ibv_create_qp_resp ibv_resp;
	__u64 ma_sync_key;
	__u64 sq_key;
	__u64 rq_key;
	__u64 sq_db_gts_key;
	__u64 rq_db_gts_key;
	__u64 sq_memsize;
	__u64 rq_memsize;
	__u32 sqid;
	__u32 rqid;
	__u32 sq_size;
	__u32 rq_size;
	__u32 qid_mask;
	__u32 flags;		/* C4IW_QPF_* */
};
#endif /* IWCH_ABI_H */
|
6
contrib/ofed/libcxgb4/src/cxgb4.map
Normal file
6
contrib/ofed/libcxgb4/src/cxgb4.map
Normal file
|
@ -0,0 +1,6 @@
|
|||
/*
 * Linker version script for libcxgb4.so: export only the provider
 * entry points that libibverbs looks up; everything else stays local.
 */
{
	global:
		ibv_driver_init;
		openib_driver_init;
	local: *;
};
|
558
contrib/ofed/libcxgb4/src/dev.c
Normal file
558
contrib/ofed/libcxgb4/src/dev.c
Normal file
|
@ -0,0 +1,558 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#if HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif /* HAVE_CONFIG_H */
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <sys/mman.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <signal.h>
|
||||
|
||||
#include "libcxgb4.h"
|
||||
#include "cxgb4-abi.h"
|
||||
|
||||
#define PCI_VENDOR_ID_CHELSIO 0x1425

/*
 * Macros needed to support the PCI Device ID Table ...
 *
 * t4_pci_id_tbl.h is a shared device-id list written in terms of these
 * macros; defining them here makes its inclusion below expand into the
 * hca_table[] array of supported {vendor, device, chip_version} tuples.
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	struct { \
		unsigned vendor; \
		unsigned device; \
		unsigned chip_version; \
	} hca_table[] = {

/* PCI function number used to select entries from the shared table. */
#define CH_PCI_DEVICE_ID_FUNCTION \
	0x4

#define CH_PCI_ID_TABLE_ENTRY(__DeviceID) \
	{ \
		.vendor = PCI_VENDOR_ID_CHELSIO, \
		.device = (__DeviceID), \
		.chip_version = CHELSIO_PCI_ID_CHIP_VERSION(__DeviceID), \
	}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
	}

/* Expands, via the macros above, into the hca_table[] definition. */
#include "t4_chip_type.h"
#include "t4_pci_id_tbl.h"
|
||||
|
||||
unsigned long c4iw_page_size;
|
||||
unsigned long c4iw_page_shift;
|
||||
unsigned long c4iw_page_mask;
|
||||
int ma_wr;
|
||||
int t5_en_wc = 1;
|
||||
|
||||
SLIST_HEAD(devices_struct, c4iw_dev) devices;
|
||||
|
||||
/*
 * Default verbs entry points.  The chip-dependent fast-path handlers
 * (async_event, post_send, post_recv, poll_cq, req_notify_cq) are
 * overridden per-device in c4iw_alloc_context().
 */
static struct ibv_context_ops c4iw_ctx_ops = {
	.query_device = c4iw_query_device,
	.query_port = c4iw_query_port,
	.alloc_pd = c4iw_alloc_pd,
	.dealloc_pd = c4iw_free_pd,
	.reg_mr = c4iw_reg_mr,
	.dereg_mr = c4iw_dereg_mr,
	.create_cq = c4iw_create_cq,
	.resize_cq = c4iw_resize_cq,
	.destroy_cq = c4iw_destroy_cq,
	.create_srq = c4iw_create_srq,
	.modify_srq = c4iw_modify_srq,
	.destroy_srq = c4iw_destroy_srq,
	.create_qp = c4iw_create_qp,
	.modify_qp = c4iw_modify_qp,
	.destroy_qp = c4iw_destroy_qp,
	.query_qp = c4iw_query_qp,
	.create_ah = c4iw_create_ah,
	.destroy_ah = c4iw_destroy_ah,
	.attach_mcast = c4iw_attach_mcast,
	.detach_mcast = c4iw_detach_mcast,
	.post_srq_recv = c4iw_post_srq_recv,
	.req_notify_cq = c4iw_arm_cq,
};
|
||||
|
||||
static struct ibv_context *c4iw_alloc_context(struct ibv_device *ibdev,
|
||||
int cmd_fd)
|
||||
{
|
||||
struct c4iw_context *context;
|
||||
struct ibv_get_context cmd;
|
||||
struct c4iw_alloc_ucontext_resp resp;
|
||||
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
|
||||
struct ibv_query_device qcmd;
|
||||
uint64_t raw_fw_ver;
|
||||
struct ibv_device_attr attr;
|
||||
|
||||
context = malloc(sizeof *context);
|
||||
if (!context)
|
||||
return NULL;
|
||||
|
||||
memset(context, 0, sizeof *context);
|
||||
context->ibv_ctx.cmd_fd = cmd_fd;
|
||||
|
||||
resp.status_page_size = 0;
|
||||
resp.reserved = 0;
|
||||
if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
|
||||
&resp.ibv_resp, sizeof resp))
|
||||
goto err_free;
|
||||
|
||||
if (resp.reserved)
|
||||
PDBG("%s c4iw_alloc_ucontext_resp reserved field modified by kernel\n",
|
||||
__FUNCTION__);
|
||||
|
||||
context->status_page_size = resp.status_page_size;
|
||||
if (resp.status_page_size) {
|
||||
context->status_page = mmap(NULL, resp.status_page_size,
|
||||
PROT_READ, MAP_SHARED, cmd_fd,
|
||||
resp.status_page_key);
|
||||
if (context->status_page == MAP_FAILED)
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
context->ibv_ctx.device = ibdev;
|
||||
context->ibv_ctx.ops = c4iw_ctx_ops;
|
||||
|
||||
switch (rhp->chip_version) {
|
||||
case CHELSIO_T5:
|
||||
PDBG("%s T5/T4 device\n", __FUNCTION__);
|
||||
case CHELSIO_T4:
|
||||
PDBG("%s T4 device\n", __FUNCTION__);
|
||||
context->ibv_ctx.ops.async_event = c4iw_async_event;
|
||||
context->ibv_ctx.ops.post_send = c4iw_post_send;
|
||||
context->ibv_ctx.ops.post_recv = c4iw_post_receive;
|
||||
context->ibv_ctx.ops.poll_cq = c4iw_poll_cq;
|
||||
context->ibv_ctx.ops.req_notify_cq = c4iw_arm_cq;
|
||||
break;
|
||||
default:
|
||||
PDBG("%s unknown hca type %d\n", __FUNCTION__,
|
||||
rhp->chip_version);
|
||||
goto err_unmap;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!rhp->mmid2ptr) {
|
||||
int ret;
|
||||
|
||||
ret = ibv_cmd_query_device(&context->ibv_ctx, &attr, &raw_fw_ver, &qcmd,
|
||||
sizeof qcmd);
|
||||
if (ret)
|
||||
goto err_unmap;
|
||||
rhp->max_mr = attr.max_mr;
|
||||
rhp->mmid2ptr = calloc(attr.max_mr, sizeof(void *));
|
||||
if (!rhp->mmid2ptr) {
|
||||
goto err_unmap;
|
||||
}
|
||||
rhp->max_qp = T4_QID_BASE + attr.max_cq;
|
||||
rhp->qpid2ptr = calloc(T4_QID_BASE + attr.max_cq, sizeof(void *));
|
||||
if (!rhp->qpid2ptr) {
|
||||
goto err_unmap;
|
||||
}
|
||||
rhp->max_cq = T4_QID_BASE + attr.max_cq;
|
||||
rhp->cqid2ptr = calloc(T4_QID_BASE + attr.max_cq, sizeof(void *));
|
||||
if (!rhp->cqid2ptr)
|
||||
goto err_unmap;
|
||||
}
|
||||
|
||||
return &context->ibv_ctx;
|
||||
|
||||
err_unmap:
|
||||
munmap(context->status_page, context->status_page_size);
|
||||
err_free:
|
||||
if (rhp->cqid2ptr)
|
||||
free(rhp->cqid2ptr);
|
||||
if (rhp->qpid2ptr)
|
||||
free(rhp->cqid2ptr);
|
||||
if (rhp->mmid2ptr)
|
||||
free(rhp->cqid2ptr);
|
||||
free(context);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void c4iw_free_context(struct ibv_context *ibctx)
|
||||
{
|
||||
struct c4iw_context *context = to_c4iw_context(ibctx);
|
||||
|
||||
if (context->status_page_size)
|
||||
munmap(context->status_page, context->status_page_size);
|
||||
free(context);
|
||||
}
|
||||
|
||||
/* Device-level ops handed to libibverbs when the device is claimed. */
static struct ibv_device_ops c4iw_dev_ops = {
	.alloc_context = c4iw_alloc_context,
	.free_context = c4iw_free_context
};
|
||||
|
||||
#ifdef STALL_DETECTION

/* Stall timeout, set from CXGB4_STALL_TIMEOUT in cxgb4_driver_init(). */
int stall_to;

/*
 * Debug-only: dump one CQ's software state and the raw contents of its
 * hardware queue to stderr.  Each CQE is printed as four 16-byte lines,
 * all labeled with the same entry index i; the line holding the current
 * cidx is flagged.
 */
static void dump_cq(struct c4iw_cq *chp)
{
	int i;

	fprintf(stderr,
 		"CQ: %p id %u queue %p cidx 0x%08x sw_queue %p sw_cidx %d sw_pidx %d sw_in_use %d depth %u error %u gen %d "
		"cidx_inc %d bits_type_ts %016" PRIx64 " notempty %d\n", chp,
		chp->cq.cqid, chp->cq.queue, chp->cq.cidx,
		chp->cq.sw_queue, chp->cq.sw_cidx, chp->cq.sw_pidx, chp->cq.sw_in_use,
		chp->cq.size, chp->cq.error, chp->cq.gen, chp->cq.cidx_inc, be64_to_cpu(chp->cq.bits_type_ts),
		t4_cq_notempty(&chp->cq) || (chp->iq ? t4_iq_notempty(chp->iq) : 0));

	for (i=0; i < chp->cq.size; i++) {
		u64 *p = (u64 *)(chp->cq.queue + i);

		fprintf(stderr, "%02x: %016" PRIx64 " %016" PRIx64, i, be64_to_cpu(p[0]), be64_to_cpu(p[1]));
		if (i == chp->cq.cidx)
			fprintf(stderr, " <-- cidx\n");
		else
			fprintf(stderr, "\n");
		p+= 2;
		fprintf(stderr, "%02x: %016" PRIx64 " %016" PRIx64 "\n", i, be64_to_cpu(p[0]), be64_to_cpu(p[1]));
		p+= 2;
		fprintf(stderr, "%02x: %016" PRIx64 " %016" PRIx64 "\n", i, be64_to_cpu(p[0]), be64_to_cpu(p[1]));
		p+= 2;
		fprintf(stderr, "%02x: %016" PRIx64 " %016" PRIx64 "\n", i, be64_to_cpu(p[0]), be64_to_cpu(p[1]));
		p+= 2;
	}
}
|
||||
|
||||
/*
 * Debug-only: dump one QP's state to stderr -- summary header, then the
 * in-flight software SQ/RQ entries between cidx and pidx, then the raw
 * hardware SQ and RQ rings with the pidx/cidx slots flagged.
 */
static void dump_qp(struct c4iw_qp *qhp)
{
	int i;
	int j;
	struct t4_swsqe *swsqe;
	struct t4_swrqe *swrqe;
	u16 cidx, pidx;
	u64 *p;

	fprintf(stderr,
		"QP: %p id %u error %d flushed %d qid_mask 0x%x\n"
		" SQ: id %u queue %p sw_queue %p cidx %u pidx %u in_use %u wq_pidx %u depth %u flags 0x%x flush_cidx %d\n"
		" RQ: id %u queue %p sw_queue %p cidx %u pidx %u in_use %u depth %u\n",
		qhp,
		qhp->wq.sq.qid,
		qhp->wq.error,
		qhp->wq.flushed,
		qhp->wq.qid_mask,
		qhp->wq.sq.qid,
		qhp->wq.sq.queue,
		qhp->wq.sq.sw_sq,
		qhp->wq.sq.cidx,
		qhp->wq.sq.pidx,
		qhp->wq.sq.in_use,
		qhp->wq.sq.wq_pidx,
		qhp->wq.sq.size,
		qhp->wq.sq.flags,
		qhp->wq.sq.flush_cidx,
		qhp->wq.rq.qid,
		qhp->wq.rq.queue,
		qhp->wq.rq.sw_rq,
		qhp->wq.rq.cidx,
		qhp->wq.rq.pidx,
		qhp->wq.rq.in_use,
		qhp->wq.rq.size);
	/* Walk the software SQ from consumer to producer index. */
	cidx = qhp->wq.sq.cidx;
	pidx = qhp->wq.sq.pidx;
	if (cidx != pidx)
		fprintf(stderr, "SQ: \n");
	while (cidx != pidx) {
		swsqe = &qhp->wq.sq.sw_sq[cidx];
		fprintf(stderr, "%04u: wr_id %016" PRIx64
			" sq_wptr %08x read_len %u opcode 0x%x "
			"complete %u signaled %u cqe %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n",
			cidx,
			swsqe->wr_id,
			swsqe->idx,
			swsqe->read_len,
			swsqe->opcode,
			swsqe->complete,
			swsqe->signaled,
			cpu_to_be64(swsqe->cqe.u.flits[0]),
			cpu_to_be64(swsqe->cqe.u.flits[1]),
			cpu_to_be64((u64)swsqe->cqe.reserved),
			cpu_to_be64(swsqe->cqe.bits_type_ts));
		if (++cidx == qhp->wq.sq.size)
			cidx = 0;
	}

	/* Raw hardware SQ ring, 16 bytes per line. */
	fprintf(stderr, "SQ WQ: \n");
	p = (u64 *)qhp->wq.sq.queue;
	for (i=0; i < qhp->wq.sq.size * T4_SQ_NUM_SLOTS; i++) {
		for (j=0; j < T4_EQ_ENTRY_SIZE / 16; j++) {
			fprintf(stderr, "%04u %016" PRIx64 " %016" PRIx64 " ",
				i, ntohll(p[0]), ntohll(p[1]));
			if (j == 0 && i == qhp->wq.sq.wq_pidx)
				fprintf(stderr, " <-- pidx");
			fprintf(stderr, "\n");
			p += 2;
		}
	}
	/* Walk the software RQ from consumer to producer index. */
	cidx = qhp->wq.rq.cidx;
	pidx = qhp->wq.rq.pidx;
	if (cidx != pidx)
		fprintf(stderr, "RQ: \n");
	while (cidx != pidx) {
		swrqe = &qhp->wq.rq.sw_rq[cidx];
		fprintf(stderr, "%04u: wr_id %016" PRIx64 "\n",
			cidx,
			swrqe->wr_id );
		if (++cidx == qhp->wq.rq.size)
			cidx = 0;
	}

	/* Raw hardware RQ ring, 16 bytes per line. */
	fprintf(stderr, "RQ WQ: \n");
	p = (u64 *)qhp->wq.rq.queue;
	for (i=0; i < qhp->wq.rq.size * T4_RQ_NUM_SLOTS; i++) {
		for (j=0; j < T4_EQ_ENTRY_SIZE / 16; j++) {
			fprintf(stderr, "%04u %016" PRIx64 " %016" PRIx64 " ",
				i, ntohll(p[0]), ntohll(p[1]));
			if (j == 0 && i == qhp->wq.rq.pidx)
				fprintf(stderr, " <-- pidx");
			if (j == 0 && i == qhp->wq.rq.cidx)
				fprintf(stderr, " <-- cidx");
			fprintf(stderr, "\n");
			p+=2;
		}
	}
}
|
||||
|
||||
/*
 * Debug-only: dump every CQ and QP on every claimed device to stderr.
 * Called from the stall detector.  The locks are deliberately not
 * taken (commented out) -- presumably so a dump can still be produced
 * when a stalled thread holds one; TODO confirm.
 */
void dump_state()
{
	struct c4iw_dev *dev;
	int i;

	fprintf(stderr, "STALL DETECTED:\n");
	SLIST_FOREACH(dev, &devices, list) {
		//pthread_spin_lock(&dev->lock);
		fprintf(stderr, "Device %s\n", dev->ibv_dev.name);
		for (i=0; i < dev->max_cq; i++) {
			if (dev->cqid2ptr[i]) {
				struct c4iw_cq *chp = dev->cqid2ptr[i];
				//pthread_spin_lock(&chp->lock);
				dump_cq(chp);
				//pthread_spin_unlock(&chp->lock);
			}
		}
		for (i=0; i < dev->max_qp; i++) {
			if (dev->qpid2ptr[i]) {
				struct c4iw_qp *qhp = dev->qpid2ptr[i];
				//pthread_spin_lock(&qhp->lock);
				dump_qp(qhp);
				//pthread_spin_unlock(&qhp->lock);
			}
		}
		//pthread_spin_unlock(&dev->lock);
	}
	fprintf(stderr, "DUMP COMPLETE:\n");
	fflush(stderr);
}
#endif /* end of STALL_DETECTION */

/*
 * c4iw_abi_version is used to store ABI for iw_cxgb4 so the user mode library
 * can know if the driver supports the kernel mode db ringing.
 */
int c4iw_abi_version = 1;
|
||||
|
||||
static struct ibv_device *cxgb4_driver_init(const char *uverbs_sys_path,
|
||||
int abi_version)
|
||||
{
|
||||
char devstr[IBV_SYSFS_PATH_MAX], ibdev[16], value[128], *cp;
|
||||
char t5nexstr[IBV_SYSFS_PATH_MAX];
|
||||
struct c4iw_dev *dev;
|
||||
unsigned vendor, device, fw_maj, fw_min;
|
||||
int i;
|
||||
char devnum=0;
|
||||
char ib_param[16];
|
||||
|
||||
#ifndef __linux__
|
||||
if (ibv_read_sysfs_file(uverbs_sys_path, "ibdev",
|
||||
ibdev, sizeof ibdev) < 0)
|
||||
return NULL;
|
||||
/*
|
||||
* Extract the non-numeric part of ibdev
|
||||
* say "t5nex0" -> devname=="t5nex", devnum=0
|
||||
*/
|
||||
if (strstr(ibdev,"t5nex")) {
|
||||
devnum = atoi(ibdev+strlen("t5nex"));
|
||||
sprintf(t5nexstr, "/dev/t5nex/%d", devnum);
|
||||
} else
|
||||
return NULL;
|
||||
|
||||
if (ibv_read_sysfs_file(t5nexstr, "\%pnpinfo",
|
||||
value, sizeof value) < 0)
|
||||
return NULL;
|
||||
else {
|
||||
if (strstr(value,"vendor=")) {
|
||||
strncpy(ib_param, strstr(value,"vendor=")+strlen("vendor="),6);
|
||||
sscanf(ib_param,"%i",&vendor);
|
||||
}
|
||||
|
||||
if (strstr(value,"device=")) {
|
||||
strncpy(ib_param, strstr(value,"device=")+strlen("device="),6);
|
||||
sscanf(ib_param,"%i",&device);
|
||||
}
|
||||
}
|
||||
#else
|
||||
if (ibv_read_sysfs_file(uverbs_sys_path, "device/vendor",
|
||||
value, sizeof value) < 0)
|
||||
return NULL;
|
||||
sscanf(value, "%i", &vendor);
|
||||
|
||||
if (ibv_read_sysfs_file(uverbs_sys_path, "device/device",
|
||||
value, sizeof value) < 0)
|
||||
return NULL;
|
||||
sscanf(value, "%i", &device);
|
||||
#endif
|
||||
|
||||
for (i = 0; i < sizeof hca_table / sizeof hca_table[0]; ++i)
|
||||
if (vendor == hca_table[i].vendor &&
|
||||
device == hca_table[i].device)
|
||||
goto found;
|
||||
|
||||
return NULL;
|
||||
|
||||
found:
|
||||
c4iw_abi_version = abi_version;
|
||||
|
||||
|
||||
#ifndef __linux__
|
||||
if (ibv_read_sysfs_file(t5nexstr, "firmware_version",
|
||||
value, sizeof value) < 0)
|
||||
return NULL;
|
||||
#else
|
||||
/*
|
||||
* Verify that the firmware major number matches. Major number
|
||||
* mismatches are fatal. Minor number mismatches are tolerated.
|
||||
*/
|
||||
if (ibv_read_sysfs_file(uverbs_sys_path, "ibdev",
|
||||
ibdev, sizeof ibdev) < 0)
|
||||
return NULL;
|
||||
|
||||
memset(devstr, 0, sizeof devstr);
|
||||
snprintf(devstr, sizeof devstr, "%s/class/infiniband/%s",
|
||||
ibv_get_sysfs_path(), ibdev);
|
||||
if (ibv_read_sysfs_file(devstr, "fw_ver", value, sizeof value) < 0)
|
||||
return NULL;
|
||||
#endif
|
||||
|
||||
cp = strtok(value+1, ".");
|
||||
sscanf(cp, "%i", &fw_maj);
|
||||
cp = strtok(NULL, ".");
|
||||
sscanf(cp, "%i", &fw_min);
|
||||
|
||||
if (fw_maj < FW_MAJ) {
|
||||
fprintf(stderr, "libcxgb4: Fatal firmware version mismatch. "
|
||||
"Firmware major number is %u and libcxgb4 needs %u.\n",
|
||||
fw_maj, FW_MAJ);
|
||||
fflush(stderr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
DBGLOG("libcxgb4");
|
||||
|
||||
if (fw_min < FW_MIN) {
|
||||
PDBG("libcxgb4: non-fatal firmware version mismatch. "
|
||||
"Firmware minor number is %u and libcxgb4 needs %u.\n",
|
||||
fw_maj, FW_MAJ);
|
||||
fflush(stderr);
|
||||
}
|
||||
|
||||
PDBG("%s found vendor %d device %d type %d\n",
|
||||
__FUNCTION__, vendor, device, hca_table[i].chip_version);
|
||||
|
||||
dev = calloc(1, sizeof *dev);
|
||||
if (!dev) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE);
|
||||
dev->ibv_dev.ops = c4iw_dev_ops;
|
||||
dev->chip_version = hca_table[i].chip_version;
|
||||
dev->abi_version = abi_version;
|
||||
|
||||
PDBG("%s device claimed\n", __FUNCTION__);
|
||||
SLIST_INSERT_HEAD(&devices, dev, list);
|
||||
#ifdef STALL_DETECTION
|
||||
{
|
||||
char *c = getenv("CXGB4_STALL_TIMEOUT");
|
||||
if (c) {
|
||||
stall_to = strtol(c, NULL, 0);
|
||||
if (errno || stall_to < 0)
|
||||
stall_to = 0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
{
|
||||
char *c = getenv("CXGB4_MA_WR");
|
||||
if (c) {
|
||||
ma_wr = strtol(c, NULL, 0);
|
||||
if (ma_wr != 1)
|
||||
ma_wr = 0;
|
||||
}
|
||||
}
|
||||
{
|
||||
char *c = getenv("T5_ENABLE_WC");
|
||||
if (c) {
|
||||
t5_en_wc = strtol(c, NULL, 0);
|
||||
if (t5_en_wc != 1)
|
||||
t5_en_wc = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return &dev->ibv_dev;
|
||||
}
|
||||
|
||||
static __attribute__((constructor)) void cxgb4_register_driver(void)
|
||||
{
|
||||
c4iw_page_size = sysconf(_SC_PAGESIZE);
|
||||
c4iw_page_shift = long_log2(c4iw_page_size);
|
||||
c4iw_page_mask = ~(c4iw_page_size - 1);
|
||||
ibv_register_driver("cxgb4", cxgb4_driver_init);
|
||||
}
|
||||
|
||||
#ifdef STATS
|
||||
void __attribute__ ((destructor)) cs_fini(void);
|
||||
void __attribute__ ((destructor)) cs_fini(void)
|
||||
{
|
||||
syslog(LOG_NOTICE, "cxgb4 stats - sends %lu recv %lu read %lu "
|
||||
"write %lu arm %lu cqe %lu mr %lu qp %lu cq %lu\n",
|
||||
c4iw_stats.send, c4iw_stats.recv, c4iw_stats.read,
|
||||
c4iw_stats.write, c4iw_stats.arm, c4iw_stats.cqe,
|
||||
c4iw_stats.mr, c4iw_stats.qp, c4iw_stats.cq);
|
||||
}
|
||||
#endif
|
257
contrib/ofed/libcxgb4/src/libcxgb4.h
Normal file
257
contrib/ofed/libcxgb4/src/libcxgb4.h
Normal file
|
@ -0,0 +1,257 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef IWCH_H
|
||||
#define IWCH_H
|
||||
|
||||
#include <pthread.h>
|
||||
#include <inttypes.h>
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/errno.h>
|
||||
#include <sys/time.h>
|
||||
#include <infiniband/driver.h>
|
||||
#include <infiniband/arch.h>
|
||||
#include "queue.h"
|
||||
#include "t4.h"
|
||||
|
||||
/* Host page geometry, cached at library load (defined in dev.c). */
extern unsigned long c4iw_page_size;
extern unsigned long c4iw_page_shift;
extern unsigned long c4iw_page_mask;

struct c4iw_mr;

/*
 * Per-device state shared by all contexts opened on the device.
 * The *2ptr tables map kernel-assigned ids (mem-mgt id, qid, cqid)
 * back to library objects; they are allocated lazily on the first
 * context allocation (see c4iw_alloc_context()).
 */
struct c4iw_dev {
	struct ibv_device ibv_dev;	/* embedded; converted via to_c4iw_dev() */
	unsigned chip_version;		/* CHELSIO_T4 or CHELSIO_T5 */
	int max_mr;
	struct c4iw_mr **mmid2ptr;	/* mem-mgt id -> MR */
	int max_qp;
	struct c4iw_qp **qpid2ptr;	/* qid -> QP */
	int max_cq;
	struct c4iw_cq **cqid2ptr;	/* cqid -> CQ */
	pthread_spinlock_t lock;
	SLIST_ENTRY(c4iw_dev) list;	/* link on the global devices list */
	int abi_version;		/* kernel driver ABI at claim time */
};
|
||||
|
||||
static inline int dev_is_t5(struct c4iw_dev *dev)
|
||||
{
|
||||
return dev->chip_version == CHELSIO_T5;
|
||||
}
|
||||
|
||||
/* Per-open verbs context; wraps the ibv_context handed to callers. */
struct c4iw_context {
	struct ibv_context ibv_ctx;		/* embedded; see to_c4iw_context() */
	struct t4_dev_status_page *status_page;	/* read-only kernel page, or unset */
	int status_page_size;			/* 0 when no page was mapped */
};

/* Protection domain; no user-space state beyond the ibv_pd itself. */
struct c4iw_pd {
	struct ibv_pd ibv_pd;
};

/* Memory region plus the offset/length used for address validation. */
struct c4iw_mr {
	struct ibv_mr ibv_mr;
	uint64_t va_fbo;	/* virtual address / first-byte offset */
	uint32_t len;
};
|
||||
|
||||
/*
 * Extract the memory-mgt index from a 32-bit stag (the low 8 bits are
 * the key; the rest indexes the mmid2ptr table).
 */
static inline u32 c4iw_mmid(u32 stag)
{
	u32 mmid = stag >> 8;

	return mmid;
}
|
||||
|
||||
/* Completion queue: verbs handle plus the t4 hardware queue state. */
struct c4iw_cq {
	struct ibv_cq ibv_cq;		/* embedded; see to_c4iw_cq() */
	struct c4iw_dev *rhp;		/* owning device */
	struct t4_cq cq;		/* hardware CQ state */
	pthread_spinlock_t lock;
#ifdef STALL_DETECTION
	struct timeval time;		/* when the current wait began */
	int dumped;			/* state already dumped for this stall */
#endif
};

/* Queue pair: verbs handle plus the t4 SQ/RQ work-queue state. */
struct c4iw_qp {
	struct ibv_qp ibv_qp;		/* embedded; see to_c4iw_qp() */
	struct c4iw_dev *rhp;		/* owning device */
	struct t4_wq wq;		/* hardware SQ/RQ state */
	pthread_spinlock_t lock;
	int sq_sig_all;			/* every SQ WR generates a completion */
};
|
||||
|
||||
/*
 * container_of-style conversion: map an embedded ibv_<xxx> pointer
 * back to its enclosing struct c4iw_<type>.
 */
#define to_c4iw_xxx(xxx, type) \
	((struct c4iw_##type *) \
	 ((void *) ib##xxx - offsetof(struct c4iw_##type, ibv_##xxx)))

static inline struct c4iw_dev *to_c4iw_dev(struct ibv_device *ibdev)
{
	return to_c4iw_xxx(dev, dev);
}

static inline struct c4iw_context *to_c4iw_context(struct ibv_context *ibctx)
{
	return to_c4iw_xxx(ctx, context);
}

static inline struct c4iw_pd *to_c4iw_pd(struct ibv_pd *ibpd)
{
	return to_c4iw_xxx(pd, pd);
}

static inline struct c4iw_cq *to_c4iw_cq(struct ibv_cq *ibcq)
{
	return to_c4iw_xxx(cq, cq);
}

static inline struct c4iw_qp *to_c4iw_qp(struct ibv_qp *ibqp)
{
	return to_c4iw_xxx(qp, qp);
}

static inline struct c4iw_mr *to_c4iw_mr(struct ibv_mr *ibmr)
{
	return to_c4iw_xxx(mr, mr);
}
|
||||
|
||||
/* Look up a QP by qid.  No bounds check: callers pass kernel-issued ids. */
static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qid)
{
	return rhp->qpid2ptr[qid];
}

/* Look up a CQ by cqid.  Same contract as get_qhp(). */
static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 qid)
{
	return rhp->cqid2ptr[qid];
}
|
||||
|
||||
/* Floor of log2(x); returns 0 for x <= 1. */
static inline unsigned long_log2(unsigned long x)
{
	unsigned bits = 0;

	while ((x >>= 1) != 0)
		bits++;
	return bits;
}
|
||||
|
||||
/* Device/port query verbs (verbs.c). */
int c4iw_query_device(struct ibv_context *context,
		      struct ibv_device_attr *attr);
int c4iw_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr);

/* Protection domains. */
struct ibv_pd *c4iw_alloc_pd(struct ibv_context *context);
int c4iw_free_pd(struct ibv_pd *pd);

/* Memory registration. */
struct ibv_mr *c4iw_reg_mr(struct ibv_pd *pd, void *addr,
			   size_t length, int access);
int c4iw_dereg_mr(struct ibv_mr *mr);

/* Completion queues (cq.c). */
struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel,
			      int comp_vector);
int c4iw_resize_cq(struct ibv_cq *cq, int cqe);
int c4iw_destroy_cq(struct ibv_cq *cq);
int c4iw_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
int c4iw_arm_cq(struct ibv_cq *cq, int solicited);
void c4iw_cq_event(struct ibv_cq *cq);
void c4iw_init_cq_buf(struct c4iw_cq *cq, int nent);

/* Shared receive queues. */
struct ibv_srq *c4iw_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr);
int c4iw_modify_srq(struct ibv_srq *srq,
		    struct ibv_srq_attr *attr,
		    int mask);
int c4iw_destroy_srq(struct ibv_srq *srq);
int c4iw_post_srq_recv(struct ibv_srq *ibsrq,
		       struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr);

/* Queue pairs and the post_send/post_recv fast path (qp.c). */
struct ibv_qp *c4iw_create_qp(struct ibv_pd *pd,
			      struct ibv_qp_init_attr *attr);
int c4iw_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		   int attr_mask);
int c4iw_destroy_qp(struct ibv_qp *qp);
int c4iw_query_qp(struct ibv_qp *qp,
		  struct ibv_qp_attr *attr,
		  int attr_mask,
		  struct ibv_qp_init_attr *init_attr);
void c4iw_flush_qp(struct c4iw_qp *qhp);
void c4iw_flush_qps(struct c4iw_dev *dev);
int c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
		   struct ibv_send_wr **bad_wr);
int c4iw_post_receive(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
		      struct ibv_recv_wr **bad_wr);

/* Address handles and multicast: not supported by iWARP (stubs). */
struct ibv_ah *c4iw_create_ah(struct ibv_pd *pd,
			      struct ibv_ah_attr *ah_attr);
int c4iw_destroy_ah(struct ibv_ah *ah);
int c4iw_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
		      uint16_t lid);
int c4iw_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
		      uint16_t lid);

/* Async events and CQ/QP flush helpers. */
void c4iw_async_event(struct ibv_async_event *event);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
void c4iw_flush_sq(struct c4iw_qp *qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
|
||||
/* Minimum firmware version required; a lower major is fatal. */
#define FW_MAJ 0
#define FW_MIN 0

/* Round val up to the next multiple of align (align must be a power of 2). */
static inline unsigned long align(unsigned long val, unsigned long align)
{
	unsigned long mask = align - 1;

	return (val + mask) & ~mask;
}
|
||||
|
||||
#ifdef STATS

/* Bump one of the global activity counters (reported at exit). */
#define INC_STAT(a) { c4iw_stats.a++; }

/* Per-process verb counters; the single instance lives in qp.c. */
struct c4iw_stats {
	unsigned long send;
	unsigned long recv;
	unsigned long read;
	unsigned long write;
	unsigned long arm;
	unsigned long cqe;
	unsigned long mr;
	unsigned long qp;
	unsigned long cq;
};
extern struct c4iw_stats c4iw_stats;
#else
#define INC_STAT(a)
#endif

#ifdef STALL_DETECTION
/* Dump all CQ/QP state on every device (dev.c). */
void dump_state();
/* Stall timeout, seconds; from the CXGB4_STALL_TIMEOUT environment. */
extern int stall_to;
#endif
|
||||
|
||||
#endif /* IWCH_H */
|
542
contrib/ofed/libcxgb4/src/qp.c
Normal file
542
contrib/ofed/libcxgb4/src/qp.c
Normal file
|
@ -0,0 +1,542 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#if HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif /* HAVE_CONFIG_H */
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdlib.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <netinet/in.h>
|
||||
#include "libcxgb4.h"
|
||||
|
||||
#ifdef STATS
|
||||
struct c4iw_stats c4iw_stats;
|
||||
#endif
|
||||
|
||||
/*
 * Copy a built work request into the SQ ring at the current wq_pidx,
 * 16 bytes (two u64 flits) per iteration, wrapping at the end of the
 * queue.  For on-chip SQs the length is first rounded up to 4 units
 * (64 bytes) and a write-combining barrier is issued before copying.
 */
static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)wq->sq.queue + wq->sq.wq_pidx * T4_EQ_ENTRY_SIZE);
	if (t4_sq_onchip(wq)) {
		len16 = align(len16, 4);
		wc_wmb();
	}
	while (len16) {
		*dst++ = *src++;
		if (dst == (u64 *)&wq->sq.queue[wq->sq.size])
			dst = (u64 *)wq->sq.queue;
		*dst++ = *src++;
		if (dst == (u64 *)&wq->sq.queue[wq->sq.size])
			dst = (u64 *)wq->sq.queue;
		len16--;
	}
}
|
||||
|
||||
/*
 * Copy a built receive work request into the RQ ring at the current
 * wq_pidx, 16 bytes per iteration, wrapping at the end of the queue.
 */
static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)wq->rq.queue + wq->rq.wq_pidx * T4_EQ_ENTRY_SIZE);
	while (len16) {
		*dst++ = *src++;
		if (dst >= (u64 *)&wq->rq.queue[wq->rq.size])
			dst = (u64 *)wq->rq.queue;
		*dst++ = *src++;
		if (dst >= (u64 *)&wq->rq.queue[wq->rq.size])
			dst = (u64 *)wq->rq.queue;
		len16--;
	}
}
|
||||
|
||||
/*
 * Copy the WR's scatter-gather data inline into the immediate-data
 * area of the work request.  Returns -EMSGSIZE if the total length
 * would exceed max; on success *plenp is the payload length.  The
 * data is zero-padded out to a 16-byte boundary (the "+ 8" appears to
 * account for the fw_ri_immd header preceding the data -- confirm
 * against the firmware WR layout).
 */
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ibv_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		len = wr->sg_list[i].length;
		memcpy(dstp, srcp, len);
		dstp += len;
		srcp += len;
	}
	/* Zero-fill up to the next 16-byte boundary. */
	len = ROUND_UP(plen + 8, 16) - (plen + 8);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
|
||||
|
||||
/*
 * Build a firmware immediate scatter-gather list from the verbs SGL.
 * Each entry is encoded as two big-endian flits: (lkey << 32 | length)
 * followed by the address.  Returns -EMSGSIZE if the 32-bit total
 * length would wrap (the "(plen + len) < plen" test is an unsigned
 * overflow check).  *plenp, when non-NULL, receives the total length.
 */
static int build_isgl(struct fw_ri_isgl *isglp, struct ibv_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp++ = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				       sg_list[i].length);
		*flitp++ = cpu_to_be64(sg_list[i].addr);
	}
	/* Terminating zero flit after the last entry. */
	*flitp = 0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
|
||||
|
||||
/*
 * Build a SEND work request into *wqe.  Payload is placed either
 * inline (IBV_SEND_INLINE) or as an SGL; a zero-SGE send gets an
 * empty immediate chunk.  *len16 receives the WR length in 16-byte
 * units.  Returns 0 or a negative errno.
 */
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	/* SEND vs SEND-with-solicited-event opcode. */
	if (wr->send_flags & IBV_SEND_SOLICITED)
		wqe->send.sendop_pkd = cpu_to_be32(
			V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
	else
		wqe->send.sendop_pkd = cpu_to_be32(
			V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
	wqe->send.stag_inv = 0;
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl(wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof (struct fw_ri_sge);
		}
	} else {
		/* Zero-length send: encode an empty immediate chunk. */
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
|
||||
|
||||
/*
 * Build an RDMA WRITE work request into *wqe.  Sink rkey/address come
 * from wr->wr.rdma; payload is inline or an SGL exactly as in
 * build_rdma_send().  *len16 receives the WR length in 16-byte units.
 * Returns 0 or a negative errno.
 */
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl(wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof (struct fw_ri_sge);
		}
	} else {
		/* Zero-length write: encode an empty immediate chunk. */
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
|
||||
|
||||
/*
 * Build an RDMA READ work queue entry.  At most one sink SGE is
 * supported; a request with no SGEs is encoded as a zero-length read
 * (stag 2, all addresses and the length zeroed).  *len16 receives the
 * WQE length in 16-byte units.
 *
 * Returns 0 on success or -EINVAL if more than one SGE was supplied.
 */
static int build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;

	if (wr->num_sge == 0) {
		/* Degenerate zero-length read. */
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	} else {
		u64 src = wr->wr.rdma.remote_addr;
		u64 sink = wr->sg_list[0].addr;

		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(src >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)src);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(sink >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)sink);
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
|
||||
|
||||
/*
 * Build a receive WQE: encode the WR's scatter/gather list as an ISGL
 * and report the total WQE length in 16-byte units through *len16.
 * *len16 is only written on success.
 *
 * Returns 0 on success or the error from build_isgl().
 */
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ibv_recv_wr *wr, u8 *len16)
{
	int rc;

	rc = build_isgl(&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (rc == 0)
		*len16 = DIV_ROUND_UP(sizeof wqe->recv +
				      wr->num_sge * sizeof(struct fw_ri_sge),
				      16);
	return rc;
}
|
||||
|
||||
/*
 * Debug helper: hex-dump a WQE to stdout.  The low byte of the first
 * big-endian 64-bit word is taken as the WQE length in 16-byte units;
 * two 8-byte words are printed per output line.
 */
void dump_wqe(void *arg)
{
	u64 *p = arg;
	int len16;

	len16 = be64_to_cpu(*p) & 0xff;
	while (len16--) {
		/*
		 * NOTE(review): the (u8) cast prints only the low byte of
		 * the pointer as the line label -- looks like an offset
		 * within the WQE, but confirm that is the intent.
		 */
		printf("%02x: %016lx ", (u8)(unsigned long)p, be64_to_cpu(*p));
		p++;
		printf("%016lx\n", be64_to_cpu(*p));
		p++;
	}
}
|
||||
|
||||
/*
 * Ring a queue doorbell indirectly through the kernel driver rather than
 * writing the user doorbell register.  The producer increment (idx) is
 * carried to the driver inside a modify-QP command, using the sq_psn
 * attribute when qid names the SQ and rq_psn otherwise.
 */
static void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx)
{
	struct ibv_modify_qp cmd;
	struct ibv_qp_attr attr;
	int mask;
	int ret;

	/*
	 * Order the WQE stores ahead of the doorbell request -- presumably
	 * a write-combining memory barrier; confirm against wc_wmb()'s
	 * definition.
	 */
	wc_wmb();
	if (qid == qhp->wq.sq.qid) {
		attr.sq_psn = idx;
		mask = IBV_QP_SQ_PSN;
	} else {
		attr.rq_psn = idx;
		mask = IBV_QP_RQ_PSN;
	}
	ret = ibv_cmd_modify_qp(&qhp->ibv_qp, &attr, mask, &cmd, sizeof cmd);
	assert(!ret);
}
|
||||
|
||||
/*
 * Post a linked list of send work requests (SEND, RDMA WRITE, RDMA READ)
 * to the QP's send queue and ring the doorbell.
 *
 * On failure *bad_wr points at the first WR that could not be posted;
 * any WRs posted before it remain submitted.  Returns 0 on success,
 * -EINVAL for a QP in error or an unsupported opcode, -ENOMEM when the
 * SQ is full.
 */
int c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
	           struct ibv_send_wr **bad_wr)
{
	int err = 0;
	u8 len16;
	enum fw_wr_opcodes fw_opcode;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe, lwqe;		/* lwqe: local staging copy of the WQE */
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	u16 idx = 0;			/* EQ entries consumed by this batch */

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		return -ENOMEM;
	}
	/*
	 * NOTE(review): if the caller passes wr == NULL the loop below never
	 * runs and len16/wqe reach t4_ring_sq_db() uninitialized -- confirm
	 * callers never post an empty WR list.
	 */
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		/* Build each WQE in the local copy, then copy it into the SQ. */
		wqe = &lwqe;
		fw_flags = 0;
		if (wr->send_flags & IBV_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IBV_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IBV_WR_SEND:
			INC_STAT(send);
			if (wr->send_flags & IBV_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			swsqe->opcode = FW_RI_SEND;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_WRITE:
			INC_STAT(write);
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_READ:
			INC_STAT(read);
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			/* Reads carry no solicited/completion flags. */
			fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			/* Track the oldest outstanding read for completions. */
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		/* Software-queue bookkeeping used at completion time. */
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IBV_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode);
		wr = wr->next;
		num_wrs--;
		copy_wr_to_sq(&qhp->wq, wqe, len16);
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	/* Ring the doorbell: directly if enabled, else via the kernel. */
	if (t4_wq_db_enabled(&qhp->wq)) {
		t4_ring_sq_db(&qhp->wq, idx, dev_is_t5(qhp->rhp),
			      len16, wqe);
	} else
		ring_kernel_db(qhp, qhp->wq.sq.qid, idx);
	/* Publish the host producer index in the shared status page. */
	qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \
			(qhp->wq.sq.wq_pidx);
	pthread_spin_unlock(&qhp->lock);
	return err;
}
|
||||
|
||||
/*
 * Post a linked list of receive work requests to the QP's receive queue
 * and ring the doorbell.
 *
 * On failure *bad_wr points at the first WR that could not be posted;
 * any WRs posted before it remain submitted.  Returns 0 on success,
 * -EINVAL for a QP in error or too many SGEs, -ENOMEM when the RQ is
 * full.
 */
int c4iw_post_receive(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
	              struct ibv_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe, lwqe;	/* lwqe: local staging copy of the WQE */
	u32 num_wrs;
	u8 len16 = 0;
	u16 idx = 0;			/* EQ entries consumed by this batch */

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		return -EINVAL;
	}
	INC_STAT(recv);
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &lwqe;
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		/* Fill in the FW recv WR header around the ISGL. */
		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		copy_wr_to_rq(&qhp->wq, wqe, len16);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	/*
	 * Ring the doorbell: directly if enabled, else via the kernel.
	 * NOTE(review): wqe is uninitialized here if the loop never ran
	 * (wr == NULL on entry) -- confirm callers never post an empty list.
	 */
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx, dev_is_t5(qhp->rhp),
			      len16, wqe);
	else
		ring_kernel_db(qhp, qhp->wq.rq.qid, idx);
	/* Publish the host producer index in the shared status page. */
	qhp->wq.rq.queue[qhp->wq.rq.size].status.host_wq_pidx = \
			(qhp->wq.rq.wq_pidx);
	pthread_spin_unlock(&qhp->lock);
	return err;
}
|
||||
|
||||
static void update_qp_state(struct c4iw_qp *qhp)
|
||||
{
|
||||
struct ibv_query_qp cmd;
|
||||
struct ibv_qp_attr attr;
|
||||
struct ibv_qp_init_attr iattr;
|
||||
int ret;
|
||||
|
||||
ret = ibv_cmd_query_qp(&qhp->ibv_qp, &attr, IBV_QP_STATE, &iattr,
|
||||
&cmd, sizeof cmd);
|
||||
assert(!ret);
|
||||
if (!ret)
|
||||
qhp->ibv_qp.state = attr.qp_state;
|
||||
}
|
||||
|
||||
/*
 * Flush all pending work requests on a QP that has entered the error
 * state, generating flush completions on both the send and receive CQs.
 *
 * Assumes qhp lock is held on entry.  The lock is dropped and re-taken
 * internally so that each CQ lock is always acquired before the QP lock
 * (cq-then-qp ordering), and is held again on return.
 */
void c4iw_flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	int count;

	/* Already flushed: nothing to do. */
	if (qhp->wq.flushed)
		return;

	/* Sync the cached QP state with the kernel before flushing. */
	update_qp_state(qhp);

	rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
	schp = to_c4iw_cq(qhp->ibv_qp.send_cq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	qhp->wq.flushed = 1;
	pthread_spin_unlock(&qhp->lock);

	/* locking heirarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&rchp->lock);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&rchp->lock);

	/* locking heirarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&schp->lock);
	pthread_spin_lock(&qhp->lock);
	/* Skip the HW flush if both WRs share one CQ (already done above). */
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	c4iw_flush_sq(qhp);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&schp->lock);
	/* Re-acquire the QP lock for the caller, who expects it held. */
	pthread_spin_lock(&qhp->lock);
}
|
||||
|
||||
void c4iw_flush_qps(struct c4iw_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
pthread_spin_lock(&dev->lock);
|
||||
for (i=0; i < dev->max_qp; i++) {
|
||||
struct c4iw_qp *qhp = dev->qpid2ptr[i];
|
||||
if (qhp) {
|
||||
if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
c4iw_flush_qp(qhp);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
pthread_spin_unlock(&dev->lock);
|
||||
}
|
650
contrib/ofed/libcxgb4/src/queue.h
Normal file
650
contrib/ofed/libcxgb4/src/queue.h
Normal file
|
@ -0,0 +1,650 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 4. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* @(#)queue.h 8.5 (Berkeley) 8/20/94
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _SYS_QUEUE_H_
|
||||
#define _SYS_QUEUE_H_
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
|
||||
/*
|
||||
* This file defines four types of data structures: singly-linked lists,
|
||||
* singly-linked tail queues, lists and tail queues.
|
||||
*
|
||||
* A singly-linked list is headed by a single forward pointer. The elements
|
||||
* are singly linked for minimum space and pointer manipulation overhead at
|
||||
* the expense of O(n) removal for arbitrary elements. New elements can be
|
||||
* added to the list after an existing element or at the head of the list.
|
||||
* Elements being removed from the head of the list should use the explicit
|
||||
* macro for this purpose for optimum efficiency. A singly-linked list may
|
||||
* only be traversed in the forward direction. Singly-linked lists are ideal
|
||||
* for applications with large datasets and few or no removals or for
|
||||
* implementing a LIFO queue.
|
||||
*
|
||||
* A singly-linked tail queue is headed by a pair of pointers, one to the
|
||||
* head of the list and the other to the tail of the list. The elements are
|
||||
* singly linked for minimum space and pointer manipulation overhead at the
|
||||
* expense of O(n) removal for arbitrary elements. New elements can be added
|
||||
* to the list after an existing element, at the head of the list, or at the
|
||||
* end of the list. Elements being removed from the head of the tail queue
|
||||
* should use the explicit macro for this purpose for optimum efficiency.
|
||||
* A singly-linked tail queue may only be traversed in the forward direction.
|
||||
* Singly-linked tail queues are ideal for applications with large datasets
|
||||
* and few or no removals or for implementing a FIFO queue.
|
||||
*
|
||||
* A list is headed by a single forward pointer (or an array of forward
|
||||
* pointers for a hash table header). The elements are doubly linked
|
||||
* so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before
|
||||
* or after an existing element or at the head of the list. A list
|
||||
* may only be traversed in the forward direction.
|
||||
*
|
||||
* A tail queue is headed by a pair of pointers, one to the head of the
|
||||
* list and the other to the tail of the list. The elements are doubly
|
||||
* linked so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before or
|
||||
* after an existing element, at the head of the list, or at the end of
|
||||
* the list. A tail queue may be traversed in either direction.
|
||||
*
|
||||
* For details on the use of these macros, see the queue(3) manual page.
|
||||
*
|
||||
*
|
||||
* SLIST LIST STAILQ TAILQ
|
||||
* _HEAD + + + +
|
||||
* _HEAD_INITIALIZER + + + +
|
||||
* _ENTRY + + + +
|
||||
* _INIT + + + +
|
||||
* _EMPTY + + + +
|
||||
* _FIRST + + + +
|
||||
* _NEXT + + + +
|
||||
* _PREV - - - +
|
||||
* _LAST - - + +
|
||||
* _FOREACH + + + +
|
||||
* _FOREACH_SAFE + + + +
|
||||
* _FOREACH_REVERSE - - - +
|
||||
* _FOREACH_REVERSE_SAFE - - - +
|
||||
* _INSERT_HEAD + + + +
|
||||
* _INSERT_BEFORE - + - +
|
||||
* _INSERT_AFTER + + + +
|
||||
* _INSERT_TAIL - - + +
|
||||
* _CONCAT - - + +
|
||||
* _REMOVE_HEAD + - + -
|
||||
* _REMOVE + + + +
|
||||
*
|
||||
*/
|
||||
#ifdef QUEUE_MACRO_DEBUG
|
||||
/* Store the last 2 places the queue element or head was altered */
|
||||
struct qm_trace {
|
||||
char * lastfile;
|
||||
int lastline;
|
||||
char * prevfile;
|
||||
int prevline;
|
||||
};
|
||||
|
||||
#define TRACEBUF struct qm_trace trace;
|
||||
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
|
||||
|
||||
#define QMD_TRACE_HEAD(head) do { \
|
||||
(head)->trace.prevline = (head)->trace.lastline; \
|
||||
(head)->trace.prevfile = (head)->trace.lastfile; \
|
||||
(head)->trace.lastline = __LINE__; \
|
||||
(head)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TRACE_ELEM(elem) do { \
|
||||
(elem)->trace.prevline = (elem)->trace.lastline; \
|
||||
(elem)->trace.prevfile = (elem)->trace.lastfile; \
|
||||
(elem)->trace.lastline = __LINE__; \
|
||||
(elem)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#else
|
||||
#define QMD_TRACE_ELEM(elem)
|
||||
#define QMD_TRACE_HEAD(head)
|
||||
#define TRACEBUF
|
||||
#define TRASHIT(x)
|
||||
#endif /* QUEUE_MACRO_DEBUG */
|
||||
|
||||
/*
|
||||
* Singly-linked List declarations.
|
||||
*/
|
||||
#define SLIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *slh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define SLIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define SLIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *sle_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked List functions.
|
||||
*/
|
||||
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
|
||||
|
||||
#define SLIST_FIRST(head) ((head)->slh_first)
|
||||
|
||||
#define SLIST_FOREACH(var, head, field) \
|
||||
for ((var) = SLIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = SLIST_FIRST((head)); \
|
||||
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
|
||||
for ((varp) = &SLIST_FIRST((head)); \
|
||||
((var) = *(varp)) != NULL; \
|
||||
(varp) = &SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_INIT(head) do { \
|
||||
SLIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
|
||||
SLIST_NEXT((slistelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_HEAD(head, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
|
||||
SLIST_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
|
||||
|
||||
#define SLIST_REMOVE(head, elm, type, field) do { \
|
||||
if (SLIST_FIRST((head)) == (elm)) { \
|
||||
SLIST_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = SLIST_FIRST((head)); \
|
||||
while (SLIST_NEXT(curelm, field) != (elm)) \
|
||||
curelm = SLIST_NEXT(curelm, field); \
|
||||
SLIST_NEXT(curelm, field) = \
|
||||
SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
|
||||
} \
|
||||
TRASHIT((elm)->field.sle_next); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_REMOVE_HEAD(head, field) do { \
|
||||
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue declarations.
|
||||
*/
|
||||
#define STAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *stqh_first;/* first element */ \
|
||||
struct type **stqh_last;/* addr of last next element */ \
|
||||
}
|
||||
|
||||
#define STAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).stqh_first }
|
||||
|
||||
#define STAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *stqe_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue functions.
|
||||
*/
|
||||
#define STAILQ_CONCAT(head1, head2) do { \
|
||||
if (!STAILQ_EMPTY((head2))) { \
|
||||
*(head1)->stqh_last = (head2)->stqh_first; \
|
||||
(head1)->stqh_last = (head2)->stqh_last; \
|
||||
STAILQ_INIT((head2)); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
|
||||
|
||||
#define STAILQ_FIRST(head) ((head)->stqh_first)
|
||||
|
||||
#define STAILQ_FOREACH(var, head, field) \
|
||||
for((var) = STAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = STAILQ_NEXT((var), field))
|
||||
|
||||
|
||||
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = STAILQ_FIRST((head)); \
|
||||
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define STAILQ_INIT(head) do { \
|
||||
STAILQ_FIRST((head)) = NULL; \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_NEXT((tqelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
STAILQ_NEXT((elm), field) = NULL; \
|
||||
*(head)->stqh_last = (elm); \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_LAST(head, type, field) \
|
||||
(STAILQ_EMPTY((head)) ? \
|
||||
NULL : \
|
||||
((struct type *)(void *) \
|
||||
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
|
||||
|
||||
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
|
||||
|
||||
#define STAILQ_REMOVE(head, elm, type, field) do { \
|
||||
if (STAILQ_FIRST((head)) == (elm)) { \
|
||||
STAILQ_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = STAILQ_FIRST((head)); \
|
||||
while (STAILQ_NEXT(curelm, field) != (elm)) \
|
||||
curelm = STAILQ_NEXT(curelm, field); \
|
||||
if ((STAILQ_NEXT(curelm, field) = \
|
||||
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
|
||||
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
|
||||
} \
|
||||
TRASHIT((elm)->field.stqe_next); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_REMOVE_HEAD(head, field) do { \
|
||||
if ((STAILQ_FIRST((head)) = \
|
||||
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* List declarations.
|
||||
*/
|
||||
#define LIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *lh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define LIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define LIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *le_next; /* next element */ \
|
||||
struct type **le_prev; /* address of previous next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* List functions.
|
||||
*/
|
||||
|
||||
#if (defined(_KERNEL) && defined(INVARIANTS))
|
||||
#define QMD_LIST_CHECK_HEAD(head, field) do { \
|
||||
if (LIST_FIRST((head)) != NULL && \
|
||||
LIST_FIRST((head))->field.le_prev != \
|
||||
&LIST_FIRST((head))) \
|
||||
panic("Bad list head %p first->prev != head", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_LIST_CHECK_NEXT(elm, field) do { \
|
||||
if (LIST_NEXT((elm), field) != NULL && \
|
||||
LIST_NEXT((elm), field)->field.le_prev != \
|
||||
&((elm)->field.le_next)) \
|
||||
panic("Bad link elm %p next->prev != elm", (elm)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_LIST_CHECK_PREV(elm, field) do { \
|
||||
if (*(elm)->field.le_prev != (elm)) \
|
||||
panic("Bad link elm %p prev->next != elm", (elm)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define QMD_LIST_CHECK_HEAD(head, field)
|
||||
#define QMD_LIST_CHECK_NEXT(elm, field)
|
||||
#define QMD_LIST_CHECK_PREV(elm, field)
|
||||
#endif /* (_KERNEL && INVARIANTS) */
|
||||
|
||||
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
|
||||
|
||||
#define LIST_FIRST(head) ((head)->lh_first)
|
||||
|
||||
#define LIST_FOREACH(var, head, field) \
|
||||
for ((var) = LIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = LIST_NEXT((var), field))
|
||||
|
||||
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = LIST_FIRST((head)); \
|
||||
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define LIST_INIT(head) do { \
|
||||
LIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
|
||||
QMD_LIST_CHECK_NEXT(listelm, field); \
|
||||
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
|
||||
LIST_NEXT((listelm), field)->field.le_prev = \
|
||||
&LIST_NEXT((elm), field); \
|
||||
LIST_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
QMD_LIST_CHECK_PREV(listelm, field); \
|
||||
(elm)->field.le_prev = (listelm)->field.le_prev; \
|
||||
LIST_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.le_prev = (elm); \
|
||||
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_HEAD(head, elm, field) do { \
|
||||
QMD_LIST_CHECK_HEAD((head), field); \
|
||||
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
|
||||
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
|
||||
LIST_FIRST((head)) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
|
||||
|
||||
#define LIST_REMOVE(elm, field) do { \
|
||||
QMD_LIST_CHECK_NEXT(elm, field); \
|
||||
QMD_LIST_CHECK_PREV(elm, field); \
|
||||
if (LIST_NEXT((elm), field) != NULL) \
|
||||
LIST_NEXT((elm), field)->field.le_prev = \
|
||||
(elm)->field.le_prev; \
|
||||
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
|
||||
TRASHIT((elm)->field.le_next); \
|
||||
TRASHIT((elm)->field.le_prev); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Tail queue declarations.
|
||||
*/
|
||||
#define TAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *tqh_first; /* first element */ \
|
||||
struct type **tqh_last; /* addr of last next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
#define TAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).tqh_first }
|
||||
|
||||
#define TAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *tqe_next; /* next element */ \
|
||||
struct type **tqe_prev; /* address of previous next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
/*
|
||||
* Tail queue functions.
|
||||
*/
|
||||
#if (defined(_KERNEL) && defined(INVARIANTS))
|
||||
#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
|
||||
if (!TAILQ_EMPTY(head) && \
|
||||
TAILQ_FIRST((head))->field.tqe_prev != \
|
||||
&TAILQ_FIRST((head))) \
|
||||
panic("Bad tailq head %p first->prev != head", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
|
||||
if (*(head)->tqh_last != NULL) \
|
||||
panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
|
||||
if (TAILQ_NEXT((elm), field) != NULL && \
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev != \
|
||||
&((elm)->field.tqe_next)) \
|
||||
panic("Bad link elm %p next->prev != elm", (elm)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
|
||||
if (*(elm)->field.tqe_prev != (elm)) \
|
||||
panic("Bad link elm %p prev->next != elm", (elm)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define QMD_TAILQ_CHECK_HEAD(head, field)
|
||||
#define QMD_TAILQ_CHECK_TAIL(head, headname)
|
||||
#define QMD_TAILQ_CHECK_NEXT(elm, field)
|
||||
#define QMD_TAILQ_CHECK_PREV(elm, field)
|
||||
#endif /* (_KERNEL && INVARIANTS) */
|
||||
|
||||
#define TAILQ_CONCAT(head1, head2, field) do { \
|
||||
if (!TAILQ_EMPTY(head2)) { \
|
||||
*(head1)->tqh_last = (head2)->tqh_first; \
|
||||
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
|
||||
(head1)->tqh_last = (head2)->tqh_last; \
|
||||
TAILQ_INIT((head2)); \
|
||||
QMD_TRACE_HEAD(head1); \
|
||||
QMD_TRACE_HEAD(head2); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
|
||||
|
||||
#define TAILQ_FIRST(head) ((head)->tqh_first)
|
||||
|
||||
#define TAILQ_FOREACH(var, head, field) \
|
||||
for ((var) = TAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = TAILQ_NEXT((var), field))
|
||||
|
||||
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = TAILQ_FIRST((head)); \
|
||||
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
|
||||
for ((var) = TAILQ_LAST((head), headname); \
|
||||
(var); \
|
||||
(var) = TAILQ_PREV((var), headname, field))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
|
||||
for ((var) = TAILQ_LAST((head), headname); \
|
||||
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_INIT(head) do { \
|
||||
TAILQ_FIRST((head)) = NULL; \
|
||||
(head)->tqh_last = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_NEXT(listelm, field); \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else { \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} \
|
||||
TAILQ_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_PREV(listelm, field); \
|
||||
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
|
||||
TAILQ_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.tqe_prev = (elm); \
|
||||
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_HEAD(head, field); \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
|
||||
TAILQ_FIRST((head))->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
TAILQ_FIRST((head)) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_TAIL(head, field); \
|
||||
TAILQ_NEXT((elm), field) = NULL; \
|
||||
(elm)->field.tqe_prev = (head)->tqh_last; \
|
||||
*(head)->tqh_last = (elm); \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_LAST(head, headname) \
|
||||
(*(((struct headname *)((head)->tqh_last))->tqh_last))
|
||||
|
||||
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
|
||||
|
||||
/*
 * Previous element, using the same reversed-head trick as TAILQ_LAST;
 * evaluates to NULL when "elm" is the first element.
 */
#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
|
||||
|
||||
/*
 * Unlink "elm" from the tailq.  The forward neighbour's back-pointer (or
 * the head's tqh_last when removing the tail element) is repaired first,
 * then the backward splice is completed through elm's tqe_prev.  In
 * queue-debug kernels the removed element's link fields are trashed so a
 * stale reuse faults immediately.
 */
#define	TAILQ_REMOVE(head, elm, field) do {				\
	QMD_TAILQ_CHECK_NEXT(elm, field);				\
	QMD_TAILQ_CHECK_PREV(elm, field);				\
	if ((TAILQ_NEXT((elm), field)) != NULL)				\
		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
		    (elm)->field.tqe_prev;				\
	else {								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
		QMD_TRACE_HEAD(head);					\
	}								\
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
	TRASHIT((elm)->field.tqe_next);					\
	TRASHIT((elm)->field.tqe_prev);					\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)
|
||||
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
/*
|
||||
* XXX insque() and remque() are an old way of handling certain queues.
|
||||
* They bogusly assumes that all queue heads look alike.
|
||||
*/
|
||||
|
||||
struct quehead {
|
||||
struct quehead *qh_link;
|
||||
struct quehead *qh_rlink;
|
||||
};
|
||||
|
||||
#ifdef __CC_SUPPORTS___INLINE
|
||||
|
||||
static __inline void
|
||||
insque(void *a, void *b)
|
||||
{
|
||||
struct quehead *element = (struct quehead *)a,
|
||||
*head = (struct quehead *)b;
|
||||
|
||||
element->qh_link = head->qh_link;
|
||||
element->qh_rlink = head;
|
||||
head->qh_link = element;
|
||||
element->qh_link->qh_rlink = element;
|
||||
}
|
||||
|
||||
static __inline void
|
||||
remque(void *a)
|
||||
{
|
||||
struct quehead *element = (struct quehead *)a;
|
||||
|
||||
element->qh_link->qh_rlink = element->qh_rlink;
|
||||
element->qh_rlink->qh_link = element->qh_link;
|
||||
element->qh_rlink = 0;
|
||||
}
|
||||
|
||||
#else /* !__CC_SUPPORTS___INLINE */
|
||||
|
||||
void insque(void *a, void *b);
|
||||
void remque(void *a);
|
||||
|
||||
#endif /* __CC_SUPPORTS___INLINE */
|
||||
|
||||
#endif /* _KERNEL */
|
||||
|
||||
#endif /* !_SYS_QUEUE_H_ */
|
715
contrib/ofed/libcxgb4/src/t4.h
Normal file
715
contrib/ofed/libcxgb4/src/t4.h
Normal file
|
@ -0,0 +1,715 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __T4_H__
|
||||
#define __T4_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <assert.h>
|
||||
#include <syslog.h>
|
||||
|
||||
#define ENODATA 95
|
||||
#undef ELAST
|
||||
#define ELAST ENODATA
|
||||
|
||||
/*
|
||||
* Try and minimize the changes from the kernel code that is pull in
|
||||
* here for kernel bypass ops.
|
||||
*/
|
||||
#define __u8 uint8_t
|
||||
#define u8 uint8_t
|
||||
#define __u16 uint16_t
|
||||
#define __be16 uint16_t
|
||||
#define u16 uint16_t
|
||||
#define __u32 uint32_t
|
||||
#define __be32 uint32_t
|
||||
#define u32 uint32_t
|
||||
#define __u64 uint64_t
|
||||
#define __be64 uint64_t
|
||||
#define u64 uint64_t
|
||||
#define DECLARE_PCI_UNMAP_ADDR(a)
|
||||
#define __iomem
|
||||
#define cpu_to_be16 htons
|
||||
#define cpu_to_be32 htonl
|
||||
#define cpu_to_be64 htonll
|
||||
#define be16_to_cpu ntohs
|
||||
#define be32_to_cpu ntohl
|
||||
#define be64_to_cpu ntohll
|
||||
#define BUG_ON(c) assert(!(c))
|
||||
#define unlikely
|
||||
#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
|
||||
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
|
||||
#if __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
# define cpu_to_pci32(val) ((val))
|
||||
#elif __BYTE_ORDER == __BIG_ENDIAN
|
||||
# define cpu_to_pci32(val) (__bswap_32((val)))
|
||||
#else
|
||||
# error __BYTE_ORDER not defined
|
||||
#endif
|
||||
|
||||
#define writel(v, a) do { *((volatile u32 *)(a)) = cpu_to_pci32(v); } while (0)
|
||||
|
||||
#include <arpa/inet.h> /* For htonl() and friends */
|
||||
#include "t4_regs.h"
|
||||
#include "t4_chip_type.h"
|
||||
#include "t4fw_interface.h"
|
||||
|
||||
#ifdef DEBUG
|
||||
#define DBGLOG(s)
|
||||
#define PDBG(fmt, args...) do {syslog(LOG_DEBUG, fmt, ##args); } while (0)
|
||||
#else
|
||||
#define DBGLOG(s)
|
||||
#define PDBG(fmt, args...) do {} while (0)
|
||||
#endif
|
||||
|
||||
#define T4_MAX_READ_DEPTH 16
|
||||
#define T4_QID_BASE 1024
|
||||
#define T4_MAX_QIDS 256
|
||||
#define T4_MAX_NUM_PD 65536
|
||||
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
|
||||
#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
|
||||
#define T4_MAX_IQ_SIZE (65520 - 1)
|
||||
#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
|
||||
#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
|
||||
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
|
||||
#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
|
||||
#define T4_MAX_NUM_STAG (1<<15)
|
||||
#define T4_MAX_MR_SIZE (~0ULL - 1)
|
||||
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
|
||||
#define T4_STAG_UNSET 0xffffffff
|
||||
#define T4_FW_MAJ 0
|
||||
|
||||
/*
 * Status page living in the slot one past the end of each queue
 * (queue[size]).  Flit 0 is marked hardware-owned; everything from
 * qp_err on is software-owned.  host_cidx/host_pidx are mirrors of the
 * software consumer/producer indices, updated by the t4_*_produce() /
 * t4_*_consume() helpers below.
 */
struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns; latched by t4_set_wq_in_error() */
	u8 db_off;	/* doorbells-off flag; presumably what wq->db_offp
			 * points at - confirm against queue setup code */
	u8 pad;
	u16 host_wq_pidx;	/* NOTE(review): not written in this view */
	u16 host_cidx;		/* sw consumer-index mirror */
	u16 host_pidx;		/* sw producer-index mirror */
};
|
||||
|
||||
#define T4_EQ_ENTRY_SIZE 64
|
||||
|
||||
#define T4_SQ_NUM_SLOTS 5
|
||||
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
|
||||
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_isgl)) / sizeof (struct fw_ri_sge))
|
||||
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_immd)))
|
||||
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct fw_ri_immd)))
|
||||
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct fw_ri_isgl)) / sizeof (struct fw_ri_sge))
|
||||
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - sizeof(struct fw_ri_immd)))
|
||||
#define T4_MAX_FR_DEPTH 255
|
||||
|
||||
#define T4_RQ_NUM_SLOTS 2
|
||||
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
|
||||
#define T4_MAX_RECV_SGE 4
|
||||
|
||||
union t4_wr {
|
||||
struct fw_ri_res_wr res;
|
||||
struct fw_ri_wr init;
|
||||
struct fw_ri_rdma_write_wr write;
|
||||
struct fw_ri_send_wr send;
|
||||
struct fw_ri_rdma_read_wr read;
|
||||
struct fw_ri_bind_mw_wr bind;
|
||||
struct fw_ri_fr_nsmr_wr fr;
|
||||
struct fw_ri_inv_lstag_wr inv;
|
||||
struct t4_status_page status;
|
||||
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
|
||||
};
|
||||
|
||||
union t4_recv_wr {
|
||||
struct fw_ri_recv_wr recv;
|
||||
struct t4_status_page status;
|
||||
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
|
||||
};
|
||||
|
||||
/*
 * Fill in the common header of a work request.  The send-WR layout is
 * used as the template for every union t4_wr member; the three r1[] pad
 * bytes are cleared explicitly.
 * NOTE(review): relies on opcode/flags/wrid/len16 occupying the same
 * offsets in all union members - confirm against t4fw_interface.h.
 */
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}
|
||||
|
||||
/* CQE/AE status codes */
|
||||
#define T4_ERR_SUCCESS 0x0
|
||||
#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
|
||||
/* STAG is offlimt, being 0, */
|
||||
/* or STAG_key mismatch */
|
||||
#define T4_ERR_PDID 0x2 /* PDID mismatch */
|
||||
#define T4_ERR_QPID 0x3 /* QPID mismatch */
|
||||
#define T4_ERR_ACCESS 0x4 /* Invalid access right */
|
||||
#define T4_ERR_WRAP 0x5 /* Wrap error */
|
||||
#define T4_ERR_BOUND 0x6 /* base and bounds voilation */
|
||||
#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define T4_ERR_ECC 0x9 /* ECC error detected */
|
||||
#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
|
||||
/* reading PSTAG for a MW */
|
||||
/* Invalidate */
|
||||
#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
|
||||
/* software error */
|
||||
#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
|
||||
#define T4_ERR_CRC 0x10 /* CRC error */
|
||||
#define T4_ERR_MARKER 0x11 /* Marker error */
|
||||
#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
|
||||
#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
|
||||
#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
|
||||
#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
|
||||
#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
|
||||
#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
|
||||
#define T4_ERR_MSN 0x18 /* MSN error */
|
||||
#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
|
||||
#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
|
||||
/* or READ_REQ */
|
||||
#define T4_ERR_MSN_GAP 0x1B
|
||||
#define T4_ERR_MSN_RANGE 0x1C
|
||||
#define T4_ERR_IRD_OVERFLOW 0x1D
|
||||
#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
|
||||
/* software error */
|
||||
#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
|
||||
/* mismatch) */
|
||||
/*
|
||||
* CQE defs
|
||||
*/
|
||||
struct t4_cqe {
|
||||
__be32 header;
|
||||
__be32 len;
|
||||
union {
|
||||
struct {
|
||||
__be32 stag;
|
||||
__be32 msn;
|
||||
} rcqe;
|
||||
struct {
|
||||
u32 nada1;
|
||||
u16 nada2;
|
||||
u16 cidx;
|
||||
} scqe;
|
||||
struct {
|
||||
__be32 wrid_hi;
|
||||
__be32 wrid_low;
|
||||
} gen;
|
||||
} u;
|
||||
__be64 reserved;
|
||||
__be64 bits_type_ts;
|
||||
};
|
||||
|
||||
/* macros for flit 0 of the cqe */
|
||||
|
||||
#define S_CQE_QPID 12
|
||||
#define M_CQE_QPID 0xFFFFF
|
||||
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
|
||||
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
|
||||
|
||||
#define S_CQE_SWCQE 11
|
||||
#define M_CQE_SWCQE 0x1
|
||||
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
|
||||
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
|
||||
|
||||
#define S_CQE_STATUS 5
|
||||
#define M_CQE_STATUS 0x1F
|
||||
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
|
||||
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
|
||||
|
||||
#define S_CQE_TYPE 4
|
||||
#define M_CQE_TYPE 0x1
|
||||
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
|
||||
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
|
||||
|
||||
#define S_CQE_OPCODE 0
|
||||
#define M_CQE_OPCODE 0xF
|
||||
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
|
||||
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
|
||||
|
||||
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
|
||||
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
|
||||
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
|
||||
#define SQ_TYPE(x) (CQE_TYPE((x)))
|
||||
#define RQ_TYPE(x) (!CQE_TYPE((x)))
|
||||
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
|
||||
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
|
||||
|
||||
#define CQE_SEND_OPCODE(x)( \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
|
||||
|
||||
#define CQE_LEN(x) (be32_to_cpu((x)->len))
|
||||
|
||||
/* used for RQ completion processing */
|
||||
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
|
||||
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
|
||||
|
||||
/* used for SQ completion processing */
|
||||
#define CQE_WRID_SQ_IDX(x) (x)->u.scqe.cidx
|
||||
|
||||
/* generic accessor macros */
|
||||
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
|
||||
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
|
||||
|
||||
/* macros for flit 3 of the cqe */
|
||||
#define S_CQE_GENBIT 63
|
||||
#define M_CQE_GENBIT 0x1
|
||||
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
|
||||
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
|
||||
|
||||
#define S_CQE_OVFBIT 62
|
||||
#define M_CQE_OVFBIT 0x1
|
||||
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
|
||||
|
||||
#define S_CQE_IQTYPE 60
|
||||
#define M_CQE_IQTYPE 0x3
|
||||
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
|
||||
|
||||
#define M_CQE_TS 0x0fffffffffffffffULL
|
||||
#define G_CQE_TS(x) ((x) & M_CQE_TS)
|
||||
|
||||
#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
|
||||
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
|
||||
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
|
||||
|
||||
struct t4_swsqe {
|
||||
u64 wr_id;
|
||||
struct t4_cqe cqe;
|
||||
__be32 read_len;
|
||||
int opcode;
|
||||
int complete;
|
||||
int signaled;
|
||||
u16 idx;
|
||||
int flushed;
|
||||
};
|
||||
|
||||
enum {
|
||||
T4_SQ_ONCHIP = (1<<0),
|
||||
};
|
||||
|
||||
struct t4_sq {
|
||||
union t4_wr *queue;
|
||||
struct t4_swsqe *sw_sq;
|
||||
struct t4_swsqe *oldest_read;
|
||||
volatile u32 *udb;
|
||||
size_t memsize;
|
||||
u32 qid;
|
||||
void *ma_sync;
|
||||
u16 in_use;
|
||||
u16 size;
|
||||
u16 cidx;
|
||||
u16 pidx;
|
||||
u16 wq_pidx;
|
||||
u16 flags;
|
||||
short flush_cidx;
|
||||
};
|
||||
|
||||
struct t4_swrqe {
|
||||
u64 wr_id;
|
||||
};
|
||||
|
||||
struct t4_rq {
|
||||
union t4_recv_wr *queue;
|
||||
struct t4_swrqe *sw_rq;
|
||||
volatile u32 *udb;
|
||||
size_t memsize;
|
||||
u32 qid;
|
||||
u32 msn;
|
||||
u32 rqt_hwaddr;
|
||||
u16 rqt_size;
|
||||
u16 in_use;
|
||||
u16 size;
|
||||
u16 cidx;
|
||||
u16 pidx;
|
||||
u16 wq_pidx;
|
||||
};
|
||||
|
||||
struct t4_wq {
|
||||
struct t4_sq sq;
|
||||
struct t4_rq rq;
|
||||
struct c4iw_rdev *rdev;
|
||||
u32 qid_mask;
|
||||
int error;
|
||||
int flushed;
|
||||
u8 *db_offp;
|
||||
};
|
||||
|
||||
/*
 * Flush pending write-combined stores, then write 1 through the SQ's
 * ma_sync mapping.
 * NOTE(review): page_size is unused here - presumably kept for interface
 * compatibility; confirm with callers.
 */
static inline void t4_ma_sync(struct t4_wq *wq, int page_size)
{
	wc_wmb();
	*((volatile u32 *)wq->sq.ma_sync) = 1;
}
|
||||
|
||||
/* Number of receive WRs currently outstanding on the RQ. */
static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}
|
||||
|
||||
/* True when no receive WRs are outstanding. */
static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}
|
||||
|
||||
/* Full at size-1 entries: one slot is kept free to distinguish full/empty. */
static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}
|
||||
|
||||
/* Free RQ slots remaining (capacity is size-1, see t4_rq_full()). */
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}
|
||||
|
||||
/*
 * Account for one newly-posted receive WR: bump in_use, advance the
 * entry-granular pidx with wraparound, advance the 64-byte-slot index
 * wq_pidx by the WR size (len16 units of 16 bytes), and mirror the new
 * pidx into the trailing status page unless the WQ has failed.
 */
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
	if (!wq->error)
		wq->rq.queue[wq->rq.size].status.host_pidx = wq->rq.pidx;
}
|
||||
|
||||
/*
 * Retire one receive WR: drop in_use, bump the message sequence number,
 * advance cidx with wraparound (asserting cidx only catches up to pidx
 * when the queue drains), and mirror cidx into the status page unless
 * the WQ has failed.
 */
static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
	assert((wq->rq.cidx != wq->rq.pidx) || wq->rq.in_use == 0);
	if (!wq->error)
		wq->rq.queue[wq->rq.size].status.host_cidx = wq->rq.cidx;
}
|
||||
|
||||
/* True when no send WRs are outstanding. */
static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}
|
||||
|
||||
/* Full at size-1 entries: one slot is kept free to distinguish full/empty. */
static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}
|
||||
|
||||
/* Free SQ slots remaining (capacity is size-1, see t4_sq_full()). */
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}
|
||||
|
||||
/* Nonzero when the SQ was created with on-chip (T4_SQ_ONCHIP) memory. */
static inline int t4_sq_onchip(struct t4_wq *wq)
{
	return wq->sq.flags & T4_SQ_ONCHIP;
}
|
||||
|
||||
/*
 * Account for one newly-posted send WR: bump in_use, advance the
 * entry-granular pidx with wraparound, advance the 64-byte-slot index
 * wq_pidx by the WR size (len16 units of 16 bytes), and mirror the new
 * pidx into the trailing status page unless the WQ has failed.
 */
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
	if (!wq->error)
		wq->sq.queue[wq->sq.size].status.host_pidx = (wq->sq.pidx);
}
|
||||
|
||||
/*
 * Retire one send WR.  If the flush bookkeeping index has caught up to
 * cidx it is invalidated (-1) before advancing.  cidx wraps like pidx,
 * and the new value is mirrored into the status page unless the WQ has
 * failed.
 */
static inline void t4_sq_consume(struct t4_wq *wq)
{
	assert(wq->sq.in_use >= 1);
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
	assert((wq->sq.cidx != wq->sq.pidx) || wq->sq.in_use == 0);
	if (!wq->error)
		wq->sq.queue[wq->sq.size].status.host_cidx = wq->sq.cidx;
}
|
||||
|
||||
/*
 * Copy one 64-byte WQE into the write-combined user doorbell window as
 * eight 64-bit stores (len16 == 4 slots of 16 bytes each).
 *
 * Fix: declared "static inline" like every other helper in this header;
 * a plain static function defined in a header draws defined-but-not-used
 * warnings in every translation unit that never rings a doorbell and
 * duplicates code in those that do.  uint32_t/uint64_t are spelled out
 * directly - identical to the file's u32/u64 macros.
 */
static inline void copy_wqe_to_udb(volatile uint32_t *udb_offset, void *wqe)
{
	uint64_t *src, *dst;
	int len16 = 4;

	src = (uint64_t *)wqe;
	dst = (uint64_t *)udb_offset;

	while (len16) {
		*dst++ = *src++;
		*dst++ = *src++;
		len16--;
	}
}
|
||||
|
||||
extern int ma_wr;
|
||||
extern int t5_en_wc;
|
||||
|
||||
/*
 * Ring the SQ doorbell for "inc" newly-posted WRs.
 *
 * T5 path: with write-combining enabled and exactly one WR posted, the
 * whole 64-byte WQE is copied into the WC doorbell window; otherwise a
 * plain PIDX doorbell write is issued.  Both sides are fenced with
 * wc_wmb().
 *
 * T4 path: for on-chip SQs a sequence of stores is issued to the flits
 * just past the status page before the QID|PIDX doorbell write.
 * NOTE(review): the ma_wr branch stores to flits[2+i] but the !ma_wr
 * branch stores i sixteen times to flits[2] (no "+i") and without
 * volatile - looks suspicious; confirm against Chelsio's reference
 * libcxgb4 before changing.
 */
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16,
				 union t4_wr *wqe)
{
	wc_wmb();
	if (t5) {
		if (t5_en_wc && inc == 1) {
			PDBG("%s: WC wq->sq.pidx = %d; len16=%d\n",
			     __func__, wq->sq.pidx, len16);
			copy_wqe_to_udb(wq->sq.udb + 14, wqe);
		} else {
			PDBG("%s: DB wq->sq.pidx = %d; len16=%d\n",
			     __func__, wq->sq.pidx, len16);
			writel(V_PIDX_T5(inc), wq->sq.udb);
		}
		wc_wmb();
		return;
	}
	if (ma_wr) {
		if (t4_sq_onchip(wq)) {
			int i;
			for (i = 0; i < 16; i++)
				*(volatile u32 *)&wq->sq.queue[wq->sq.size].flits[2+i] = i;
		}
	} else {
		if (t4_sq_onchip(wq)) {
			int i;
			for (i = 0; i < 16; i++)
				*(u32 *)&wq->sq.queue[wq->sq.size].flits[2] = i;
		}
	}
	writel(V_QID(wq->sq.qid & wq->qid_mask) | V_PIDX(inc), wq->sq.udb);
}
|
||||
|
||||
/*
 * Ring the RQ doorbell for "inc" newly-posted WRs.  Same T5 WC-vs-PIDX
 * split as t4_ring_sq_db(); the T4 path is just the QID|PIDX doorbell
 * write (RQs have no on-chip variant to pre-touch).
 */
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16,
				 union t4_recv_wr *wqe)
{
	wc_wmb();
	if (t5) {
		if (t5_en_wc && inc == 1) {
			PDBG("%s: WC wq->rq.pidx = %d; len16=%d\n",
			     __func__, wq->rq.pidx, len16);
			copy_wqe_to_udb(wq->rq.udb + 14, wqe);
		} else {
			PDBG("%s: DB wq->rq.pidx = %d; len16=%d\n",
			     __func__, wq->rq.pidx, len16);
			writel(V_PIDX_T5(inc), wq->rq.udb);
		}
		wc_wmb();
		return;
	}
	writel(V_QID(wq->rq.qid & wq->qid_mask) | V_PIDX(inc), wq->rq.udb);
}
|
||||
|
||||
/* WQ dead: either the software error latch or the status-page qp_err flag. */
static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->error || wq->rq.queue[wq->rq.size].status.qp_err;
}
|
||||
|
||||
/* Latch the QP-error flag in the RQ's trailing status page. */
static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
|
||||
|
||||
extern int c4iw_abi_version;
|
||||
|
||||
static inline int t4_wq_db_enabled(struct t4_wq *wq)
|
||||
{
|
||||
/*
|
||||
* If iw_cxgb4 driver supports door bell drop recovery then its
|
||||
* c4iw_abi_version would be greater than or equal to 2. In such
|
||||
* case return the status of db_off flag to ring the kernel mode
|
||||
* DB from user mode library.
|
||||
*/
|
||||
if ( c4iw_abi_version >= 2 )
|
||||
return ! *wq->db_offp;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
struct t4_cq {
|
||||
struct t4_cqe *queue;
|
||||
struct t4_cqe *sw_queue;
|
||||
struct c4iw_rdev *rdev;
|
||||
volatile u32 *ugts;
|
||||
size_t memsize;
|
||||
u64 bits_type_ts;
|
||||
u32 cqid;
|
||||
u32 qid_mask;
|
||||
u16 size; /* including status page */
|
||||
u16 cidx;
|
||||
u16 sw_pidx;
|
||||
u16 sw_cidx;
|
||||
u16 sw_in_use;
|
||||
u16 cidx_inc;
|
||||
u8 gen;
|
||||
u8 error;
|
||||
};
|
||||
|
||||
/*
 * Re-arm the CQ for notification ("se" selects solicited-event-only
 * arming) through the ugts register.  Accumulated consumer-index credit
 * wider than the M_CIDXINC field is returned first in M_CIDXINC-sized
 * writes (timer register value 7), then one final write returns the
 * remainder and arms the queue (timer register value 6).  Always
 * returns 0.
 */
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	while (cq->cidx_inc > M_CIDXINC) {
		val = V_SEINTARM(0) | V_CIDXINC(M_CIDXINC) | V_TIMERREG(7) |
		      V_INGRESSQID(cq->cqid & cq->qid_mask);
		writel(val, cq->ugts);
		cq->cidx_inc -= M_CIDXINC;
	}
	val = V_SEINTARM(se) | V_CIDXINC(cq->cidx_inc) | V_TIMERREG(6) |
	      V_INGRESSQID(cq->cqid & cq->qid_mask);
	writel(val, cq->ugts);
	cq->cidx_inc = 0;
	return 0;
}
|
||||
|
||||
/*
 * Reserve the next software-CQ slot.  Overflow (sw_in_use reaching the
 * queue size) is unrecoverable: log it, mark the CQ in error, assert.
 */
static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid);
		cq->error = 1;
		assert(0);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}
|
||||
|
||||
static inline void t4_swcq_consume(struct t4_cq *cq)
|
||||
{
|
||||
assert(cq->sw_in_use >= 1);
|
||||
cq->sw_in_use--;
|
||||
if (++cq->sw_cidx == cq->size)
|
||||
cq->sw_cidx = 0;
|
||||
}
|
||||
|
||||
/*
 * Consume one hardware CQE.  The CQE's bits_type_ts is remembered so
 * t4_next_hw_cqe() can detect overflow; consumer-index credit is
 * returned to hardware via ugts once a sixteenth of the queue (or the
 * M_CIDXINC field maximum) has accumulated; cidx then advances, flipping
 * the generation bit on wrap, and is mirrored into the status page.
 */
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) {
		uint32_t val;

		val = V_SEINTARM(0) | V_CIDXINC(cq->cidx_inc) | V_TIMERREG(7) |
		      V_INGRESSQID(cq->cqid & cq->qid_mask);
		writel(val, cq->ugts);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
	((struct t4_status_page *)&cq->queue[cq->size])->host_cidx = cq->cidx;
}
|
||||
|
||||
/* A CQE is valid when its generation bit matches the CQ's current gen. */
static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}
|
||||
|
||||
/*
 * Peek (without consuming) the next hardware CQE.
 * Returns 0 with *cqe set, -ENODATA when nothing is pending, or
 * -EOVERFLOW (also latching cq->error and asserting) when the slot just
 * behind cidx no longer carries the timestamp recorded at the last
 * consume - i.e. hardware has overwritten unconsumed entries.  rmb()
 * orders the validity check before reads of the CQE body.
 */
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		syslog(LOG_NOTICE, "cxgb4 cq overflow cqid %u\n", cq->cqid);
		cq->error = 1;
		assert(0);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}
|
||||
|
||||
/*
 * Peek the next software CQE, or NULL when the software queue is empty.
 * A full software queue is treated as unrecoverable overflow: log, mark
 * the CQ in error, assert.
 */
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid);
		cq->error = 1;
		assert(0);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}
|
||||
|
||||
/* Work pending: software CQEs queued, or a valid hardware CQE at cidx. */
static inline int t4_cq_notempty(struct t4_cq *cq)
{
	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}
|
||||
|
||||
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (cq->error)
|
||||
ret = -ENODATA;
|
||||
else if (cq->sw_in_use)
|
||||
*cqe = &cq->sw_queue[cq->sw_cidx];
|
||||
else ret = t4_next_hw_cqe(cq, cqe);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Read the qp_err flag from the CQ's trailing status page. */
static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}
|
||||
|
||||
/* Latch the qp_err flag in the CQ's trailing status page. */
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
|
||||
|
||||
/* Clear the qp_err flag in the CQ's trailing status page. */
static inline void t4_reset_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 0;
}
|
||||
|
||||
struct t4_dev_status_page
|
||||
{
|
||||
u8 db_off;
|
||||
};
|
||||
|
||||
#endif
|
96
contrib/ofed/libcxgb4/src/t4_chip_type.h
Normal file
96
contrib/ofed/libcxgb4/src/t4_chip_type.h
Normal file
|
@ -0,0 +1,96 @@
|
|||
/*
|
||||
* This file is part of the Chelsio T4 Ethernet driver.
|
||||
*
|
||||
* Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
|
||||
* release for licensing terms and conditions.
|
||||
*/
|
||||
#ifndef __T4_CHIP_TYPE_H__
|
||||
#define __T4_CHIP_TYPE_H__
|
||||
|
||||
/*
|
||||
* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
|
||||
*
|
||||
* V = "4" for T4; "5" for T5, etc. or
|
||||
* = "a" for T4 FPGA; "b" for T4 FPGA, etc.
|
||||
* F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
|
||||
* PP = adapter product designation
|
||||
*
|
||||
* We use the "version" (V) of the adpater to code the Chip Version above
|
||||
* but separate out the FPGA as a separate boolean as per above.
|
||||
*/
|
||||
/* Decompose a 0xVFPP Device ID: version nibble, PCI function, product. */
#define CHELSIO_PCI_ID_VER(__DeviceID)  ((__DeviceID) >> 12)
#define CHELSIO_PCI_ID_FUNC(__DeviceID) (((__DeviceID) >> 8) & 0xf)
#define CHELSIO_PCI_ID_PROD(__DeviceID) ((__DeviceID) & 0xff)

#define CHELSIO_T4		0x4
#define CHELSIO_T4_FPGA		0xa
#define CHELSIO_T5		0x5
#define CHELSIO_T5_FPGA		0xb

/*
 * Translate a PCI Device ID to a base Chelsio Chip Version -- CHELSIO_T4,
 * CHELSIO_T5, etc.  If it weren't for the screwed up numbering of the FPGAs
 * we could do this simply as DeviceID >> 12 (because we know the real
 * encoding of CHELSIO_Tx identifiers).  However, the FPGAs _do_ have weird
 * Device IDs so we need to do this translation here.  Note that only constant
 * arithmetic and comparisons can be done here since this is being used to
 * initialize static tables, etc.
 *
 * Finally: This will of course need to be expanded as future chips are
 * developed.
 */
#define CHELSIO_PCI_ID_CHIP_VERSION(__DeviceID) \
	(CHELSIO_PCI_ID_VER(__DeviceID) == CHELSIO_T4 || \
	 CHELSIO_PCI_ID_VER(__DeviceID) == CHELSIO_T4_FPGA \
	 ? CHELSIO_T4 \
	 : CHELSIO_T5)

/*
 * Internally we code the Chelsio T4 Family "Chip Code" as a tuple:
 *
 *     (Is FPGA, Chip Version, Chip Revision)
 *
 * where:
 *
 *     Is FPGA: is 0/1 indicating whether we're working with an FPGA
 *     Chip Version: is T4, T5, etc.
 *     Chip Revision: is the FAB "spin" of the Chip Version.
 */
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV = T4_A1,
	T4_LAST_REV = T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	/*
	 * Bug fix: T5_A1 was coded with revision 0, making it a duplicate
	 * of T5_A0 (and T5_LAST_REV == T5_FIRST_REV).  Revision 1 matches
	 * the upstream cxgb4 driver's definition.
	 */
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV = T5_A0,
	T5_LAST_REV = T5_A1,
};
|
||||
|
||||
/* True iff the chip code's version field equals CHELSIO_T4. */
static inline int is_t4(enum chip_type chip)
{
	return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
}
|
||||
|
||||
static inline int is_t5(enum chip_type chip)
|
||||
{
|
||||
|
||||
return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
|
||||
}
|
||||
|
||||
/* Nonzero iff the CHELSIO_CHIP_FPGA flag bit is set in the chip code. */
static inline int is_fpga(enum chip_type chip)
{
	return chip & CHELSIO_CHIP_FPGA;
}
|
||||
|
||||
#endif /* __T4_CHIP_TYPE_H__ */
|
175
contrib/ofed/libcxgb4/src/t4_pci_id_tbl.h
Normal file
175
contrib/ofed/libcxgb4/src/t4_pci_id_tbl.h
Normal file
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* This file is part of the Chelsio T4 Ethernet driver.
|
||||
*
|
||||
* Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
|
||||
* release for licensing terms and conditions.
|
||||
*/
|
||||
#ifndef __T4_PCI_ID_TBL_H__
|
||||
#define __T4_PCI_ID_TBL_H__
|
||||
|
||||
/*
|
||||
* The Os-Dependent code can defined cpp macros for creating a PCI Device ID
|
||||
* Table. This is useful because it allows the PCI ID Table to be maintained
|
||||
* in a single place and all supporting OSes to get new PCI Device IDs
|
||||
* automatically.
|
||||
*
|
||||
* The macros are:
|
||||
*
|
||||
* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
|
||||
* -- Used to start the definition of the PCI ID Table.
|
||||
*
|
||||
* CH_PCI_DEVICE_ID_FUNCTION
|
||||
* -- The PCI Function Number to use in the PCI Device ID Table. "0"
|
||||
* -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
|
||||
* -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
|
||||
*
|
||||
* CH_PCI_DEVICE_ID_FUNCTION2 [optional]
|
||||
* -- If defined, create a PCI Device ID Table with both
|
||||
* -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
|
||||
*
|
||||
* CH_PCI_ID_TABLE_ENTRY(DeviceID)
|
||||
* -- Used for the individual PCI Device ID entries. Note that we will
|
||||
* -- be adding a trailing comma (",") after all of the entries (and
|
||||
* -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
|
||||
*
|
||||
* CH_PCI_DEVICE_ID_TABLE_DEFINE_END
|
||||
* -- Used to finish the definition of the PCI ID Table. Note that we
|
||||
* -- will be adding a trailing semi-colon (";") here.
|
||||
*
|
||||
* CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
|
||||
* -- If defined, indicates that the OS Driver has support for Bypass
|
||||
* -- Adapters.
|
||||
*/
|
||||
#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
|
||||
|
||||
/*
|
||||
* Some sanity checks ...
|
||||
*/
|
||||
#ifndef CH_PCI_DEVICE_ID_FUNCTION
|
||||
#error CH_PCI_DEVICE_ID_FUNCTION not defined!
|
||||
#endif
|
||||
#ifndef CH_PCI_ID_TABLE_ENTRY
|
||||
#error CH_PCI_ID_TABLE_ENTRY not defined!
|
||||
#endif
|
||||
#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
|
||||
#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
|
||||
#endif
|
||||
|
||||
/*
|
||||
* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
|
||||
*
|
||||
* V = "4" for T4; "5" for T5, etc.
|
||||
* F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
|
||||
* PP = adapter product designation
|
||||
*
|
||||
* We use this consistency in order to create the proper PCI Device IDs
|
||||
* for the specified CH_PCI_DEVICE_ID_FUNCTION.
|
||||
*/
|
||||
/*
 * Emit one table entry per supported PCI Function.  With only
 * CH_PCI_DEVICE_ID_FUNCTION defined, each device ID expands to a single
 * entry; with CH_PCI_DEVICE_ID_FUNCTION2 also defined, it expands to a
 * pair of entries, one per function (function number goes in bits 11:8
 * of the 0xVFPP Device ID).
 */
#ifndef CH_PCI_DEVICE_ID_FUNCTION2
#define CH_PCI_ID_TABLE_FENTRY(__DeviceID) \
	CH_PCI_ID_TABLE_ENTRY((__DeviceID) | \
			      ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
#else
#define CH_PCI_ID_TABLE_FENTRY(__DeviceID) \
	CH_PCI_ID_TABLE_ENTRY((__DeviceID) | \
			      ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
	CH_PCI_ID_TABLE_ENTRY((__DeviceID) | \
			      ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
#endif
|
||||
|
||||
/* Note : The comments against each entry are used by the scripts in the vmware drivers
|
||||
* to correctly generate the pciid xml file, do not change the format currently used.
|
||||
*/
|
||||
|
||||
CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
|
||||
/*
|
||||
* FPGAs:
|
||||
*
|
||||
* Unfortunately the FPGA PCI Device IDs don't follow the ASIC PCI
|
||||
* Device ID numbering convetions for the Physical Functions.
|
||||
*/
|
||||
#if CH_PCI_DEVICE_ID_FUNCTION != 8
|
||||
CH_PCI_ID_TABLE_ENTRY(0xa000), /* PE10K FPGA */
|
||||
CH_PCI_ID_TABLE_ENTRY(0xb000), /* PF0 T5 PE10K5 FPGA */
|
||||
CH_PCI_ID_TABLE_ENTRY(0xb001), /* PF0 T5 PE10K FPGA */
|
||||
#else
|
||||
CH_PCI_ID_TABLE_FENTRY(0xa000), /* PE10K FPGA */
|
||||
CH_PCI_ID_TABLE_FENTRY(0xb000), /* PF0 T5 PE10K5 FPGA */
|
||||
CH_PCI_ID_TABLE_FENTRY(0xb001), /* PF0 T5 PE10K FPGA */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* These FPGAs seem to be used only by the csiostor driver
|
||||
*/
|
||||
#if ((CH_PCI_DEVICE_ID_FUNCTION == 5) || (CH_PCI_DEVICE_ID_FUNCTION == 6))
|
||||
CH_PCI_ID_TABLE_ENTRY(0xa001), /* PF1 PE10K FPGA FCOE */
|
||||
CH_PCI_ID_TABLE_ENTRY(0xa002), /* PE10K FPGA iSCSI */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* T4 adapters:
|
||||
*/
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */
|
||||
#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED
|
||||
CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */
|
||||
#endif
|
||||
CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */
|
||||
|
||||
/*
|
||||
* T5 adapters:
|
||||
*/
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
|
||||
#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED
|
||||
CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
|
||||
#endif
|
||||
CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
|
||||
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
|
||||
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
|
||||
|
||||
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
|
||||
|
||||
#endif /* __T4_PCI_ID_TBL_H__ */
|
42349
contrib/ofed/libcxgb4/src/t4_regs.h
Normal file
42349
contrib/ofed/libcxgb4/src/t4_regs.h
Normal file
File diff suppressed because it is too large
Load diff
7811
contrib/ofed/libcxgb4/src/t4fw_interface.h
Normal file
7811
contrib/ofed/libcxgb4/src/t4fw_interface.h
Normal file
File diff suppressed because it is too large
Load diff
702
contrib/ofed/libcxgb4/src/verbs.c
Normal file
702
contrib/ofed/libcxgb4/src/verbs.c
Normal file
|
@ -0,0 +1,702 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#if HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif /* HAVE_CONFIG_H */
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <sys/mman.h>
|
||||
#include <netinet/in.h>
|
||||
#include <inttypes.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "libcxgb4.h"
|
||||
#include "cxgb4-abi.h"
|
||||
|
||||
#define MASKED(x) (void *)((unsigned long)(x) & c4iw_page_mask)
|
||||
|
||||
int c4iw_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
|
||||
{
|
||||
struct ibv_query_device cmd;
|
||||
uint64_t raw_fw_ver;
|
||||
unsigned major, minor, sub_minor;
|
||||
int ret;
|
||||
|
||||
ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
|
||||
sizeof cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
major = (raw_fw_ver >> 32) & 0xffff;
|
||||
minor = (raw_fw_ver >> 16) & 0xffff;
|
||||
sub_minor = raw_fw_ver & 0xffff;
|
||||
|
||||
snprintf(attr->fw_ver, sizeof attr->fw_ver,
|
||||
"%d.%d.%d", major, minor, sub_minor);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int c4iw_query_port(struct ibv_context *context, uint8_t port,
|
||||
struct ibv_port_attr *attr)
|
||||
{
|
||||
struct ibv_query_port cmd;
|
||||
|
||||
return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
|
||||
}
|
||||
|
||||
struct ibv_pd *c4iw_alloc_pd(struct ibv_context *context)
|
||||
{
|
||||
struct ibv_alloc_pd cmd;
|
||||
struct c4iw_alloc_pd_resp resp;
|
||||
struct c4iw_pd *pd;
|
||||
|
||||
pd = malloc(sizeof *pd);
|
||||
if (!pd)
|
||||
return NULL;
|
||||
|
||||
if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
|
||||
&resp.ibv_resp, sizeof resp)) {
|
||||
free(pd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &pd->ibv_pd;
|
||||
}
|
||||
|
||||
/*
 * Destroy a protection domain.  The user memory is released only after
 * the kernel has successfully deallocated the PD; on failure the PD
 * remains valid and the error is returned to the caller.
 */
int c4iw_free_pd(struct ibv_pd *pd)
{
	int rc = ibv_cmd_dealloc_pd(pd);

	if (rc == 0)
		free(pd);
	return rc;
}
|
||||
|
||||
static struct ibv_mr *__c4iw_reg_mr(struct ibv_pd *pd, void *addr,
|
||||
size_t length, uint64_t hca_va,
|
||||
int access)
|
||||
{
|
||||
struct c4iw_mr *mhp;
|
||||
struct ibv_reg_mr cmd;
|
||||
struct ibv_reg_mr_resp resp;
|
||||
struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
|
||||
|
||||
mhp = malloc(sizeof *mhp);
|
||||
if (!mhp)
|
||||
return NULL;
|
||||
|
||||
if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
|
||||
access, &mhp->ibv_mr, &cmd, sizeof cmd,
|
||||
&resp, sizeof resp)) {
|
||||
free(mhp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
mhp->va_fbo = hca_va;
|
||||
mhp->len = length;
|
||||
|
||||
PDBG("%s stag 0x%x va_fbo 0x%" PRIx64 " len %d\n",
|
||||
__func__, mhp->ibv_mr.rkey, mhp->va_fbo, mhp->len);
|
||||
|
||||
pthread_spin_lock(&dev->lock);
|
||||
dev->mmid2ptr[c4iw_mmid(mhp->ibv_mr.lkey)] = mhp;
|
||||
pthread_spin_unlock(&dev->lock);
|
||||
INC_STAT(mr);
|
||||
return &mhp->ibv_mr;
|
||||
}
|
||||
|
||||
struct ibv_mr *c4iw_reg_mr(struct ibv_pd *pd, void *addr,
|
||||
size_t length, int access)
|
||||
{
|
||||
PDBG("%s addr %p length %ld\n", __func__, addr, length);
|
||||
return __c4iw_reg_mr(pd, addr, length, (uintptr_t) addr, access);
|
||||
}
|
||||
|
||||
int c4iw_dereg_mr(struct ibv_mr *mr)
|
||||
{
|
||||
int ret;
|
||||
struct c4iw_dev *dev = to_c4iw_dev(mr->pd->context->device);
|
||||
|
||||
ret = ibv_cmd_dereg_mr(mr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pthread_spin_lock(&dev->lock);
|
||||
dev->mmid2ptr[c4iw_mmid(mr->lkey)] = NULL;
|
||||
pthread_spin_unlock(&dev->lock);
|
||||
|
||||
free(to_c4iw_mr(mr));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Create a completion queue.
 *
 * Asks the kernel to allocate the CQ, then mmaps the hardware CQ ring
 * (resp.key) and the GTS doorbell page (resp.gts_key) into user space,
 * allocates a software shadow queue, and publishes the CQ in the
 * device-wide cqid -> chp table.  Returns the embedded ibv_cq, or NULL
 * on any failure (all partially-acquired resources are unwound via the
 * err1..err4 chain in reverse acquisition order).
 */
struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel, int comp_vector)
{
	struct ibv_create_cq cmd;
	struct c4iw_create_cq_resp resp;
	struct c4iw_cq *chp;
	struct c4iw_dev *dev = to_c4iw_dev(context->device);
	int ret;

	chp = calloc(1, sizeof *chp);
	if (!chp) {
		return NULL;
	}

	/*
	 * Zero resp.reserved before the call so we can detect a newer
	 * kernel writing into a field this library does not understand.
	 */
	resp.reserved = 0;
	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
				&chp->ibv_cq, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err1;

	if (resp.reserved)
		PDBG("%s c4iw_create_cq_resp reserved field modified by kernel\n",
		     __FUNCTION__);

	pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
#ifdef STALL_DETECTION
	gettimeofday(&chp->time, NULL);
#endif
	chp->rhp = dev;
	chp->cq.qid_mask = resp.qid_mask;
	chp->cq.cqid = resp.cqid;
	chp->cq.size = resp.size;
	chp->cq.memsize = resp.memsize;
	chp->cq.gen = 1;
	/* Map the hardware CQ ring read/write. */
	chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE,
			     MAP_SHARED, context->cmd_fd, resp.key);
	if (chp->cq.queue == MAP_FAILED)
		goto err2;

	/* Map the write-only GTS doorbell page. */
	chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
			    context->cmd_fd, resp.gts_key);
	if (chp->cq.ugts == MAP_FAILED)
		goto err3;

	/*
	 * Offset within the doorbell page differs by chip generation.
	 * NOTE(review): offsets are in units of *chp->cq.ugts; assumed to
	 * match the hardware GTS register layout -- confirm against t4.h.
	 */
	if (dev_is_t5(chp->rhp))
		chp->cq.ugts += 3;
	else
		chp->cq.ugts += 1;
	/* Software shadow of the CQ ring, one entry per HW CQE. */
	chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue);
	if (!chp->cq.sw_queue)
		goto err4;

	PDBG("%s cqid 0x%x key %" PRIx64 " va %p memsize %lu gts_key %"
	     PRIx64 " va %p qid_mask 0x%x\n",
	     __func__, chp->cq.cqid, resp.key, chp->cq.queue,
	     chp->cq.memsize, resp.gts_key, chp->cq.ugts, chp->cq.qid_mask);

	/* Publish the CQ for lookup by completion handlers. */
	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = chp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(cq);
	return &chp->ibv_cq;
err4:
	/* MASKED() rounds the adjusted ugts pointer back to its page base. */
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
err3:
	munmap(chp->cq.queue, chp->cq.memsize);
err2:
	(void)ibv_cmd_destroy_cq(&chp->ibv_cq);
err1:
	free(chp);
	return NULL;
}
|
||||
|
||||
/*
 * CQ resize is not supported; the ibv_cmd_resize_cq() path below is
 * compiled out pending kernel support.
 *
 * Return positive ENOSYS for consistency with the other unimplemented
 * operations in this file (c4iw_modify_srq(), c4iw_destroy_srq(),
 * c4iw_post_srq_recv(), c4iw_destroy_ah()) and with libibverbs, which
 * itself reports missing provider operations as positive ENOSYS.
 */
int c4iw_resize_cq(struct ibv_cq *ibcq, int cqe)
{
#if 0
	int ret;

	struct ibv_resize_cq cmd;
	struct ibv_resize_cq_resp resp;
	ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd, &resp, sizeof resp);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
#else
	return ENOSYS;
#endif
}
|
||||
|
||||
int c4iw_destroy_cq(struct ibv_cq *ibcq)
|
||||
{
|
||||
int ret;
|
||||
struct c4iw_cq *chp = to_c4iw_cq(ibcq);
|
||||
struct c4iw_dev *dev = to_c4iw_dev(ibcq->context->device);
|
||||
|
||||
chp->cq.error = 1;
|
||||
ret = ibv_cmd_destroy_cq(ibcq);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
munmap(MASKED(chp->cq.ugts), c4iw_page_size);
|
||||
munmap(chp->cq.queue, chp->cq.memsize);
|
||||
|
||||
pthread_spin_lock(&dev->lock);
|
||||
dev->cqid2ptr[chp->cq.cqid] = NULL;
|
||||
pthread_spin_unlock(&dev->lock);
|
||||
|
||||
free(chp->cq.sw_queue);
|
||||
free(chp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ibv_srq *c4iw_create_srq(struct ibv_pd *pd,
|
||||
struct ibv_srq_init_attr *attr)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Shared receive queues are not supported; report ENOSYS.
 */
int c4iw_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
		    int attr_mask)
{
	(void)srq;
	(void)attr;
	(void)attr_mask;
	return ENOSYS;
}
|
||||
|
||||
/*
 * Shared receive queues are not supported; report ENOSYS.
 */
int c4iw_destroy_srq(struct ibv_srq *srq)
{
	(void)srq;
	return ENOSYS;
}
|
||||
|
||||
/*
 * Shared receive queues are not supported; report ENOSYS.
 */
int c4iw_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr)
{
	(void)ibsrq;
	(void)wr;
	(void)bad_wr;
	return ENOSYS;
}
|
||||
|
||||
/*
 * Create a QP against an ABI-version-0 kernel driver (old response
 * layout, struct c4iw_create_qp_resp_v0).
 *
 * Acquires, in order: the kernel QP, the SQ doorbell/GTS page, the SQ
 * ring, the RQ doorbell/GTS page, the RQ ring, then the software shadow
 * SQ and RQ arrays, and finally publishes the QP in the device-wide
 * qpid -> qhp table.  Returns the embedded ibv_qp, or NULL with all
 * partially-acquired resources released via the err1..err8 unwind chain
 * (strict reverse acquisition order).
 */
static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
				   struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp_v0 resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	     " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	     " qid_mask 0x%x\n",
	     __func__,
	     resp.sqid, resp.sq_key, resp.sq_db_gts_key,
	     resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	/* Copy the kernel's queue geometry into the user-space WQ state. */
	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = 0;
	/* RQ message sequence numbers start at 1. */
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	/* Map the SQ doorbell/GTS page (write-only). */
	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err3;

	qhp->wq.sq.udb = dbva;
	/* Map the SQ work-request ring. */
	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
				PROT_WRITE, MAP_SHARED,
				pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err4;

	/* Map the RQ doorbell/GTS page (write-only). */
	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
	/* Map the RQ work-request ring. */
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
				PROT_WRITE, MAP_SHARED,
				pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	/* Software shadow arrays tracking posted SQ/RQ work requests. */
	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	/* NOTE(review): RQ shadow entries are bare wr_ids (uint64_t). */
	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	     " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	/* Publish the QP for lookup (e.g. by CQ poll and event handling). */
	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}
|
||||
|
||||
/*
 * Create a QP against a current-ABI kernel driver (struct
 * c4iw_create_qp_resp, which adds flags, ma_sync_key and the status
 * page support used below).
 *
 * Resource acquisition order: kernel QP, SQ doorbell page, SQ ring,
 * RQ doorbell page, RQ ring, software shadow SQ/RQ arrays, optional
 * on-chip MA-sync page; failures unwind via err1..err9 in reverse.
 * Returns the embedded ibv_qp or NULL.
 */
static struct ibv_qp *create_qp(struct ibv_pd *pd,
				struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	struct c4iw_context *ctx = to_c4iw_context(pd->context);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	     " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	     " qid_mask 0x%x\n",
	     __func__,
	     resp.sqid, resp.sq_key, resp.sq_db_gts_key,
	     resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	/* Copy the kernel's queue geometry into the user-space WQ state. */
	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	/* The kernel may have placed the SQ in on-chip memory. */
	qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0;
	qhp->wq.sq.flush_cidx = -1;
	/* RQ message sequence numbers start at 1. */
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	/*
	 * The MA write-combining workaround needs 16 extra __be64 slots
	 * past the SQ; an older kernel that did not allocate them cannot
	 * support it, so disable it globally with a warning.
	 */
	if (ma_wr && resp.sq_memsize < (resp.sq_size + 1) *
	    sizeof *qhp->wq.sq.queue + 16*sizeof(__be64) ) {
		ma_wr = 0;
		fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. "
			"MA workaround disabled.\n");
	}
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	/* Map the SQ doorbell/GTS page (write-only). */
	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED) {
		PDBG(" %s mmap for sq db failed\n", __func__);
		/* NOTE(review): abort() makes the goto unreachable; looks
		 * like a debugging leftover -- confirm before relying on
		 * the err3 unwind path. */
		abort();
		goto err3;
	}
	qhp->wq.sq.udb = dbva;
	/*
	 * NOTE(review): on T5 each QP appears to get its own 128-byte
	 * doorbell region within the page (udb is scaled in 4-byte
	 * units, +2 skips to the doorbell word) -- confirm against the
	 * T5 SGE doorbell layout.
	 */
	if (dev_is_t5(qhp->rhp)) {
		qhp->wq.sq.udb += (128*(qhp->wq.sq.qid & qhp->wq.qid_mask))/4;
		qhp->wq.sq.udb += 2;
	}

	/* Map the SQ work-request ring read/write. */
	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
				PROT_READ | PROT_WRITE, MAP_SHARED,
				pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED) {
		PDBG(" %s mmap for sq q failed size is qhp->wq.sq.memsize %zu \n", __func__, qhp->wq.sq.memsize);
		/* NOTE(review): same debugging-leftover abort() as above. */
		abort();
		goto err4;
	}

	/* Map the RQ doorbell/GTS page (write-only). */
	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
	/* Same per-QP T5 doorbell offset adjustment as the SQ above. */
	if (dev_is_t5(qhp->rhp)) {
		qhp->wq.rq.udb += (128*(qhp->wq.rq.qid & qhp->wq.qid_mask))/4;
		qhp->wq.rq.udb += 2;
	}
	/* Map the RQ work-request ring read/write. */
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
				PROT_READ | PROT_WRITE, MAP_SHARED,
				pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	/* Software shadow arrays tracking posted SQ/RQ work requests. */
	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	/* NOTE(review): RQ shadow entries are bare wr_ids (uint64_t). */
	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

	/*
	 * For on-chip SQs, map the PCIE_MA_SYNC register page and point
	 * ma_sync at the register's offset within it.
	 */
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
					  MAP_SHARED, pd->context->cmd_fd,
					  resp.ma_sync_key);
		if (qhp->wq.sq.ma_sync == MAP_FAILED)
			goto err9;
		qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
	}

	/*
	 * Doorbell-off flag lives in the shared status page on newer
	 * kernels, otherwise in the status word just past the RQ ring.
	 */
	if (ctx->status_page_size) {
		qhp->wq.db_offp = &ctx->status_page->db_off;
	} else {
		qhp->wq.db_offp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off;
	}

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	     " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	/* Publish the QP for lookup (e.g. by CQ poll and event handling). */
	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err9:
	free(qhp->wq.rq.sw_rq);
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}
|
||||
|
||||
struct ibv_qp *c4iw_create_qp(struct ibv_pd *pd,
|
||||
struct ibv_qp_init_attr *attr)
|
||||
{
|
||||
struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
|
||||
|
||||
if (dev->abi_version == 0)
|
||||
return create_qp_v0(pd, attr);
|
||||
return create_qp(pd, attr);
|
||||
}
|
||||
|
||||
static void reset_qp(struct c4iw_qp *qhp)
|
||||
{
|
||||
PDBG("%s enter qp %p\n", __func__, qhp);
|
||||
qhp->wq.sq.cidx = 0;
|
||||
qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0;
|
||||
qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
|
||||
qhp->wq.sq.oldest_read = NULL;
|
||||
memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
|
||||
memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
|
||||
}
|
||||
|
||||
int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
|
||||
int attr_mask)
|
||||
{
|
||||
struct ibv_modify_qp cmd;
|
||||
struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
|
||||
int ret;
|
||||
|
||||
PDBG("%s enter qp %p new state %d\n", __func__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
if (t4_wq_in_error(&qhp->wq))
|
||||
c4iw_flush_qp(qhp);
|
||||
ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
|
||||
if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
|
||||
reset_qp(qhp);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Destroy a queue pair.
 *
 * Flushes any pending work under the QP lock, asks the kernel to
 * destroy the QP (returning its error, with all user-space state
 * intact, if that fails), then unmaps the MA-sync page (on-chip SQs
 * only), both doorbell pages and both rings, removes the QP from the
 * device lookup table, and frees the shadow queues and the QP itself.
 */
int c4iw_destroy_qp(struct ibv_qp *ibqp)
{
	int ret;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);

	PDBG("%s enter qp %p\n", __func__, ibqp);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_qp(qhp);
	pthread_spin_unlock(&qhp->lock);

	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		return ret;
	}
	/*
	 * Undo the in-page offset applied at create time before
	 * unmapping the MA-sync page.
	 */
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
		munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size);
	}
	/* MASKED() recovers the page base from the adjusted udb pointers. */
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
	munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize);
	munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize);

	/* Unpublish before freeing so no lookup returns a stale qhp. */
	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(qhp->wq.rq.sw_rq);
	free(qhp->wq.sq.sw_sq);
	free(qhp);
	return 0;
}
|
||||
|
||||
int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
|
||||
int attr_mask, struct ibv_qp_init_attr *init_attr)
|
||||
{
|
||||
struct ibv_query_qp cmd;
|
||||
struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
|
||||
int ret;
|
||||
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
if (t4_wq_in_error(&qhp->wq))
|
||||
c4iw_flush_qp(qhp);
|
||||
ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct ibv_ah *c4iw_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Address handles are not supported; report ENOSYS.
 */
int c4iw_destroy_ah(struct ibv_ah *ah)
{
	(void)ah;
	return ENOSYS;
}
|
||||
|
||||
int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
|
||||
uint16_t lid)
|
||||
{
|
||||
struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
|
||||
int ret;
|
||||
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
if (t4_wq_in_error(&qhp->wq))
|
||||
c4iw_flush_qp(qhp);
|
||||
ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
|
||||
uint16_t lid)
|
||||
{
|
||||
struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
|
||||
int ret;
|
||||
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
if (t4_wq_in_error(&qhp->wq))
|
||||
c4iw_flush_qp(qhp);
|
||||
ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void c4iw_async_event(struct ibv_async_event *event)
|
||||
{
|
||||
PDBG("%s type %d obj %p\n", __func__, event->event_type,
|
||||
event->element.cq);
|
||||
|
||||
switch (event->event_type) {
|
||||
case IBV_EVENT_CQ_ERR:
|
||||
break;
|
||||
case IBV_EVENT_QP_FATAL:
|
||||
case IBV_EVENT_QP_REQ_ERR:
|
||||
case IBV_EVENT_QP_ACCESS_ERR:
|
||||
case IBV_EVENT_PATH_MIG_ERR: {
|
||||
struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
|
||||
pthread_spin_lock(&qhp->lock);
|
||||
c4iw_flush_qp(qhp);
|
||||
pthread_spin_unlock(&qhp->lock);
|
||||
break;
|
||||
}
|
||||
case IBV_EVENT_SQ_DRAINED:
|
||||
case IBV_EVENT_PATH_MIG:
|
||||
case IBV_EVENT_COMM_EST:
|
||||
case IBV_EVENT_QP_LAST_WQE_REACHED:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
.include <bsd.own.mk>
|
||||
|
||||
SUBDIR = libibcommon libibmad libibumad libibverbs libmlx4 libmthca
|
||||
SUBDIR += libopensm libosmcomp libosmvendor libibcm librdmacm libsdp
|
||||
SUBDIR += libopensm libosmcomp libosmvendor libibcm librdmacm libsdp libcxgb4
|
||||
|
||||
.include <bsd.subdir.mk>
|
||||
|
|
24
contrib/ofed/usr.lib/libcxgb4/Makefile
Normal file
24
contrib/ofed/usr.lib/libcxgb4/Makefile
Normal file
|
@ -0,0 +1,24 @@
|
|||
# $FreeBSD$
|
||||
|
||||
SHLIBDIR?= /usr/lib
|
||||
|
||||
.include <bsd.own.mk>
|
||||
|
||||
CXGB4DIR= ${.CURDIR}/../../libcxgb4
|
||||
IBVERBSDIR= ${.CURDIR}/../../libibverbs
|
||||
CXGBSRCDIR= ${CXGB4DIR}/src
|
||||
|
||||
.PATH: ${CXGBSRCDIR}
|
||||
|
||||
LIB= cxgb4
|
||||
SHLIB_MAJOR= 1
|
||||
MK_PROFILE= no
|
||||
|
||||
SRCS= dev.c cq.c qp.c verbs.c
|
||||
|
||||
CFLAGS+= -g -DHAVE_CONFIG_H -DDEBUG
|
||||
CFLAGS+= -I${.CURDIR} -I${CXGBSRCDIR} -I${IBVERBSDIR}/include
|
||||
|
||||
VERSION_MAP= ${CXGBSRCDIR}/cxgb4.map
|
||||
|
||||
.include <bsd.lib.mk>
|
4
contrib/ofed/usr.lib/libcxgb4/config.h
Normal file
4
contrib/ofed/usr.lib/libcxgb4/config.h
Normal file
|
@ -0,0 +1,4 @@
|
|||
#define HAVE_IBV_DONTFORK_RANGE
|
||||
#define HAVE_IBV_DOFORK_RANGE
|
||||
#define HAVE_IBV_REGISTER_DRIVER
|
||||
#define HAVE_IBV_READ_SYSFS_FILE
|
Loading…
Reference in a new issue