dpaa2: Clean up channels in separate tasks

Each channel gets its own DMA resources, cleanup and "buffer pool"
tasks, and a separate cleanup taskqueue to isolate channel operation
as much as possible and avoid various kernel panics under heavy
network load.

As a side effect of this work, the dpaa2_buf structure is simplified
and all of the functions that re-seed those buffers are now gathered
in dpaa2_buf.h and .c files; functions that work with channels are
likewise extracted into dpaa2_channel.h and .c files.

Reported by:		dch
Reviewed by:		bz
Approved by:		bz (mentor)
MFC after:		1 week
Differential Revision:	https://reviews.freebsd.org/D41296
Dmitry Salychev 2023-06-18 17:03:24 +02:00
parent 83d941e0af
commit 58983e4b02
17 changed files with 1833 additions and 1242 deletions
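To make the new scheme concrete: there is now one cleanup task, one buffer-pool task, and one cleanup taskqueue per channel, and the DPIO interrupt handler only enqueues work instead of polling the channel directly. The following condensed sketch is assembled from dpaa2_chan_setup() and dpaa2_io_intr() in the diffs below; it is an illustration with error handling omitted, where ch, iosc, and chan are the variables used in those functions.

/*
 * Per-channel cleanup machinery (condensed from dpaa2_chan_setup()):
 * the task and taskqueue live in struct dpaa2_channel, and the worker
 * thread is pinned to the CPU serving the channel's DPIO.
 */
NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
    taskqueue_thread_enqueue, &ch->cleanup_tq);
taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);

/* In the DPIO interrupt handler (condensed from dpaa2_io_intr()) */
taskqueue_enqueue(chan->cleanup_tq, &chan->cleanup_task);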

sys/conf/files.arm64

@@ -202,6 +202,8 @@ dev/axgbe/xgbe-phy-v1.c optional axa fdt
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/dpaa2/dpaa2_bp.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_buf.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_channel.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_cmd_if.m optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_con.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_console.c optional soc_nxp_ls dpaa2 fdt
@@ -216,6 +218,7 @@ dev/dpaa2/dpaa2_ni.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_rc.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp_if.m optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_types.c optional soc_nxp_ls dpaa2
dev/dpaa2/memac_mdio_acpi.c optional soc_nxp_ls dpaa2 acpi
dev/dpaa2/memac_mdio_common.c optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt
dev/dpaa2/memac_mdio_fdt.c optional soc_nxp_ls dpaa2 fdt

sys/dev/dpaa2/dpaa2_buf.c (new file)

@@ -0,0 +1,246 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include "dpaa2_types.h"
#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
#include "dpaa2_channel.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_ni.h"
MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");
/**
* @brief Allocate Rx buffers visible to QBMan and release them to the
* buffer pool.
*/
int
dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
int size, struct mtx *dma_mtx)
{
struct dpaa2_ni_softc *sc = device_get_softc(dev);
struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
struct dpaa2_buf *buf;
const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
const uint16_t bpid = bpsc->attr.bpid;
bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
int error, bufn = 0;
#if defined(INVARIANTS)
KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
if (dma_mtx != NULL) {
mtx_assert(dma_mtx, MA_OWNED);
}
#endif /* INVARIANTS */
#ifdef _notyet_
/* Limit the number of buffers released to the pool */
count = (alloc + count > DPAA2_NI_BUFS_MAX)
? DPAA2_NI_BUFS_MAX - alloc : count;
#endif
/* Release "count" buffers to the pool */
for (int i = alloc; i < alloc + count; i++) {
/* Enough buffers were allocated for a single command */
if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
bufn);
if (error) {
device_printf(sc->dev, "%s: failed to release "
"buffers to the pool (1)\n", __func__);
return (error);
}
DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
bufn = 0;
}
buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
if (buf == NULL) {
device_printf(dev, "%s: malloc() failed\n", __func__);
return (ENOMEM);
}
DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);
error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
if (error != 0) {
device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
"error=%d/n", __func__, error);
break;
}
paddr[bufn] = buf->paddr;
bufn++;
}
/* Release the remainder of the buffers to the pool */
if (bufn > 0) {
error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
if (error) {
device_printf(sc->dev, "%s: failed to release "
"buffers to the pool (2)\n", __func__);
return (error);
}
DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
}
return (0);
}
/**
* @brief Prepare Rx buffer to be released to the buffer pool.
*/
int
dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
struct mtx *dma_mtx)
{
struct dpaa2_ni_softc *sc = device_get_softc(dev);
struct dpaa2_fa *fa;
bool map_created = false;
bool mbuf_alloc = false;
int error;
#if defined(INVARIANTS)
DPAA2_BUF_ASSERT_RXPREP(buf);
if (dma_mtx != NULL) {
mtx_assert(dma_mtx, MA_OWNED);
}
#endif /* INVARIANTS */
if (__predict_false(buf->dmap == NULL)) {
error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
if (error != 0) {
device_printf(dev, "%s: failed to create DMA map: "
"error=%d\n", __func__, error);
goto fail_map_create;
}
map_created = true;
}
if (__predict_true(buf->m == NULL)) {
buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
if (__predict_false(buf->m == NULL)) {
device_printf(dev, "%s: m_getjcl() failed\n", __func__);
error = ENOMEM;
goto fail_mbuf_alloc;
}
buf->m->m_len = buf->m->m_ext.ext_size;
buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
mbuf_alloc = true;
}
error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
&buf->nseg, BUS_DMA_NOWAIT);
KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
buf->nseg));
KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
__func__, error));
if (__predict_false(error != 0 || buf->nseg != 1)) {
device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
"error=%d, nsegs=%d\n", __func__, error, buf->nseg);
goto fail_mbuf_map;
}
buf->paddr = buf->seg.ds_addr;
buf->vaddr = buf->m->m_data;
/* Populate frame annotation for future use */
fa = (struct dpaa2_fa *)buf->vaddr;
fa->magic = DPAA2_MAGIC;
fa->buf = buf;
bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);
DPAA2_BUF_ASSERT_RXREADY(buf);
return (0);
fail_mbuf_map:
if (mbuf_alloc) {
m_freem(buf->m);
buf->m = NULL;
}
fail_mbuf_alloc:
if (map_created) {
(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
}
fail_map_create:
return (error);
}
/**
* @brief Prepare Tx buffer to be added to the Tx ring.
*/
int
dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
{
struct dpaa2_buf *sgt = buf->sgt;
bool map_created = false;
int error;
DPAA2_BUF_ASSERT_TXPREP(buf);
if (buf->dmap == NULL) {
error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
if (error != 0) {
device_printf(dev, "%s: bus_dmamap_create() failed: "
"error=%d\n", __func__, error);
goto fail_map_create;
}
map_created = true;
}
if (sgt->vaddr == NULL) {
error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
if (error != 0) {
device_printf(dev, "%s: bus_dmamem_alloc() failed: "
"error=%d\n", __func__, error);
goto fail_mem_alloc;
}
}
DPAA2_BUF_ASSERT_TXREADY(buf);
return (0);
fail_mem_alloc:
if (map_created) {
(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
}
fail_map_create:
return (error);
}

sys/dev/dpaa2/dpaa2_buf.h (new file)

@@ -0,0 +1,173 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _DPAA2_BUF_H
#define _DPAA2_BUF_H
#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#define DPAA2_RX_BUF_SIZE (MJUM9BYTES)
struct dpaa2_buf {
bus_addr_t paddr;
caddr_t vaddr;
bus_dma_tag_t dmat;
bus_dmamap_t dmap;
bus_dma_segment_t seg;
int nseg;
struct mbuf *m;
struct dpaa2_buf *sgt;
void *opt;
};
#define DPAA2_BUF_INIT_TAGOPT(__buf, __tag, __opt) do { \
KASSERT((__buf) != NULL, ("%s: buf is NULL", __func__)); \
\
(__buf)->paddr = 0; \
(__buf)->vaddr = NULL; \
(__buf)->dmat = (__tag); \
(__buf)->dmap = NULL; \
(__buf)->seg.ds_addr = 0; \
(__buf)->seg.ds_len = 0; \
(__buf)->nseg = 0; \
(__buf)->m = NULL; \
(__buf)->sgt = NULL; \
(__buf)->opt = (__opt); \
} while(0)
#define DPAA2_BUF_INIT(__buf) DPAA2_BUF_INIT_TAGOPT((__buf), NULL, NULL)
#if defined(INVARIANTS)
/*
* TXPREP/TXREADY macros allow one to verify whether a Tx buffer is prepared
* to be seeded and/or is ready to be used for transmission.
*
* NOTE: Any modification should be carefully analyzed and justified.
*/
#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
struct dpaa2_buf *__sgt = (__buf)->sgt; \
KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
\
KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); \
KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
\
KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
KASSERT((__sgt)->vaddr == NULL, ("%s: S/G vaddr set?", __func__)); \
KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
KASSERT((__sgt)->dmap == NULL, ("%s: S/G DMA map set?", __func__)); \
KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
} while(0)
#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
struct dpaa2_buf *__sgt = (__buf)->sgt; \
KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
\
KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
\
KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
KASSERT((__sgt)->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); \
KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
KASSERT((__sgt)->dmap != NULL, ("%s: no S/G DMA map?", __func__)); \
KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
} while(0)
#else /* !INVARIANTS */
#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
} while(0)
#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
} while(0)
#endif /* INVARIANTS */
#if defined(INVARIANTS)
/*
* RXPREP/RXREADY macros allow one to verify whether an Rx buffer is prepared
* to be seeded and/or is ready to be used for reception.
*
* NOTE: Any modification should be carefully analyzed and justified.
*/
#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
/* KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); */ \
KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
} while(0)
#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
KASSERT((__buf)->paddr != 0, ("%s: paddr not set?", __func__)); \
KASSERT((__buf)->vaddr != NULL, ("%s: vaddr not set?", __func__)); \
KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
KASSERT((__buf)->seg.ds_addr != 0, ("%s: not mapped?", __func__)); \
KASSERT((__buf)->seg.ds_len != 0, ("%s: not mapped?", __func__)); \
KASSERT((__buf)->nseg == 1, ("%s: nseg != 1?", __func__)); \
KASSERT((__buf)->m != NULL, ("%s: no mbuf?", __func__)); \
KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
} while(0)
#else /* !INVARIANTS */
#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
} while(0)
#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
} while(0)
#endif /* INVARIANTS */
int dpaa2_buf_seed_pool(device_t, device_t, void *, uint32_t, int, struct mtx *);
int dpaa2_buf_seed_rxb(device_t, struct dpaa2_buf *, int, struct mtx *);
int dpaa2_buf_seed_txb(device_t, struct dpaa2_buf *);
#endif /* _DPAA2_BUF_H */
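Taken together, the helpers above are intended to be used roughly as follows. This is a minimal sketch mirroring the loop body of dpaa2_buf_seed_pool() in dpaa2_buf.c above; example_seed_one_rxb() is a hypothetical wrapper (not part of this commit), dev is the DPNI device, and ch is an already configured struct dpaa2_channel.

static int
example_seed_one_rxb(device_t dev, struct dpaa2_channel *ch)
{
	struct dpaa2_buf *buf;
	int error;

	/* Allocate a buffer descriptor bound to the channel's Rx DMA tag */
	buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
	DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);

	/* Allocate an mbuf cluster, DMA-map it, populate frame annotation */
	error = dpaa2_buf_seed_rxb(dev, buf, DPAA2_RX_BUF_SIZE, NULL);
	if (error != 0) {
		free(buf, M_DPAA2_RXB);
		return (error);
	}

	/*
	 * buf->paddr may now be released to the buffer pool, e.g. via
	 * DPAA2_SWP_RELEASE_BUFS() as dpaa2_buf_seed_pool() does.
	 */
	return (0);
}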

sys/dev/dpaa2/dpaa2_channel.c (new file)

@@ -0,0 +1,557 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
*
* NOTE: Several WQs are organized into a single channel.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include "dpaa2_types.h"
#include "dpaa2_channel.h"
#include "dpaa2_ni.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_io.h"
#include "dpaa2_con.h"
#include "dpaa2_buf.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_bp.h"
#include "dpaa2_cmd_if.h"
MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");
#define RX_SEG_N (1u)
#define RX_SEG_SZ (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
#define RX_SEG_MAXSZ (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);
#define TX_SEG_N (16u) /* XXX-DSL: does DPAA2 limit exist? */
#define TX_SEG_SZ (PAGE_SIZE)
#define TX_SEG_MAXSZ (TX_SEG_N * TX_SEG_SZ)
CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);
#define SGT_SEG_N (1u)
#define SGT_SEG_SZ (PAGE_SIZE)
#define SGT_SEG_MAXSZ (PAGE_SIZE)
CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
int, bus_size_t);
static void dpaa2_chan_bp_task(void *, int);
/**
* @brief Configures a QBMan channel and registers data availability notifications.
*/
int
dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
{
device_t pdev = device_get_parent(dev);
device_t child = dev;
struct dpaa2_ni_softc *sc = device_get_softc(dev);
struct dpaa2_io_softc *iosc = device_get_softc(iodev);
struct dpaa2_con_softc *consc = device_get_softc(condev);
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
struct dpaa2_con_notif_cfg notif_cfg;
struct dpaa2_io_notif_ctx *ctx;
struct dpaa2_channel *ch = NULL;
struct dpaa2_cmd cmd;
uint16_t rctk, contk;
int error;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
if (error) {
device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
__func__, rcinfo->id, error);
goto fail_rc_open;
}
error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
if (error) {
device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
__func__, coninfo->id, error);
goto fail_con_open;
}
error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
if (error) {
device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
"chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
goto fail_con_enable;
}
ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
if (ch == NULL) {
device_printf(dev, "%s: malloc() failed\n", __func__);
error = ENOMEM;
goto fail_malloc;
}
ch->ni_dev = dev;
ch->io_dev = iodev;
ch->con_dev = condev;
ch->id = consc->attr.chan_id;
ch->flowid = flowid;
ch->tx_frames = 0; /* for debug purposes */
ch->tx_dropped = 0; /* for debug purposes */
ch->store_sz = 0;
ch->store_idx = 0;
ch->recycled_n = 0;
ch->rxq_n = 0;
NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);
ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
taskqueue_thread_enqueue, &ch->cleanup_tq);
taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
&iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);
error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
if (error != 0) {
device_printf(dev, "%s: failed to setup DMA\n", __func__);
goto fail_dma_setup;
}
mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);
ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
&ch->xmit_mtx);
if (ch->xmit_br == NULL) {
device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
error = ENOMEM;
goto fail_buf_ring;
}
DPAA2_BUF_INIT(&ch->store);
/* Register the new notification context */
ctx = &ch->ctx;
ctx->qman_ctx = (uint64_t)ctx;
ctx->cdan_en = true;
ctx->fq_chan_id = ch->id;
ctx->io_dev = ch->io_dev;
ctx->channel = ch;
error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
if (error) {
device_printf(dev, "%s: failed to register CDAN context\n",
__func__);
goto fail_dpcon_notif;
}
/* Register DPCON notification within Management Complex */
notif_cfg.dpio_id = ioinfo->id;
notif_cfg.prior = 0;
notif_cfg.qman_ctx = ctx->qman_ctx;
error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
if (error) {
device_printf(dev, "%s: failed to register DPCON "
"notifications: dpcon_id=%d, chan_id=%d\n", __func__,
coninfo->id, consc->attr.chan_id);
goto fail_dpcon_notif;
}
/* Allocate initial # of Rx buffers and a channel storage */
error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
DPAA2_RX_BUF_SIZE, NULL);
if (error) {
device_printf(dev, "%s: failed to seed buffer pool\n",
__func__);
goto fail_dpcon_notif;
}
error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
BUS_DMA_NOWAIT, sc->buf_align);
if (error != 0) {
device_printf(dev, "%s: failed to allocate channel storage\n",
__func__);
goto fail_dpcon_notif;
} else {
ch->store_sz = DPAA2_ETH_STORE_FRAMES;
}
/* Prepare queues for the channel */
error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
if (error) {
device_printf(dev, "%s: failed to prepare TxConf queue: "
"error=%d\n", __func__, error);
goto fail_fq_setup;
}
error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
if (error) {
device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
__func__, error);
goto fail_fq_setup;
}
if (bootverbose) {
device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
"priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
consc->attr.prior_num);
}
*channel = ch;
(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
return (0);
fail_fq_setup:
if (ch->store.vaddr != NULL) {
bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
}
if (ch->store.dmat != NULL) {
bus_dma_tag_destroy(ch->store.dmat);
}
ch->store.dmat = NULL;
ch->store.vaddr = NULL;
ch->store.paddr = 0;
ch->store.nseg = 0;
fail_dpcon_notif:
buf_ring_free(ch->xmit_br, M_DEVBUF);
fail_buf_ring:
mtx_destroy(&ch->xmit_mtx);
fail_dma_setup:
/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
/* taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
/* } */
/* taskqueue_free(ch->cleanup_tq); */
fail_malloc:
(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_open:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
fail_rc_open:
return (error);
}
/**
* @brief Performs an initial configuration of the frame queue.
*/
int
dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
enum dpaa2_ni_queue_type queue_type)
{
struct dpaa2_ni_softc *sc = device_get_softc(dev);
struct dpaa2_ni_fq *fq;
switch (queue_type) {
case DPAA2_NI_QUEUE_TX_CONF:
/* One queue per channel */
fq = &ch->txc_queue;
fq->chan = ch;
fq->flowid = ch->flowid;
fq->tc = 0; /* ignored */
fq->type = queue_type;
break;
case DPAA2_NI_QUEUE_RX:
KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
("too many Rx traffic classes: rx_tcs=%d\n",
sc->attr.num.rx_tcs));
/* One queue per Rx traffic class within a channel */
for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
fq = &ch->rx_queues[i];
fq->chan = ch;
fq->flowid = ch->flowid;
fq->tc = (uint8_t) i;
fq->type = queue_type;
ch->rxq_n++;
}
break;
case DPAA2_NI_QUEUE_RX_ERR:
/* One queue per network interface */
fq = &sc->rxe_queue;
fq->chan = ch;
fq->flowid = 0; /* ignored */
fq->tc = 0; /* ignored */
fq->type = queue_type;
break;
default:
device_printf(dev, "%s: unexpected frame queue type: %d\n",
__func__, queue_type);
return (EINVAL);
}
return (0);
}
/**
* @brief Obtain the next dequeue response from the channel storage.
*/
int
dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
{
struct dpaa2_buf *buf = &ch->store;
struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
struct dpaa2_dq *msg = &msgs[ch->store_idx];
int rc = EINPROGRESS;
ch->store_idx++;
if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
rc = EALREADY; /* VDQ command is expired */
ch->store_idx = 0;
if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
msg = NULL; /* Null response, FD is invalid */
}
}
if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
rc = ENOENT; /* FQ is empty */
ch->store_idx = 0;
}
if (dq != NULL) {
*dq = msg;
}
return (rc);
}
static int
dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
bus_size_t alignment)
{
int error;
mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);
error = bus_dma_tag_create(
bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* low restricted addr */
BUS_SPACE_MAXADDR, /* high restricted addr */
NULL, NULL, /* filter, filterarg */
RX_SEG_MAXSZ, /* maxsize */
RX_SEG_N, /* nsegments */
RX_SEG_SZ, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&ch->rx_dmat);
if (error) {
device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
goto fail_rx_tag;
}
error = bus_dma_tag_create(
bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* low restricted addr */
BUS_SPACE_MAXADDR, /* high restricted addr */
NULL, NULL, /* filter, filterarg */
TX_SEG_MAXSZ, /* maxsize */
TX_SEG_N, /* nsegments */
TX_SEG_SZ, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&ch->tx_dmat);
if (error) {
device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
goto fail_tx_tag;
}
error = bus_dma_tag_create(
bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* low restricted addr */
BUS_SPACE_MAXADDR, /* high restricted addr */
NULL, NULL, /* filter, filterarg */
SGT_SEG_MAXSZ, /* maxsize */
SGT_SEG_N, /* nsegments */
SGT_SEG_SZ, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&ch->sgt_dmat);
if (error) {
device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
goto fail_sgt_tag;
}
return (0);
fail_sgt_tag:
bus_dma_tag_destroy(ch->tx_dmat);
fail_tx_tag:
bus_dma_tag_destroy(ch->rx_dmat);
fail_rx_tag:
mtx_destroy(&ch->dma_mtx);
ch->rx_dmat = NULL;
ch->tx_dmat = NULL;
ch->sgt_dmat = NULL;
return (error);
}
/**
* @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
*/
static int
dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
int mapflags, bus_size_t alignment)
{
struct dpaa2_buf *buf = &ch->store;
uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
int error;
error = bus_dma_tag_create(
bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* low restricted addr */
BUS_SPACE_MAXADDR, /* high restricted addr */
NULL, NULL, /* filter, filterarg */
maxsize, /* maxsize */
1, /* nsegments */
maxsize, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&buf->dmat);
if (error != 0) {
device_printf(dev, "%s: failed to create DMA tag\n", __func__);
goto fail_tag;
}
error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
if (error != 0) {
device_printf(dev, "%s: failed to allocate storage memory\n",
__func__);
goto fail_map_create;
}
buf->paddr = 0;
error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
if (error != 0) {
device_printf(dev, "%s: failed to map storage memory\n",
__func__);
goto fail_map_load;
}
bus_dmamap_sync(buf->dmat, buf->dmap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
buf->nseg = 1;
return (0);
fail_map_load:
bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
fail_map_create:
bus_dma_tag_destroy(buf->dmat);
fail_tag:
buf->dmat = NULL;
buf->vaddr = NULL;
buf->paddr = 0;
buf->nseg = 0;
return (error);
}
/**
* @brief Release new buffers to the buffer pool if necessary.
*/
static void
dpaa2_chan_bp_task(void *arg, int count)
{
struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
struct dpaa2_bp_softc *bpsc;
struct dpaa2_bp_conf bpconf;
const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
device_t bpdev;
int error;
/* There's only one buffer pool for now */
bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
bpsc = device_get_softc(bpdev);
/* Get state of the buffer pool */
error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
if (error) {
device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
"error=%d\n", __func__, error);
return;
}
/* Double the number of allocated Rx buffers if fewer than 25% are free */
if (bpconf.free_bufn < (buf_num >> 2)) {
mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
mtx_lock(&ch->dma_mtx);
(void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
mtx_unlock(&ch->dma_mtx);
DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
}
}

sys/dev/dpaa2/dpaa2_channel.h (new file)

@@ -0,0 +1,95 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _DPAA2_CHANNEL_H
#define _DPAA2_CHANNEL_H
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include "dpaa2_types.h"
#include "dpaa2_io.h"
#include "dpaa2_ni.h"
#define DPAA2_TX_BUFRING_SZ (4096u)
/**
* @brief QBMan channel to process ingress traffic.
*
* NOTE: Several WQs are organized into a single channel.
*/
struct dpaa2_channel {
device_t ni_dev;
device_t io_dev;
device_t con_dev;
uint16_t id;
uint16_t flowid;
uint64_t tx_frames;
uint64_t tx_dropped;
struct mtx dma_mtx;
bus_dma_tag_t rx_dmat;
bus_dma_tag_t tx_dmat;
bus_dma_tag_t sgt_dmat;
struct dpaa2_io_notif_ctx ctx; /* to configure CDANs */
struct dpaa2_buf store; /* to keep VDQ responses */
uint32_t store_sz; /* in frames */
uint32_t store_idx; /* frame index */
uint32_t recycled_n;
struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD];
uint32_t rxq_n;
struct dpaa2_ni_fq rx_queues[DPAA2_MAX_TCS];
struct dpaa2_ni_fq txc_queue;
struct taskqueue *cleanup_tq;
struct task cleanup_task;
struct task bp_task;
struct mtx xmit_mtx;
struct buf_ring *xmit_br;
} __aligned(CACHE_LINE_SIZE);
int dpaa2_chan_setup(device_t, device_t, device_t, device_t,
struct dpaa2_channel **, uint32_t, task_fn_t);
int dpaa2_chan_setup_fq(device_t, struct dpaa2_channel *,
enum dpaa2_ni_queue_type);
int dpaa2_chan_next_frame(struct dpaa2_channel *, struct dpaa2_dq **);
#endif /* _DPAA2_CHANNEL_H */
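A channel is created per DPCON/DPIO pair by the network interface driver. The sketch below shows a plausible call site: example_setup_channels() and dpaa2_ni_cleanup_task are hypothetical names (the real caller is in dpaa2_ni.c, whose diff is suppressed below), while the DPAA2_NI_*_RID() resource macros and the sc->res, sc->chan_n, and sc->channels fields come from dpaa2_ni.h in this commit.

static int
example_setup_channels(device_t dev)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	device_t iodev, condev, bpdev;
	int error;

	/* A single buffer pool is shared by all channels for now */
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

	for (uint32_t i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i /* flowid */, dpaa2_ni_cleanup_task);
		if (error != 0)
			return (error);
	}
	return (0);
}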

sys/dev/dpaa2/dpaa2_io.c

@@ -38,6 +38,8 @@
* using QBMan.
*/
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
@@ -58,6 +60,10 @@
#include <dev/pci/pcivar.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include "pcib_if.h"
#include "pci_if.h"
@@ -68,6 +74,7 @@
#include "dpaa2_cmd_if.h"
#include "dpaa2_io.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */
#define DPIO_POLL_MAX 32
@@ -436,8 +443,14 @@ dpaa2_io_setup_irqs(device_t dev)
return (ENXIO);
}
/* Wrap DPIO ID around number of CPUs. */
bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus);
/* Wrap DPIO ID around number of CPUs/RSS buckets */
#ifdef RSS
sc->cpu = rss_getcpu(sc->attr.id % rss_getnumbuckets());
#else
sc->cpu = sc->attr.id % mp_ncpus;
#endif
CPU_SETOF(sc->cpu, &sc->cpu_mask);
bus_bind_intr(dev, sc->irq_resource, sc->cpu);
/*
* Setup and enable Static Dequeue Command to receive CDANs from
@@ -519,7 +532,9 @@ static void
dpaa2_io_intr(void *arg)
{
struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg;
/* struct dpaa2_ni_softc *nisc = NULL; */
struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX];
struct dpaa2_channel *chan;
struct dpaa2_dq dq;
uint32_t idx, status;
uint16_t flags;
@@ -554,7 +569,9 @@ dpaa2_io_intr(void *arg)
DPAA2_SWP_UNLOCK(sc->swp);
for (int i = 0; i < cdan_n; i++) {
ctx[i]->poll(ctx[i]->channel);
chan = (struct dpaa2_channel *)ctx[i]->channel;
/* nisc = device_get_softc(chan->ni_dev); */
taskqueue_enqueue(chan->cleanup_tq, &chan->cleanup_task);
}
/* Enable software portal interrupts back */

sys/dev/dpaa2/dpaa2_io.h

@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2021-2022 Dmitry Salychev
* Copyright © 2021-2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,7 @@
#include "dpaa2_types.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
/* Maximum resources per DPIO: 3 SYS_MEM + 1 DPMCP. */
#define DPAA2_IO_MAX_RESOURCES 4
@@ -74,8 +75,6 @@ struct dpaa2_io_attr {
* (CDAN) on a particular WQ channel.
*/
struct dpaa2_io_notif_ctx {
void (*poll)(void *);
device_t io_dev;
void *channel;
uint64_t qman_ctx;
@@ -98,6 +97,9 @@ struct dpaa2_io_softc {
int irq_rid[DPAA2_IO_MSI_COUNT];
struct resource *irq_resource;
void *intr; /* interrupt handle */
int cpu;
cpuset_t cpu_mask;
};
extern struct resource_spec dpaa2_io_spec[];

sys/dev/dpaa2/dpaa2_mc.c

@@ -73,8 +73,6 @@
#define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
#define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
#define IORT_DEVICE_NAME "MCE"
/* MC Registers */
@@ -671,68 +669,6 @@ dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
return (error);
}
/**
* @brief Convert DPAA2 device type to string.
*/
const char *
dpaa2_ttos(enum dpaa2_dev_type type)
{
switch (type) {
case DPAA2_DEV_MC:
return ("mc"); /* NOTE: to print as information only. */
case DPAA2_DEV_RC:
return ("dprc");
case DPAA2_DEV_IO:
return ("dpio");
case DPAA2_DEV_NI:
return ("dpni");
case DPAA2_DEV_MCP:
return ("dpmcp");
case DPAA2_DEV_BP:
return ("dpbp");
case DPAA2_DEV_CON:
return ("dpcon");
case DPAA2_DEV_MAC:
return ("dpmac");
case DPAA2_DEV_MUX:
return ("dpdmux");
case DPAA2_DEV_SW:
return ("dpsw");
default:
break;
}
return ("notype");
}
/**
* @brief Convert string to DPAA2 device type.
*/
enum dpaa2_dev_type
dpaa2_stot(const char *str)
{
if (COMPARE_TYPE(str, "dprc")) {
return (DPAA2_DEV_RC);
} else if (COMPARE_TYPE(str, "dpio")) {
return (DPAA2_DEV_IO);
} else if (COMPARE_TYPE(str, "dpni")) {
return (DPAA2_DEV_NI);
} else if (COMPARE_TYPE(str, "dpmcp")) {
return (DPAA2_DEV_MCP);
} else if (COMPARE_TYPE(str, "dpbp")) {
return (DPAA2_DEV_BP);
} else if (COMPARE_TYPE(str, "dpcon")) {
return (DPAA2_DEV_CON);
} else if (COMPARE_TYPE(str, "dpmac")) {
return (DPAA2_DEV_MAC);
} else if (COMPARE_TYPE(str, "dpdmux")) {
return (DPAA2_DEV_MUX);
} else if (COMPARE_TYPE(str, "dpsw")) {
return (DPAA2_DEV_SW);
}
return (DPAA2_DEV_NOTYPE);
}
/**
* @internal
*/

sys/dev/dpaa2/dpaa2_mcp.c

@@ -40,7 +40,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <machine/bus.h>

sys/dev/dpaa2/dpaa2_mcp.h

@@ -29,7 +29,6 @@
#define _DPAA2_MCP_H
#include <sys/rman.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include "dpaa2_types.h"

sys/dev/dpaa2/dpaa2_ni.c (diff suppressed because it is too large)

sys/dev/dpaa2/dpaa2_ni.h

@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2021-2022 Dmitry Salychev
* Copyright © 2021-2023 Dmitry Salychev
* Copyright © 2022 Mathew McBride
*
* Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,7 @@
#include <sys/socket.h>
#include <sys/buf_ring.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <net/if.h>
@@ -50,6 +51,7 @@
#include "dpaa2_io.h"
#include "dpaa2_mac.h"
#include "dpaa2_ni_dpkg.h"
#include "dpaa2_channel.h"
/* Name of the DPAA2 network interface. */
#define DPAA2_NI_IFNAME "dpni"
@@ -58,17 +60,13 @@
#define DPAA2_NI_MAX_RESOURCES 34
#define DPAA2_NI_MSI_COUNT 1 /* MSIs per DPNI */
#define DPAA2_NI_MAX_CHANNELS 16 /* to distribute ingress traffic to cores */
#define DPAA2_NI_MAX_TCS 8 /* traffic classes per DPNI */
#define DPAA2_NI_MAX_POOLS 8 /* buffer pools per DPNI */
/* Maximum number of Rx buffers. */
#define DPAA2_NI_BUFS_INIT (50u * DPAA2_SWP_BUFS_PER_CMD)
#define DPAA2_NI_BUFS_MAX (1 << 15) /* 15 bits for buffer index max. */
#define DPAA2_NI_BUFS_INIT (5u * DPAA2_SWP_BUFS_PER_CMD)
/* Maximum number of buffers allocated per Tx ring. */
#define DPAA2_NI_BUFS_PER_TX (1 << 7)
#define DPAA2_NI_MAX_BPTX (1 << 8) /* 8 bits for buffer index max. */
#define DPAA2_NI_MAX_BPTX (1 << 8)
/* Number of the DPNI statistics counters. */
#define DPAA2_NI_STAT_COUNTERS 7u
@@ -133,12 +131,30 @@
*/
#define DPAA2_NI_ENQUEUE_RETRIES 10
enum dpaa2_ni_queue_type {
DPAA2_NI_QUEUE_RX = 0,
DPAA2_NI_QUEUE_TX,
DPAA2_NI_QUEUE_TX_CONF,
DPAA2_NI_QUEUE_RX_ERR
};
/* Channel storage buffer configuration */
#define DPAA2_ETH_STORE_FRAMES 16u
#define DPAA2_ETH_STORE_SIZE \
((DPAA2_ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
/*
* NOTE: Don't forget to update dpaa2_ni_spec in case of any changes in macros!
*/
/* DPMCP resources */
#define DPAA2_NI_MCP_RES_NUM (1u)
#define DPAA2_NI_MCP_RID_OFF (0u)
#define DPAA2_NI_MCP_RID(rid) ((rid) + DPAA2_NI_MCP_RID_OFF)
/* DPIO resources (software portals) */
#define DPAA2_NI_IO_RES_NUM (16u)
#define DPAA2_NI_IO_RID_OFF (DPAA2_NI_MCP_RID_OFF + DPAA2_NI_MCP_RES_NUM)
#define DPAA2_NI_IO_RID(rid) ((rid) + DPAA2_NI_IO_RID_OFF)
/* DPBP resources (buffer pools) */
#define DPAA2_NI_BP_RES_NUM (1u)
#define DPAA2_NI_BP_RID_OFF (DPAA2_NI_IO_RID_OFF + DPAA2_NI_IO_RES_NUM)
#define DPAA2_NI_BP_RID(rid) ((rid) + DPAA2_NI_BP_RID_OFF)
/* DPCON resources (channels) */
#define DPAA2_NI_CON_RES_NUM (16u)
#define DPAA2_NI_CON_RID_OFF (DPAA2_NI_BP_RID_OFF + DPAA2_NI_BP_RES_NUM)
#define DPAA2_NI_CON_RID(rid) ((rid) + DPAA2_NI_CON_RID_OFF)
enum dpaa2_ni_dest_type {
DPAA2_NI_DEST_NONE = 0,
@@ -172,7 +188,7 @@ enum dpaa2_ni_err_action {
DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE
};
struct dpaa2_ni_channel;
struct dpaa2_channel;
struct dpaa2_ni_fq;
/**
@@ -203,95 +219,6 @@ struct dpaa2_ni_attr {
} key_size;
};
/**
* @brief Tx ring.
*
* fq: Parent (TxConf) frame queue.
* fqid: ID of the logical Tx queue.
* mbuf_br: Ring buffer for mbufs to transmit.
* mbuf_lock: Lock for the ring buffer.
*/
struct dpaa2_ni_tx_ring {
struct dpaa2_ni_fq *fq;
uint32_t fqid;
uint32_t txid; /* Tx ring index */
/* Ring buffer for indexes in "buf" array. */
struct buf_ring *idx_br;
struct mtx lock;
/* Buffers to DMA load/unload Tx mbufs. */
struct dpaa2_buf buf[DPAA2_NI_BUFS_PER_TX];
};
/**
* @brief A Frame Queue is the basic queuing structure used by the QMan.
*
* It comprises a list of frame descriptors (FDs), so it can be thought of
* as a queue of frames.
*
* NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
* onto a work queue (WQ).
*
* fqid: Frame queue ID, can be used to enqueue/dequeue or execute other
* commands on the queue through DPIO.
* txq_n: Number of configured Tx queues.
* tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid.
* Note that Tx queues are logical queues and not all management
* commands are available on these queue types.
* qdbin: Queue destination bin. Can be used with the DPIO enqueue
* operation based on QDID, QDBIN and QPRI. Note that all Tx queues
* with the same flowid have the same destination bin.
*/
struct dpaa2_ni_fq {
int (*consume)(struct dpaa2_ni_channel *,
struct dpaa2_ni_fq *, struct dpaa2_fd *);
struct dpaa2_ni_channel *chan;
uint32_t fqid;
uint16_t flowid;
uint8_t tc;
enum dpaa2_ni_queue_type type;
/* Optional fields (for TxConf queue). */
struct dpaa2_ni_tx_ring tx_rings[DPAA2_NI_MAX_TCS];
uint32_t tx_qdbin;
} __aligned(CACHE_LINE_SIZE);
/**
* @brief QBMan channel to process ingress traffic (Rx, Tx conf).
*
* NOTE: Several WQs are organized into a single WQ Channel.
*/
struct dpaa2_ni_channel {
device_t ni_dev;
device_t io_dev;
device_t con_dev;
uint16_t id;
uint16_t flowid;
/* For debug purposes only! */
uint64_t tx_frames;
uint64_t tx_dropped;
/* Context to configure CDAN. */
struct dpaa2_io_notif_ctx ctx;
/* Channel storage (to keep responses from VDQ command). */
struct dpaa2_buf store;
uint32_t store_sz; /* in frames */
uint32_t store_idx; /* frame index */
/* Recycled buffers to release back to the pool. */
uint32_t recycled_n;
struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD];
/* Frame queues */
uint32_t rxq_n;
struct dpaa2_ni_fq rx_queues[DPAA2_NI_MAX_TCS];
struct dpaa2_ni_fq txc_queue;
};
/**
* @brief Configuration of the network interface queue.
*
@@ -534,7 +461,6 @@ struct dpaa2_ni_softc {
uint16_t buf_align;
uint16_t buf_sz;
/* For debug purposes only! */
uint64_t rx_anomaly_frames;
uint64_t rx_single_buf_frames;
uint64_t rx_sg_buf_frames;
@@ -543,10 +469,8 @@ struct dpaa2_ni_softc {
uint64_t tx_single_buf_frames;
uint64_t tx_sg_frames;
/* Attributes of the DPAA2 network interface. */
struct dpaa2_ni_attr attr;
/* For network interface and miibus. */
struct ifnet *ifp;
uint32_t if_flags;
struct mtx lock;
@@ -556,37 +480,25 @@ struct dpaa2_ni_softc {
struct ifmedia fixed_ifmedia;
int media_status;
/* DMA resources */
bus_dma_tag_t bp_dmat; /* for buffer pool */
bus_dma_tag_t tx_dmat; /* for Tx buffers */
bus_dma_tag_t st_dmat; /* for channel storage */
bus_dma_tag_t rxd_dmat; /* for Rx distribution key */
bus_dma_tag_t qos_dmat; /* for QoS table key */
bus_dma_tag_t sgt_dmat; /* for scatter/gather tables */
struct dpaa2_buf qos_kcfg; /* QoS table key config. */
struct dpaa2_buf rxd_kcfg; /* Rx distribution key config. */
struct dpaa2_buf qos_kcfg; /* QoS table key config */
struct dpaa2_buf rxd_kcfg; /* Rx distribution key config */
/* Channels and RxError frame queue */
uint32_t chan_n;
struct dpaa2_ni_channel *channels[DPAA2_NI_MAX_CHANNELS];
struct dpaa2_ni_fq rxe_queue; /* one per network interface */
struct dpaa2_channel *channels[DPAA2_MAX_CHANNELS];
struct dpaa2_ni_fq rxe_queue; /* one per DPNI */
/* Rx buffers for buffer pool. */
struct dpaa2_atomic buf_num;
struct dpaa2_atomic buf_free; /* for sysctl(9) only */
struct dpaa2_buf buf[DPAA2_NI_BUFS_MAX];
/* Interrupts */
int irq_rid[DPAA2_NI_MSI_COUNT];
struct resource *irq_res;
void *intr; /* interrupt handle */
void *intr;
/* Tasks */
struct taskqueue *bp_taskq;
struct task bp_task;
/* Callouts */
struct callout mii_callout;
struct {
@@ -594,7 +506,7 @@ struct dpaa2_ni_softc {
uint8_t addr[ETHER_ADDR_LEN];
device_t phy_dev;
int phy_loc;
} mac; /* Info about connected DPMAC (if exists). */
} mac; /* Info about connected DPMAC (if exists) */
};
extern struct resource_spec dpaa2_ni_spec[];

sys/dev/dpaa2/dpaa2_swp.c

@@ -78,7 +78,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <machine/bus.h>
@@ -298,8 +297,9 @@ dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
/* Initialize the portal with an IRQ threshold and timeout of 0us. */
dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 0);
/* TODO: sysctl(9) for the IRQ timeout? */
/* Initialize the portal with an IRQ threshold and timeout of 120us. */
dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 120);
*swp = p;
@@ -724,16 +724,13 @@ dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
uint16_t flags;
int i, error;
KASSERT(swp != NULL, ("%s: swp is NULL", __func__));
KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__));
KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage "
"buffer", __func__));
cmd.numf = frames_n - 1;
cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
cmd.dq_src = chan_id;
cmd.rsp_addr = (uint64_t) buf->store.paddr;
cmd.rsp_addr = (uint64_t)buf->paddr;
/* Dequeue command type */
cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
@@ -763,10 +760,10 @@
}
/* Let's sync before reading VDQ response from QBMan. */
bus_dmamap_sync(buf->store.dmat, buf->store.dmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
/* Read VDQ response from QBMan. */
msg = (struct dpaa2_dq *) buf->store.vaddr;
msg = (struct dpaa2_dq *)buf->vaddr;
for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
(msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {

sys/dev/dpaa2/dpaa2_swp.h

@@ -33,6 +33,7 @@
#include <sys/bus.h>
#include "dpaa2_types.h"
#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
/*
@@ -318,16 +319,16 @@ CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE);
struct dpaa2_fa {
uint32_t magic;
struct dpaa2_buf *buf;
#ifdef __notyet__
union {
struct { /* Tx frame annotation */
struct dpaa2_ni_tx_ring *tx;
};
#ifdef __notyet__
struct { /* Rx frame annotation */
uint64_t _notused;
};
#endif
};
#endif
} __packed;
CTASSERT(sizeof(struct dpaa2_fa) <= DPAA2_FA_SIZE);

sys/dev/dpaa2/dpaa2_types.c (new file)

@@ -0,0 +1,114 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include "dpaa2_types.h"
#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
/**
* @brief Convert DPAA2 device type to string.
*/
const char *
dpaa2_ttos(enum dpaa2_dev_type type)
{
switch (type) {
case DPAA2_DEV_MC:
return ("mc"); /* NOTE: to print as information only. */
case DPAA2_DEV_RC:
return ("dprc");
case DPAA2_DEV_IO:
return ("dpio");
case DPAA2_DEV_NI:
return ("dpni");
case DPAA2_DEV_MCP:
return ("dpmcp");
case DPAA2_DEV_BP:
return ("dpbp");
case DPAA2_DEV_CON:
return ("dpcon");
case DPAA2_DEV_MAC:
return ("dpmac");
case DPAA2_DEV_MUX:
return ("dpdmux");
case DPAA2_DEV_SW:
return ("dpsw");
default:
break;
}
return ("notype");
}
/**
* @brief Convert string to DPAA2 device type.
*/
enum dpaa2_dev_type
dpaa2_stot(const char *str)
{
if (COMPARE_TYPE(str, "dprc")) {
return (DPAA2_DEV_RC);
} else if (COMPARE_TYPE(str, "dpio")) {
return (DPAA2_DEV_IO);
} else if (COMPARE_TYPE(str, "dpni")) {
return (DPAA2_DEV_NI);
} else if (COMPARE_TYPE(str, "dpmcp")) {
return (DPAA2_DEV_MCP);
} else if (COMPARE_TYPE(str, "dpbp")) {
return (DPAA2_DEV_BP);
} else if (COMPARE_TYPE(str, "dpcon")) {
return (DPAA2_DEV_CON);
} else if (COMPARE_TYPE(str, "dpmac")) {
return (DPAA2_DEV_MAC);
} else if (COMPARE_TYPE(str, "dpdmux")) {
return (DPAA2_DEV_MUX);
} else if (COMPARE_TYPE(str, "dpsw")) {
return (DPAA2_DEV_SW);
}
return (DPAA2_DEV_NOTYPE);
}
/**
* @brief Callback to obtain the physical address of the only mapped DMA segment.
*/
void
dpaa2_dmamap_oneseg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error == 0) {
KASSERT(nseg == 1, ("%s: too many segments: nseg=%d\n",
__func__, nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
} else {
panic("%s: error=%d\n", __func__, error);
}
}

sys/dev/dpaa2/dpaa2_types.h

@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2021-2022 Dmitry Salychev
* Copyright © 2021-2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +28,18 @@
#ifndef _DPAA2_TYPES_H
#define _DPAA2_TYPES_H
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#define DPAA2_MAGIC ((uint32_t) 0xD4AA2C0Du)
#define DPAA2_MAX_CHANNELS 16 /* CPU cores */
#define DPAA2_MAX_TCS 8 /* Traffic classes */
/**
* @brief Types of the DPAA2 devices.
*/
@@ -51,56 +59,67 @@ enum dpaa2_dev_type {
};
/**
* @brief Types of the DPAA2 buffers.
* @brief Types of the DPNI queues.
*/
enum dpaa2_buf_type {
DPAA2_BUF_RX = 75, /* Rx buffer */
DPAA2_BUF_TX, /* Tx buffer */
DPAA2_BUF_STORE /* Channel storage, key configuration */
};
/**
* @brief DMA-mapped buffer (for Rx/Tx buffers, channel storage, etc.).
*/
struct dpaa2_buf {
enum dpaa2_buf_type type;
union {
struct {
bus_dma_tag_t dmat; /* DMA tag for this buffer */
bus_dmamap_t dmap;
bus_addr_t paddr;
void *vaddr;
struct mbuf *m; /* associated mbuf */
} rx;
struct {
bus_dma_tag_t dmat; /* DMA tag for this buffer */
bus_dmamap_t dmap;
bus_addr_t paddr;
void *vaddr;
struct mbuf *m; /* associated mbuf */
uint64_t idx;
/* for scatter/gather table */
bus_dma_tag_t sgt_dmat;
bus_dmamap_t sgt_dmap;
bus_addr_t sgt_paddr;
void *sgt_vaddr;
} tx;
struct {
bus_dma_tag_t dmat; /* DMA tag for this buffer */
bus_dmamap_t dmap;
bus_addr_t paddr;
void *vaddr;
} store;
};
enum dpaa2_ni_queue_type {
DPAA2_NI_QUEUE_RX = 0,
DPAA2_NI_QUEUE_TX,
DPAA2_NI_QUEUE_TX_CONF,
DPAA2_NI_QUEUE_RX_ERR
};
struct dpaa2_atomic {
volatile int counter;
};
/**
* @brief Tx ring.
*
* fq: Parent (TxConf) frame queue.
* fqid: ID of the logical Tx queue.
* br: Ring buffer for mbufs to transmit.
* lock: Lock for the ring buffer.
*/
struct dpaa2_ni_tx_ring {
struct dpaa2_ni_fq *fq;
uint32_t fqid;
uint32_t txid; /* Tx ring index */
struct buf_ring *br;
struct mtx lock;
} __aligned(CACHE_LINE_SIZE);
/**
* @brief Frame Queue is the basic queuing structure used by the QMan.
*
* It comprises a list of frame descriptors (FDs), so it can be thought of
* as a queue of frames.
*
* NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
* onto a work queue (WQ).
*
* fqid: Frame queue ID, can be used to enqueue/dequeue or execute other
* commands on the queue through DPIO.
* txq_n: Number of configured Tx queues.
* tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid.
* Note that Tx queues are logical queues and not all management
* commands are available on these queue types.
* qdbin: Queue destination bin. Can be used with the DPIO enqueue
* operation based on QDID, QDBIN and QPRI. Note that all Tx queues
* with the same flowid have the same destination bin.
*/
struct dpaa2_ni_fq {
struct dpaa2_channel *chan;
uint32_t fqid;
uint16_t flowid;
uint8_t tc;
enum dpaa2_ni_queue_type type;
/* Optional fields (for TxConf queue). */
struct dpaa2_ni_tx_ring tx_rings[DPAA2_MAX_TCS];
uint32_t tx_qdbin;
} __aligned(CACHE_LINE_SIZE);
/* Handy wrappers over atomic operations. */
#define DPAA2_ATOMIC_XCHG(a, val) \
(atomic_swap_int(&(a)->counter, (val)))
@@ -109,8 +128,8 @@ struct dpaa2_atomic {
#define DPAA2_ATOMIC_ADD(a, val) \
(atomic_add_acq_int(&(a)->counter, (val)))
/* Convert DPAA2 type to/from string. */
const char *dpaa2_ttos(enum dpaa2_dev_type type);
enum dpaa2_dev_type dpaa2_stot(const char *str);
const char *dpaa2_ttos(enum dpaa2_dev_type);
enum dpaa2_dev_type dpaa2_stot(const char *);
void dpaa2_dmamap_oneseg_cb(void *, bus_dma_segment_t *, int, int);
#endif /* _DPAA2_TYPES_H */

sys/modules/dpaa2/Makefile

@@ -11,6 +11,9 @@ SRCS+= dpaa2_mcp.c
SRCS+= dpaa2_swp.c
SRCS+= dpaa2_mac.c
SRCS+= dpaa2_con.c
SRCS+= dpaa2_buf.c
SRCS+= dpaa2_channel.c
SRCS+= dpaa2_types.c
SRCS+= dpaa2_cmd_if.c dpaa2_cmd_if.h
SRCS+= dpaa2_swp_if.c dpaa2_swp_if.h
@@ -21,12 +24,15 @@ SRCS+= dpaa2_console.c
SRCS+= bus_if.h device_if.h miibus_if.h
SRCS+= pcib_if.h pci_if.h
SRCS+= opt_acpi.h opt_platform.h
SRCS+= opt_acpi.h
SRCS+= opt_platform.h
SRCS+= opt_rss.h
SRCS.DEV_ACPI= dpaa2_mc_acpi.c \
memac_mdio_acpi.c \
acpi_if.h \
acpi_bus_if.h
memac_mdio_acpi.c \
acpi_if.h \
acpi_bus_if.h
.if !empty(OPT_FDT)
SRCS+= dpaa2_mc_fdt.c \