(This commit only touches code within the DEV_NETMAP blocks)

Introduce some functions to map NIC ring indexes into netmap ring
indexes and vice versa. This way we can implement the bound
checks only in one place (and hopefully in a correct way).

In passing, make the code and comments more uniform across the
various drivers.
Luigi Rizzo 2012-02-15 23:13:29 +00:00
parent a7d022e777
commit 5644ccec61
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=231796
10 changed files with 97 additions and 107 deletions
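
The rule the new helpers centralize is simple: a netmap (kring) slot index is the corresponding NIC ring index shifted by the kring's nkr_hwofs and wrapped back into [0, num_desc), and nkr_hwofs may be negative, so both underflow and overflow have to be handled. A minimal standalone sketch of that rule -- toy ring size, made-up offsets and a stand-in helper name, not code from this commit:

/*
 * Editorial sketch, not part of the commit: map NIC ring indexes of a
 * toy 8-slot ring to netmap indexes for one positive and one negative
 * offset, using the same wrap rule as the new helpers.
 */
#include <stdio.h>

static int
nic_to_kring(int nic_idx, int hwofs, int num_desc)
{
	int k = nic_idx + hwofs;

	if (k < 0)
		return (k + num_desc);
	if (k >= num_desc)
		return (k - num_desc);
	return (k);
}

int
main(void)
{
	const int num_desc = 8;
	const int sample_ofs[2] = { 3, -2 };	/* made-up offsets */

	for (int s = 0; s < 2; s++) {
		printf("nkr_hwofs = %d:\n", sample_ofs[s]);
		for (int i = 0; i < num_desc; i++)
			printf("  NIC %d -> netmap %d\n",
			    i, nic_to_kring(i, sample_ofs[s], num_desc));
	}
	return (0);
}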

View file

@@ -3296,12 +3296,10 @@ em_setup_transmit_ring(struct tx_ring *txr)
 		}
 #ifdef DEV_NETMAP
 		if (slot) {
-			int si = i + na->tx_rings[txr->me].nkr_hwofs;
+			int si = netmap_tidx_n2k(na, txr->me, i);
 			uint64_t paddr;
 			void *addr;
-			if (si >= na->num_tx_desc)
-				si -= na->num_tx_desc;
 			addr = PNMB(slot + si, &paddr);
 			txr->tx_base[i].buffer_addr = htole64(paddr);
 			/* reload the map for netmap mode */
@@ -4053,13 +4051,10 @@ em_setup_receive_ring(struct rx_ring *rxr)
 		rxbuf = &rxr->rx_buffers[j];
 #ifdef DEV_NETMAP
 		if (slot) {
-			/* slot si is mapped to the j-th NIC-ring entry */
-			int si = j + na->rx_rings[0].nkr_hwofs;
+			int si = netmap_ridx_n2k(na, rxr->me, j);
 			uint64_t paddr;
 			void *addr;
-			if (si > na->num_rx_desc)
-				si -= na->num_rx_desc;
 			addr = PNMB(slot + si, &paddr);
 			netmap_load_map(rxr->rxtag, rxbuf->map, addr);
 			/* Update descriptor */

View file

@@ -3315,11 +3315,8 @@ igb_setup_transmit_ring(struct tx_ring *txr)
 		}
 #ifdef DEV_NETMAP
 		if (slot) {
-			/* slot si is mapped to the i-th NIC-ring entry */
-			int si = i + na->tx_rings[txr->me].nkr_hwofs;
-			if (si < 0)
-				si += na->num_tx_desc;
+			int si = netmap_tidx_n2k(na, txr->me, i);
 			/* no need to set the address */
 			netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
 		}
 #endif /* DEV_NETMAP */
@@ -4060,12 +4057,10 @@ igb_setup_receive_ring(struct rx_ring *rxr)
 #ifdef DEV_NETMAP
 		if (slot) {
 			/* slot sj is mapped to the i-th NIC-ring entry */
-			int sj = j + na->rx_rings[rxr->me].nkr_hwofs;
+			int sj = netmap_ridx_n2k(na, rxr->me, j);
 			uint64_t paddr;
 			void *addr;
-			if (sj < 0)
-				sj += na->num_rx_desc;
 			addr = PNMB(slot + sj, &paddr);
 			netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
 			/* Update descriptor */

View file

@@ -2668,13 +2668,11 @@ lem_setup_transmit_structures(struct adapter *adapter)
 		tx_buffer->m_head = NULL;
 #ifdef DEV_NETMAP
 		if (slot) {
-			/* slot si is mapped to the i-th NIC-ring entry */
-			int si = i + na->tx_rings[0].nkr_hwofs;
+			/* the i-th NIC entry goes to slot si */
+			int si = netmap_tidx_n2k(na, 0, i);
 			uint64_t paddr;
 			void *addr;
-			if (si > na->num_tx_desc)
-				si -= na->num_tx_desc;
 			addr = PNMB(slot + si, &paddr);
 			adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
 			/* reload the map for netmap mode */
@@ -3244,13 +3242,11 @@ lem_setup_receive_structures(struct adapter *adapter)
 	for (i = 0; i < adapter->num_rx_desc; i++) {
 #ifdef DEV_NETMAP
 		if (slot) {
-			/* slot si is mapped to the i-th NIC-ring entry */
-			int si = i + na->rx_rings[0].nkr_hwofs;
+			/* the i-th NIC entry goes to slot si */
+			int si = netmap_ridx_n2k(na, 0, i);
 			uint64_t paddr;
 			void *addr;
-			if (si > na->num_rx_desc)
-				si -= na->num_rx_desc;
 			addr = PNMB(slot + si, &paddr);
 			netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
 			/* Update descriptor */

View file

@@ -2969,14 +2969,11 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
 		 * Slots in the netmap ring (indexed by "si") are
 		 * kring->nkr_hwofs positions "ahead" wrt the
 		 * corresponding slot in the NIC ring. In some drivers
-		 * (not here) nkr_hwofs can be negative. When computing
-		 * si = i + kring->nkr_hwofs make sure to handle wraparounds.
+		 * (not here) nkr_hwofs can be negative. Function
+		 * netmap_tidx_n2k() handles wraparounds properly.
 		 */
 		if (slot) {
-			int si = i + na->tx_rings[txr->me].nkr_hwofs;
-			if (si >= na->num_tx_desc)
-				si -= na->num_tx_desc;
+			int si = netmap_tidx_n2k(na, txr->me, i);
 			netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
 		}
 #endif /* DEV_NETMAP */
@@ -3925,12 +3922,10 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
 		 * an mbuf, so end the block with a continue;
 		 */
 		if (slot) {
-			int sj = j + na->rx_rings[rxr->me].nkr_hwofs;
+			int sj = netmap_ridx_n2k(na, rxr->me, j);
 			uint64_t paddr;
 			void *addr;
-			if (sj >= na->num_rx_desc)
-				sj -= na->num_rx_desc;
 			addr = PNMB(slot + sj, &paddr);
 			netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
 			/* Update descriptor */

View file

@@ -27,7 +27,7 @@
  * $FreeBSD$
  * $Id: if_em_netmap.h 9802 2011-12-02 18:42:37Z luigi $
  *
- * netmap support for if_em.
+ * netmap support for if_em.c
  *
  * For structure and details on the individual functions please see
  * ixgbe_netmap.h
@@ -66,9 +66,6 @@ em_netmap_attach(struct adapter *adapter)
 }
-/*
- * wrapper to export locks to the generic code
- */
 static void
 em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
 {
@@ -214,9 +211,7 @@ em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) { /* we have packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_tx_desc *curr = &txr->tx_base[l];
@@ -322,12 +317,7 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	/* XXX here nkr_hwofs can be negative so must check for j < 0 */
-	if (j < 0)
-		j += lim + 1;
-	else if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	for (n = 0; ; n++) {
 		struct e1000_rx_desc *curr = &rxr->rx_base[l];
@@ -347,15 +337,10 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	}
 	/* skip past packets that userspace has already processed */
-	j = kring->nr_hwcur;
+	j = kring->nr_hwcur; /* netmap ring index */
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		/* here nkr_hwofs can be negative so check for l > lim */
-		if (l < 0)
-			l += lim + 1;
-		else if (l > lim)
-			l -= lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_rx_desc *curr = &rxr->rx_base[l];
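
All the txsync/rxsync hunks above share one loop shape: the netmap cursor is converted to a NIC descriptor index exactly once, through the new k2n helper, and the two indexes then advance in lockstep modulo the ring size. A standalone sketch of that walk, with made-up ring size, offset and cursor values and a local stand-in for the helper:

/*
 * Editorial sketch, not kernel code: j walks the netmap ring, l walks
 * the NIC ring; the offset conversion happens once before the loop.
 */
#include <stdio.h>

static int
kring_to_nic(int kring_idx, int hwofs, int num_desc)
{
	int n = kring_idx - hwofs;

	if (n < 0)
		return (n + num_desc);
	if (n >= num_desc)
		return (n - num_desc);
	return (n);
}

int
main(void)
{
	const int num_desc = 8, lim = num_desc - 1;
	const int hwofs = 3;	/* made-up offset */
	const int k = 2;	/* ring->cur (made up) */
	int j = 6;		/* kring->nr_hwcur (made up) */
	int l = kring_to_nic(j, hwofs, num_desc);

	while (j != k) {
		printf("netmap slot %d <-> NIC descriptor %d\n", j, l);
		j = (j == lim) ? 0 : j + 1;
		l = (l == lim) ? 0 : l + 1;
	}
	return (0);
}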

View file

@@ -169,9 +169,7 @@ igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 		u32 olinfo_status =
 		    (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			union e1000_adv_tx_desc *curr =
@@ -287,9 +285,7 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = rxr->next_to_check;
-	j = l + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	for (n = 0; ; n++) {
 		union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
 		uint32_t staterr = le32toh(curr->wb.upper.status_error);
@@ -311,9 +307,7 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = ring->slot + j;
 			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];

View file

@@ -45,8 +45,6 @@ static int lem_netmap_rxsync(struct ifnet *, u_int, int);
 static void lem_netmap_lock_wrapper(struct ifnet *, int, u_int);
-SYSCTL_NODE(_dev, OID_AUTO, lem, CTLFLAG_RW, 0, "lem card");
 static void
 lem_netmap_attach(struct adapter *adapter)
 {
@@ -153,7 +151,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->tx_rings[0];
+	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
@@ -176,9 +174,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) { /* we have packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j);
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
@@ -260,7 +256,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 {
 	struct adapter *adapter = ifp->if_softc;
 	struct netmap_adapter *na = NA(adapter->ifp);
-	struct netmap_kring *kring = &na->rx_rings[0];
+	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	int j, k, l, n, lim = kring->nkr_num_slots - 1;
@@ -283,9 +279,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * j == (l + kring->nkr_hwofs) % ring_size
 	 */
 	l = adapter->next_rx_desc_to_check;
-	j = l + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	for (n = 0; ; n++) {
 		struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
 		int len;
@@ -310,12 +304,10 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	}
 	/* skip past packets that userspace has already processed */
-	j = kring->nr_hwcur; /* netmap ring index */
+	j = kring->nr_hwcur; /* netmap ring index */
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = j - kring->nkr_hwofs; /* NIC ring index */
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
 		while (j != k) {
 			struct netmap_slot *slot = &ring->slot[j];
 			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
@@ -332,7 +324,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			curr->status = 0;
 			if (slot->flags & NS_BUF_CHANGED) {
 				curr->buffer_addr = htole64(paddr);
-				/* buffer has changed, and reload map */
+				/* buffer has changed, reload map */
 				netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
 				slot->flags &= ~NS_BUF_CHANGED;
 			}

View file

@@ -263,7 +263,7 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * is to limit the amount of data reported up to 'lim'
 	 */
 	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
-	j = l + kring->nkr_hwofs;
+	j = netmap_ridx_n2k(na, ring_nr, l); /* the kring index */
 	for (n = kring->nr_hwavail; n < lim ; n++) {
 		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
 		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
@@ -296,9 +296,7 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = kring->nr_hwcur - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j); /* the NIC index */
 		while (j != k) {
 			struct netmap_slot *slot = ring->slot + j;
 			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
@@ -370,11 +368,7 @@ re_netmap_tx_init(struct rl_softc *sc)
 	for (i = 0; i < n; i++) {
 		void *addr;
 		uint64_t paddr;
-		struct netmap_kring *kring = &na->tx_rings[0];
-		int l = i + kring->nkr_hwofs;
-		if (l >= n)
-			l -= n;
+		int l = netmap_tidx_n2k(na, 0, i);
 		addr = PNMB(slot + l, &paddr);
 		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
@@ -391,19 +385,21 @@ re_netmap_rx_init(struct rl_softc *sc)
 	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
 	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
 	uint32_t cmdstat;
-	int i, n;
+	int i, n, max_avail;
 	if (!slot)
 		return;
 	n = sc->rl_ldata.rl_rx_desc_cnt;
+	/*
+	 * Userspace owned hwavail packets before the reset, so tell
+	 * the NIC that the last hwavail descriptors of the ring are
+	 * still owned by the driver (and keep one empty).
+	 */
+	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
 	for (i = 0; i < n; i++) {
 		void *addr;
 		uint64_t paddr;
-		struct netmap_kring *kring = &na->rx_rings[0];
-		int l = i + kring->nkr_hwofs;
-		if (l >= n)
-			l -= n;
+		int l = netmap_ridx_n2k(na, 0, i);
 		addr = PNMB(slot + l, &paddr);
@@ -414,14 +410,9 @@ re_netmap_rx_init(struct rl_softc *sc)
 		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
 		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
 		cmdstat = na->buff_size;
-		if (i == n - 1)
+		if (i == n - 1) /* mark the end of ring */
 			cmdstat |= RL_RDESC_CMD_EOR;
-		/*
-		 * userspace knows that hwavail packets were ready before the
-		 * reset, so we need to tell the NIC that last hwavail
-		 * descriptors of the ring are still owned by the driver.
-		 */
-		if (i < n - 1 - kring->nr_hwavail) // XXX + 1 ?
+		if (i < max_avail)
 			cmdstat |= RL_RDESC_CMD_OWN;
 		desc[i].rl_cmdstat = htole32(cmdstat);
 	}
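
The max_avail computation above splits descriptor ownership after a ring reset: descriptors below max_avail get RL_RDESC_CMD_OWN and go back to the NIC, while the last nr_hwavail descriptors, plus one kept empty, stay with the driver because userspace still holds those buffers. A toy calculation with assumed numbers (not driver code) makes the split concrete:

/*
 * Editorial sketch with assumed values: how many rx descriptors would
 * be handed back to the NIC by the ownership rule above.
 */
#include <stdio.h>

int
main(void)
{
	const int n = 256;		/* rl_rx_desc_cnt, assumed */
	const int nr_hwavail = 10;	/* buffers userspace still owns, assumed */
	const int max_avail = n - 1 - nr_hwavail;
	int nic_owned = 0;

	for (int i = 0; i < n; i++)
		if (i < max_avail)	/* these would get RL_RDESC_CMD_OWN */
			nic_owned++;
	printf("%d of %d descriptors go to the NIC, %d stay with the driver\n",
	    nic_owned, n, n - nic_owned);
	return (0);
}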

View file

@@ -210,6 +210,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	IXGBE_TX_LOCK(txr);
 	/* take a copy of ring->cur now, and never read it again */
 	k = ring->cur;
+	/* do a sanity check on cur - hwcur XXX verify */
 	l = k - kring->nr_hwcur;
 	if (l < 0)
 		l += lim + 1;
@@ -240,9 +241,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	j = kring->nr_hwcur;
 	if (j != k) { /* we have new packets to send */
-		l = j - kring->nkr_hwofs;
-		if (l < 0) /* wraparound */
-			l += lim + 1;
+		l = netmap_tidx_k2n(na, ring_nr, j); /* NIC index */
 		while (j != k) {
 			/*
@@ -459,9 +458,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 * rxr->next_to_check is set to 0 on a ring reinit
 	 */
 	l = rxr->next_to_check;
-	j = rxr->next_to_check + kring->nkr_hwofs;
-	if (j > lim)
-		j -= lim + 1;
+	j = netmap_ridx_n2k(na, ring_nr, l);
 	if (netmap_no_pendintr || force_update) {
 		for (n = 0; ; n++) {
@@ -493,9 +490,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	j = kring->nr_hwcur;
 	if (j != k) { /* userspace has read some packets. */
 		n = 0;
-		l = kring->nr_hwcur - kring->nkr_hwofs;
-		if (l < 0)
-			l += lim + 1;
+		l = netmap_ridx_k2n(na, ring_nr, j);
 		while (j != k) {
 			/* collect per-slot info, with similar validations
 			 * and flag handling as in the txsync code.

View file

@@ -250,6 +250,58 @@ netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
 	}
 }
+/*
+ * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
+ */
+static inline int
+netmap_ridx_n2k(struct netmap_adapter *na, int ring, int nic_idx)
+{
+	int kring_idx = nic_idx + na->rx_rings[ring].nkr_hwofs;
+
+	if (kring_idx < 0)
+		return kring_idx + na->num_rx_desc;
+	else if (kring_idx < na->num_rx_desc)
+		return kring_idx;
+	else
+		return kring_idx - na->num_rx_desc;
+}
+
+static inline int
+netmap_tidx_n2k(struct netmap_adapter *na, int ring, int nic_idx)
+{
+	int kring_idx = nic_idx + na->tx_rings[ring].nkr_hwofs;
+
+	if (kring_idx < 0)
+		return kring_idx + na->num_tx_desc;
+	else if (kring_idx < na->num_tx_desc)
+		return kring_idx;
+	else
+		return kring_idx - na->num_tx_desc;
+}
+
+static inline int
+netmap_ridx_k2n(struct netmap_adapter *na, int ring, int kring_idx)
+{
+	int nic_idx = kring_idx - na->rx_rings[ring].nkr_hwofs;
+
+	if (nic_idx < 0)
+		return nic_idx + na->num_rx_desc;
+	else if (nic_idx < na->num_rx_desc)
+		return nic_idx;
+	else
+		return nic_idx - na->num_rx_desc;
+}
+
+static inline int
+netmap_tidx_k2n(struct netmap_adapter *na, int ring, int kring_idx)
+{
+	int nic_idx = kring_idx - na->tx_rings[ring].nkr_hwofs;
+
+	if (nic_idx < 0)
+		return nic_idx + na->num_tx_desc;
+	else if (nic_idx < na->num_tx_desc)
+		return nic_idx;
+	else
+		return nic_idx - na->num_tx_desc;
+}
+
 /*
  * NMB return the virtual address of a buffer (buffer 0 on bad index)
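
Because each k2n helper subtracts the same nkr_hwofs that the matching n2k helper adds, and both wrap by at most one ring length, the two are inverses on their ring; that round trip is what lets the drivers drop the per-call bounds handling. A standalone sketch -- simplified stand-in structures mirroring the arithmetic above, not the real netmap headers -- that checks k2n(n2k(i)) == i over a range of positive and negative offsets:

/*
 * Editorial sketch, not the netmap code: verify the round-trip property
 * of the n2k/k2n arithmetic on a toy 8-descriptor tx ring.
 */
#include <assert.h>
#include <stdio.h>

struct toy_kring { int nkr_hwofs; };
struct toy_adapter { struct toy_kring tx_rings[1]; int num_tx_desc; };

static int
toy_tidx_n2k(struct toy_adapter *na, int ring, int nic_idx)
{
	int k = nic_idx + na->tx_rings[ring].nkr_hwofs;

	if (k < 0)
		return (k + na->num_tx_desc);
	if (k >= na->num_tx_desc)
		return (k - na->num_tx_desc);
	return (k);
}

static int
toy_tidx_k2n(struct toy_adapter *na, int ring, int kring_idx)
{
	int n = kring_idx - na->tx_rings[ring].nkr_hwofs;

	if (n < 0)
		return (n + na->num_tx_desc);
	if (n >= na->num_tx_desc)
		return (n - na->num_tx_desc);
	return (n);
}

int
main(void)
{
	struct toy_adapter na = { .tx_rings = {{ .nkr_hwofs = 0 }},
	    .num_tx_desc = 8 };

	for (int ofs = -7; ofs <= 7; ofs++) {
		na.tx_rings[0].nkr_hwofs = ofs;
		for (int i = 0; i < na.num_tx_desc; i++)
			assert(toy_tidx_k2n(&na, 0,
			    toy_tidx_n2k(&na, 0, i)) == i);
	}
	printf("round trip holds for all offsets in [-7, 7]\n");
	return (0);
}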