Merge branch 'bnxt_en-2-xdp-bug-fixes'

Michael Chan says:

====================
bnxt_en: 2 XDP bug fixes

The first patch fixes the XDP page pool logic on systems with a page size >=
64K.  The second patch fixes the max_mtu setting when a multi-buffer XDP
program is attached.
====================

Link: https://lore.kernel.org/r/20230731142043.58855-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 4a4474e3bf
Jakub Kicinski <kuba@kernel.org>  2023-08-01 15:05:00 -07:00

2 changed files with 39 additions and 26 deletions
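
The core of the first fix, in isolation: when the system page size exceeds the
hardware receive buffer size BNXT_RX_PAGE_SIZE (as on 64K-page arm64 or
powerpc builds), each page has to be carved into fragments through the page
pool frag API, and only the fragment, starting at its offset within the page,
is DMA-mapped.  The helper below is a minimal sketch of that pattern, not the
driver's code: alloc_rx_buf, buf_len and dir are made-up names, and the pool
is assumed to have been created with PP_FLAG_PAGE_FRAG set.

	#include <linux/dma-mapping.h>
	#include <net/page_pool.h>

	/* Illustrative sketch only: allocate one RX buffer of @buf_len bytes
	 * from @pool.  When the system page is larger than the buffer,
	 * several buffers share one page, each at its own @offset; otherwise
	 * the whole page is one buffer.
	 */
	static struct page *alloc_rx_buf(struct page_pool *pool,
					 struct device *dev,
					 unsigned int buf_len,
					 enum dma_data_direction dir,
					 unsigned int *offset,
					 dma_addr_t *mapping)
	{
		struct page *page;

		if (PAGE_SIZE > buf_len) {
			/* frag API; requires PP_FLAG_PAGE_FRAG on the pool */
			page = page_pool_dev_alloc_frag(pool, offset, buf_len);
		} else {
			page = page_pool_dev_alloc_pages(pool);
			*offset = 0;
		}
		if (!page)
			return NULL;

		/* map only this buffer's slice of the page */
		*mapping = dma_map_page_attrs(dev, page, *offset, buf_len, dir,
					      DMA_ATTR_WEAK_ORDERING);
		if (dma_mapping_error(dev, *mapping)) {
			page_pool_recycle_direct(pool, page);
			return NULL;
		}
		return page;
	}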

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -699,17 +699,24 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
 					 gfp_t gfp)
 {
 	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rxr->page_pool);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+						BNXT_RX_PAGE_SIZE);
+	} else {
+		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		*offset = 0;
+	}
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-				      DMA_ATTR_WEAK_ORDERING);
+	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
@@ -749,15 +756,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	dma_addr_t mapping;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
+		unsigned int offset;
 		struct page *page =
-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
 
 		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
 	} else {
 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ -817,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	unsigned int offset = 0;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
@@ -964,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), PAGE_SIZE);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
 	skb_mark_for_recycle(skb);
-	skb_reserve(skb, bp->rx_dma_offset);
+	skb_reserve(skb, bp->rx_offset);
 	__skb_put(skb, len);
 	return skb;
 }
@@ -998,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1012,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
 	       payload + NET_IP_ALIGN);
 
 	frag = &skb_shinfo(skb)->frags[0];
@@ -1143,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
 	skb->data_len += total_frag_len;
 	skb->len += total_frag_len;
-	skb->truesize += PAGE_SIZE * agg_bufs;
+	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
 	return skb;
 }
 
@@ -2945,8 +2953,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
 			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-					     bp->rx_dir,
+			dma_unmap_page_attrs(&pdev->dev, mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
@@ -3215,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
 	pp.dma_dir = DMA_BIDIRECTIONAL;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+		pp.flags |= PP_FLAG_PAGE_FRAG;
 
 	rxr->page_pool = page_pool_create(&pp);
 	if (IS_ERR(rxr->page_pool)) {
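
The frag allocation in the first hunk only works against a pool that
advertises fragment support, which is what the PP_FLAG_PAGE_FRAG lines above
provide.  A condensed creation sketch, where the pool_size value and the
dev/pool variables are illustrative rather than the driver's exact settings:

	struct page_pool_params pp = { 0 };
	struct page_pool *pool;

	pp.pool_size = 1024;			/* illustrative ring size */
	pp.nid = NUMA_NO_NODE;
	pp.dev = dev;				/* the PCI device's struct device */
	pp.dma_dir = DMA_BIDIRECTIONAL;
	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)	/* e.g. 64K pages, smaller HW buffers */
		pp.flags |= PP_FLAG_PAGE_FRAG;	/* allow page_pool_dev_alloc_frag() */

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);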
@@ -3991,26 +4001,29 @@ void bnxt_set_ring_params(struct bnxt *bp)
  */
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
+	struct net_device *dev = bp->dev;
+
 	if (page_mode) {
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
 
-		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+		if (bp->xdp_prog->aux->xdp_has_frags)
+			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+		else
+			dev->max_mtu =
+				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
 			bp->flags |= BNXT_FLAG_JUMBO;
 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
-			bp->dev->max_mtu =
-				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
 		} else {
 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
 			bp->rx_skb_func = bnxt_rx_page_skb;
-			bp->dev->max_mtu =
-				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
 		}
 		bp->rx_dir = DMA_BIDIRECTIONAL;
 		/* Disable LRO or GRO_HW */
-		netdev_update_features(bp->dev);
+		netdev_update_features(dev);
 	} else {
-		bp->dev->max_mtu = bp->max_mtu;
+		dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
 		bp->rx_dir = DMA_FROM_DEVICE;
 		bp->rx_skb_func = bnxt_rx_skb;
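
The second fix lives in the hunk above: max_mtu is now chosen up front, keyed
off whether the attached XDP program declares multi-buffer support, instead of
being set inside the jumbo/non-jumbo branches.  Previously, a multi-buffer
program attached while dev->mtu was still small left max_mtu clamped to
BNXT_MAX_PAGE_MODE_MTU, so the MTU could not be raised afterwards.  Reduced to
the new decision (a sketch, using the same identifiers as the hunk):

	/* sketch: max_mtu policy with an XDP program attached in page mode */
	if (bp->xdp_prog->aux->xdp_has_frags)
		/* multi-buffer program: a frame may span several RX buffers */
		dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
	else
		/* single-buffer program: the frame must fit in one RX page */
		dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);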

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

@@ -186,8 +186,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp)
 {
+	u32 buflen = BNXT_RX_PAGE_SIZE;
 	struct bnxt_sw_rx_bd *rx_buf;
-	u32 buflen = PAGE_SIZE;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
 	u32 offset;
@@ -303,7 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		rx_buf = &rxr->rx_buf_ring[cons];
 		mapping = rx_buf->mapping - bp->rx_dma_offset;
 		dma_unmap_page_attrs(&pdev->dev, mapping,
-				     PAGE_SIZE, bp->rx_dir,
+				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 				     DMA_ATTR_WEAK_ORDERING);
 
 		/* if we are unable to allocate a new buffer, abort and reuse */
@@ -486,7 +486,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
 	}
 	xdp_update_skb_shared_info(skb, num_frags,
 				   sinfo->xdp_frags_size,
-				   PAGE_SIZE * sinfo->nr_frags,
+				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
 				   xdp_buff_is_frag_pfmemalloc(xdp));
 	return skb;
 }