busdma: deduplicate _bus_dmamap_addseg() function

It is functionally identical in all implementations, so move the
function to subr_busdma_bounce.c. The KASSERT present in the x86 version
is now enabled for all architectures. It should be universally
applicable.

Reviewed by:	jhb
MFC after:	1 month
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D45047
Mitchell Horne 2024-02-14 13:01:15 -04:00
parent b24e353f9e
commit 5604069824
6 changed files with 68 additions and 212 deletions
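The hunks below all follow the same scheme: each machine-dependent busdma file defines dmat_*() accessor macros for its own bus_dma_tag layout and then #includes the shared sys/kern/subr_busdma_bounce.c, so the single _bus_dmamap_addseg() body is compiled once per architecture against the correct structure fields. The fragment below is a minimal userspace sketch of that pattern, not the kernel sources; struct demo_dma_tag and shared_code_example() are made-up names for illustration.

#include <stdio.h>

/*
 * Hypothetical tag layout standing in for a machine-dependent struct
 * bus_dma_tag; some architectures keep these fields in an embedded
 * 'common' substructure instead.
 */
struct demo_dma_tag {
        unsigned long boundary;
        unsigned long maxsegsz;
        int nsegments;
};

/*
 * Accessor macros with the same names the shared code uses.  An
 * implementation that nests the generic fields would instead write,
 * e.g., #define dmat_boundary(dmat) ((dmat)->common.boundary), as in
 * several of the hunks below.
 */
#define dmat_boundary(dmat)     ((dmat)->boundary)
#define dmat_maxsegsz(dmat)     ((dmat)->maxsegsz)
#define dmat_nsegments(dmat)    ((dmat)->nsegments)

/*
 * In the kernel the next step is
 *      #include "../../kern/subr_busdma_bounce.c"
 * which pulls in _bus_dmamap_addseg() and the bounce-page code.  Here a
 * stand-in "shared" routine shows the key property: it reaches the tag
 * only through the macros, never through architecture-specific fields.
 */
static int
shared_code_example(struct demo_dma_tag *dmat)
{
        return (dmat_nsegments(dmat));
}

int
main(void)
{
        struct demo_dma_tag tag = { 0x1000, 0x10000, 8 };

        printf("nsegments = %d\n", shared_code_example(&tag));
        return (0);
}

The macro tables in the per-architecture hunks below map these same accessor names onto each tag's own layout; the shared function body itself is unchanged.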


@@ -168,11 +168,15 @@ static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */
MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->alignment)
#define dmat_bounce_flags(dmat) (0)
#define dmat_boundary(dmat) ((dmat)->boundary)
#define dmat_flags(dmat) ((dmat)->flags)
#define dmat_highaddr(dmat) ((dmat)->highaddr)
#define dmat_lowaddr(dmat) ((dmat)->lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -842,47 +846,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map,
}
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
                sgsize = roundup2(curaddr, dmat->boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.


@@ -121,12 +121,16 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->common.alignment)
#define dmat_bounce_flags(dmat) ((dmat)->bounce_flags)
#define dmat_boundary(dmat) ((dmat)->common.boundary)
#define dmat_domain(dmat) ((dmat)->common.domain)
#define dmat_flags(dmat) ((dmat)->common.flags)
#define dmat_highaddr(dmat) ((dmat)->common.highaddr)
#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->common.maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->common.nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -711,47 +715,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
                sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->common.boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.


@@ -451,6 +451,54 @@ free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
wakeup(&bounce_map_callbacklist);
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        KASSERT(curaddr <= BUS_SPACE_MAXADDR,
            ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
            "hi %#jx",
            (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
            dmat, dmat_bounce_flags(dmat), (uintmax_t)dmat_lowaddr(dmat),
            (uintmax_t)dmat_highaddr(dmat)));

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat_boundary(dmat)))
                sgsize = roundup2(curaddr, dmat_boundary(dmat)) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat_maxsegsz(dmat) &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat_boundary(dmat)))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat_nsegments(dmat))
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
static void
busdma_thread(void *dummy __unused)
{

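For readers following the logic of the now-shared function above: a chunk is coalesced into the previous segment only if it is physically contiguous with it, the combined length stays within dmat_maxsegsz(), and the merged range still passes vm_addr_bound_ok(). The snippet below is a small standalone illustration of that rule, assuming a power-of-two boundary as the busdma API requires; the seg struct and the bound_ok() and can_coalesce() helpers are simplified stand-ins, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
        uint64_t addr;
        uint64_t len;
};

/* Simplified stand-in for vm_addr_bound_ok(): does [addr, addr + size)
 * stay within a single boundary-aligned window? */
static bool
bound_ok(uint64_t addr, uint64_t size, uint64_t boundary)
{
        return (boundary == 0 ||
            ((addr ^ (addr + size - 1)) & ~(boundary - 1)) == 0);
}

/* The three-part coalescing test from _bus_dmamap_addseg(). */
static bool
can_coalesce(const struct seg *prev, uint64_t curaddr, uint64_t sgsize,
    uint64_t maxsegsz, uint64_t boundary)
{
        return (curaddr == prev->addr + prev->len &&
            prev->len + sgsize <= maxsegsz &&
            bound_ok(prev->addr, prev->len + sgsize, boundary));
}

int
main(void)
{
        uint64_t maxsegsz = 0x10000, boundary = 0x1000;
        struct seg a = { .addr = 0x10000, .len = 0x800 };
        struct seg b = { .addr = 0xf00, .len = 0x100 };

        /* Contiguous, fits, and the merged range [0x10000, 0x10c00)
         * stays below 0x11000: the chunk is folded into the previous
         * segment (prints 1). */
        printf("%d\n", can_coalesce(&a, 0x10800, 0x400, maxsegsz, boundary));

        /* Contiguous and small, but the merged range [0xf00, 0x1100)
         * would cross the 4 KiB boundary at 0x1000: a new segment is
         * started instead (prints 0). */
        printf("%d\n", can_coalesce(&b, 0x1000, 0x100, maxsegsz, boundary));
        return (0);
}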

@@ -100,11 +100,15 @@ struct bus_dmamap {
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->alignment)
#define dmat_bounce_flags(dmat) (0)
#define dmat_boundary(dmat) ((dmat)->boundary)
#define dmat_flags(dmat) ((dmat)->flags)
#define dmat_highaddr(dmat) ((dmat)->highaddr)
#define dmat_lowaddr(dmat) ((dmat)->lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -535,47 +539,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
                sgsize = roundup2(curaddr, dmat->boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.


@@ -111,11 +111,15 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->common.alignment)
#define dmat_bounce_flags(dmat) ((dmat)->bounce_flags)
#define dmat_boundary(dmat) ((dmat)->common.boundary)
#define dmat_flags(dmat) ((dmat)->common.flags)
#define dmat_highaddr(dmat) ((dmat)->common.highaddr)
#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->common.maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->common.nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -547,47 +551,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
                sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->common.boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.


@@ -108,12 +108,16 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->common.alignment)
#define dmat_bounce_flags(dmat) ((dmat)->bounce_flags)
#define dmat_boundary(dmat) ((dmat)->common.boundary)
#define dmat_domain(dmat) ((dmat)->common.domain)
#define dmat_flags(dmat) ((dmat)->common.flags)
#define dmat_highaddr(dmat) ((dmat)->common.highaddr)
#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->common.maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->common.nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -622,54 +626,6 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
}
}
/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
        int seg;

        KASSERT(curaddr <= BUS_SPACE_MAXADDR,
            ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
            "hi %#jx",
            (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
            dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
            (uintmax_t)dmat->common.highaddr));

        /*
         * Make sure we don't cross any boundaries.
         */
        if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
                sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;

        /*
         * Insert chunk into a segment, coalescing with
         * previous segment if possible.
         */
        seg = *segp;
        if (seg == -1) {
                seg = 0;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
        } else {
                if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
                    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
                    vm_addr_bound_ok(segs[seg].ds_addr,
                    segs[seg].ds_len + sgsize, dmat->common.boundary))
                        segs[seg].ds_len += sgsize;
                else {
                        if (++seg >= dmat->common.nsegments)
                                return (0);
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
        }
        *segp = seg;
        return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.