New bus_dma interfaces for use by crypto device drivers:

o bus_dmamap_load_mbuf
o bus_dmamap_load_uio

Tested on i386.  Known to compile on alpha and sparc64, but not tested.
Otherwise untried.
Sam Leffler 2002-10-04 20:40:39 +00:00
parent c4f9e3ae7f
commit 14c17bd293
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=104486
11 changed files with 1189 additions and 0 deletions
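
For context, a minimal sketch of how a crypto driver might drive the new interface; the xx_* names, softc layout, and descriptor helper are hypothetical illustrations, not part of this commit:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

struct xx_softc {                       /* hypothetical driver state */
	bus_dma_tag_t	sc_dmat;
	bus_dmamap_t	sc_map;
};

static void xx_add_desc(struct xx_softc *, bus_addr_t, bus_size_t);

/* A bus_dmamap_callback2_t: the old callback plus the total map size. */
static void
xx_load_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct xx_softc *sc = arg;
	int i;

	if (error != 0)			/* nseg is 0 when error is set */
		return;
	for (i = 0; i < nseg; i++)	/* hand each segment to the h/w */
		xx_add_desc(sc, segs[i].ds_addr, segs[i].ds_len);
}

static int
xx_start(struct xx_softc *sc, struct mbuf *m0)
{
	/* Map the whole chain; the callback runs before this returns. */
	return (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_map, m0,
	    xx_load_cb, sc, BUS_DMA_NOWAIT));
}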

View file

@@ -32,10 +32,14 @@
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/sgmap.h>
@@ -534,6 +538,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dma_segment_t segs[],
void *buf, bus_size_t buflen,
struct thread *td,
int flags,
vm_offset_t *lastaddrp,
int *segp,
int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
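
The page and boundary clamping above can be checked in isolation; here is a standalone re-derivation, with PAGE_SIZE, the sample address, and the boundary all assumed for illustration (none of them come from the commit):

#include <stdio.h>

#define PAGE_SIZE 8192UL
#define PAGE_MASK (PAGE_SIZE - 1)

int
main(void)
{
	unsigned long curaddr = 0x12345f00UL;	/* sample physical address */
	unsigned long buflen = 0x4000UL;	/* bytes left to map */
	unsigned long boundary = 0x10000UL;	/* tag's boundary */
	unsigned long bmask = ~(boundary - 1);
	unsigned long sgsize, baddr;

	/* Never extend a segment past the end of its page... */
	sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
	if (buflen < sgsize)
		sgsize = buflen;
	/* ...or across the next boundary-aligned address. */
	baddr = (curaddr + boundary) & bmask;
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;

	/* prints: segment at 0x12345f00, length 0x100 (page-limited) */
	printf("segment at 0x%lx, length 0x%lx\n", curaddr, sgsize);
	return (0);
}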
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_mbuf: No support for bounce pages!"));
KASSERT(m0->m_flags & M_PKTHDR,
("bus_dmamap_load_mbuf: no packet header"));
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
m->m_data, m->m_len,
NULL, flags, &lastaddr, &nsegs, first);
first = 0;
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, m0->m_pkthdr.len, error);
}
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_uio: No support for bounce pages!"));
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
addr, minlen,
td, flags, &lastaddr, &nsegs, first);
first = 0;
resid -= minlen;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, uio->uio_resid, error);
}
return (error);
}
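
A similarly hedged sketch of the uio path, reusing the hypothetical xx_softc and xx_load_cb from the earlier sketch: a two-element kernel-space scatter list is described by an iovec array and loaded in one call.

#include <sys/uio.h>

static int
xx_load_pair(struct xx_softc *sc, void *hdr, size_t hdrlen,
    void *payload, size_t paylen)
{
	struct iovec iov[2];
	struct uio uio;

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	iov[1].iov_base = payload;
	iov[1].iov_len = paylen;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 2;
	uio.uio_offset = 0;
	uio.uio_resid = hdrlen + paylen;
	uio.uio_segflg = UIO_SYSSPACE;	/* kernel addresses; no thread needed */
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = NULL;

	return (bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, &uio,
	    xx_load_cb, sc, BUS_DMA_NOWAIT));
}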
/*
* Release the mapping held by map.
*/

View file

@@ -559,6 +559,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/
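
For comparison, the only change from the existing callback type is the added mapsize argument; both typedefs side by side, with the parameters annotated:

/* arg, segs, nseg, error */
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
/* arg, segs, nseg, mapsize (bytes), error */
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);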

View file

@@ -34,9 +34,12 @@
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/md_var.h>
@@ -493,6 +496,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dma_segment_t segs[],
void *buf, bus_size_t buflen,
struct thread *td,
int flags,
vm_offset_t *lastaddrp,
int *segp,
int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_mbuf: No support for bounce pages!"));
KASSERT(m0->m_flags & M_PKTHDR,
("bus_dmamap_load_mbuf: no packet header"));
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
m->m_data, m->m_len,
NULL, flags, &lastaddr, &nsegs, first);
first = 0;
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, m0->m_pkthdr.len, error);
}
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_uio: No support for bounce pages!"));
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
addr, minlen,
td, flags, &lastaddr, &nsegs, first);
first = 0;
resid -= minlen;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, uio->uio_resid, error);
}
return (error);
}
/*
* Release the mapping held by map.
*/

View file

@@ -202,6 +202,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/

View file

@@ -34,9 +34,12 @@
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/md_var.h>
@@ -493,6 +496,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dma_segment_t segs[],
void *buf, bus_size_t buflen,
struct thread *td,
int flags,
vm_offset_t *lastaddrp,
int *segp,
int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_mbuf: No support for bounce pages!"));
KASSERT(m0->m_flags & M_PKTHDR,
("bus_dmamap_load_mbuf: no packet header"));
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
m->m_data, m->m_len,
NULL, flags, &lastaddr, &nsegs, first);
first = 0;
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, m0->m_pkthdr.len, error);
}
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_uio: No support for bounce pages!"));
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
addr, minlen,
td, flags, &lastaddr, &nsegs, first);
first = 0;
resid -= minlen;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, uio->uio_resid, error);
}
return (error);
}
/*
* Release the mapping held by map.
*/

View file

@@ -202,6 +202,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/

View file

@@ -29,11 +29,15 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/md_var.h>
@@ -522,6 +526,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dma_segment_t segs[],
void *buf, bus_size_t buflen,
struct thread *td,
int flags,
vm_offset_t *lastaddrp,
int *segp,
int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_mbuf: No support for bounce pages!"));
KASSERT(m0->m_flags & M_PKTHDR,
("bus_dmamap_load_mbuf: no packet header"));
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
m->m_data, m->m_len,
NULL, flags, &lastaddr, &nsegs, first);
first = 0;
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, m0->m_pkthdr.len, error);
}
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
("bus_dmamap_load_uio: No support for bounce pages!"));
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
error = _bus_dmamap_load_buffer(dmat,
dm_segments,
addr, minlen,
td, flags, &lastaddr, &nsegs, first);
first = 0;
resid -= minlen;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, uio->uio_resid, error);
}
return (error);
}
/*
* Release the mapping held by map.
*/

View file

@@ -1217,6 +1217,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/

View file

@@ -912,6 +912,13 @@ typedef struct bus_dma_segment bus_dma_segment_t;
*/
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* bus_dma_tag_t
*
@@ -942,6 +949,10 @@ struct bus_dma_tag {
int (*dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
int (*dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
int (*dmamap_load_mbuf)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dmamap_callback2_t *, void *, int);
int (*dmamap_load_uio)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
struct uio *, bus_dmamap_callback2_t *, void *, int);
void (*dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
void (*dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
@@ -1010,6 +1021,32 @@ sparc64_dmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
#define bus_dmamap_load(t, m, p, s, cb, cba, f) \
sparc64_dmamap_load((t), (t), (m), (p), (s), (cb), (cba), (f))
static __inline int
sparc64_dmamap_load_mbuf(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
struct mbuf *mb, bus_dmamap_callback2_t *cb, void *cba, int f)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_load_mbuf == NULL; lt = lt->parent)
;
return ((*lt->dmamap_load_mbuf)(lt, dt, m, mb, cb, cba, f));
}
#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
sparc64_dmamap_load_mbuf((t), (t), (m), (mb), (cb), (cba), (f))
static __inline int
sparc64_dmamap_load_uio(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
struct uio *ui, bus_dmamap_callback2_t *cb, void *cba, int f)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_load_uio == NULL; lt = lt->parent)
;
return ((*lt->dmamap_load_uio)(lt, dt, m, ui, cb, cba, f));
}
#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
sparc64_dmamap_load_uio((t), (t), (m), (ui), (cb), (cba), (f))
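
The wrappers above resolve each method by walking the tag's parent chain until a tag that implements it is found; a minimal standalone model of that dispatch (the struct and names are invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct tag {
	struct tag *parent;
	void (*load)(void);	/* NULL means: inherit from an ancestor */
};

static void
root_load(void)
{
	printf("root's load method called\n");
}

int
main(void)
{
	struct tag root = { NULL, root_load };
	struct tag child = { &root, NULL };	/* defines no method itself */
	struct tag *lt;

	/* Same walk as sparc64_dmamap_load_mbuf: stop at first non-NULL. */
	for (lt = &child; lt->load == NULL; lt = lt->parent)
		;
	(*lt->load)();
	return (0);
}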
static __inline void
sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
{

View file

@@ -112,15 +112,18 @@
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <machine/asi.h>
#include <machine/bus.h>
@@ -159,6 +162,10 @@ static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dmamap_callback2_t *, void *, int);
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
struct uio *, bus_dmamap_callback2_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
@@ -211,6 +218,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->dmamap_create = NULL;
newtag->dmamap_destroy = NULL;
newtag->dmamap_load = NULL;
newtag->dmamap_load_mbuf = NULL;
newtag->dmamap_load_uio = NULL;
newtag->dmamap_unload = NULL;
newtag->dmamap_sync = NULL;
newtag->dmamem_alloc = NULL;
@@ -367,6 +376,206 @@ nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
bus_dma_segment_t segs[],
void *buf, bus_size_t buflen,
struct thread *td,
int flags,
vm_offset_t *lastaddrp,
int *segp,
int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
lastaddr = *lastaddrp;
bmask = ~(ddmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
curaddr = pmap_kextract(vaddr);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (ddmat->boundary > 0) {
baddr = (curaddr + ddmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= ddmat->maxsegsz &&
(ddmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= ddmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Like nexus_dmamap_load(), but for mbufs.
*/
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
#ifdef __GNUC__
bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error;
KASSERT(m0->m_flags & M_PKTHDR,
("nexus_dmamap_load_mbuf: no packet header"));
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= ddmat->maxsize) {
int first = 1;
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
error = _nexus_dmamap_load_buffer(pdmat, ddmat,
dm_segments,
m->m_data, m->m_len,
NULL, flags, &lastaddr, &nsegs, first);
first = 0;
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, m0->m_pkthdr.len, error);
}
return (error);
}
/*
* Like nexus_dmamap_load(), but for uios.
*/
static int
nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("nexus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
error = _nexus_dmamap_load_buffer(pdmat, ddmat,
dm_segments,
addr, minlen,
td, flags, &lastaddr, &nsegs, first);
first = 0;
resid -= minlen;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dm_segments, 0, 0, error);
} else {
(*callback)(callback_arg, dm_segments,
nsegs+1, uio->uio_resid, error);
}
return (error);
}
/*
* Common function for unloading a DMA map. May be called by
* bus-specific DMA map unload functions.
@@ -506,6 +715,8 @@ struct bus_dma_tag nexus_dmatag = {
nexus_dmamap_create,
nexus_dmamap_destroy,
nexus_dmamap_load,
nexus_dmamap_load_mbuf,
nexus_dmamap_load_uio,
nexus_dmamap_unload,
nexus_dmamap_sync,

View file

@@ -202,6 +202,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_callback but includes map size in bytes. This is
* defined as a separate interface to maintain compatibility for users
* of bus_dmamap_callback_t--at some point these interfaces should be merged.
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/