Revamp the busdma implementation a bit:

- change the IOMMU support code so that it supports overcommitting the
  available DVMA memory, while still allocating as lazily as possible.
  This is achieved by limiting the preallocation and deferring the
  allocation to map load time when the preallocation fails. In the latter
  case, the DVMA memory reserved for unloaded maps can be stolen to free
  up enough memory for loading a map (see the sketch right after this
  list).
- allow NULL entries in the method tables, and search the parent tags
  until an appropriate implementation is found. This makes it possible to
  remove some kluges in the old implementation (a dispatch example
  follows the machine/bus.h hunks below).
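
The stealing scheme boils down to the following minimal sketch. dvma_reserve() is a hypothetical helper (the committed logic lives inline in iommu_dvmamap_load(), in the iommu.c hunks below); the types, the LRU queue, and the valloc/vfree helpers are the ones this commit introduces:

/*
 * Sketch only: make sure `map' has at least `size' bytes of DVMA
 * reserved, stealing idle reservations from unloaded maps if needed.
 */
static int
dvma_reserve(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map,
    bus_size_t size)
{
        bus_dmamap_t victim;

        if (size <= map->dvmaresv)
                return (0);             /* the preallocation suffices */
        iommu_dvma_vfree(map);          /* release the stale reservation */
        while (iommu_dvma_valloc(dt, is, map, size) == ENOMEM) {
                /* Steal the least recently used idle reservation. */
                victim = STAILQ_FIRST(&iommu_maplruq);
                if (victim == NULL)
                        return (ENOMEM);        /* nothing left to steal */
                iommu_dvma_vfree(victim);
        }
        return (0);
}

The committed loop additionally frees several queued reservations per retry to limit the number of calls into the rman allocator, and the load path then takes the map itself off the LRU queue to mark it busy.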
Thomas Moestl 2002-03-24 02:50:53 +00:00
parent a52a303f5e
commit e8e2c56650
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=93070
7 changed files with 450 additions and 223 deletions

sys/sparc64/include/bus.h

@@ -827,19 +827,22 @@ struct bus_dma_tag {
/*
* DMA mapping methods.
*/
int (*dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
int (*dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
int (*dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, bus_dmamap_callback_t *, void *, int);
void (*dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
void (*dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
int (*dmamap_create)(bus_dma_tag_t, bus_dma_tag_t, int,
bus_dmamap_t *);
int (*dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
int (*dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
void (*dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
void (*dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
/*
* DMA memory utility functions.
*/
int (*dmamem_alloc)(bus_dma_tag_t, void **, int, bus_dmamap_t *);
void (*dmamem_free)(bus_dma_tag_t, void *, bus_dmamap_t);
int (*dmamem_alloc)(bus_dma_tag_t, bus_dma_tag_t, void **, int,
bus_dmamap_t *);
void (*dmamem_free)(bus_dma_tag_t, bus_dma_tag_t, void *,
bus_dmamap_t);
};
/*
@@ -859,29 +862,93 @@ int bus_dma_tag_destroy(bus_dma_tag_t);
int sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp);
void sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map);
static __inline int
sparc64_dmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, int f,
bus_dmamap_t *p)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_create == NULL; lt = lt->parent)
;
return ((*lt->dmamap_create)(lt, dt, f, p));
}
#define bus_dmamap_create(t, f, p) \
(*(t)->dmamap_create)((t), (f), (p))
sparc64_dmamap_create((t), (t), (f), (p))
static __inline int
sparc64_dmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_destroy == NULL; lt = lt->parent)
;
return ((*lt->dmamap_destroy)(lt, dt, p));
}
#define bus_dmamap_destroy(t, p) \
(*(t)->dmamap_destroy)((t), (p))
sparc64_dmamap_destroy((t), (t), (p))
static __inline int
sparc64_dmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
void *p, bus_size_t s, bus_dmamap_callback_t *cb, void *cba, int f)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_load == NULL; lt = lt->parent)
;
return ((*lt->dmamap_load)(lt, dt, m, p, s, cb, cba, f));
}
#define bus_dmamap_load(t, m, p, s, cb, cba, f) \
(*(t)->dmamap_load)((t), (m), (p), (s), (cb), (cba), (f))
sparc64_dmamap_load((t), (t), (m), (p), (s), (cb), (cba), (f))
static __inline void
sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_unload == NULL; lt = lt->parent)
;
(*lt->dmamap_unload)(lt, dt, p);
}
#define bus_dmamap_unload(t, p) \
(*(t)->dmamap_unload)((t), (p))
sparc64_dmamap_unload((t), (t), (p))
static __inline void
sparc64_dmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
bus_dmasync_op_t op)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamap_sync == NULL; lt = lt->parent)
;
(*lt->dmamap_sync)(lt, dt, m, op);
}
#define bus_dmamap_sync(t, m, op) \
(void)((t)->dmamap_sync ? \
(*(t)->dmamap_sync)((t), (m), (op)) : (void)0)
sparc64_dmamap_sync((t), (t), (m), (op))
static __inline int
sparc64_dmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, void **v, int f,
bus_dmamap_t *m)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamem_alloc == NULL; lt = lt->parent)
;
return ((*lt->dmamem_alloc)(lt, dt, v, f, m));
}
#define bus_dmamem_alloc(t, v, f, m) \
(*(t)->dmamem_alloc)((t), (v), (f), (m))
#define bus_dmamem_free(t, v, m) \
(*(t)->dmamem_free)((t), (v), (m))
sparc64_dmamem_alloc((t), (t), (v), (f), (m))
struct bus_dmamap {
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
bus_addr_t start; /* start of mapped region */
struct resource *res; /* associated resource */
};
static __inline void
sparc64_dmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, void *v,
bus_dmamap_t m)
{
bus_dma_tag_t lt;
for (lt = pt; lt->dmamem_free == NULL; lt = lt->parent)
;
(*lt->dmamem_free)(lt, dt, v, m);
}
#define bus_dmamem_free(t, v, m) \
sparc64_dmamem_free((t), (t), (v), (m))
#endif /* !_MACHINE_BUS_H_ */
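
A hedged illustration of the new dispatch, using the inlines above; example_create() and leaf_tag are hypothetical, everything else is from this header:

/*
 * Illustration only: a driver tag created by bus_dma_tag_create() has
 * all method pointers NULL and its parent set to the bus tag (or to
 * sparc64_root_dma_tag when no parent was given).
 */
static int
example_create(bus_dma_tag_t leaf_tag, bus_dmamap_t *mapp)
{
        /*
         * Expands to sparc64_dmamap_create(leaf_tag, leaf_tag, 0, mapp);
         * the loop walks ->parent from leaf_tag until it finds a tag with
         * a non-NULL dmamap_create (e.g. the psycho or sbus tag) and calls
         * it with the implementing tag as `pt' and the original tag as
         * `dt'.  The bus method fetches its softc from pt->cookie, while
         * sizes and segment limits are taken from dt.
         */
        return (bus_dmamap_create(leaf_tag, 0, mapp));
}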

sys/sparc64/include/bus_private.h (new file)

@@ -0,0 +1,47 @@
/*-
* Copyright (c) 1997, 1998 Justin T. Gibbs.
* Copyright (c) 2002 by Thomas Moestl <tmm@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.25 2002/01/05
*
* $FreeBSD$
*/
#ifndef _MACHINE_BUS_PRIVATE_H_
#define _MACHINE_BUS_PRIVATE_H_
#include <sys/queue.h>
struct bus_dmamap {
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
bus_addr_t start; /* start of mapped region */
struct resource *res; /* associated resource */
bus_size_t dvmaresv; /* reserved DVMA memory */
STAILQ_ENTRY(bus_dmamap) maplruq;
int onq;
};
#endif /* !_MACHINE_BUS_PRIVATE_H_ */
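
The new fields carry the lazy-allocation state. Reconstructed from the iommu.c hunks below, the intended lifecycle is roughly:

/*
 * dmamap_create:  iommu_dvma_valloc() may preallocate DVMA; on success
 *                 dvmaresv records the size and iommu_map_insq() queues
 *                 the map as idle and stealable (onq = 1).
 * dmamap_load:    iommu_map_remq() busies the map; if dvmaresv is too
 *                 small, the reservation is reallocated, stealing from
 *                 queued maps on ENOMEM.
 * dmamap_unload:  iommu_map_insq() requeues the map, keeping dvmaresv
 *                 for cheap reuse while offering it to other loads.
 * dmamap_destroy: iommu_dvma_vfree() releases res and zeroes dvmaresv.
 */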

sys/sparc64/include/iommuvar.h

@@ -82,19 +82,19 @@ void iommu_enter(struct iommu_state *, vm_offset_t, vm_offset_t, int);
void iommu_remove(struct iommu_state *, vm_offset_t, size_t);
void iommu_decode_fault(struct iommu_state *, vm_offset_t);
int iommu_dvmamap_create(bus_dma_tag_t, struct iommu_state *, int,
bus_dmamap_t *);
int iommu_dvmamap_destroy(bus_dma_tag_t, struct iommu_state *,
int iommu_dvmamap_create(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
int, bus_dmamap_t *);
int iommu_dvmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t);
int iommu_dvmamap_load(bus_dma_tag_t, struct iommu_state *, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
void iommu_dvmamap_unload(bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t);
void iommu_dvmamap_sync(bus_dma_tag_t, struct iommu_state *, bus_dmamap_t,
bus_dmasync_op_t);
int iommu_dvmamem_alloc(bus_dma_tag_t, struct iommu_state *, void **, int,
bus_dmamap_t *);
void iommu_dvmamem_free(bus_dma_tag_t, struct iommu_state *, void *,
int iommu_dvmamap_load(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t, void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
void iommu_dvmamap_unload(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t);
void iommu_dvmamap_sync(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
bus_dmamap_t, bus_dmasync_op_t);
int iommu_dvmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
void **, int, bus_dmamap_t *);
void iommu_dvmamem_free(bus_dma_tag_t, bus_dma_tag_t, struct iommu_state *,
void *, bus_dmamap_t);
#endif /* !_MACHINE_IOMMUVAR_H_ */

sys/sparc64/pci/psycho.c

@@ -99,14 +99,18 @@ static void psycho_iommu_init(struct psycho_softc *, int);
* bus space and bus dma support for UltraSPARC `psycho'. note that most
* of the bus dma support is provided by the iommu dvma controller.
*/
static int psycho_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
static int psycho_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static int psycho_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
bus_dmamap_callback_t *, void *, int);
static void psycho_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void psycho_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
static int psycho_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
static void psycho_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
static int psycho_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
bus_dmamap_t *);
static int psycho_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int psycho_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static void psycho_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void psycho_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
static int psycho_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
bus_dmamap_t *);
static void psycho_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
bus_dmamap_t);
/*
* autoconfiguration
@@ -1274,69 +1278,74 @@ psycho_alloc_bus_tag(struct psycho_softc *sc, int type)
* hooks into the iommu dvma calls.
*/
static int
psycho_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp)
psycho_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
int flags, bus_dmamap_t *mapp)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
return (iommu_dvmamem_alloc(dmat, sc->sc_is, vaddr, flags, mapp));
sc = (struct psycho_softc *)pdmat->cookie;
return (iommu_dvmamem_alloc(pdmat, ddmat, sc->sc_is, vaddr, flags,
mapp));
}
static void
psycho_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
psycho_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
bus_dmamap_t map)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
iommu_dvmamem_free(dmat, sc->sc_is, vaddr, map);
sc = (struct psycho_softc *)pdmat->cookie;
iommu_dvmamem_free(pdmat, ddmat, sc->sc_is, vaddr, map);
}
static int
psycho_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
psycho_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
bus_dmamap_t *mapp)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
return (iommu_dvmamap_create(dmat, sc->sc_is, flags, mapp));
sc = (struct psycho_softc *)pdmat->cookie;
return (iommu_dvmamap_create(pdmat, ddmat, sc->sc_is, flags, mapp));
}
static int
psycho_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
psycho_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
bus_dmamap_t map)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
return (iommu_dvmamap_destroy(dmat, sc->sc_is, map));
sc = (struct psycho_softc *)pdmat->cookie;
return (iommu_dvmamap_destroy(pdmat, ddmat, sc->sc_is, map));
}
static int
psycho_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
psycho_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
return (iommu_dvmamap_load(dmat, sc->sc_is, map, buf, buflen, callback,
callback_arg, flags));
sc = (struct psycho_softc *)pdmat->cookie;
return (iommu_dvmamap_load(pdmat, ddmat, sc->sc_is, map, buf, buflen,
callback, callback_arg, flags));
}
static void
psycho_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
psycho_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
iommu_dvmamap_unload(dmat, sc->sc_is, map);
sc = (struct psycho_softc *)pdmat->cookie;
iommu_dvmamap_unload(pdmat, ddmat, sc->sc_is, map);
}
static void
psycho_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
psycho_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
struct psycho_softc *sc;
sc = (struct psycho_softc *)dmat->cookie;
iommu_dvmamap_sync(dmat, sc->sc_is, map, op);
sc = (struct psycho_softc *)pdmat->cookie;
iommu_dvmamap_sync(pdmat, ddmat, sc->sc_is, map, op);
}

sys/sparc64/sbus/sbus.c

@@ -231,14 +231,18 @@ static void sbus_pwrfail(void *);
/*
* DVMA routines
*/
static int sbus_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
static int sbus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static int sbus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
bus_dmamap_callback_t *, void *, int);
static void sbus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void sbus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
static int sbus_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
static void sbus_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
static int sbus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
bus_dmamap_t *);
static int sbus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int sbus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, bus_dmamap_callback_t *, void *, int);
static void sbus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void sbus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
static int sbus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
bus_dmamap_t *);
static void sbus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
bus_dmamap_t);
static device_method_t sbus_methods[] = {
/* Device interface */
@@ -909,62 +913,66 @@ sbus_alloc_bustag(struct sbus_softc *sc)
}
static int
sbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
sbus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
bus_dmamap_t *mapp)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
return (iommu_dvmamap_create(dmat, &sc->sc_is, flags, mapp));
return (iommu_dvmamap_create(pdmat, ddmat, &sc->sc_is, flags, mapp));
}
static int
sbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
sbus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
return (iommu_dvmamap_destroy(dmat, &sc->sc_is, map));
return (iommu_dvmamap_destroy(pdmat, ddmat, &sc->sc_is, map));
}
static int
sbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
sbus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
return (iommu_dvmamap_load(dmat, &sc->sc_is, map, buf, buflen, callback,
callback_arg, flags));
return (iommu_dvmamap_load(pdmat, ddmat, &sc->sc_is, map, buf, buflen,
callback, callback_arg, flags));
}
static void
sbus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
sbus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
iommu_dvmamap_unload(dmat, &sc->sc_is, map);
iommu_dvmamap_unload(pdmat, ddmat, &sc->sc_is, map);
}
static void
sbus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
sbus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
iommu_dvmamap_sync(dmat, &sc->sc_is, map, op);
iommu_dvmamap_sync(pdmat, ddmat, &sc->sc_is, map, op);
}
static int
sbus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp)
sbus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
int flags, bus_dmamap_t *mapp)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
return (iommu_dvmamem_alloc(dmat, &sc->sc_is, vaddr, flags, mapp));
return (iommu_dvmamem_alloc(pdmat, ddmat, &sc->sc_is, vaddr, flags,
mapp));
}
static void
sbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
sbus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
bus_dmamap_t map)
{
struct sbus_softc *sc = (struct sbus_softc *)dmat->cookie;
struct sbus_softc *sc = (struct sbus_softc *)pdmat->cookie;
iommu_dvmamem_free(dmat, &sc->sc_is, vaddr, map);
iommu_dvmamem_free(pdmat, ddmat, &sc->sc_is, vaddr, map);
}

sys/sparc64/sparc64/bus_machdep.c

@@ -124,6 +124,7 @@
#include <machine/asi.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/cache.h>
#include <machine/pmap.h>
#include <machine/smp.h>
@@ -153,16 +154,25 @@ int bus_stream_asi[] = {
* Note: there is no support for bounce buffers yet.
*/
static int nexus_dmamap_create(bus_dma_tag_t, int, bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
bus_dmamap_callback_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, void **, int, bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, void *, bus_dmamap_t);
static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
bus_dmamap_t);
/*
* Since there is no way for a device to obtain a dma tag from its parent,
* we use this kluge to handle the different supported bus systems.
* The sparc64_root_dma_tag is used as parent for tags that have none, so that
* the correct methods will be used.
*/
bus_dma_tag_t sparc64_root_dma_tag;
/*
@@ -175,7 +185,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
bus_dma_tag_t newtag, eparent;
bus_dma_tag_t newtag;
/* Return a NULL tag on failure */
*dmat = NULL;
@@ -184,11 +194,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
if (newtag == NULL)
return (ENOMEM);
/* Ugh... */
eparent = parent != NULL ? parent : sparc64_root_dma_tag;
memcpy(newtag, eparent, sizeof(*newtag));
if (parent != NULL)
newtag->parent = parent;
newtag->parent = parent != NULL ? parent : sparc64_root_dma_tag;
newtag->alignment = alignment;
newtag->boundary = boundary;
newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
@@ -199,9 +205,17 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->nsegments = nsegments;
newtag->maxsegsz = maxsegsz;
newtag->flags = flags;
newtag->ref_count = 1; /* Count ourself */
newtag->ref_count = 1; /* Count ourselves */
newtag->map_count = 0;
newtag->dmamap_create = NULL;
newtag->dmamap_destroy = NULL;
newtag->dmamap_load = NULL;
newtag->dmamap_unload = NULL;
newtag->dmamap_sync = NULL;
newtag->dmamem_alloc = NULL;
newtag->dmamem_free = NULL;
/* Take into account any restrictions imposed by our parent tag */
if (parent != NULL) {
newtag->lowaddr = ulmin(parent->lowaddr, newtag->lowaddr);
@@ -211,10 +225,9 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
* all the way up the inheritance chain.
*/
newtag->boundary = ulmax(parent->boundary, newtag->boundary);
if (parent != NULL)
parent->ref_count++;
}
newtag->parent->ref_count++;
*dmat = newtag;
return (0);
}
@@ -222,14 +235,12 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
bus_dma_tag_t parent;
if (dmat != NULL) {
if (dmat->map_count != 0)
return (EBUSY);
while (dmat != NULL) {
bus_dma_tag_t parent;
parent = dmat->parent;
dmat->ref_count--;
if (dmat->ref_count == 0) {
@@ -252,12 +263,13 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
* DMA map creation functions.
*/
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
bus_dmamap_t *mapp)
{
/* Not much to do...? */
*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_WAITOK | M_ZERO);
dmat->map_count++;
ddmat->map_count++;
return (0);
}
@@ -266,11 +278,11 @@ nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
* DMA map destruction functions.
*/
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{
free(map, M_DEVBUF);
dmat->map_count--;
ddmat->map_count--;
return (0);
}
@@ -287,14 +299,14 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
* bypass DVMA.
*/
static int
nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
vm_offset_t vaddr;
vm_offset_t paddr;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
@@ -331,7 +343,7 @@ nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
/* Go to the next segment */
sg++;
seg++;
if (seg > dmat->nsegments)
if (seg > ddmat->nsegments)
break;
sg->ds_addr = paddr;
sg->ds_len = size;
@@ -357,7 +369,7 @@ nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
* bus-specific DMA map unload functions.
*/
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{
/* Nothing to do...? */
@@ -368,7 +380,8 @@ nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
* by bus-specific DMA map synchronization functions.
*/
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
/*
@@ -412,7 +425,7 @@ sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_WAITOK | M_ZERO);
if (*mapp == NULL)
return (ENOMEM);
dmat->map_count++;
return (0);
}
@@ -430,12 +443,12 @@ sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
* by bus-specific DMA memory allocation functions.
*/
static int
nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
bus_dmamap_t *mapp)
nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
int flags, bus_dmamap_t *mapp)
{
if ((dmat->maxsize <= PAGE_SIZE)) {
*vaddr = malloc(dmat->maxsize, M_DEVBUF,
if ((ddmat->maxsize <= PAGE_SIZE)) {
*vaddr = malloc(ddmat->maxsize, M_DEVBUF,
(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
} else {
/*
@@ -443,10 +456,11 @@ nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
* and handles multi-seg allocations. Nobody is doing multi-seg
* allocations yet though.
*/
*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
*vaddr = contigmalloc(ddmat->maxsize, M_DEVBUF,
(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1UL,
dmat->boundary);
0ul, ddmat->lowaddr,
ddmat->alignment ? ddmat->alignment : 1UL,
ddmat->boundary);
}
if (*vaddr == NULL) {
free(*mapp, M_DEVBUF);
@@ -460,14 +474,15 @@ nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
* bus-specific DMA memory free functions.
*/
static void
nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
bus_dmamap_t map)
{
sparc64_dmamem_free_map(dmat, map);
if ((dmat->maxsize <= PAGE_SIZE))
sparc64_dmamem_free_map(ddmat, map);
if ((ddmat->maxsize <= PAGE_SIZE))
free(vaddr, M_DEVBUF);
else
contigfree(vaddr, dmat->maxsize, M_DEVBUF);
contigfree(vaddr, ddmat->maxsize, M_DEVBUF);
}
struct bus_dma_tag nexus_dmatag = {
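
For reference, a hedged usage sketch of tag creation under the reworked scheme; the parameter list follows the busdma interface of the time, and the specific values are illustrative:

static void
example_tag(bus_dma_tag_t bus_tag)
{
        bus_dma_tag_t child;

        /*
         * A NULL parent would make bus_dma_tag_create() substitute
         * sparc64_root_dma_tag, so the method search always terminates
         * at the nexus implementations.
         */
        if (bus_dma_tag_create(bus_tag,
            1, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            64 * 1024, 1, 64 * 1024,    /* maxsize, nsegments, maxsegsz */
            0, &child) != 0)            /* flags, result tag */
                panic("example_tag: bus_dma_tag_create failed");
        /*
         * The child starts with all methods NULL; parent restrictions
         * are merged rather than copied: lowaddr via ulmin(), boundary
         * via ulmax().
         */
}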

sys/sparc64/sparc64/iommu.c

@@ -123,6 +123,7 @@
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/pmap.h>
#include <machine/resource.h>
@@ -158,11 +159,43 @@ MALLOC_DEFINE(M_IOMMU, "dvmamem", "IOMMU DVMA Buffers");
bus_space_write_8((is)->is_bustag, (is)->is_bushandle, \
(is)->reg + (off), (v))
/*
* Always overallocate one page; this is needed to handle alignment of the
* buffer, so a lazy allocation scheme makes sense. For example, a 9000-byte
* buffer starting at byte 8000 of an 8KB IO page touches three IO pages
* (24576 bytes), one more than round_io_page(9000) = 16384 covers.
*/
#define IOMMU_SIZE_ROUNDUP(sz) \
(round_io_page(sz) + IO_PAGE_SIZE)
static int iommu_strbuf_flush_done(struct iommu_state *);
#ifdef IOMMU_DIAG
static void iommu_diag(struct iommu_state *, vm_offset_t va);
#endif
/*
* LRU queue handling for lazy resource allocation.
*/
static STAILQ_HEAD(, bus_dmamap) iommu_maplruq =
STAILQ_HEAD_INITIALIZER(iommu_maplruq);
static __inline void
iommu_map_insq(bus_dmamap_t map)
{
if (!map->onq && map->dvmaresv != 0) {
STAILQ_INSERT_TAIL(&iommu_maplruq, map, maplruq);
map->onq = 1;
}
}
static __inline void
iommu_map_remq(bus_dmamap_t map)
{
if (map->onq)
STAILQ_REMOVE(&iommu_maplruq, map, bus_dmamap, maplruq);
map->onq = 0;
}
/*
* initialise the UltraSPARC IOMMU (SBUS or PCI):
* - allocate and setup the iotsb.
@@ -246,7 +279,7 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
* Now all the hardware's working we need to setup dvma resource
* management.
*/
printf("DVMA map: %lx to %lx\n",
printf("DVMA map: %#lx to %#lx\n",
is->is_dvmabase, is->is_dvmabase +
(size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) - 1);
@@ -400,6 +433,21 @@ iommu_decode_fault(struct iommu_state *is, vm_offset_t phys)
printf("IOMMU fault virtual address %#lx\n", (u_long)va);
}
void
iommu_decode_fault(struct iommu_state *is, vm_offset_t phys)
{
bus_addr_t va;
long idx;
idx = phys - is->is_ptsb;
if (phys < is->is_ptsb ||
idx > (PAGE_SIZE << is->is_tsbsize))
return;
va = is->is_dvmabase +
(((bus_addr_t)idx >> IOTTE_SHIFT) << IO_PAGE_SHIFT);
printf("IOMMU fault virtual address %#lx\n", (u_long)va);
}
static int
iommu_strbuf_flush_done(struct iommu_state *is)
{
@@ -457,55 +505,50 @@ iommu_strbuf_flush_done(struct iommu_state *is)
/* Allocate DVMA virtual memory for a map. */
static int
iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
bus_size_t size)
{
bus_size_t align, bound, sgsize, maxsize;
bus_size_t align, bound, sgsize;
/*
* Choose a maximum length. If a boundary is specified, a map cannot
* be larger than it.
* XXX: we end up overallocating quite a lot here, since we only know
* an upper bound for the tag, but not for the map, so we need to
* allocate the maximum size to each map. Usually, plenty of DVMA
* virtual memory is available (the minimum is 8MB), so this should
* not be much of a problem right now.
* If a boundary is specified, a map cannot be larger than it; however
* we do not clip currently, as that does not play well with the lazy
* allocation code.
* Alignment to a page boundary is always enforced.
*/
if (t->boundary != 0)
maxsize = ulmin(t->maxsize, t->boundary);
else
maxsize = t->maxsize;
/* Alignment to a page boundary is always enforced. */
align = (t->alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT;
sgsize = round_io_page(maxsize) >> IO_PAGE_SHIFT;
sgsize = round_io_page(size) >> IO_PAGE_SHIFT;
if (t->boundary > 0 && t->boundary < IO_PAGE_SIZE)
panic("iommu_dvmamap_load: illegal boundary specified");
bound = ulmax(t->boundary >> IO_PAGE_SHIFT, 1);
map->dvmaresv = 0;
map->res = rman_reserve_resource_bound(&is->is_dvma_rman, 0L,
t->lowaddr, sgsize, bound >> IO_PAGE_SHIFT,
RF_ACTIVE | rman_make_alignment_flags(align), NULL);
if (map->res == NULL) {
printf("DVMA allocation failed!\n"); /* XXX */
if (map->res == NULL)
return (ENOMEM);
}
map->start = rman_get_start(map->res) * IO_PAGE_SIZE;
map->dvmaresv = size;
iommu_map_insq(map);
return (0);
}
/* Free DVMA virtual memory for a map. */
static void
iommu_dvma_vfree(bus_dma_tag_t t, bus_dmamap_t map)
iommu_dvma_vfree(bus_dmamap_t map)
{
if (rman_release_resource(map->res) != 0) {
iommu_map_remq(map);
if (map->res != NULL && rman_release_resource(map->res) != 0)
printf("warning: DVMA space lost\n");
}
map->res = NULL;
map->dvmaresv = 0;
}
int
iommu_dvmamem_alloc(bus_dma_tag_t t, struct iommu_state *is, void **vaddr,
int flags, bus_dmamap_t *mapp)
iommu_dvmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
void **vaddr, int flags, bus_dmamap_t *mapp)
{
int error;
@@ -513,62 +556,63 @@ iommu_dvmamem_alloc(bus_dma_tag_t t, struct iommu_state *is, void **vaddr,
* XXX: This will break for 32 bit transfers on machines with more than
* 16GB (1 << 34 bytes) of memory.
*/
if ((error = sparc64_dmamem_alloc_map(t, mapp)) != 0)
if ((error = sparc64_dmamem_alloc_map(dt, mapp)) != 0)
return (error);
if ((*vaddr = malloc(t->maxsize, M_IOMMU,
if ((*vaddr = malloc(dt->maxsize, M_IOMMU,
(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
error = ENOMEM;
goto failm;
}
if ((error = iommu_dvma_valloc(t, is, *mapp)) != 0)
goto failv;
/*
* Try to preallocate DVMA memory. If this fails, it is retried at load
* time.
*/
iommu_dvma_valloc(dt, is, *mapp, IOMMU_SIZE_ROUNDUP(dt->maxsize));
return (0);
failv:
free(*vaddr, M_IOMMU);
failm:
sparc64_dmamem_free_map(t, *mapp);
sparc64_dmamem_free_map(dt, *mapp);
return (error);
}
void
iommu_dvmamem_free(bus_dma_tag_t t, struct iommu_state *is, void *vaddr,
bus_dmamap_t map)
iommu_dvmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
void *vaddr, bus_dmamap_t map)
{
iommu_dvma_vfree(t, map);
sparc64_dmamem_free_map(t, map);
iommu_dvma_vfree(map);
sparc64_dmamem_free_map(dt, map);
free(vaddr, M_IOMMU);
}
int
iommu_dvmamap_create(bus_dma_tag_t t, struct iommu_state *is, int flags,
bus_dmamap_t *mapp)
iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
int flags, bus_dmamap_t *mapp)
{
int error;
if ((error = bus_dmamap_create(t->parent, flags, mapp)) != 0)
if ((error = sparc64_dmamap_create(pt->parent, dt, flags, mapp)) != 0)
return (error);
KASSERT((*mapp)->res == NULL,
("iommu_dvmamap_create: hierarchy botched"));
/*
* XXX: If already allocated, skip (this can happen in tag hierarchies
* where the parent is an iommu tag, too).
* Preallocate DVMA memory; if this fails now, it is retried at load
* time.
* Clamp preallocation to BUS_SPACE_MAXSIZE. In some situations we can
* handle more; that case is handled by reallocating at map load time.
*/
if ((*mapp)->res == NULL &&
(error = iommu_dvma_valloc(t, is, *mapp)) != 0) {
bus_dmamap_destroy(t->parent, *mapp);
return (error);
}
iommu_dvma_valloc(dt, is, *mapp,
ulmin(IOMMU_SIZE_ROUNDUP(dt->maxsize), BUS_SPACE_MAXSIZE));
return (0);
}
int
iommu_dvmamap_destroy(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
iommu_dvmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt,
struct iommu_state *is, bus_dmamap_t map)
{
/* XXX: if already freed, skip. */
if (map->res != NULL)
iommu_dvma_vfree(t, map);
return (bus_dmamap_destroy(t->parent, map));
iommu_dvma_vfree(map);
return (sparc64_dmamap_destroy(pt->parent, dt, map));
}
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
@@ -577,16 +621,17 @@ iommu_dvmamap_destroy(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
* IOMMU DVMA operations, common to SBUS and PCI.
*/
int
iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
int flags)
iommu_dvmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
void *cba, int flags)
{
#ifdef __GNUC__
bus_dma_segment_t sgs[t->nsegments];
bus_dma_segment_t sgs[dt->nsegments];
#else
bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
#endif
bus_size_t sgsize;
bus_dmamap_t tm;
bus_size_t sgsize, fsize, maxsize;
vm_offset_t curaddr;
u_long dvmaddr;
vm_offset_t vaddr;
@@ -597,15 +642,52 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
#ifdef DIAGNOSTIC
printf("iommu_dvmamap_load: map still in use\n");
#endif
bus_dmamap_unload(t, map);
bus_dmamap_unload(dt, map);
}
if (buflen > t->maxsize) {
if (buflen > dt->maxsize) {
DPRINTF(IDB_BUSDMA,
("iommu_dvmamap_load(): error %d > %d -- "
"map size exceeded!\n", (int)buflen, (int)map->buflen));
return (EINVAL);
}
maxsize = IOMMU_SIZE_ROUNDUP(buflen);
if (maxsize > map->dvmaresv) {
/*
* Need to allocate resources now; free any old allocation
* first.
*/
fsize = map->dvmaresv;
iommu_dvma_vfree(map);
while (iommu_dvma_valloc(dt, is, map, maxsize) == ENOMEM &&
!STAILQ_EMPTY(&iommu_maplruq)) {
/*
* Free the DVMA reserved by a few other maps until the
* required size is reached. Freeing in batches avoids
* calling the allocator after each single release; due to
* fragmentation, one freed map will most likely not
* suffice unless it was large enough by itself.
*/
do {
tm = STAILQ_FIRST(&iommu_maplruq);
if (tm == NULL)
break;
fsize += tm->dvmaresv;
iommu_dvma_vfree(tm);
} while (fsize < maxsize);
fsize = 0;
}
if (map->dvmaresv < maxsize) {
printf("DVMA allocation failed: needed %ld, got %ld\n",
maxsize, map->dvmaresv);
return (ENOMEM);
}
}
/* Busy the map by removing it from the LRU queue. */
iommu_map_remq(map);
vaddr = (vm_offset_t)buf;
map->buf = buf;
map->buflen = buflen;
@@ -634,14 +716,14 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
iommu_enter(is, trunc_io_page(dvmaddr), trunc_io_page(curaddr),
flags);
if (sgcnt == -1 || sgs[sgcnt].ds_len + sgsize > t->maxsegsz) {
if (sgsize > t->maxsegsz) {
if (sgcnt == -1 || sgs[sgcnt].ds_len + sgsize > dt->maxsegsz) {
if (sgsize > dt->maxsegsz) {
/* XXX: add fixup */
panic("iommu_dvmamap_load: magsegsz too "
"small\n");
}
sgcnt++;
if (sgcnt > t->nsegments || sgcnt > BUS_DMAMAP_NSEGS) {
if (sgcnt > dt->nsegments || sgcnt > BUS_DMAMAP_NSEGS) {
error = ENOMEM;
break;
}
@@ -659,33 +741,32 @@ iommu_dvmamap_load(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
void
iommu_dvmamap_unload(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map)
iommu_dvmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
bus_dmamap_t map)
{
/*
* If the resource is already deallocated, just pass to the parent
* tag.
*/
if (map->buflen != 0) {
if (map->buflen == 0 || map->start == 0) {
panic("iommu_dvmamap_unload: map = %p, len = %d, "
"addr = %lx\n", map, (int)map->buflen,
(unsigned long)map->start);
}
DPRINTF(IDB_BUSDMA,
("iommu_dvmamap_unload: map %p removing va %lx size "
"%lx\n", map, (long)map->start, (long)map->buflen));
iommu_remove(is, map->start, map->buflen);
map->buflen = 0;
if (map->buflen == 0 || map->start == 0) {
panic("iommu_dvmamap_unload: map = %p, len = %d, addr = %lx\n",
map, (int)map->buflen, (unsigned long)map->start);
}
DPRINTF(IDB_BUSDMA,
("iommu_dvmamap_unload: map %p removing va %lx size "
"%lx\n", map, (long)map->start, (long)map->buflen));
iommu_remove(is, map->start, map->buflen);
map->buflen = 0;
iommu_map_insq(map);
/* Flush the caches */
bus_dmamap_unload(t->parent, map);
sparc64_dmamap_unload(pt->parent, dt, map);
}
void
iommu_dvmamap_sync(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
bus_dmasync_op_t op)
iommu_dvmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
bus_dmamap_t map, bus_dmasync_op_t op)
{
vm_offset_t va;
vm_size_t len;