dma: make dma access its own address space

Instead of accessing the cpu address space, use an address space
configured by the caller.

Eventually all dma functionality will be folded into AddressSpace,
but we have to start from something.

Reviewed-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Avi Kivity 2012-10-03 16:42:37 +02:00
parent ac1970fbe8
commit b90600eed3
3 changed files with 22 additions and 23 deletions
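
Below is a minimal caller-side sketch of the new API for reference (not part of the commit): after this change a DMAContext carries the AddressSpace it targets, and a context whose translate callback is left NULL skips the IOMMU path and goes straight through address_space_rw() on that space. The names my_dma and my_device_init_dma() are hypothetical.

/* Sketch only, assuming the post-commit dma.h; names are hypothetical. */
#include "dma.h"
#include "exec-memory.h"        /* provides address_space_memory */

static DMAContext my_dma;       /* hypothetical per-device DMA context */

static void my_device_init_dma(void)
{
    /* The caller now supplies the AddressSpace explicitly.  With
     * translate/map/unmap left NULL, dma_has_iommu() returns false and
     * the dma_memory_rw() helpers take the fast path shown in
     * dma_memory_rw_relaxed(): address_space_rw(dma->as, ...). */
    dma_context_init(&my_dma, &address_space_memory, NULL, NULL, NULL);
}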

dma-helpers.c

@@ -14,7 +14,8 @@
 /* #define DEBUG_IOMMU */
-static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
+static void do_dma_memory_set(AddressSpace *as,
+                              dma_addr_t addr, uint8_t c, dma_addr_t len)
 {
 #define FILLBUF_SIZE 512
     uint8_t fillbuf[FILLBUF_SIZE];
@@ -23,7 +24,7 @@ static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
     memset(fillbuf, c, FILLBUF_SIZE);
     while (len > 0) {
         l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
-        cpu_physical_memory_rw(addr, fillbuf, l, true);
+        address_space_rw(as, addr, fillbuf, l, true);
         len -= l;
         addr += l;
     }
@@ -36,7 +37,7 @@ int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
     if (dma_has_iommu(dma)) {
         return iommu_dma_memory_set(dma, addr, c, len);
     }
-    do_dma_memory_set(addr, c, len);
+    do_dma_memory_set(dma->as, addr, c, len);
     return 0;
 }
@@ -332,8 +333,7 @@ int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
             plen = len;
         }
-        cpu_physical_memory_rw(paddr, buf, plen,
-                               dir == DMA_DIRECTION_FROM_DEVICE);
+        address_space_rw(dma->as, paddr, buf, plen, dir == DMA_DIRECTION_FROM_DEVICE);
         len -= plen;
         addr += plen;
@@ -366,7 +366,7 @@ int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
             plen = len;
         }
-        do_dma_memory_set(paddr, c, plen);
+        do_dma_memory_set(dma->as, paddr, c, plen);
         len -= plen;
         addr += plen;
@@ -375,13 +375,14 @@ int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
     return 0;
 }
-void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                       DMAMapFunc map, DMAUnmapFunc unmap)
 {
 #ifdef DEBUG_IOMMU
     fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
             dma, translate, map, unmap);
 #endif
+    dma->as = as;
     dma->translate = translate;
     dma->map = map;
     dma->unmap = unmap;
@@ -407,14 +408,13 @@ void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
     /*
      * If this is true, the virtual region is contiguous,
      * but the translated physical region isn't. We just
-     * clamp *len, much like cpu_physical_memory_map() does.
+     * clamp *len, much like address_space_map() does.
      */
     if (plen < *len) {
         *len = plen;
     }
-    buf = cpu_physical_memory_map(paddr, &plen,
-                                  dir == DMA_DIRECTION_FROM_DEVICE);
+    buf = address_space_map(dma->as, paddr, &plen, dir == DMA_DIRECTION_FROM_DEVICE);
     *len = plen;
     return buf;
@@ -428,8 +428,7 @@ void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
         return;
     }
-    cpu_physical_memory_unmap(buffer, len,
-                              dir == DMA_DIRECTION_FROM_DEVICE,
-                              access_len);
+    address_space_unmap(dma->as, buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
+                        access_len);
 }

dma.h

@@ -11,6 +11,7 @@
 #define DMA_H
 #include <stdio.h>
+#include "memory.h"
 #include "hw/hw.h"
 #include "block.h"
 #include "kvm.h"
@@ -61,6 +62,7 @@ typedef void DMAUnmapFunc(DMAContext *dma,
                           dma_addr_t access_len);
 struct DMAContext {
+    AddressSpace *as;
     DMATranslateFunc *translate;
     DMAMapFunc *map;
     DMAUnmapFunc *unmap;
@@ -93,7 +95,7 @@ static inline void dma_barrier(DMAContext *dma, DMADirection dir)
 static inline bool dma_has_iommu(DMAContext *dma)
 {
-    return !!dma;
+    return dma && dma->translate;
 }
 /* Checks that the given range of addresses is valid for DMA. This is
@@ -120,8 +122,7 @@ static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
 {
     if (!dma_has_iommu(dma)) {
         /* Fast-path for no IOMMU */
-        cpu_physical_memory_rw(addr, buf, len,
-                               dir == DMA_DIRECTION_FROM_DEVICE);
+        address_space_rw(dma->as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
         return 0;
     } else {
         return iommu_dma_memory_rw(dma, addr, buf, len, dir);
@@ -179,8 +180,7 @@ static inline void *dma_memory_map(DMAContext *dma,
         target_phys_addr_t xlen = *len;
         void *p;
-        p = cpu_physical_memory_map(addr, &xlen,
-                                    dir == DMA_DIRECTION_FROM_DEVICE);
+        p = address_space_map(dma->as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
         *len = xlen;
         return p;
     } else {
@@ -196,9 +196,8 @@ static inline void dma_memory_unmap(DMAContext *dma,
                                     DMADirection dir, dma_addr_t access_len)
 {
     if (!dma_has_iommu(dma)) {
-        cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
-                                  dir == DMA_DIRECTION_FROM_DEVICE,
-                                  access_len);
+        address_space_unmap(dma->as, buffer, (target_phys_addr_t)len,
+                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
     } else {
         iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
     }
@@ -242,7 +241,7 @@ DEFINE_LDST_DMA(q, q, 64, be);
 #undef DEFINE_LDST_DMA
-void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                       DMAMapFunc map, DMAUnmapFunc unmap);
 struct ScatterGatherEntry {

hw/spapr_iommu.c

@@ -21,6 +21,7 @@
 #include "qdev.h"
 #include "kvm_ppc.h"
 #include "dma.h"
+#include "exec-memory.h"
 #include "hw/spapr.h"
@@ -124,7 +125,7 @@ DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size)
     }
     tcet = g_malloc0(sizeof(*tcet));
-    dma_context_init(&tcet->dma, spapr_tce_translate, NULL, NULL);
+    dma_context_init(&tcet->dma, &address_space_memory, spapr_tce_translate, NULL, NULL);
     tcet->liobn = liobn;
     tcet->window_size = window_size;