If our buffer is not aligned on the cache line size, write-back/invalidate

the first and last cache line in PREREAD, and just invalidate the cache
lines in POSTREAD, instead of write-back/invalidating in POSTREAD, which
could lead to stale data overwriting what has been transferred by DMA.
This commit is contained in:
Olivier Houchard 2006-05-31 15:50:33 +00:00
parent 866fcf84ba
commit 4cd3385ee3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=159107

View file

@ -806,13 +806,16 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE)
cpu_dcache_wb_range((vm_offset_t)buf, len);
if (op & BUS_DMASYNC_POSTREAD) {
if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
cpu_dcache_inv_range((vm_offset_t)buf, len);
else
cpu_dcache_wbinv_range((vm_offset_t)buf, len);
if (op & BUS_DMASYNC_PREREAD) {
if ((vm_offset_t)buf & arm_dcache_align_mask)
cpu_dcache_wbinv_range((vm_offset_t)buf &
~arm_dcache_align_mask, arm_dcache_align);
if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
cpu_dcache_wbinv_range(((vm_offset_t)buf + len) &
~arm_dcache_align_mask, arm_dcache_align);
}
if (op & BUS_DMASYNC_POSTREAD)
cpu_dcache_inv_range((vm_offset_t)buf, len);
}
void
@ -823,7 +826,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
int resid;
struct iovec *iov;
if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
if (op == BUS_DMASYNC_POSTWRITE)
return;
if (map->flags & DMAMAP_COHERENT)
return;