busdma: fix page miscount for small segment sizes

For small segments (< PAGE_SIZE) there is a mismatch between how
required bounce pages are counted in _bus_dmamap_count_pages() and
bounce_bus_dmamap_load_buffer().

This problem has been observed on the RISC-V VisionFive v2 SoC (and
earlier revisions of the hardware) which has memory physically addressed
above 4GB. This requires some bouncing for the dwmmc driver, which has
a maximum segment size of 2048 bytes. When attempting to load a
page-aligned 4-page buffer that requires bouncing, we can end up
counting 4 bounce pages for an 8-segment transfer. These pages will be
incorrectly configured to cover only the first half of the transfer (4 x
2048 bytes).
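
To make the mismatch concrete, here is a minimal userspace sketch of the two
loops (the constants and loop bodies are simplified approximations, not the
kernel code itself):

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	size_t maxsegsz = 2048;			/* dwmmc's maximum segment size */
	size_t buflen = 4 * PAGE_SIZE;		/* page-aligned 4-page buffer */
	size_t vaddr, pages = 0, segs = 0;

	/*
	 * _bus_dmamap_count_pages() before this change: step a whole page
	 * at a time, counting one bounce page per page of the buffer.
	 */
	for (vaddr = 0; vaddr < buflen;
	    vaddr += PAGE_SIZE - (vaddr & PAGE_MASK))
		pages++;

	/*
	 * bounce_bus_dmamap_load_buffer(): step at most maxsegsz at a time,
	 * and a bouncing transfer uses one bounce page per segment.
	 */
	for (vaddr = 0; vaddr < buflen;
	    vaddr += MIN(PAGE_SIZE - (vaddr & PAGE_MASK), maxsegsz))
		segs++;

	printf("counted %zu bounce pages, needed %zu\n", pages, segs);
	return (0);
}

This prints 4 counted bounce pages against 8 needed segments for the
scenario described above.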

Fix the immediate issue by adding the maxsegsz check to
_bus_dmamap_count_pages(); this is what _bus_dmamap_count_phys() does
already. The result is that we will inefficiently allocate a separate
bounce page for each segment (8 pages for the example above), but the
transfer will proceed in its entirety.

The more complete fix is to address the shortcomings in how small
segments are assigned to bounce pages, so that we opportunistically
batch multiple segments into a page whenever they fit (e.g. two 2048-byte
segments per 4096-byte page). This will be addressed more holistically in
the future. For now, this change will prevent the (silent) incomplete
transfers that have been observed.
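
Purely as an illustration (and not part of this change), the batched
counting might look something like the sketch below; it deliberately
ignores the alignment and boundary restrictions a real implementation
would have to honor:

#include <stddef.h>

#define PAGE_SIZE	4096
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Hypothetical batched counting: consecutive small segments share one
 * bounce page while they fit, rather than taking a page each.
 */
static int
count_bounce_pages_batched(size_t buflen, size_t maxsegsz)
{
	size_t done = 0, room = 0, sg_len;
	int pages = 0;

	while (done < buflen) {
		sg_len = MIN(buflen - done, maxsegsz);
		if (room < sg_len) {
			pages++;		/* start a new bounce page */
			room = PAGE_SIZE;
		}
		room -= sg_len;
		done += sg_len;
	}
	return (pages);
}

With maxsegsz = 2048 and a four-page buffer this would count 4 bounce
pages for the 8 segments, instead of 8.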

PR:		273694
Reported by:	Jari Sihvola <jsihv@gmx.com>
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D34118


@@ -812,6 +812,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map,
 	vm_offset_t vaddr;
 	vm_offset_t vendaddr;
 	bus_addr_t paddr;
+	bus_size_t sg_len;
 
 	if (map->pagesneeded == 0) {
 		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
@@ -826,16 +827,16 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map,
 		vendaddr = (vm_offset_t)buf + buflen;
 
 		while (vaddr < vendaddr) {
+			sg_len = MIN(vendaddr - vaddr,
+			    (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)));
+			sg_len = MIN(sg_len, dmat->maxsegsz);
 			if (__predict_true(pmap == kernel_pmap))
 				paddr = pmap_kextract(vaddr);
 			else
 				paddr = pmap_extract(pmap, vaddr);
-			if (must_bounce(dmat, map, paddr,
-			    min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
-			    PAGE_MASK)))) != 0) {
+			if (must_bounce(dmat, map, paddr, sg_len) != 0)
 				map->pagesneeded++;
-			}
-			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
+			vaddr += sg_len;
 		}
 		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
 	}


@@ -693,6 +693,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 
 		while (vaddr < vendaddr) {
 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			sg_len = MIN(sg_len, dmat->common.maxsegsz);
 			if (pmap == kernel_pmap)
 				paddr = pmap_kextract(vaddr);
 			else


@@ -520,6 +520,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 			bus_size_t sg_len;
 
 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			sg_len = MIN(sg_len, dmat->maxsegsz);
 			if (pmap == kernel_pmap)
 				paddr = pmap_kextract(vaddr);
 			else


@@ -531,6 +531,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 
 		while (vaddr < vendaddr) {
 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			sg_len = MIN(sg_len, dmat->common.maxsegsz);
 			if (pmap == kernel_pmap)
 				paddr = pmap_kextract(vaddr);
 			else


@@ -560,6 +560,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 
 		while (vaddr < vendaddr) {
 			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			sg_len = MIN(sg_len, dmat->common.maxsegsz);
 			if (pmap == kernel_pmap)
 				paddr = pmap_kextract(vaddr);
 			else