mirror of
https://github.com/torvalds/linux
synced 2024-11-05 18:23:50 +00:00
iommu: export iommu_area_reserve helper function
x86 has set_bit_string() that does the exact same thing that set_bit_area() in lib/iommu-helper.c does. This patch exports set_bit_area() in lib/iommu-helper.c as iommu_area_reserve(), converts GART, Calgary, and AMD IOMMU to use it. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
832a90c304
commit
d26dbc5cf9
5 changed files with 6 additions and 6 deletions
@@ -572,7 +572,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	set_bit_string(dom->bitmap, start_page, pages);
+	iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
 static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -261,7 +261,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 		       badbit, tbl, start_addr, npages);
 	}
 
-	set_bit_string(tbl->it_map, index, npages);
+	iommu_area_reserve(tbl->it_map, index, npages);
 
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
@@ -827,7 +827,7 @@ void __init gart_iommu_init(void)
 	 * Out of IOMMU space handling.
 	 * Reserve some invalid pages at the beginning of the GART.
 	 */
-	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
 	agp_memory_reserved = iommu_size;
 	printk(KERN_INFO
@@ -11,6 +11,7 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
 extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
 				  unsigned long shift,
 				  unsigned long boundary_size);
+extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
 extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 				      unsigned long start, unsigned int nr,
 				      unsigned long shift,
@@ -30,8 +30,7 @@ static unsigned long find_next_zero_area(struct unsigned long *map,
 	return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-				int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
 	unsigned long end = i + len;
 	while (i < end) {
@@ -64,7 +63,7 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 			start = index + 1;
 			goto again;
 		}
-		set_bit_area(map, index, nr);
+		iommu_area_reserve(map, index, nr);
 	}
 	return index;
 }
Loading…
Reference in a new issue