mirror of
https://github.com/torvalds/linux
synced 2024-10-15 07:47:34 +00:00
xtensa: mm: convert to GENERIC_IOREMAP
By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(), generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap() and iounmap() are all visible and available to arch. Arch needs to provide wrapper functions to override the generic versions if there's arch specific handling in its ioremap_prot(), ioremap() or iounmap(). This change will simplify implementation by removing duplicated code with generic_ioremap_prot() and generic_iounmap(), and has the equivalent functionality as before. Here, add wrapper functions ioremap_prot(), ioremap() and iounmap() for xtensa's special handling in ioremap() and iounmap(). Link: https://lkml.kernel.org/r/20230706154520.11257-14-bhe@redhat.com Signed-off-by: Baoquan He <bhe@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Chris Zankel <chris@zankel.net> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Brian Cain <bcain@quicinc.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: David Laight <David.Laight@ACULAB.COM> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: "James E.J. 
Bottomley" <James.Bottomley@HansenPartnership.com> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Jonas Bonn <jonas@southpole.se> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nathan Chancellor <nathan@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Niklas Schnelle <schnelle@linux.ibm.com> Cc: Rich Felker <dalias@libc.org> Cc: Stafford Horne <shorne@gmail.com> Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Will Deacon <will@kernel.org> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0453c9a780
commit
ca6c1af381
|
@@ -28,6 +28,7 @@ config XTENSA
|
|||
select GENERIC_LIB_UCMPDI2
|
||||
select GENERIC_PCI_IOMAP
|
||||
select GENERIC_SCHED_CLOCK
|
||||
select GENERIC_IOREMAP if MMU
|
||||
select HAVE_ARCH_AUDITSYSCALL
|
||||
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
|
||||
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
|
||||
|
|
|
@@ -16,6 +16,7 @@
|
|||
#include <asm/vectors.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@@ -24,22 +25,24 @@
|
|||
#define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
|
||||
void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
|
||||
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
|
||||
void xtensa_iounmap(volatile void __iomem *addr);
|
||||
|
||||
/*
|
||||
* Return the virtual address for the specified bus memory.
|
||||
* I/O memory mapping functions.
|
||||
*/
|
||||
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
|
||||
unsigned long prot);
|
||||
#define ioremap_prot ioremap_prot
|
||||
#define iounmap iounmap
|
||||
|
||||
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
|
||||
{
|
||||
if (offset >= XCHAL_KIO_PADDR
|
||||
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
|
||||
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
|
||||
else
|
||||
return xtensa_ioremap_nocache(offset, size);
|
||||
return ioremap_prot(offset, size,
|
||||
pgprot_val(pgprot_noncached(PAGE_KERNEL)));
|
||||
}
|
||||
#define ioremap ioremap
|
||||
|
||||
static inline void __iomem *ioremap_cache(unsigned long offset,
|
||||
unsigned long size)
|
||||
|
@@ -48,21 +51,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
|
|||
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
|
||||
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
|
||||
else
|
||||
return xtensa_ioremap_cache(offset, size);
|
||||
return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
|
||||
|
||||
}
|
||||
#define ioremap_cache ioremap_cache
|
||||
|
||||
static inline void iounmap(volatile void __iomem *addr)
|
||||
{
|
||||
unsigned long va = (unsigned long) addr;
|
||||
|
||||
if (!(va >= XCHAL_KIO_CACHED_VADDR &&
|
||||
va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
|
||||
!(va >= XCHAL_KIO_BYPASS_VADDR &&
|
||||
va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
|
||||
xtensa_iounmap(addr);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
#include <asm-generic/io.h>
|
||||
|
|
|
@@ -6,60 +6,30 @@
|
|||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
|
||||
pgprot_t prot)
|
||||
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
|
||||
unsigned long prot)
|
||||
{
|
||||
unsigned long offset = paddr & ~PAGE_MASK;
|
||||
unsigned long pfn = __phys_to_pfn(paddr);
|
||||
struct vm_struct *area;
|
||||
unsigned long vaddr;
|
||||
int err;
|
||||
|
||||
paddr &= PAGE_MASK;
|
||||
|
||||
unsigned long pfn = __phys_to_pfn((phys_addr));
|
||||
WARN_ON(pfn_valid(pfn));
|
||||
|
||||
size = PAGE_ALIGN(offset + size);
|
||||
|
||||
area = get_vm_area(size, VM_IOREMAP);
|
||||
if (!area)
|
||||
return NULL;
|
||||
|
||||
vaddr = (unsigned long)area->addr;
|
||||
area->phys_addr = paddr;
|
||||
|
||||
err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
|
||||
|
||||
if (err) {
|
||||
vunmap((void *)vaddr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
flush_cache_vmap(vaddr, vaddr + size);
|
||||
return (void __iomem *)(offset + vaddr);
|
||||
return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
|
||||
}
|
||||
EXPORT_SYMBOL(ioremap_prot);
|
||||
|
||||
void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
|
||||
void iounmap(volatile void __iomem *addr)
|
||||
{
|
||||
return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
|
||||
}
|
||||
EXPORT_SYMBOL(xtensa_ioremap_nocache);
|
||||
unsigned long va = (unsigned long) addr;
|
||||
|
||||
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
|
||||
{
|
||||
return xtensa_ioremap(addr, size, PAGE_KERNEL);
|
||||
}
|
||||
EXPORT_SYMBOL(xtensa_ioremap_cache);
|
||||
if ((va >= XCHAL_KIO_CACHED_VADDR &&
|
||||
va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
|
||||
(va >= XCHAL_KIO_BYPASS_VADDR &&
|
||||
va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
|
||||
return;
|
||||
|
||||
void xtensa_iounmap(volatile void __iomem *io_addr)
|
||||
{
|
||||
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
|
||||
|
||||
vunmap(addr);
|
||||
generic_iounmap(addr);
|
||||
}
|
||||
EXPORT_SYMBOL(xtensa_iounmap);
|
||||
EXPORT_SYMBOL(iounmap);
|
||||
|
|
Loading…
Reference in a new issue