Merge tag 'virt-to-pfn-for-arch-v6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-integrator into asm-generic

This is an attempt to harden the typing on virt_to_pfn()
and pfn_to_virt().

Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of passing a pointer of that
type to the function explicit and exposes any misuse of the
macro virt_to_pfn() acting polymorphically and accepting many types
such as (void *), (uintptr_t) or (unsigned long) as arguments
without warnings.

For symmetry, we do the same with pfn_to_virt().
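
As a minimal sketch of what the conversion looks like (based on the
m68k/asm-generic variants further down in the diff; other
architectures differ only in how they translate the virtual address
to a physical one):

  /* Before: macros silently accept any integer or pointer type */
  #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
  #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

  /* After: the argument and return types are part of the contract */
  static inline unsigned long virt_to_pfn(const void *kaddr)
  {
          return __pa(kaddr) >> PAGE_SHIFT;
  }

  static inline void *pfn_to_virt(unsigned long pfn)
  {
          return __va(pfn << PAGE_SHIFT);
  }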

The problem with this inconsistent typing was pointed out by
Russell King:
https://lore.kernel.org/linux-arm-kernel/YoJDKJXc0MJ2QZTb@shell.armlinux.org.uk/

And confirmed by Andrew Morton:
https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/

So the recognition of the problem is widespread.

These platforms have been chosen as initial conversion targets:

- ARM
- ARM64/AArch64
- asm-generic (including for example x86)
- m68k

The idea is that if this goes in, it will block further misuse
of the function signatures due to the large compile coverage,
and then I can go in and fix the remaining architectures on a
one-by-one basis.
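
To illustrate the kind of misuse the typed signature now flags (a
hypothetical caller, not one taken from this series): with the old
macro the code below compiled silently, whereas the static inline
draws a compiler warning (roughly "makes pointer from integer
without a cast", -Wint-conversion), forcing the caller to state
explicitly that the value is a kernel virtual address:

  unsigned long addr = get_zeroed_page(GFP_KERNEL);

  /* Old macro: expands regardless of the argument's type.
   * New static inline: warns, it wants a (const void *) argument. */
  unsigned long pfn = virt_to_pfn(addr);

  /* Explicit and warning-free, matching the casts added in this series */
  unsigned long pfn2 = virt_to_pfn((void *)addr);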

Some of the patches have been circulated before but were not
picked up by subsystem maintainers, so now the arch tree is the
target for this series.

It has passed zeroday builds after a lot of iterations in my
personal tree, but there could be some randconfig outliers.
Newly added or deeply hidden problems appear all the time, so
some minor fallout can be expected.

* tag 'virt-to-pfn-for-arch-v6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-integrator:
  m68k/mm: Make pfn accessors static inlines
  arm64: memory: Make virt_to_pfn() a static inline
  ARM: mm: Make virt_to_pfn() a static inline
  asm-generic/page.h: Make pfn accessors static inlines
  xen/netback: Pass (void *) to virt_to_page()
  netfs: Pass a pointer to virt_to_page()
  cifs: Pass a pointer to virt_to_page() in cifsglob
  cifs: Pass a pointer to virt_to_page()
  riscv: mm: init: Pass a pointer to virt_to_page()
  ARC: init: Pass a pointer to virt_to_pfn() in init
  m68k: Pass a pointer to virt_to_pfn() virt_to_page()
  fs/proc/kcore.c: Pass a pointer to virt_addr_valid()
commit 3b1ddbb62e
Arnd Bergmann, 2023-05-31 16:33:56 +02:00
64 changed files with 108 additions and 80 deletions

@@ -87,7 +87,7 @@ void __init setup_arch_memory(void)
 	setup_initial_init_mm(_text, _etext, _edata, _end);
 	/* first page of system - kernel .vector starts here */
-	min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE);
+	min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
 	/* Last usable page of low mem */
 	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 /*
  * Certain hardware parameters determined at the time of device manufacture,

@@ -7,7 +7,7 @@
 #ifndef __ASM_ARM_DELAY_H
 #define __ASM_ARM_DELAY_H
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/param.h> /* HZ */
 /*

@@ -23,7 +23,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm-generic/pci_iomap.h>
 /*

@@ -5,11 +5,16 @@
  * Copyright (C) 2000-2002 Russell King
  * modification for nommu, Hyok S. Choi, 2004
  *
- * Note: this file should not be included by non-asm/.h files
+ * Note: this file should not be included explicitly, include <asm/page.h>
+ * to get access to these definitions.
  */
 #ifndef __ASM_ARM_MEMORY_H
 #define __ASM_ARM_MEMORY_H
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
@@ -288,10 +293,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #endif
-#define virt_to_pfn(kaddr) \
-	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
-	PHYS_PFN_OFFSET)
+static inline unsigned long virt_to_pfn(const void *p)
+{
+	unsigned long kaddr = (unsigned long)p;
+	return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+		PHYS_PFN_OFFSET);
+}
 #define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
 #ifdef CONFIG_DEBUG_VIRTUAL

@@ -161,10 +161,10 @@ extern int pfn_valid(unsigned long);
 #define pfn_valid pfn_valid
 #endif
-#include <asm/memory.h>
 #endif /* !__ASSEMBLY__ */
+#include <asm/memory.h>
 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
 #include <asm-generic/getorder.h>

@@ -27,7 +27,7 @@ extern struct page *empty_zero_page;
 #else
 #include <asm-generic/pgtable-nopud.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/pgtable-hwdef.h>

@@ -147,8 +147,6 @@ static inline void init_proc_vtable(const struct processor *p)
 extern void cpu_resume(void);
-#include <asm/memory.h>
 #ifdef CONFIG_MMU
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)

@@ -2,7 +2,7 @@
 #ifndef ASMARM_SPARSEMEM_H
 #define ASMARM_SPARSEMEM_H
-#include <asm/memory.h>
+#include <asm/page.h>
 /*
  * Two definitions are required for sparsemem:

@@ -5,7 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/domain.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/thread_info.h>
 .macro csdb

@@ -9,7 +9,7 @@
  * User space memory access functions
  */
 #include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/domain.h>
 #include <asm/unaligned.h>
 #include <asm/unified.h>

@@ -17,7 +17,7 @@
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/mpu.h>
 #include <asm/procinfo.h>
 #include <asm/suspend.h>

@@ -15,7 +15,7 @@
 #include <linux/init.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>

@@ -9,7 +9,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #ifdef CONFIG_AEABI
 #include <asm/unistd-oabi.h>
 #endif

@@ -6,7 +6,7 @@
  *
  * Low-level vector interface routines for the ARMv7-M architecture
  */
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/glue.h>
 #include <asm/thread_notify.h>
 #include <asm/v7m.h>

@@ -14,12 +14,11 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/cp15.h>
 #include <asm/thread_info.h>
 #include <asm/v7m.h>
 #include <asm/mpu.h>
-#include <asm/page.h>
 /*
  * Kernel startup entry point.

@@ -17,7 +17,7 @@
 #include <asm/domain.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/thread_info.h>
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)

@@ -19,7 +19,7 @@
 #include <asm/system_misc.h>
 #include <asm/idmap.h>
 #include <asm/suspend.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/sections.h>
 #include "reboot.h"

@@ -8,7 +8,7 @@
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>

@@ -15,7 +15,7 @@
 #include <linux/string.h> /* memcpy */
 #include <asm/cputype.h>
 #include <asm/mach/map.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
 #include <asm/tcm.h>

@@ -12,9 +12,8 @@
 #include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
-#include <asm/memory.h>
-#include <asm/mpu.h>
 #include <asm/page.h>
+#include <asm/mpu.h>
 OUTPUT_ARCH(arm)
 ENTRY(stext)

@@ -12,9 +12,8 @@
 #include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
-#include <asm/memory.h>
-#include <asm/mpu.h>
 #include <asm/page.h>
+#include <asm/mpu.h>
 OUTPUT_ARCH(arm)
 ENTRY(stext)

@@ -12,7 +12,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>

@@ -18,7 +18,7 @@
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include "memory.h"

@@ -10,7 +10,7 @@
 #include <linux/platform_data/pm33xx.h>
 #include <linux/ti-emif-sram.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include "iomap.h"
 #include "cm33xx.h"

@@ -11,7 +11,7 @@
 #include <linux/platform_data/pm33xx.h>
 #include <asm/assembler.h>
 #include <asm/hardware/cache-l2x0.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include "cm33xx.h"
 #include "common.h"

@@ -9,7 +9,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/smp_scu.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "omap-secure.h"

@@ -26,7 +26,7 @@
 #include <linux/clk.h>
 #include <asm/setup.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/mach-types.h>
 #include <asm/irq.h>
 #include <linux/sizes.h>

@@ -6,7 +6,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 .data
 /*

@@ -29,7 +29,7 @@
 #include <linux/time.h>
 #include <mach/hardware.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/suspend.h>
 #include <asm/mach/time.h>

@@ -7,7 +7,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 /*
  * Boot code for secondary CPUs.

@@ -11,7 +11,7 @@
 #include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #define SCTLR_MMU 0x01
 #define BOOTROM_ADDRESS 0xE6340000

@@ -6,7 +6,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/assembler.h>
 .arch armv7-a

@@ -10,7 +10,7 @@
 #ifndef __MACH_SPEAR_H
 #define __MACH_SPEAR_H
-#include <asm/memory.h>
+#include <asm/page.h>
 #if defined(CONFIG_ARCH_SPEAR3XX) || defined (CONFIG_ARCH_SPEAR6XX)

@@ -13,7 +13,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
 #include <asm/page.h>
 #include "proc-macros.S"

@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
 #include <asm/page.h>
 #include "proc-macros.S"

@@ -25,7 +25,7 @@
 #include <linux/sizes.h>
 #include <linux/cma.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>

@@ -15,7 +15,7 @@
 #include <asm/domain.h>
 #include <asm/fixmap.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/ptdump.h>
 static struct addr_marker address_markers[] = {

@@ -26,7 +26,7 @@
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>

@@ -17,7 +17,6 @@
 #include <asm/cputype.h>
 #include <asm/highmem.h>
 #include <asm/mach/map.h>
-#include <asm/memory.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/procinfo.h>

@@ -26,7 +26,7 @@
 #include <asm/system_info.h>
 #include <asm/traps.h>
 #include <asm/procinfo.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/kasan_def.h>

@@ -6,7 +6,7 @@
 #include <linux/mm.h>
 #include <asm/sections.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/fixmap.h>
 #include <asm/dma.h>

@@ -11,7 +11,7 @@
 #include <asm/cputype.h>
 #include <asm/mpu.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/sections.h>
 #include "mm.h"

@@ -14,7 +14,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include "proc-macros.S"

@@ -9,7 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/v7m.h>
 #include "proc-macros.S"

@@ -9,7 +9,7 @@
 #include <linux/pgtable.h>
 #include <asm/asm-offsets.h>
 #include <asm/cp15.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 .section ".idmap.text", "ax"

@@ -331,6 +331,14 @@ static inline void *phys_to_virt(phys_addr_t x)
 	return (void *)(__phys_to_virt(x));
 }
+/* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */
+#include <asm-generic/memory_model.h>
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __phys_to_pfn(virt_to_phys(kaddr));
+}
 /*
  * Drivers should NOT use these either.
  */
@@ -339,7 +347,6 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
 #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
 /*

@@ -115,7 +115,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
 	pgd_val(*pgdp) = virt_to_phys(pmdp);
 }
-#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK))
 #define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
 static inline int pte_none(pte_t pte)
@@ -134,7 +134,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 	pte_val(*ptep) = 0;
 }
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
 #define pte_page(pte) virt_to_page(__pte_page(pte))
 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }

@@ -121,8 +121,15 @@ static inline void *__va(unsigned long x)
  * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
  * of the shifts unnecessary.
  */
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
 extern int m68k_virt_to_node_shift;

@@ -19,8 +19,15 @@ extern unsigned long memory_end;
 #define __pa(vaddr) ((unsigned long)(vaddr))
 #define __va(paddr) ((void *)((unsigned long)(paddr)))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))

@@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_set(pmdp,ptep) do {} while (0)
 #define __pte_page(pte) \
-((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
+(__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
@@ -111,7 +111,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_page(pte) virt_to_page(__pte_page(pte))
 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }

@@ -69,7 +69,8 @@ void __init paging_init(void)
 	/* now change pg_table to kernel virtual addresses */
 	for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
-		pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+		pte_t pte = pfn_pte(virt_to_pfn((void *)address),
+				    PAGE_INIT);
 		if (address >= (unsigned long) high_memory)
 			pte_val(pte) = 0;

@@ -102,7 +102,7 @@ static struct list_head ptable_list[2] = {
 	LIST_HEAD_INIT(ptable_list[1]),
 };
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
 #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
@@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type)
 		list_del(dp);
 		mmu_page_dtor((void *)page);
 		if (type == TABLE_PTE)
-			pgtable_pte_page_dtor(virt_to_page(page));
+			pgtable_pte_page_dtor(virt_to_page((void *)page));
 		free_page (page);
 		return 1;
 	} else if (ptable_list[type].next != dp) {

@@ -75,7 +75,7 @@ void __init paging_init(void)
 	/* now change pg_table to kernel virtual addresses */
 	pg_table = (pte_t *) __va ((unsigned long) pg_table);
 	for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
-		pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+		pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
 		if (address >= (unsigned long)high_memory)
 			pte_val (pte) = 0;
 		set_pte (pg_table, pte);

@@ -29,7 +29,7 @@ static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
 	j = *(volatile unsigned long *)kaddr;
 	*(volatile unsigned long *)kaddr = j;
-	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+	ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);
 	pte = pte_val(ptep);
 	// pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
 	if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {

@@ -125,7 +125,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
 	do {
 		pr_debug("mapping %08lx phys to %08lx\n",
 			 __pa(kaddr), vaddr);
-		set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+		set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
 				     PAGE_KERNEL));
 		pte++;
 		kaddr += PAGE_SIZE;

@@ -356,7 +356,7 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
 	unsigned long vaddr;
 	vaddr = __get_free_page(GFP_KERNEL);
-	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr)));
 	return __pa(vaddr);
 }
@@ -439,7 +439,7 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 	unsigned long vaddr;
 	vaddr = __get_free_page(GFP_KERNEL);
-	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr)));
 	return __pa(vaddr);
 }

@@ -8,7 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include "emif.h"
 #include "ti-emif-asm-offsets.h"

@@ -689,7 +689,7 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 	prev_pending_idx = pending_idx;
 	txp = &queue->pending_tx_info[pending_idx].req;
-	page = virt_to_page(idx_to_kaddr(queue, pending_idx));
+	page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
 	__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 	skb->len += txp->size;
 	skb->data_len += txp->size;

@@ -240,7 +240,7 @@ static ssize_t netfs_extract_kvec_to_sg(struct iov_iter *iter,
 	if (is_vmalloc_or_module_addr((void *)kaddr))
 		page = vmalloc_to_page((void *)kaddr);
 	else
-		page = virt_to_page(kaddr);
+		page = virt_to_page((void *)kaddr);
 	sg_set_page(sg, page, len, off);
 	sgtable->nents++;

@@ -199,7 +199,7 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
 	ent->addr = (unsigned long)page_to_virt(p);
 	ent->size = nr_pages << PAGE_SHIFT;
-	if (!virt_addr_valid(ent->addr))
+	if (!virt_addr_valid((void *)ent->addr))
 		goto free_out;
 	/* cut not-mapped area. ....from ppc-32 code. */

@@ -2218,7 +2218,7 @@ static inline void cifs_sg_set_buf(struct sg_table *sgtable,
 		} while (buflen);
 	} else {
 		sg_set_page(&sgtable->sgl[sgtable->nents++],
-			    virt_to_page(addr), buflen, off);
+			    virt_to_page((void *)addr), buflen, off);
 	}
 }

@@ -2500,7 +2500,7 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
 	if (is_vmalloc_or_module_addr((void *)kaddr))
 		page = vmalloc_to_page((void *)kaddr);
 	else
-		page = virt_to_page(kaddr);
+		page = virt_to_page((void *)kaddr);
 	if (!smb_set_sge(rdma, page, off, seg))
 		return -EIO;

@@ -74,8 +74,16 @@ extern unsigned long memory_end;
 #define __va(x) ((void *)((unsigned long) (x)))
 #define __pa(x) ((unsigned long) (x))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
+#define virt_to_pfn virt_to_pfn
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn) << PAGE_SHIFT;
+}
+#define pfn_to_virt pfn_to_virt
 #define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page))