linux/arch/m68k/include/asm/uaccess_mm.h
Sam Ravnborg 49148020bc m68k,m68knommu: merge header files
Merge header files for m68k and m68knommu to the single location:

    arch/m68k/include/asm

The majority of this patch was generated by the
script that is included in the changelog below.

The script was originally written by Arnd Bergmann and
extended by me to cover a few more files.

When the header files differ, the script uses the following
naming convention:

The original m68k file is named <file>_mm.h  [mm for memory manager]
The m68knommu file is named <file>_no.h  [no for no memory manager]

The generated <file>.h selects between the two using the
following include guard:

    #ifdef __uClinux__
    #include "<file>_no.h"
    #else
    #include "<file>_mm.h"
    #endif

This include guard works because the m68knommu toolchain sets
the __uClinux__ symbol - so this should work in userspace too.

Merging the header files for m68k and m68knommu exposes the
(unexpected?) ABI differences, making it easier to actually
identify and fix them.

The commit has been build tested with both an m68k and
an m68knommu toolchain - with success.

The commit has also been tested with "make headers_check"
and this patch fixes make headers_check for m68knommu.

The script used:
TARGET=arch/m68k/include/asm
SOURCE=arch/m68knommu/include/asm

INCLUDE="cachectl.h errno.h fcntl.h hwtest.h ioctls.h ipcbuf.h \
linkage.h math-emu.h md.h mman.h movs.h msgbuf.h openprom.h \
oplib.h poll.h posix_types.h resource.h rtc.h sembuf.h shmbuf.h \
shm.h shmparam.h socket.h sockios.h spinlock.h statfs.h stat.h \
termbits.h termios.h tlb.h types.h user.h"

EQUAL="auxvec.h cputime.h device.h emergency-restart.h futex.h \
ioctl.h irq_regs.h kdebug.h local.h mutex.h percpu.h \
sections.h topology.h"

NOMMUFILES="anchor.h bootstd.h coldfire.h commproc.h dbg.h \
elia.h flat.h m5206sim.h m520xsim.h m523xsim.h m5249sim.h \
m5272sim.h m527xsim.h m528xsim.h m5307sim.h m532xsim.h \
m5407sim.h m68360_enet.h m68360.h m68360_pram.h m68360_quicc.h \
m68360_regs.h MC68328.h MC68332.h MC68EZ328.h MC68VZ328.h \
mcfcache.h mcfdma.h mcfmbus.h mcfne.h mcfpci.h mcfpit.h \
mcfsim.h mcfsmc.h mcftimer.h mcfuart.h mcfwdebug.h \
nettel.h quicc_simple.h smp.h"

FILES="atomic.h bitops.h bootinfo.h bug.h bugs.h byteorder.h cache.h \
cacheflush.h checksum.h current.h delay.h div64.h \
dma-mapping.h dma.h elf.h entry.h fb.h fpu.h hardirq.h hw_irq.h io.h \
irq.h kmap_types.h machdep.h mc146818rtc.h mmu.h mmu_context.h \
module.h page.h page_offset.h param.h pci.h pgalloc.h \
pgtable.h processor.h ptrace.h scatterlist.h segment.h \
setup.h sigcontext.h siginfo.h signal.h string.h system.h swab.h \
thread_info.h timex.h tlbflush.h traps.h uaccess.h ucontext.h \
unaligned.h unistd.h"

# Keep both variants as <file>_mm.h / <file>_no.h and generate a
# wrapper <file>.h that selects between them on __uClinux__.
mergefile() {
	BASE=${1%.h}
	git mv ${SOURCE}/$1 ${TARGET}/${BASE}_no.h
	git mv ${TARGET}/$1 ${TARGET}/${BASE}_mm.h

cat << EOF > ${TARGET}/$1
#ifdef __uClinux__
#include "${BASE}_no.h"
#else
#include "${BASE}_mm.h"
#endif
EOF

	git add ${TARGET}/$1
}

set -e

mkdir -p ${TARGET}

git mv include/asm-m68k/* ${TARGET}
rmdir include/asm-m68k

git rm ${SOURCE}/Kbuild
for F in $INCLUDE $EQUAL; do
	git rm ${SOURCE}/$F
done

for F in $NOMMUFILES; do
	git mv ${SOURCE}/$F ${TARGET}/$F
done

for F in $FILES ; do
	mergefile $F
done

rmdir arch/m68knommu/include/asm
rmdir arch/m68knommu/include

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
2009-01-16 21:58:10 +10:00


#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
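
/*
 * Illustrative sketch of how a fixup pair is consumed, using the local
 * labels from __put_user_asm() below.  A fault at label 1 makes the
 * fault handler search __ex_table for a matching insn address and
 * resume at the paired fixup address (label 10), which sets the error
 * code and jumps back to the normal path:
 *
 *	1:	moves.l	%2,%1		table entry:  .long 1b,10b
 *	2:	...			normal path continues here
 *	10:	moveq.l	%3,%0		fixup: set the error code,
 *		jra	2b		then rejoin the normal path
 */
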
extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
	"1: moves."#bwl" %2,%1\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .even\n" \
	"10: moveq.l %3,%0\n" \
	" jra 2b\n" \
	" .previous\n" \
	"\n" \
	" .section __ex_table,\"a\"\n" \
	" .align 4\n" \
	" .long 1b,10b\n" \
	" .long 2b,10b\n" \
	" .previous" \
	: "+d" (res), "=m" (*(ptr)) \
	: #reg (x), "i" (err))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr) \
({ \
	typeof(*(ptr)) __pu_val = (x); \
	int __pu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
		break; \
	case 2: \
		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
		break; \
	case 4: \
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
		break; \
	case 8: \
	    { \
		const void __user *__pu_ptr = (ptr); \
		asm volatile ("\n" \
			"1: moves.l %2,(%1)+\n" \
			"2: moves.l %R2,(%1)\n" \
			"3:\n" \
			" .section .fixup,\"ax\"\n" \
			" .even\n" \
			"10: movel %3,%0\n" \
			" jra 3b\n" \
			" .previous\n" \
			"\n" \
			" .section __ex_table,\"a\"\n" \
			" .align 4\n" \
			" .long 1b,10b\n" \
			" .long 2b,10b\n" \
			" .long 3b,10b\n" \
			" .previous" \
			: "+d" (__pu_err), "+a" (__pu_ptr) \
			: "r" (__pu_val), "i" (-EFAULT) \
			: "memory"); \
		break; \
	    } \
	default: \
		__pu_err = __put_user_bad(); \
		break; \
	} \
	__pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)
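
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * macro picks the transfer size from the pointer type, so a 4-byte
 * store uses the "l" variant, and the result is 0 or -EFAULT:
 *
 *	static int report_count(unsigned int count,
 *				unsigned int __user *up)
 *	{
 *		return put_user(count, up);
 *	}
 */
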
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
	type __gu_val; \
	asm volatile ("\n" \
		"1: moves."#bwl" %2,%1\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"10: move.l %3,%0\n" \
		" sub."#bwl" %1,%1\n" \
		" jra 2b\n" \
		" .previous\n" \
		"\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 1b,10b\n" \
		" .previous" \
		: "+d" (res), "=&" #reg (__gu_val) \
		: "m" (*(ptr)), "i" (err)); \
	(x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
})

#define __get_user(x, ptr) \
({ \
	int __gu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
		break; \
	case 2: \
		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
		break; \
	case 4: \
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
		break; \
/*	case 8: disabled because gcc-4.1 has a broken typeof \
	    { \
		const void *__gu_ptr = (ptr); \
		u64 __gu_val; \
		asm volatile ("\n" \
			"1: moves.l (%2)+,%1\n" \
			"2: moves.l (%2),%R1\n" \
			"3:\n" \
			" .section .fixup,\"ax\"\n" \
			" .even\n" \
			"10: move.l %3,%0\n" \
			" sub.l %1,%1\n" \
			" sub.l %R1,%R1\n" \
			" jra 3b\n" \
			" .previous\n" \
			"\n" \
			" .section __ex_table,\"a\"\n" \
			" .align 4\n" \
			" .long 1b,10b\n" \
			" .long 2b,10b\n" \
			" .previous" \
			: "+d" (__gu_err), "=&r" (__gu_val), \
			  "+a" (__gu_ptr) \
			: "i" (-EFAULT) \
			: "memory"); \
		(x) = (typeof(*(ptr)))__gu_val; \
		break; \
	    } */ \
	default: \
		__gu_err = __get_user_bad(); \
		break; \
	} \
	__gu_err; \
})
#define get_user(x, ptr) __get_user(x, ptr)
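
/*
 * Usage sketch (hypothetical caller, not part of this header): on a
 * faulting load, __get_user_asm() zeroes the destination and returns
 * -EFAULT, so the value is never left uninitialized:
 *
 *	static int read_mode(const int __user *up)
 *	{
 *		int mode;
 *
 *		if (get_user(mode, up))
 *			return -EFAULT;
 *		return mode;
 *	}
 */
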
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
	asm volatile ("\n" \
		"1: moves."#s1" (%2)+,%3\n" \
		" move."#s1" %3,(%1)+\n" \
		"2: moves."#s2" (%2)+,%3\n" \
		" move."#s2" %3,(%1)+\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		"3: moves."#s3" (%2)+,%3\n" \
		" move."#s3" %3,(%1)+\n" \
		" .endif\n" \
		"4:\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 1b,10f\n" \
		" .long 2b,20f\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" .long 3b,30f\n" \
		" .endif\n" \
		" .previous\n" \
		"\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"10: clr."#s1" (%1)+\n" \
		"20: clr."#s2" (%1)+\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		"30: clr."#s3" (%1)+\n" \
		" .endif\n" \
		" moveq.l #"#n",%0\n" \
		" jra 4b\n" \
		" .previous\n" \
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
		: : "memory")

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
		break;
	case 2:
		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
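
/*
 * Note on the decomposition above: a constant size is split into at
 * most three moves, e.g. n == 7 copies a long, a word and a byte
 * (l, w, b).  Sizes that would need a fourth move, such as n == 11,
 * take the default branch into __generic_copy_from_user().  On a
 * fault the fixup code clears the destination from the faulting move
 * onwards and returns the full constant n as the uncopied byte count.
 */
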
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
	asm volatile ("\n" \
		" move."#s1" (%2)+,%3\n" \
		"11: moves."#s1" %3,(%1)+\n" \
		"12: move."#s2" (%2)+,%3\n" \
		"21: moves."#s2" %3,(%1)+\n" \
		"22:\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" move."#s3" (%2)+,%3\n" \
		"31: moves."#s3" %3,(%1)+\n" \
		"32:\n" \
		" .endif\n" \
		"4:\n" \
		"\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 11b,5f\n" \
		" .long 12b,5f\n" \
		" .long 21b,5f\n" \
		" .long 22b,5f\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" .long 31b,5f\n" \
		" .long 32b,5f\n" \
		" .endif\n" \
		" .previous\n" \
		"\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"5: moveq.l #"#n",%0\n" \
		" jra 4b\n" \
		" .previous\n" \
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
		: : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}

#define __copy_from_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_from_user(to, from, n) :	\
 __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_to_user(to, from, n) :		\
 __generic_copy_to_user(to, from, n))

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
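
/*
 * Usage sketch (hypothetical caller and struct, not part of this
 * header): with a compile-time-constant size the copy is inlined via
 * __constant_copy_from_user(); the return value is the number of
 * bytes left uncopied, so 0 means success:
 *
 *	struct ctl_args {
 *		u32 cmd;
 *		u32 arg;
 *	};
 *
 *	static int fetch_args(struct ctl_args *args,
 *			      const struct ctl_args __user *uargs)
 *	{
 *		if (copy_from_user(args, uargs, sizeof(*args)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
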
long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
unsigned long __clear_user(void __user *to, unsigned long n);
#define clear_user __clear_user
#define strlen_user(str) strnlen_user(str, 32767)
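
/*
 * Usage sketch (hypothetical caller, not part of this header):
 * strncpy_from_user() returns the copied string length on success but
 * does not guarantee NUL termination when the source is truncated, so
 * callers terminate explicitly:
 *
 *	static int fetch_name(char *buf, unsigned long size,
 *			      const char __user *uname)
 *	{
 *		long len = strncpy_from_user(buf, uname, size - 1);
 *
 *		if (len < 0)
 *			return len;
 *		buf[len] = '\0';
 *		return 0;
 *	}
 */
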
#endif /* __M68K_UACCESS_H */