parisc: Implement __get/put_kernel_nofault()

To remove CONFIG_SET_FS from parisc, add __get_kernel_nofault() and
__put_kernel_nofault(), define HAVE_GET_KERNEL_NOFAULT, and remove
set_fs(), get_fs(), load_sr2(), thread_info->addr_limit, KERNEL_DS
and USER_DS.

A nice side effect of this patch is that userspace is now accessed
directly via %sr3, without first setting up a temporary %sr2 space
register that was either copied from %sr3 (user space) or set to zero
(kernel space).

Signed-off-by: Helge Deller <deller@gmx.de>
Suggested-by: Arnd Bergmann <arnd@kernel.org>
Author: Helge Deller, 2021-09-09 12:47:00 +02:00
commit 6710287280 (parent d97180ad68)
6 changed files with 62 additions and 86 deletions
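[Editor's sketch, not part of the commit: the side effect described in the
commit message, seen from a caller. example_probe() and its parameters are
invented for illustration; get_user() and copy_from_kernel_nofault() are the
existing generic interfaces that expand into the macros changed below. This
assumes a kernel with this patch applied.]

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustration only: one user-space and one kernel-space read. */
static int example_probe(const u32 __user *uptr, const u32 *kptr, u32 *out)
{
	u32 uval, kval;

	/* Expands via __get_user()/__get_user_asm(): "ldw 0(%sr3,...)",
	 * with no load_sr2() preamble any more. */
	if (get_user(uval, uptr))
		return -EFAULT;

	/* Generic wrapper (mm/maccess.c); with HAVE_GET_KERNEL_NOFAULT it
	 * expands __get_kernel_nofault(), i.e. "ldw 0(%sr0,...)". */
	if (copy_from_kernel_nofault(&kval, kptr, sizeof(kval)))
		return -EFAULT;

	*out = uval + kval;
	return 0;
}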

arch/parisc/Kconfig

@@ -64,7 +64,6 @@ config PARISC
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select SET_FS
select TRACE_IRQFLAGS_SUPPORT
help

arch/parisc/include/asm/processor.h

@@ -101,10 +101,6 @@ DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
typedef struct {
int seg;
} mm_segment_t;
#define ARCH_MIN_TASKALIGN 8
struct thread_struct {

arch/parisc/include/asm/thread_info.h

@@ -11,7 +11,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags; /* thread_info flags (see TIF_*) */
mm_segment_t addr_limit; /* user-level address space limit */
__u32 cpu; /* current CPU */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
};
@@ -21,7 +20,6 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}

arch/parisc/include/asm/uaccess.h

@@ -11,14 +11,6 @@
#include <linux/bug.h>
#include <linux/string.h>
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS ((mm_segment_t){1})
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
/*
* Note that since kernel addresses are in a separate address space on
* parisc, we don't need to do anything for access_ok().
@@ -33,11 +25,11 @@
#define get_user __get_user
#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#define LDD_USER(sr, val, ptr) __get_user_asm64(sr, val, ptr)
#define STD_USER(sr, x, ptr) __put_user_asm64(sr, x, ptr)
#else
#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#define LDD_USER(sr, val, ptr) __get_user_asm(sr, val, "ldd", ptr)
#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
#endif
/*
@@ -67,28 +59,15 @@ struct exception_table_entry {
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
/*
* load_sr2() preloads the space register %%sr2 - based on the value of
* get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
* is 0), or with the current value of %%sr3 to access user space (USER_DS)
* memory. The following __get_user_asm() and __put_user_asm() functions have
* %%sr2 hard-coded to access the requested memory.
*/
#define load_sr2() \
__asm__(" or,= %0,%%r0,%%r0\n\t" \
" mfsp %%sr3,%0\n\t" \
" mtsp %0,%%sr2\n\t" \
: : "r"(get_fs()) : )
#define __get_user_internal(val, ptr) \
#define __get_user_internal(sr, val, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
\
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm(val, "ldb", ptr); break; \
case 2: __get_user_asm(val, "ldh", ptr); break; \
case 4: __get_user_asm(val, "ldw", ptr); break; \
case 8: LDD_USER(val, ptr); break; \
case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
case 8: LDD_USER(sr, val, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -97,15 +76,14 @@ struct exception_table_entry {
#define __get_user(val, ptr) \
({ \
load_sr2(); \
__get_user_internal(val, ptr); \
__get_user_internal("%%sr3,", val, ptr); \
})
#define __get_user_asm(val, ldx, ptr) \
#define __get_user_asm(sr, val, ldx, ptr) \
{ \
register long __gu_val; \
\
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
@@ -114,9 +92,22 @@ struct exception_table_entry {
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
{ \
type __z; \
long __err; \
__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
if (unlikely(__err)) \
goto err_label; \
else \
*(type *)(dst) = __z; \
}
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(val, ptr) \
#define __get_user_asm64(sr, val, ptr) \
{ \
union { \
unsigned long long l; \
@@ -124,8 +115,8 @@ struct exception_table_entry {
} __gu_tmp; \
\
__asm__(" copy %%r0,%R0\n" \
"1: ldw 0(%%sr2,%2),%0\n" \
"2: ldw 4(%%sr2,%2),%R0\n" \
"1: ldw 0(" sr "%2),%0\n" \
"2: ldw 4(" sr "%2),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
@@ -138,16 +129,16 @@ struct exception_table_entry {
#endif /* !defined(CONFIG_64BIT) */
#define __put_user_internal(x, ptr) \
#define __put_user_internal(sr, x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm("stb", __x, ptr); break; \
case 2: __put_user_asm("sth", __x, ptr); break; \
case 4: __put_user_asm("stw", __x, ptr); break; \
case 8: STD_USER(__x, ptr); break; \
case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
case 8: STD_USER(sr, __x, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -156,10 +147,20 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
({ \
load_sr2(); \
__put_user_internal(x, ptr); \
__put_user_internal("%%sr3,", x, ptr); \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
{ \
type __z = *(type *)(src); \
long __err; \
__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
if (unlikely(__err)) \
goto err_label; \
}
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
@@ -170,26 +171,26 @@ struct exception_table_entry {
* r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
"1: " stx " %2,0(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
#define __put_user_asm(sr, stx, x, ptr) \
__asm__ __volatile__ ( \
"1: " stx " %2,0(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
"1: stw %2,0(%%sr2,%1)\n" \
"2: stw %R2,4(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
#define __put_user_asm64(sr, __val, ptr) do { \
__asm__ __volatile__ ( \
"1: stw %2,0(" sr "%1)\n" \
"2: stw %R2,4(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
@@ -200,12 +201,11 @@ struct exception_table_entry {
*/
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern __must_check unsigned lclear_user(void __user *, unsigned long);
extern __must_check long strnlen_user(const char __user *src, long n);
/*
* Complex access routines -- macros
*/
#define user_addr_max() (~0UL)
#define clear_user lclear_user
#define __clear_user lclear_user
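[Editor's sketch, not part of the commit: the err_label contract of the new
__get_kernel_nofault(). In-tree callers normally go through the generic
copy_from/to_kernel_nofault() helpers in mm/maccess.c, which drive the macro
in the same goto-on-fault style; example_peek_u32() is an invented name.]

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_peek_u32(u32 *dst, const u32 *src)
{
	pagefault_disable();
	/* A fault in the "ldw 0(%sr0,...)" access is fixed up through the
	 * exception table and jumps to the Efault label below. */
	__get_kernel_nofault(dst, src, u32, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}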

arch/parisc/kernel/asm-offsets.c

@@ -230,7 +230,6 @@ int main(void)
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
/* THREAD_SZ_ALGN includes space for a stack frame. */

arch/parisc/lib/lusercopy.S

@@ -27,21 +27,6 @@
#include <asm/errno.h>
#include <linux/linkage.h>
/*
* get_sr gets the appropriate space value into
* sr1 for kernel/user space access, depending
* on the flag stored in the task structure.
*/
.macro get_sr
mfctl %cr30,%r1
ldw TI_SEGMENT(%r1),%r22
mfsp %sr3,%r1
or,<> %r22,%r0,%r0
copy %r0,%r1
mtsp %r1,%sr1
.endm
/*
* unsigned long lclear_user(void *to, unsigned long n)
*
@@ -51,10 +36,9 @@
ENTRY_CFI(lclear_user)
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
addib,<> -1,%r25,$lclu_loop
1: stbs,ma %r0,1(%sr1,%r26)
1: stbs,ma %r0,1(%sr3,%r26)
$lclu_done:
bv %r0(%r2)