From 87c5c70036813d26e6e7e4393747a4fdc63cf193 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Sat, 3 Feb 2024 11:45:10 +0100
Subject: [PATCH] s390/fpu: rename save_fpu_regs() to save_user_fpu_regs(), etc

Rename save_fpu_regs(), load_fpu_regs(), and struct thread_struct's fpu
member to save_user_fpu_regs(), load_user_fpu_regs(), and ufpu, so that
the function and variable names reflect the context in which they are
supposed to be used.

This large but trivial conversion is a prerequisite for making the
kernel fpu usage preemptible.

Reviewed-by: Claudio Imbrenda
Signed-off-by: Heiko Carstens
---
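Note, for illustration only and not part of the diff below: a minimal sketch of the
naming split this rename encodes. User space FPU/vector state lives in
current->thread.ufpu and is handled by save_user_fpu_regs()/load_user_fpu_regs(),
while in-kernel FPU usage is bracketed by kernel_fpu_begin()/kernel_fpu_end().
The function name in the sketch is hypothetical, and the KERNEL_VXR flag is
assumed to be provided by asm/fpu.h alongside the kernel_fpu_begin() interface
shown in the fpu.h hunk.

#include <asm/fpu.h>

/* Hypothetical example of an in-kernel user of vector registers. */
static void example_kernel_vx_section(void)
{
	struct kernel_fpu vxstate;

	/*
	 * kernel_fpu_begin() saves the user space state into
	 * current->thread.ufpu via save_user_fpu_regs() if it is still
	 * live in the registers (TIF_FPU clear), or saves the
	 * kernel-owned registers if they overlap with KERNEL_VXR.
	 */
	kernel_fpu_begin(&vxstate, KERNEL_VXR);

	/* ... use vector registers for kernel work here ... */

	kernel_fpu_end(&vxstate, KERNEL_VXR);
}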
 arch/s390/include/asm/entry-common.h |  2 +-
 arch/s390/include/asm/fpu.h          |  8 +--
 arch/s390/include/asm/processor.h    |  4 +-
 arch/s390/kernel/compat_signal.c     | 18 +++----
 arch/s390/kernel/fpu.c               | 20 +++----
 arch/s390/kernel/perf_regs.c         |  6 +--
 arch/s390/kernel/process.c           |  8 +--
 arch/s390/kernel/ptrace.c            | 78 ++++++++++++++--------------
 arch/s390/kernel/signal.c            | 18 +++----
 arch/s390/kernel/traps.c             | 10 ++--
 arch/s390/kvm/interrupt.c            |  4 +-
 arch/s390/kvm/kvm-s390.c             | 26 +++++-----
 arch/s390/kvm/vsie.c                 |  2 +-
 13 files changed, 102 insertions(+), 102 deletions(-)

diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index e479d39e1445..659e07d7f31a 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -42,7 +42,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
 	if (test_thread_flag(TIF_FPU))
-		__load_fpu_regs();
+		__load_user_fpu_regs();
 
 	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 		debug_user_asce(1);
diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h
index 3f076172a283..5d3533569925 100644
--- a/arch/s390/include/asm/fpu.h
+++ b/arch/s390/include/asm/fpu.h
@@ -57,9 +57,9 @@ static inline bool cpu_has_vx(void)
 	return likely(test_facility(129));
 }
 
-void save_fpu_regs(void);
-void load_fpu_regs(void);
-void __load_fpu_regs(void);
+void save_user_fpu_regs(void);
+void load_user_fpu_regs(void);
+void __load_user_fpu_regs(void);
 
 enum {
 	KERNEL_FPC_BIT = 0,
@@ -150,7 +150,7 @@ static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 	state->mask = S390_lowcore.fpu_flags;
 	if (!test_thread_flag(TIF_FPU)) {
 		/* Save user space FPU state and register contents */
-		save_fpu_regs();
+		save_user_fpu_regs();
 	} else if (state->mask & flags) {
 		/* Save FPU/vector register in-use by the kernel */
 		__kernel_fpu_begin(state, flags);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index f25617cbc49e..2e716bf34bf8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -181,7 +181,7 @@ struct thread_struct {
 	struct gs_cb *gs_cb;			/* Current guarded storage cb */
 	struct gs_cb *gs_bc_cb;			/* Broadcast guarded storage cb */
 	struct pgm_tdb trap_tdb;		/* Transaction abort diagnose block */
-	struct fpu fpu;				/* FP and VX register save area */
+	struct fpu ufpu;			/* User FP and VX register save area */
 };
 
 /* Flag to disable transactions. */
@@ -200,7 +200,7 @@ typedef struct thread_struct thread_struct;
 
 #define INIT_THREAD {							\
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
-	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
+	.ufpu.regs = (void *)init_task.thread.ufpu.fprs,		\
 	.last_break = 1,						\
 }
 
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6cd9bf925c82..1942e2a9f8db 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -56,7 +56,7 @@ typedef struct
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs();
+	save_user_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -79,7 +79,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
 		return -EFAULT;
 	return 0;
@@ -113,7 +113,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
-	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+	fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu);
 
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -136,11 +136,11 @@ static int save_sigregs_ext32(struct pt_regs *regs,
 	/* Save vector registers to signal stack */
 	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = current->thread.fpu.vxrs[i].low;
+			vxrs[i] = current->thread.ufpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -165,12 +165,12 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			current->thread.fpu.vxrs[i].low = vxrs[i];
+			current->thread.ufpu.vxrs[i].low = vxrs[i];
 	}
 	return 0;
 }
@@ -184,7 +184,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
 	if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -207,7 +207,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 12d6e9d97104..62c9b2809057 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -107,10 +107,10 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
 
-void __load_fpu_regs(void)
+void __load_user_fpu_regs(void)
 {
-	struct fpu *state = &current->thread.fpu;
-	void *regs = current->thread.fpu.regs;
+	struct fpu *state = &current->thread.ufpu;
+	void *regs = current->thread.ufpu.regs;
 
 	fpu_lfpc_safe(&state->fpc);
 	if (likely(cpu_has_vx()))
@@ -120,15 +120,15 @@ void __load_fpu_regs(void)
 	clear_thread_flag(TIF_FPU);
 }
 
-void load_fpu_regs(void)
+void load_user_fpu_regs(void)
 {
 	raw_local_irq_disable();
-	__load_fpu_regs();
+	__load_user_fpu_regs();
 	raw_local_irq_enable();
 }
-EXPORT_SYMBOL(load_fpu_regs);
+EXPORT_SYMBOL(load_user_fpu_regs);
 
-void save_fpu_regs(void)
+void save_user_fpu_regs(void)
 {
 	unsigned long flags;
 	struct fpu *state;
@@ -139,8 +139,8 @@ void save_fpu_regs(void)
 	if (test_thread_flag(TIF_FPU))
 		goto out;
 
-	state = &current->thread.fpu;
-	regs = current->thread.fpu.regs;
+	state = &current->thread.ufpu;
+	regs = current->thread.ufpu.regs;
 
 	fpu_stfpc(&state->fpc);
 	if (likely(cpu_has_vx()))
@@ -151,4 +151,4 @@ void save_fpu_regs(void)
 out:
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(save_fpu_regs);
+EXPORT_SYMBOL(save_user_fpu_regs);
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index c8e8fb728ddb..511349b8bc5c 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -20,9 +20,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 
 	idx -= PERF_REG_S390_FP0;
 	if (cpu_has_vx())
-		fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
+		fp = *(freg_t *)(current->thread.ufpu.vxrs + idx);
 	else
-		fp = current->thread.fpu.fprs[idx];
+		fp = current->thread.ufpu.fprs[idx];
 	return fp.ui;
 }
 
@@ -64,6 +64,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 	 */
 	regs_user->regs = task_pt_regs(current);
 	if (user_mode(regs_user->regs))
-		save_fpu_regs();
+		save_user_fpu_regs();
 	regs_user->abi = perf_reg_abi(current);
 }
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f4c355f080f2..62146daf9051 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * task and set the TIF_FPU flag to lazy restore the FPU register
 	 * state when returning to user space.
 	 */
-	save_fpu_regs();
+	save_user_fpu_regs();
 	*dst = *src;
-	dst->thread.fpu.regs = dst->thread.fpu.fprs;
+	dst->thread.ufpu.regs = dst->thread.ufpu.fprs;
 
 	/*
 	 * Don't transfer over the runtime instrumentation or the guarded
@@ -190,13 +190,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 void execve_tail(void)
 {
-	current->thread.fpu.fpc = 0;
+	current->thread.ufpu.fpc = 0;
 	fpu_sfpc(0);
 }
 
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 {
-	save_fpu_regs();
+	save_user_fpu_regs();
 	save_access_regs(&prev->thread.acrs[0]);
 	save_ri_cb(prev->thread.ri_cb);
 	save_gs_cb(prev->thread.gs_cb);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2eafd6dcd592..f1ca79073173 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -247,21 +247,21 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 		tmp <<= BITS_PER_LONG - 32;
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
+			       ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 		else
 			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
+			       ((addr_t)child->thread.ufpu.fprs + offset);
 
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
@@ -396,20 +396,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 		 */
 		if ((unsigned int)data != 0)
 			return -EINVAL;
-		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
+		child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			*(addr_t *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = data;
+				child->thread.ufpu.vxrs + 2 * offset) = data;
 		else
 			*(addr_t *)((addr_t)
-				child->thread.fpu.fprs + offset) = data;
+				child->thread.ufpu.fprs + offset) = data;
 
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
@@ -623,20 +623,20 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
+			       ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 		else
 			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
+			       ((addr_t)child->thread.ufpu.fprs + offset);
 
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
@@ -749,20 +749,20 @@ static int __poke_user_compat(struct task_struct *child,
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		child->thread.fpu.fpc = data;
+		child->thread.ufpu.fpc = data;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			*(__u32 *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = tmp;
+				child->thread.ufpu.vxrs + 2 * offset) = tmp;
 		else
 			*(__u32 *)((addr_t)
-				child->thread.fpu.fprs + offset) = tmp;
+				child->thread.ufpu.fprs + offset) = tmp;
 
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
@@ -894,10 +894,10 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
-	fp_regs.fpc = target->thread.fpu.fpc;
-	fpregs_store(&fp_regs, &target->thread.fpu);
+	fp_regs.fpc = target->thread.ufpu.fpc;
+	fpregs_store(&fp_regs, &target->thread.ufpu);
 
 	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
 }
@@ -911,22 +911,22 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	if (cpu_has_vx())
-		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+		convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
 	else
-		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+		memcpy(&fprs, target->thread.ufpu.fprs, sizeof(fprs));
 
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
+		u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
 		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
 					0, offsetof(s390_fp_regs, fprs));
 		if (rc)
 			return rc;
 		if (ufpc[1] != 0)
 			return -EINVAL;
-		target->thread.fpu.fpc = ufpc[0];
+		target->thread.ufpu.fpc = ufpc[0];
 	}
 
 	if (rc == 0 && count > 0)
@@ -936,9 +936,9 @@ static int s390_fpregs_set(struct task_struct *target,
 		return rc;
 
 	if (cpu_has_vx())
-		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
+		convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
 	else
-		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
+		memcpy(target->thread.ufpu.fprs, &fprs, sizeof(fprs));
 
 	return rc;
 }
@@ -989,9 +989,9 @@ static int s390_vxrs_low_get(struct task_struct *target,
 
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 	return membuf_write(&to, vxrs, sizeof(vxrs));
 }
@@ -1006,15 +1006,15 @@ static int s390_vxrs_low_set(struct task_struct *target,
 
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			target->thread.fpu.vxrs[i].low = vxrs[i];
+			target->thread.ufpu.vxrs[i].low = vxrs[i];
 
 	return rc;
 }
@@ -1026,8 +1026,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
 
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
-	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		save_user_fpu_regs();
+	return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 			    __NUM_VXRS_HIGH * sizeof(__vector128));
 }
@@ -1041,10 +1041,10 @@ static int s390_vxrs_high_set(struct task_struct *target,
 
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
+				target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
 	return rc;
 }
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 1517aa70678b..6c2cb345402f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -109,7 +109,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs();
+	save_user_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -131,7 +131,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
 		return -EFAULT;
 	return 0;
@@ -165,7 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
-	fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);
 
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -181,11 +181,11 @@ static int save_sigregs_ext(struct pt_regs *regs,
 	/* Save vector registers to signal stack */
 	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = current->thread.fpu.vxrs[i].low;
+			vxrs[i] = current->thread.ufpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -202,12 +202,12 @@ static int restore_sigregs_ext(struct pt_regs *regs,
 	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			current->thread.fpu.vxrs[i].low = vxrs[i];
+			current->thread.ufpu.vxrs[i].low = vxrs[i];
 	}
 	return 0;
 }
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 08f8aee96d8f..52578b5cecbd 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -201,8 +201,8 @@ static void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs();
-	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
+	save_user_fpu_regs();
+	vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
 		si_code = FPE_FLTINV;
@@ -227,9 +227,9 @@ static void vector_exception(struct pt_regs *regs)
 
 static void data_exception(struct pt_regs *regs)
 {
-	save_fpu_regs();
-	if (current->thread.fpu.fpc & FPC_DXC_MASK)
-		do_fp_trap(regs, current->thread.fpu.fpc);
+	save_user_fpu_regs();
+	if (current->thread.ufpu.fpc & FPC_DXC_MASK)
+		do_fp_trap(regs, current->thread.ufpu.fpc);
 	else
 		do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 6e435bf0c27e..9315203c2786 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -584,7 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	mci.val = mchk->mcic;
 
 	/* take care of lazy register loading */
-	save_fpu_regs();
+	save_user_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
 		save_gs_cb(current->thread.gs_cb);
@@ -648,7 +648,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	}
 	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
 			     vcpu->run->s.regs.gprs, 128);
-	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+	rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
 			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
 			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0cee7192a1c2..3ce4029cabc2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4830,7 +4830,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			       sizeof(sie_page->pv_grregs));
 		}
 		if (test_thread_flag(TIF_FPU))
-			load_fpu_regs();
+			load_user_fpu_regs();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -4952,14 +4952,14 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	/* save host (userspace) fprs/vrs */
-	save_fpu_regs();
-	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
-	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+	save_user_fpu_regs();
+	vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
 	if (cpu_has_vx())
-		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+		current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
 	else
-		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
-	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+		current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
+	current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
 
 	/* Sync fmt2 only data */
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
@@ -5022,11 +5022,11 @@ static void store_regs(struct kvm_vcpu *vcpu)
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
 	/* Save guest register state */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	save_user_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
 	/* Restore will be done lazily at return */
-	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
-	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+	current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
 		store_regs_fmt2(vcpu);
 }
@@ -5172,8 +5172,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	save_user_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
 	save_access_regs(vcpu->run->s.regs.acrs);
 
 	return kvm_s390_store_status_unloaded(vcpu, addr);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index b8a242360ed0..e0f79c9a4852 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1150,7 +1150,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
 	barrier();
 	if (test_thread_flag(TIF_FPU))
-		load_fpu_regs();
+		load_user_fpu_regs();
 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
 		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 	barrier();
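
For illustration only, a sketch of the lazy save/restore protocol that the renamed
helpers implement, based on the entry-common.h, fpu.h and fpu.c hunks above. The
helper below is hypothetical; save_user_fpu_regs(), __load_user_fpu_regs(), TIF_FPU
and current->thread.ufpu are the names introduced or renamed by this patch.

#include <linux/sched.h>
#include <asm/fpu.h>

/* Hypothetical helper that inspects the current task's user FPU state. */
static u32 example_read_user_fpc(void)
{
	/*
	 * Get the user space FPU state into current->thread.ufpu:
	 * save_user_fpu_regs() stores the registers there and sets TIF_FPU,
	 * or returns early if TIF_FPU is already set (state already saved).
	 */
	save_user_fpu_regs();

	/*
	 * No explicit reload is needed here: arch_exit_to_user_mode() calls
	 * __load_user_fpu_regs() when TIF_FPU is set, which loads the
	 * registers back and clears the flag before returning to user space.
	 */
	return current->thread.ufpu.fpc;
}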