diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index ef427a6bdd1a..7b01ae4f3bc6 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -152,8 +152,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	   the fault.  */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index f73c7cbfe326..4b578d02fd01 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -93,8 +93,11 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ef78c2d66cdd..85c4d9ac8686 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -136,8 +136,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 4d2837eb3e2a..228128e45c67 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -138,8 +138,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	fault = handle_mm_fault(vma, address, flags, regs);
 	pr_debug("handle_mm_fault returns %x\n", fault);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5c40c3ebe52f..687714db6f4d 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -219,8 +219,11 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			bad_page_fault(regs, address, SIGBUS);
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index edaca0a6c1c1..ca64eccea551 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -136,8 +136,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index b4762d66e9ef..6734fee3134f 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -162,8 +162,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 869204e97ec9..6941fdbf2517 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -308,8 +308,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs)) {
+			msg = "Page fault: fault signal on kernel memory";
+			goto no_context;
+		}
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index eb0774d9c03b..460f785f6e09 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,8 +326,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * signal first. We do not need to release the mmap_lock because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, addr);
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 91259f291c54..179295b14664 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -187,8 +187,11 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!from_user)
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4acc12eafbf5..d91305de694c 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -424,8 +424,13 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = get_fault_insn(regs, insn);
+			goto handle_kernel_fault;
+		}
 		goto exit_exception;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
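
Every hunk above applies the same pattern: when fault_signal_pending() reports that a (fatal) signal interrupted the fault, a fault taken in kernel mode must fall through to the architecture's exception-fixup path (no_context, bad_page_fault(), handle_kernel_fault, etc.) instead of silently returning and re-executing the faulting instruction. The following is a minimal user-space sketch of that control flow only; struct pt_regs, fault_signal_pending() and user_mode() are hypothetical stand-ins for the real kernel definitions, not kernel code.

/* Build with: cc -std=c11 -Wall sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct pt_regs { bool user; };		/* stand-in for the arch register frame */

/* Stub: pretend a fatal signal arrived while handling the fault. */
static bool fault_signal_pending(int fault, struct pt_regs *regs)
{
	(void)fault;
	(void)regs;
	return true;
}

static bool user_mode(struct pt_regs *regs)
{
	return regs->user;
}

static void do_page_fault(struct pt_regs *regs)
{
	int fault = 0;			/* would be handle_mm_fault() */

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;/* kernel fault: must run fixup */
		return;			/* user task: signal handled on return */
	}
	return;

no_context:
	printf("kernel fault: run exception table fixup / die()\n");
}

int main(void)
{
	struct pt_regs kernel = { .user = false };
	struct pt_regs user = { .user = true };

	do_page_fault(&kernel);		/* takes the no_context path */
	do_page_fault(&user);		/* returns; signal delivered later */
	return 0;
}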