linux/arch/x86/lib/usercopy.c
Stephane Eranian 25f4298582 perf/x86: Fix broken LBR fixup code
I noticed that the LBR fixups were not working anymore
on programs where they used to work. I tracked this down
to a recent change to copy_from_user_nmi():

 db0dc75d64 ("perf/x86: Check user address explicitly in copy_from_user_nmi()")

This commit added a call to __range_not_ok() to the
copy_from_user_nmi() routine. The problem is that the logic
of the test must be reversed. __range_not_ok() returns 0 if the
range is VALID. We want to return early from copy_from_user_nmi()
if the range is NOT valid.
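
For illustration (a sketch of the intended logic, not the verbatim
diff): since __range_not_ok() returns non-zero for an invalid range,
the check has to read

	if (__range_not_ok(from, n, TASK_SIZE))
		return len;	/* bail out: not a valid user range */

rather than returning early when __range_not_ok() evaluates to 0,
i.e. when the range is valid.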

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Arun Sharma <asharma@fb.com>
Link: http://lkml.kernel.org/r/20120611134426.GA7542@quad
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2012-06-13 15:00:28 +02:00

/*
 * User address space access functions.
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/word-at-a-time.h>
#include <linux/sched.h>

/*
 * best effort, GUP based copy_from_user() that is NMI-safe
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	/* Bail out early if the source is not a valid user range. */
	if (__range_not_ok(from, n, TASK_SIZE))
		return len;

	do {
		/* Pin the user page without taking mmap_sem (NMI-safe). */
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		/* Copy out of the pinned page via a temporary mapping. */
		map = kmap_atomic(page);
		memcpy(to, map + offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
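
A usage sketch (hypothetical caller, not part of this file): this is
roughly how a perf fixup path such as the LBR code would pull
instruction bytes out of the interrupted user task from NMI context.
The variable names (buf, ip) are illustrative only.

	/*
	 * Hypothetical caller: copy up to 16 instruction bytes at the
	 * user ip. Note that copy_from_user_nmi() returns the number of
	 * bytes actually copied, so a short return value means the range
	 * was invalid or a page could not be pinned, and the fixup must
	 * give up.
	 */
	unsigned char buf[16];
	unsigned long bytes;

	bytes = copy_from_user_nmi(buf, (void __user *)ip, sizeof(buf));
	if (bytes != sizeof(buf))
		return 0;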