mm/page_alloc.c: cleanup obsolete KM_USER*
It has been five years since the KM_* kmap flags were removed, and clear_highpage() can now be called from any context, so remove prep_zero_page() accordingly.

Signed-off-by: Anisse Astier <anisse@astier.eu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c761471b58
commit f4d2897b93
1 changed file with 2 additions and 15 deletions
mm/page_alloc.c
@@ -380,20 +380,6 @@ void prep_compound_page(struct page *page, unsigned long order)
 	}
 }
 
-static inline void prep_zero_page(struct page *page, unsigned int order,
-							gfp_t gfp_flags)
-{
-	int i;
-
-	/*
-	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
-	 * and __GFP_HIGHMEM from hard or soft interrupt context.
-	 */
-	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
-	for (i = 0; i < (1 << order); i++)
-		clear_highpage(page + i);
-}
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 bool _debug_pagealloc_enabled __read_mostly;
@@ -975,7 +961,8 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	kasan_alloc_pages(page, order);
 
 	if (gfp_flags & __GFP_ZERO)
-		prep_zero_page(page, order, gfp_flags);
+		for (i = 0; i < (1 << order); i++)
+			clear_highpage(page + i);
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
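
For reference, a minimal sketch of the zeroing pattern the second hunk open-codes in prep_new_page(). This is illustrative only, assuming kernel context with linux/highmem.h available; the wrapper name zero_new_pages() is invented here for clarity and does not exist in the kernel source.

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative sketch, not kernel source: zero_new_pages() is a
 * hypothetical helper. With the KM_* kmap slots gone, clear_highpage()
 * may be called from any context, so a __GFP_ZERO allocation of an
 * order-N block can zero each of its 2^N constituent pages in a plain
 * loop, without the in_interrupt() check that prep_zero_page() carried.
 */
static inline void zero_new_pages(struct page *page, unsigned int order)
{
	int i;

	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

In the actual change the loop is simply open-coded at the call site in prep_new_page(), since with the VM_BUG_ON() guard gone a separate helper no longer earns its keep.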