linux/arch/sparc/include/asm/tlb_64.h
Benjamin Herrenschmidt 9e1b32caa5 mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture
will need the virtual address corresponding to the PTE page when
freeing it, due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort of, half of it actually) represented by a PTE
page, and which contain an "indirect" bit indicating that this TLB entry's
RPN points to an array of PTEs from which the TLB can then create direct
entries. Thus, in order to invalidate those when PTE pages are deleted,
we need the virtual address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page's struct page
sucks too much; the address is almost readily available at all call sites,
and almost everybody implements these as macros, so we may as well add the
argument everywhere. I added it to the pmd and pud variants for consistency.
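
To illustrate the shape of the interface change (the "before" line is a
sketch of the pre-patch sparc64 form; the "after" form appears verbatim
in the file below):

    /* before: the freed PTE page's virtual address was unavailable */
    #define pte_free_tlb(mp, ptepage)       pte_free((mp)->mm, ptepage)

    /* after: the address is threaded through, even where it is unused */
    #define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)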

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: David Howells <dhowells@redhat.com> [MN10300 & FRV]
Acked-by: Nick Piggin <npiggin@suse.de>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> [s390]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-07-27 12:10:38 -07:00


#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
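/* Maximum number of virtual addresses batched in vaddrs[] before a flush. */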
#define TLB_BATCH_NR 192
/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
#define FREE_PTE_NR 506
#define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
#else
#define FREE_PTE_NR 1
#define tlb_fast_mode(bp) 1
#endif
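/*
 * Per-CPU batch state: virtual addresses awaiting a TLB flush in
 * vaddrs[], plus pages whose freeing is deferred in pages[] so that no
 * other CPU can still see a stale translation to a recycled page.
 */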
struct mmu_gather {
        struct mm_struct *mm;
        unsigned int pages_nr;
        unsigned int need_flush;
        unsigned int fullmm;
        unsigned int tlb_nr;
        unsigned long vaddrs[TLB_BATCH_NR];
        struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
#ifdef CONFIG_SMP
extern void smp_flush_tlb_pending(struct mm_struct *,
                                  unsigned long, unsigned long *);
#endif
extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
extern void flush_tlb_pending(void);
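/*
 * Grab this CPU's mmu_gather; get_cpu_var() disables preemption, which
 * stays off until tlb_finish_mmu() releases it.  pages_nr == ~0U marks
 * "fast mode" (immediate freeing) when only one CPU is online.
 */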
static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

        BUG_ON(mp->tlb_nr);

        mp->mm = mm;
        mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
        mp->fullmm = full_mm_flush;

        return mp;
}
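/*
 * Flush any batched virtual addresses (skipped for a full-mm teardown,
 * which is invalidated wholesale elsewhere), then free the deferred
 * pages together with their swap cache entries.
 */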
static inline void tlb_flush_mmu(struct mmu_gather *mp)
{
        if (!mp->fullmm)
                flush_tlb_pending();
        if (mp->need_flush) {
                free_pages_and_swap_cache(mp->pages, mp->pages_nr);
                mp->pages_nr = 0;
                mp->need_flush = 0;
        }
}
#ifdef CONFIG_SMP
extern void smp_flush_tlb_mm(struct mm_struct *mm);
#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#else
#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
#endif
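/*
 * End of a batch: force the final flush and page free, trim the page
 * table cache, and re-enable preemption via put_cpu_var().
 */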
static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(mp);

        if (mp->fullmm)
                mp->fullmm = 0;

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
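/*
 * In fast mode (single CPU online) nobody else can hold a stale
 * translation, so the page is freed immediately; otherwise it is
 * queued, and a full batch triggers tlb_flush_mmu().
 */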
static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
{
        if (tlb_fast_mode(mp)) {
                free_page_and_swap_cache(page);
                return;
        }
        mp->need_flush = 1;
        mp->pages[mp->pages_nr++] = page;
        if (mp->pages_nr >= FREE_PTE_NR)
                tlb_flush_mmu(mp);
}
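/*
 * sparc64 batches TLB invalidations by virtual address elsewhere, so
 * there is no per-entry work here, and the 'addr' argument introduced
 * by this commit is accepted but unused by pte_free_tlb()/pmd_free_tlb().
 */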
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#endif /* _SPARC64_TLB_H */
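
For illustration, a minimal sketch of how generic mm code of this era
drives the interface above when unmapping a range (unmap_one() and its
parameters are hypothetical; the mmu_gather calls are the ones defined
in this header):

    /* Hypothetical caller: tear down one mapped page and its PTE page. */
    static void unmap_one(struct mm_struct *mm, pte_t *ptep,
                          struct page *ptepage, struct page *page,
                          unsigned long start, unsigned long end)
    {
            struct mmu_gather *tlb;

            tlb = tlb_gather_mmu(mm, 0);            /* 0: not a full-mm teardown */
            tlb_remove_tlb_entry(tlb, ptep, start); /* no-op on sparc64 */
            tlb_remove_page(tlb, page);             /* defer free past the flush */
            pte_free_tlb(tlb, ptepage, start);      /* note the new addr argument */
            tlb_finish_mmu(tlb, start, end);        /* flush, free batch, put CPU */
    }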