lguest: assume Switcher text is a single page.

i.e. SHARED_SWITCHER_PAGES == 1.  It is well under a page, and it's a
minor simplification: it's nice to have *one* simplification in a
patch series!

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
Rusty Russell 2013-04-22 14:10:38 +09:30
parent 856c608827
commit 93a2cdff98
5 changed files with 21 additions and 23 deletions

View file

@@ -11,11 +11,8 @@
#define GUEST_PL 1
/* Every guest maps the core switcher code. */
#define SHARED_SWITCHER_PAGES \
DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
/* Pages for switcher itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
/* Page for Switcher text itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
#ifdef CONFIG_X86_PAE

View file

@@ -52,6 +52,13 @@ static __init int map_switcher(void)
* easy.
*/
/* We assume Switcher text fits into a single page. */
if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
end_switcher_text - start_switcher_text);
return -EINVAL;
}
/*
* We allocate an array of struct page pointers. map_vm_area() wants
* this, rather than just an array of pages.
@@ -326,7 +333,7 @@ static int __init init(void)
goto out;
/* Now we set up the pagetable implementation for the Guests. */
err = init_pagetables(switcher_pages, SHARED_SWITCHER_PAGES);
err = init_pagetables(switcher_pages);
if (err)
goto unmap;

View file

@@ -15,7 +15,7 @@
#include <asm/lguest.h>
void free_pagetables(void);
int init_pagetables(struct page **switcher_pages, unsigned int pages);
int init_pagetables(struct page **switcher_pages);
struct pgdir {
unsigned long gpgdir;

View file

@@ -1079,25 +1079,20 @@ static void free_switcher_pte_pages(void)
/*H:520
* Setting up the Switcher PTE page for given CPU is fairly easy, given
* the CPU number and the "struct page"s for the Switcher code itself.
*
* Currently the Switcher is less than a page long, so "pages" is always 1.
* the CPU number and the "struct page"s for the Switcher and per-cpu pages.
*/
static __init void populate_switcher_pte_page(unsigned int cpu,
struct page *switcher_pages[],
unsigned int pages)
struct page *switcher_pages[])
{
unsigned int i;
pte_t *pte = switcher_pte_page(cpu);
int i;
/* The first entries are easy: they map the Switcher code. */
for (i = 0; i < pages; i++) {
set_pte(&pte[i], mk_pte(switcher_pages[i],
/* The first entry maps the Switcher code. */
set_pte(&pte[0], mk_pte(switcher_pages[0],
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
/* The only other thing we map is this CPU's pair of pages. */
i = pages + cpu*2;
i = 1 + cpu*2;
/* First page (Guest registers) is writable from the Guest */
set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_pages[i]),
@@ -1128,7 +1123,7 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
* At boot or module load time, init_pagetables() allocates and populates
* the Switcher PTE page for each CPU.
*/
__init int init_pagetables(struct page **switcher_pages, unsigned int pages)
__init int init_pagetables(struct page **switcher_pages)
{
unsigned int i;
@@ -1138,7 +1133,7 @@ __init int init_pagetables(struct page **switcher_pages, unsigned int pages)
free_switcher_pte_pages();
return -ENOMEM;
}
populate_switcher_pte_page(i, switcher_pages, pages);
populate_switcher_pte_page(i, switcher_pages);
}
return 0;
}

View file

@@ -62,11 +62,10 @@ static unsigned long switcher_offset(void)
return switcher_addr - (unsigned long)start_switcher_text;
}
/* This cpu's struct lguest_pages. */
/* This cpu's struct lguest_pages (after the Switcher text page) */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
return &(((struct lguest_pages *)
(switcher_addr + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
}
static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);