From John Dyson: performance improvements to the new bounce buffer code.
This commit is contained in:
David Greenman 1994-03-24 23:12:48 +00:00
parent c9ffdca0a7
commit ed7fcbd079
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=1307
7 changed files with 123 additions and 41 deletions

View file

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.18 1994/03/07 11:38:35 davidg Exp $
* $Id: trap.c,v 1.19 1994/03/14 21:54:03 davidg Exp $
*/
/*
@ -88,8 +88,8 @@ extern int grow(struct proc *,int);
struct sysent sysent[];
int nsysent;
extern short cpl;
extern short netmask, ttymask, biomask;
extern unsigned cpl;
extern unsigned netmask, ttymask, biomask;
#define MAX_TRAP_MSG 27
char *trap_msg[] = {
@ -290,6 +290,7 @@ trap(frame)
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
vm_page_t ptepg;
/*
* Keep swapout from messing with us during this
@ -318,12 +319,25 @@ trap(frame)
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
vm_page_hold(pmap_pte_vm_page(vm_map_pmap(map),v));
ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
vm_page_hold(ptepg);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
vm_page_unhold(pmap_pte_vm_page(vm_map_pmap(map),v));
vm_page_unhold(ptepg);
/*
* page table pages don't need to be kept if they
* are not held
*/
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
if( ptepg->flags & PG_CLEAN)
vm_page_free(ptepg);
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);

View file

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.13 1994/03/21 09:35:10 davidg Exp $
* $Id: vm_machdep.c,v 1.14 1994/03/23 09:15:06 davidg Exp $
*/
#include "npx.h"
@ -57,10 +57,11 @@
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages;
int bouncepages, bpwait;
vm_map_t bounce_map;
int bmwait, bmfreeing;
#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int bounceallocarraysize;
unsigned *bounceallocarray;
int bouncefree;
@ -98,10 +99,11 @@ vm_bounce_page_find(count)
bounceallocarray[i] |= 1 << (bit - 1) ;
bouncefree -= count;
splx(s);
return bouncepa + (i * 8 * sizeof(unsigned) + (bit - 1)) * NBPG;
return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
}
}
}
bpwait = 1;
tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
goto retry;
}
@ -126,13 +128,16 @@ vm_bounce_page_free(pa, count)
if ((index < 0) || (index >= bouncepages))
panic("vm_bounce_page_free -- bad index\n");
allocindex = index / (8 * sizeof(unsigned));
bit = index % (8 * sizeof(unsigned));
allocindex = index / BITS_IN_UNSIGNED;
bit = index % BITS_IN_UNSIGNED;
bounceallocarray[allocindex] &= ~(1 << bit);
bouncefree += count;
wakeup((caddr_t) &bounceallocarray);
if (bpwait) {
bpwait = 0;
wakeup((caddr_t) &bounceallocarray);
}
}
/*
@ -189,7 +194,7 @@ vm_bounce_init()
if (bouncepages == 0)
return;
bounceallocarraysize = (bouncepages + (8*sizeof(unsigned))-1) / (8 * sizeof(unsigned));
bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
if (!bounceallocarray)
@ -199,7 +204,7 @@ vm_bounce_init()
bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
bouncepa = pmap_extract(kernel_pmap, (vm_offset_t) bouncememory);
bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
kvasfreecnt = 0;
@ -238,7 +243,7 @@ vm_bounce_alloc(bp)
*/
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
pa = pmap_kextract(va);
if (pa >= SIXTEENMEG)
++dobounceflag;
va += NBPG;
@ -255,7 +260,7 @@ vm_bounce_alloc(bp)
kva = vm_bounce_kva(countvmpg);
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
pa = pmap_kextract(va);
if (pa >= SIXTEENMEG) {
/*
* allocate a replacement page
@ -338,7 +343,7 @@ vm_bounce_free(bp)
vm_offset_t copycount;
copycount = i386_round_page(bouncekva + 1) - bouncekva;
mybouncepa = pmap_extract(kernel_pmap, i386_trunc_page(bouncekva));
mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));
/*
* if this is a bounced pa, then process as one
@ -552,7 +557,7 @@ kvtop(void *addr)
{
vm_offset_t va;
va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
va = pmap_kextract((vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((int)va);

View file

@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.10 1994/01/31 04:19:00 davidg Exp $
* $Id: pmap.h,v 1.11 1994/03/07 11:38:48 davidg Exp $
*/
#ifndef _PMAP_MACHINE_
@ -180,6 +180,21 @@ extern int IdlePTD; /* physical address of "Idle" state directory */
#define ptetoav(pt) (i386_ptob(pt - APTmap))
#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PGOFSET))
/*
 * Routine: pmap_kextract
 * Function:
 *	Extract the physical page address associated
 *	with the given kernel virtual address.
 *	NOTE(review): assumes the PTE mapping va is present and valid —
 *	callers must guarantee this; no validity check is performed here.
 */
static inline vm_offset_t
pmap_kextract(va)
vm_offset_t va;
{
/* fetch the raw page-table entry that maps va */
vm_offset_t pa = *(int *)vtopte(va);
/* combine the frame bits from the PTE with the page offset from va */
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
return pa;
}
/*
* macros to generate page directory/table indices
*/

View file

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.18 1994/03/07 11:38:35 davidg Exp $
* $Id: trap.c,v 1.19 1994/03/14 21:54:03 davidg Exp $
*/
/*
@ -88,8 +88,8 @@ extern int grow(struct proc *,int);
struct sysent sysent[];
int nsysent;
extern short cpl;
extern short netmask, ttymask, biomask;
extern unsigned cpl;
extern unsigned netmask, ttymask, biomask;
#define MAX_TRAP_MSG 27
char *trap_msg[] = {
@ -290,6 +290,7 @@ trap(frame)
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
vm_page_t ptepg;
/*
* Keep swapout from messing with us during this
@ -318,12 +319,25 @@ trap(frame)
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
vm_page_hold(pmap_pte_vm_page(vm_map_pmap(map),v));
ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
vm_page_hold(ptepg);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
vm_page_unhold(pmap_pte_vm_page(vm_map_pmap(map),v));
vm_page_unhold(ptepg);
/*
* page table pages don't need to be kept if they
* are not held
*/
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
if( ptepg->flags & PG_CLEAN)
vm_page_free(ptepg);
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);

View file

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.13 1994/03/21 09:35:10 davidg Exp $
* $Id: vm_machdep.c,v 1.14 1994/03/23 09:15:06 davidg Exp $
*/
#include "npx.h"
@ -57,10 +57,11 @@
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages;
int bouncepages, bpwait;
vm_map_t bounce_map;
int bmwait, bmfreeing;
#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int bounceallocarraysize;
unsigned *bounceallocarray;
int bouncefree;
@ -98,10 +99,11 @@ vm_bounce_page_find(count)
bounceallocarray[i] |= 1 << (bit - 1) ;
bouncefree -= count;
splx(s);
return bouncepa + (i * 8 * sizeof(unsigned) + (bit - 1)) * NBPG;
return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
}
}
}
bpwait = 1;
tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
goto retry;
}
@ -126,13 +128,16 @@ vm_bounce_page_free(pa, count)
if ((index < 0) || (index >= bouncepages))
panic("vm_bounce_page_free -- bad index\n");
allocindex = index / (8 * sizeof(unsigned));
bit = index % (8 * sizeof(unsigned));
allocindex = index / BITS_IN_UNSIGNED;
bit = index % BITS_IN_UNSIGNED;
bounceallocarray[allocindex] &= ~(1 << bit);
bouncefree += count;
wakeup((caddr_t) &bounceallocarray);
if (bpwait) {
bpwait = 0;
wakeup((caddr_t) &bounceallocarray);
}
}
/*
@ -189,7 +194,7 @@ vm_bounce_init()
if (bouncepages == 0)
return;
bounceallocarraysize = (bouncepages + (8*sizeof(unsigned))-1) / (8 * sizeof(unsigned));
bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
if (!bounceallocarray)
@ -199,7 +204,7 @@ vm_bounce_init()
bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
bouncepa = pmap_extract(kernel_pmap, (vm_offset_t) bouncememory);
bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
kvasfreecnt = 0;
@ -238,7 +243,7 @@ vm_bounce_alloc(bp)
*/
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
pa = pmap_kextract(va);
if (pa >= SIXTEENMEG)
++dobounceflag;
va += NBPG;
@ -255,7 +260,7 @@ vm_bounce_alloc(bp)
kva = vm_bounce_kva(countvmpg);
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
pa = pmap_kextract(va);
if (pa >= SIXTEENMEG) {
/*
* allocate a replacement page
@ -338,7 +343,7 @@ vm_bounce_free(bp)
vm_offset_t copycount;
copycount = i386_round_page(bouncekva + 1) - bouncekva;
mybouncepa = pmap_extract(kernel_pmap, i386_trunc_page(bouncekva));
mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));
/*
* if this is a bounced pa, then process as one
@ -552,7 +557,7 @@ kvtop(void *addr)
{
vm_offset_t va;
va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
va = pmap_kextract((vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((int)va);

View file

@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.10 1994/01/31 04:19:00 davidg Exp $
* $Id: pmap.h,v 1.11 1994/03/07 11:38:48 davidg Exp $
*/
#ifndef _PMAP_MACHINE_
@ -180,6 +180,21 @@ extern int IdlePTD; /* physical address of "Idle" state directory */
#define ptetoav(pt) (i386_ptob(pt - APTmap))
#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PGOFSET))
/*
 * Routine: pmap_kextract
 * Function:
 *	Extract the physical page address associated
 *	with the given kernel virtual address.
 *	NOTE(review): assumes the PTE mapping va is present and valid —
 *	callers must guarantee this; no validity check is performed here.
 */
static inline vm_offset_t
pmap_kextract(va)
vm_offset_t va;
{
/* fetch the raw page-table entry that maps va */
vm_offset_t pa = *(int *)vtopte(va);
/* combine the frame bits from the PTE with the page offset from va */
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
return pa;
}
/*
* macros to generate page directory/table indices
*/

View file

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.18 1994/03/07 11:38:35 davidg Exp $
* $Id: trap.c,v 1.19 1994/03/14 21:54:03 davidg Exp $
*/
/*
@ -88,8 +88,8 @@ extern int grow(struct proc *,int);
struct sysent sysent[];
int nsysent;
extern short cpl;
extern short netmask, ttymask, biomask;
extern unsigned cpl;
extern unsigned netmask, ttymask, biomask;
#define MAX_TRAP_MSG 27
char *trap_msg[] = {
@ -290,6 +290,7 @@ trap(frame)
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
vm_page_t ptepg;
/*
* Keep swapout from messing with us during this
@ -318,12 +319,25 @@ trap(frame)
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
vm_page_hold(pmap_pte_vm_page(vm_map_pmap(map),v));
ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
vm_page_hold(ptepg);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
vm_page_unhold(pmap_pte_vm_page(vm_map_pmap(map),v));
vm_page_unhold(ptepg);
/*
* page table pages don't need to be kept if they
* are not held
*/
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
if( ptepg->flags & PG_CLEAN)
vm_page_free(ptepg);
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);