Changes from John Dyson and myself:

1) Removed all instances of disable_intr()/enable_intr() and changed
	them back to splimp/splx (see the first sketch after this list).
	The previous method was done to improve performance, but Bruce's
	recent changes to inline the spl* functions have made this
	unnecessary.
2) Cleaned up vm_machdep.c considerably. Probably fixed a few bugs, too.
3) Added a new mechanism for collecting page statistics - now done by
	a new system process "pagescan". Previously this was done by the
	pageout daemon, but this proved to be impractical.
4) Improved the page usage statistics gathering mechanism - performance is
	much improved on small-memory machines.
5) Modified mbuf.h to enable support for an external free routine when
	using mbuf clusters, and added the appropriate glue in various
	places to allow this to work (see the second sketch after this list).
6) Adapted a suggested change to the NFS code from Yuval Yurom to take
	advantage of #5.
7) Added fault/swap statistics support.
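
Illustrative sketch for item 1 (not code from this commit; the function
name protect_pv_lists() is made up): splimp() raises the interrupt
priority level high enough to block network and disk interrupts and
returns the previous level, which splx() later restores - unlike
disable_intr()/enable_intr(), which blocks all interrupts and does not
nest, since enable_intr() unconditionally re-enables them.

	void
	protect_pv_lists()
	{
		int s;

		s = splimp();	/* raise IPL, remember previous level */
		/* ... touch data shared with interrupt handlers ... */
		splx(s);	/* restore the saved IPL */
	}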
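
Hedged sketch for items 5 and 6, assuming 4.4BSD-style external-storage
fields in the mbuf (ext_buf/ext_free/ext_size); the exact names and
signatures in this revision of mbuf.h may differ, and nfs_attach_buffer()
and nfs_buffer_free() are made-up names for illustration. The point is
that a subsystem such as NFS can hang its own buffer off an mbuf and be
called back when the mbuf is finally freed, instead of copying the data
into a private cluster:

	static void
	nfs_buffer_free(buf, size)		/* hypothetical callback */
		caddr_t buf;
		u_int size;
	{
		/* return the buffer to its owner rather than the mbuf pool */
	}

	void
	nfs_attach_buffer(m, buf, size)
		struct mbuf *m;
		caddr_t buf;
		u_int size;
	{
		m->m_ext.ext_buf = buf;			/* external storage */
		m->m_ext.ext_free = nfs_buffer_free;	/* run at final free */
		m->m_ext.ext_size = size;
		m->m_data = buf;
		m->m_flags |= M_EXT;			/* storage is external */
	}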
David Greenman 1994-04-14 07:49:40 +00:00
parent e7ae632e5a
commit f690bbace7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=1362
4 changed files with 168 additions and 298 deletions

pmap.c:

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.21 1994/03/14 21:54:01 davidg Exp $
+ * $Id: pmap.c,v 1.22 1994/03/30 02:17:45 davidg Exp $
  */
 /*
@@ -773,7 +773,8 @@ pmap_remove_entry(pmap, pv, va)
 {
 	pv_entry_t npv;
 	int wired;
-	disable_intr();
+	int s;
+	s = splimp();
 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
 		npv = pv->pv_next;
 		if (npv) {
@@ -794,7 +795,7 @@ pmap_remove_entry(pmap, pv, va)
 			free_pv_entry(npv);
 		}
 	}
-	enable_intr();
+	splx(s);
 }
 /*
@@ -1704,12 +1705,13 @@ pmap_testbit(pa, bit)
 {
 	register pv_entry_t pv;
 	pt_entry_t *pte;
+	int s;
 	if (!pmap_is_managed(pa))
 		return FALSE;
 	pv = pa_to_pvh(pa);
-	disable_intr();
+	s = splimp();
 	/*
 	 * Not found, check current mappings returning
@@ -1725,23 +1727,23 @@ pmap_testbit(pa, bit)
 			if (bit & PG_M ) {
 				if (pv->pv_va >= USRSTACK) {
 					if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
-						enable_intr();
+						splx(s);
 						return TRUE;
 					}
 					else if (pv->pv_va < UPT_MAX_ADDRESS) {
-						enable_intr();
+						splx(s);
 						return FALSE;
 					}
 				}
 			}
 			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
 			if ((int) *pte & bit) {
-				enable_intr();
+				splx(s);
 				return TRUE;
 			}
 		}
 	}
-	enable_intr();
+	splx(s);
 	return(FALSE);
 }
@@ -1764,7 +1766,7 @@ pmap_changebit(pa, bit, setem)
 		return;
 	pv = pa_to_pvh(pa);
-	disable_intr();
+	s = splimp();
 	/*
 	 * Loop over all current mappings setting/clearing as appropos
@@ -1795,7 +1797,7 @@ pmap_changebit(pa, bit, setem)
 			}
 		}
 	}
-	enable_intr();
+	splx(s);
 	/*
 	 * tlbflush only if we need to
 	 */

vm_machdep.c:

@@ -38,7 +38,7 @@
  *
  * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
  * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.17 1994/03/30 02:47:13 davidg Exp $
+ * $Id: vm_machdep.c,v 1.18 1994/04/05 03:23:09 davidg Exp $
  */
 #include "npx.h"
@@ -442,6 +442,31 @@ vm_bounce_init()
 }
+static void
+cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
+	vm_offset_t kvanew;
+	vm_offset_t orig1, orig1cnt;
+	vm_offset_t orig2, orig2cnt;
+{
+	int i;
+	vm_offset_t pa;
+	/*
+	 * enter the transfer physical addresses into the new kva
+	 */
+	for(i=0;i<orig1cnt;i++) {
+		vm_offset_t pa;
+		pa = pmap_kextract((caddr_t) orig1 + i * PAGE_SIZE);
+		pmap_kenter(kvanew + i * PAGE_SIZE, pa);
+	}
+	for(i=0;i<orig2cnt;i++) {
+		vm_offset_t pa;
+		pa = pmap_kextract((caddr_t) orig2 + i * PAGE_SIZE);
+		pmap_kenter(kvanew + (i + orig1cnt) * PAGE_SIZE, pa);
+	}
+	pmap_update();
+}
 void
 cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 {
@@ -449,10 +474,6 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 	int i, trycount=0;
 	vm_offset_t orig1pages, orig2pages;
 	vm_offset_t orig1begin, orig2begin;
-	vm_offset_t orig1end, orig2end;
-	vm_offset_t origpages, newpages;
-	vm_offset_t origbegin, newbegin;
-	vm_offset_t origend, newend;
 	vm_offset_t kvanew, kvaorig;
 	/*
@@ -524,8 +545,8 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 	 * we currently only cluster I/O transfers that are at page-aligned
 	 * kvas and transfers that are multiples of page lengths.
 	 */
-	if(((bp->b_bcount & (PAGE_SIZE-1)) == 0) &&
-		(((vm_offset_t) bp->b_un.b_addr & (PAGE_SIZE-1)) == 0)) {
+	if(((bp->b_bcount & PAGE_MASK) == 0) &&
+		(((vm_offset_t) bp->b_un.b_addr & PAGE_MASK) == 0)) {
 		/*
 		 * merge with previous?
 		 * conditions:
@@ -540,29 +561,27 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 		if( (ap->b_blkno + (ap->b_bcount / DEV_BSIZE) == bp->b_blkno) &&
 			(dp->b_actf != ap) &&
 			((ap->b_flags & ~B_CLUSTER) == bp->b_flags) &&
-			((ap->b_bcount & (PAGE_SIZE-1)) == 0) &&
-			(((vm_offset_t) ap->b_un.b_addr & (PAGE_SIZE-1)) == 0) &&
+			((ap->b_bcount & PAGE_MASK) == 0) &&
+			(((vm_offset_t) ap->b_un.b_addr & PAGE_MASK) == 0) &&
 			(ap->b_bcount + bp->b_bcount < maxio)) {
+			orig1begin = (vm_offset_t) ap->b_un.b_addr;
+			orig1pages = ap->b_bcount / PAGE_SIZE;
+			orig2begin = (vm_offset_t) bp->b_un.b_addr;
+			orig2pages = bp->b_bcount / PAGE_SIZE;
+			/*
+			 * see if we can allocate a kva, if we cannot, the don't
+			 * cluster.
+			 */
+			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
+			if( !kvanew) {
+				goto nocluster;
+			}
 			if( (ap->b_flags & B_CLUSTER) == 0) {
-				orig1begin = (vm_offset_t) ap->b_un.b_addr;
-				orig1end = (vm_offset_t) ap->b_un.b_addr + ap->b_bcount;
-				orig1pages = ap->b_bcount / PAGE_SIZE;
-				orig2begin = (vm_offset_t) bp->b_un.b_addr;
-				orig2end = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
-				orig2pages = bp->b_bcount / PAGE_SIZE;
-				/*
-				 * see if we can allocate a kva, if we cannot, the don't
-				 * cluster.
-				 */
-				kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
-				if( !kvanew) {
-					goto nocluster;
-				}
 				/*
 				 * get a physical buf pointer
 				 */
@@ -572,20 +591,7 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
					goto nocluster;
 				}
-				/*
-				 * enter the transfer physical addresses into the new kva
-				 */
-				for(i=0;i<orig1pages;i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract((caddr_t) orig1begin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + i * PAGE_SIZE, pa);
-				}
-				for(i=0;i<orig2pages;i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract((caddr_t) orig2begin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + (i + orig1pages) * PAGE_SIZE, pa);
-				}
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
 				/*
 				 * build the new bp to be handed off to the device
@@ -624,44 +630,12 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 			} else {
 				vm_offset_t addr;
-				origbegin = (vm_offset_t) ap->b_un.b_addr;
-				origend = (vm_offset_t) ap->b_un.b_addr + ap->b_bcount;
-				origpages = ap->b_bcount/PAGE_SIZE;
-				newbegin = (vm_offset_t) bp->b_un.b_addr;
-				newend = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
-				newpages = bp->b_bcount/PAGE_SIZE;
-				/*
-				 * see if we can allocate a kva, if we cannot, the don't
-				 * cluster.
-				 */
-				kvanew = vm_bounce_kva( PAGE_SIZE * (newpages + origpages), 0);
-				if( !kvanew) {
-					goto nocluster;
-				}
-				/*
-				 * enter the transfer physical addresses into the new kva
-				 */
-				for(i=0; i<origpages; i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract(origbegin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + i * PAGE_SIZE, pa);
-				}
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
 				/*
 				 * free the old kva
 				 */
-				vm_bounce_kva_free( origbegin, ap->b_bufsize, 0);
+				vm_bounce_kva_free( orig1begin, ap->b_bufsize, 0);
-				for(i=0; i<newpages; i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract(newbegin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + (i + origpages) * PAGE_SIZE, pa);
-				}
 				ap->b_un.b_addr = (caddr_t) kvanew;
 				ap->b_clusterl->av_forw = bp;
@@ -672,7 +646,6 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 				ap->b_bcount += bp->b_bcount;
 				ap->b_bufsize = ap->b_bcount;
 			}
-			pmap_update();
 			return;
 		/*
 		 * merge with next?
@@ -686,32 +659,31 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 		} else if( ap->av_forw &&
 			(bp->b_blkno + (bp->b_bcount / DEV_BSIZE) == ap->av_forw->b_blkno) &&
 			(bp->b_flags == (ap->av_forw->b_flags & ~B_CLUSTER)) &&
-			((ap->av_forw->b_bcount & (PAGE_SIZE-1)) == 0) &&
-			(((vm_offset_t) ap->av_forw->b_un.b_addr & (PAGE_SIZE-1)) == 0) &&
+			((ap->av_forw->b_bcount & PAGE_MASK) == 0) &&
+			(((vm_offset_t) ap->av_forw->b_un.b_addr & PAGE_MASK) == 0) &&
 			(ap->av_forw->b_bcount + bp->b_bcount < maxio)) {
+			orig1begin = (vm_offset_t) bp->b_un.b_addr;
+			orig1pages = bp->b_bcount / PAGE_SIZE;
+			orig2begin = (vm_offset_t) ap->av_forw->b_un.b_addr;
+			orig2pages = ap->av_forw->b_bcount / PAGE_SIZE;
+			/*
+			 * see if we can allocate a kva, if we cannot, the don't
+			 * cluster.
+			 */
+			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
+			if( !kvanew) {
+				goto nocluster;
+			}
 			/*
 			 * if next isn't a cluster we need to create one
 			 */
 			if( (ap->av_forw->b_flags & B_CLUSTER) == 0) {
-				orig1begin = (vm_offset_t) bp->b_un.b_addr;
-				orig1end = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
-				orig1pages = bp->b_bcount / PAGE_SIZE;
-				orig2begin = (vm_offset_t) ap->av_forw->b_un.b_addr;
-				orig2end = (vm_offset_t) ap->av_forw->b_un.b_addr + ap->av_forw->b_bcount;
-				orig2pages = ap->av_forw->b_bcount / PAGE_SIZE;
-				/*
-				 * see if we can allocate a kva, if we cannot, the don't
-				 * cluster.
-				 */
-				kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
-				if( !kvanew) {
-					goto nocluster;
-				}
 				/*
 				 * get a physical buf pointer
 				 */
@@ -721,20 +693,11 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
					goto nocluster;
 				}
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
 				pmap_update();
 				ap = ap->av_forw;
-				for(i=0;i<orig1pages;i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract((caddr_t) orig1begin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + i * PAGE_SIZE, pa);
-				}
-				for(i=0;i<orig2pages;i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract((caddr_t) orig2begin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + (i + orig1pages) * PAGE_SIZE, pa);
-				}
 				*newbp = *ap;
 				newbp->b_flags |= B_CLUSTER;
 				newbp->b_un.b_addr = (caddr_t) kvanew;
@@ -762,38 +725,9 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 			} else {
 				vm_offset_t addr;
-				newbegin = (vm_offset_t) bp->b_un.b_addr;
-				newend = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
-				newpages = bp->b_bcount/PAGE_SIZE;
-				origbegin = (vm_offset_t) ap->av_forw->b_un.b_addr;
-				origend = (vm_offset_t) ap->av_forw->b_un.b_addr + ap->av_forw->b_bcount;
-				origpages = ap->av_forw->b_bcount/PAGE_SIZE;
-				/*
-				 * see if we can allocate a kva, if we cannot, the don't
-				 * cluster.
-				 */
-				kvanew = vm_bounce_kva( PAGE_SIZE * (newpages + origpages), 0);
-				if( !kvanew) {
-					goto nocluster;
-				}
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
 				ap = ap->av_forw;
-				for(i=0; i<newpages; i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract(newbegin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + i * PAGE_SIZE, pa);
-				}
-				for(i=0; i<origpages; i++) {
-					vm_offset_t pa;
-					pa = pmap_kextract(origbegin + i * PAGE_SIZE);
-					pmap_kenter(kvanew + (i + newpages) * PAGE_SIZE, pa);
-				}
-				vm_bounce_kva_free( origbegin, ap->b_bufsize, 0);
+				vm_bounce_kva_free( orig2begin, ap->b_bufsize, 0);
 				ap->b_un.b_addr = (caddr_t) kvanew;
 				bp->av_forw = ap->b_clusterf;
@@ -806,7 +740,6 @@ cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
 			ap->b_bufsize = ap->b_bcount;
 		}
-		pmap_update();
 		return;
 	}
 }
