From a29b63cb739723c4874f328af2cbdfac80d184bd Mon Sep 17 00:00:00 2001
From: John Dyson
Date: Sun, 3 Sep 1995 20:39:22 +0000
Subject: [PATCH] Machine dependent routines to support pre-zeroed free pages.

This significantly improves demand zero performance.
---
 sys/amd64/amd64/cpu_switch.S |  5 ++++-
 sys/amd64/amd64/swtch.s      |  5 ++++-
 sys/amd64/amd64/vm_machdep.c | 21 ++++++++++++++++++++-
 sys/i386/i386/swtch.s        |  5 ++++-
 sys/i386/i386/vm_machdep.c   | 21 ++++++++++++++++++++-
 5 files changed, 52 insertions(+), 5 deletions(-)

diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index e15298d6d046..db95daac6237 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.19 1995/01/21 15:20:23 bde Exp $
+ * $Id: swtch.s,v 1.20 1995/02/17 02:22:42 phk Exp $
  */

 #include "npx.h"	/* for NNPX */
@@ -278,6 +278,9 @@ idle_loop:
 	cmpl	$0,_whichidqs		/* 'idle' queue */
 	jne	idqr
 	movb	$0,_intr_nesting_level	/* charge Idle for this loop */
+	call	_vm_page_zero_idle
+	testl	%eax, %eax
+	jnz	idle_loop
 #if NAPM > 0
 #if APM_SLOWSTART <= 0 || !defined(APM_SLOWSTART)
 	/*
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index e15298d6d046..db95daac6237 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.19 1995/01/21 15:20:23 bde Exp $
+ * $Id: swtch.s,v 1.20 1995/02/17 02:22:42 phk Exp $
  */

 #include "npx.h"	/* for NNPX */
@@ -278,6 +278,9 @@ idle_loop:
 	cmpl	$0,_whichidqs		/* 'idle' queue */
 	jne	idqr
 	movb	$0,_intr_nesting_level	/* charge Idle for this loop */
+	call	_vm_page_zero_idle
+	testl	%eax, %eax
+	jnz	idle_loop
 #if NAPM > 0
 #if APM_SLOWSTART <= 0 || !defined(APM_SLOWSTART)
 	/*
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 08dab5355036..debfe4a3ba71 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -38,7 +38,7 @@
  *
  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- *	$Id: vm_machdep.c,v 1.40 1995/07/13 08:47:29 davidg Exp $
+ *	$Id: vm_machdep.c,v 1.41 1995/07/29 11:38:53 bde Exp $
  */

 #include "npx.h"
@@ -862,3 +862,22 @@ grow(p, sp)

 	return (1);
 }
+
+/*
+ * prototype routine to implement the pre-zeroed page mechanism
+ * this routine is called from the idle loop.
+ */
+int
+vm_page_zero_idle() {
+	vm_page_t m;
+	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
+		(m = vm_page_queue_free.tqh_first)) {
+		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
+		enable_intr();
+		pmap_zero_page(VM_PAGE_TO_PHYS(m));
+		disable_intr();
+		TAILQ_INSERT_TAIL(&vm_page_queue_zero, m, pageq);
+		return 1;
+	}
+	return 0;
+}
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index e15298d6d046..db95daac6237 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.19 1995/01/21 15:20:23 bde Exp $
+ * $Id: swtch.s,v 1.20 1995/02/17 02:22:42 phk Exp $
  */

 #include "npx.h"	/* for NNPX */
@@ -278,6 +278,9 @@ idle_loop:
 	cmpl	$0,_whichidqs		/* 'idle' queue */
 	jne	idqr
 	movb	$0,_intr_nesting_level	/* charge Idle for this loop */
+	call	_vm_page_zero_idle
+	testl	%eax, %eax
+	jnz	idle_loop
 #if NAPM > 0
 #if APM_SLOWSTART <= 0 || !defined(APM_SLOWSTART)
 	/*
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 08dab5355036..debfe4a3ba71 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -38,7 +38,7 @@
  *
  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- *	$Id: vm_machdep.c,v 1.40 1995/07/13 08:47:29 davidg Exp $
+ *	$Id: vm_machdep.c,v 1.41 1995/07/29 11:38:53 bde Exp $
  */

 #include "npx.h"
@@ -862,3 +862,22 @@ grow(p, sp)

 	return (1);
 }
+
+/*
+ * prototype routine to implement the pre-zeroed page mechanism
+ * this routine is called from the idle loop.
+ */
+int
+vm_page_zero_idle() {
+	vm_page_t m;
+	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
+		(m = vm_page_queue_free.tqh_first)) {
+		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
+		enable_intr();
+		pmap_zero_page(VM_PAGE_TO_PHYS(m));
+		disable_intr();
+		TAILQ_INSERT_TAIL(&vm_page_queue_zero, m, pageq);
+		return 1;
+	}
+	return 0;
+}
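
Note on the mechanism: the consumer side of vm_page_queue_zero is machine-independent and not part of this patch. On a demand-zero fault the VM system can take a page from the zero queue and skip pmap_zero_page() entirely, paying the zeroing cost at idle time rather than at fault time. The sketch below restates that producer/consumer discipline as a self-contained user-space C program. The TAILQ handling mirrors vm_page_zero_idle() above, but every name in it (fake_page, idle_zero_pass, alloc_zeroed_page, queue_free, queue_zero) is hypothetical and stands in for kernel data structures; memset() stands in for pmap_zero_page(), and interrupt masking is omitted.

/*
 * Hypothetical user-space sketch of the pre-zeroed page idea: an "idle"
 * pass moves pages from a free queue to a zero queue, zeroing them on
 * the way, so a later demand-zero allocation can skip the zeroing.
 * None of these names are kernel interfaces.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#define FAKE_PAGE_SIZE 4096

struct fake_page {
	TAILQ_ENTRY(fake_page) pageq;	/* same linkage name as the kernel code */
	unsigned char data[FAKE_PAGE_SIZE];
};

TAILQ_HEAD(pglist, fake_page);
static struct pglist queue_free = TAILQ_HEAD_INITIALIZER(queue_free);
static struct pglist queue_zero = TAILQ_HEAD_INITIALIZER(queue_zero);

/* Analogue of vm_page_zero_idle(): do one page of work, report progress. */
static int
idle_zero_pass(void)
{
	struct fake_page *m = TAILQ_FIRST(&queue_free);

	if (m == NULL)
		return 0;			/* no work; the idle loop may halt */
	TAILQ_REMOVE(&queue_free, m, pageq);
	memset(m->data, 0, sizeof(m->data));	/* stands in for pmap_zero_page() */
	TAILQ_INSERT_TAIL(&queue_zero, m, pageq);
	return 1;				/* made progress; caller retries */
}

/* Demand-zero allocation: prefer a page that was zeroed at idle time. */
static struct fake_page *
alloc_zeroed_page(void)
{
	struct fake_page *m = TAILQ_FIRST(&queue_zero);

	if (m != NULL) {
		TAILQ_REMOVE(&queue_zero, m, pageq);
		return m;			/* fast path: zeroing already done */
	}
	m = TAILQ_FIRST(&queue_free);
	if (m == NULL)
		return NULL;
	TAILQ_REMOVE(&queue_free, m, pageq);
	memset(m->data, 0, sizeof(m->data));	/* slow path: zero at "fault" time */
	return m;
}

int
main(void)
{
	for (int i = 0; i < 4; i++) {
		struct fake_page *m = malloc(sizeof(*m));
		if (m == NULL)
			return 1;
		memset(m->data, 0xff, sizeof(m->data));	/* dirty, like a freed page */
		TAILQ_INSERT_TAIL(&queue_free, m, pageq);
	}
	while (idle_zero_pass())	/* the idle loop's zeroing passes */
		;
	struct fake_page *m = alloc_zeroed_page();
	printf("allocated page is zeroed: %s\n",
	    m != NULL && m->data[0] == 0 ? "yes" : "no");
	return 0;
}

The return convention matches the assembly hook: a nonzero return from the idle pass means it did useful work, so the idle loop (the jnz after call _vm_page_zero_idle) immediately retries instead of falling through toward the halt path.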