
UVM page coloring

Wilbern Cobb

Oct 15, 2001, 12:19:49 AM
Here's an implementation of page coloring, ported from NetBSD, using a
round-robin bucket-selection algorithm (called `Bin Hopping' in Solaris).

Machine-dependent (MD) code may initialize the number of colors (based on
cache sizes); the default is 1 color. I've tested this on i386, sgi, sparc
and sparc64.
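
To make the scheme concrete, here's a tiny stand-alone sketch (not part
of the patch; the uvmexp-style names only mirror it): a page's color is
just the low bits of its physical page number, and `bin hopping' starts
each allocation at the next color, so back-to-back allocations spread
across the cache instead of piling into one set.

#include <stdio.h>

#define PAGE_SHIFT	12		/* 4KB pages */

static int colormask = 4 - 1;		/* ncolors = 4; must be a power of two */
static int nextcolor;			/* like uvm.page_free_nextcolor */

/* Like VM_PGCOLOR_BUCKET(): physical page number masked by colormask. */
static int
pgcolor(unsigned long pa)
{
	return ((int)((pa >> PAGE_SHIFT) & colormask));
}

int
main(void)
{
	int i;

	printf("pa 0x5000 -> color %d\n", pgcolor(0x5000UL));

	/* bin hopping: each allocation prefers the next color in turn */
	for (i = 0; i < 6; i++) {
		printf("allocation %d starts at color %d\n", i, nextcolor);
		nextcolor = (nextcolor + 1) & colormask;
	}
	return (0);
}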

-vedge

--- arch/i386/i386/pmap.c.orig Sun Oct 14 23:07:34 2001
+++ arch/i386/i386/pmap.c Sun Oct 14 23:16:48 2001
@@ -2116,10 +2116,13 @@
* pmap_zero_page_uncached: the same, except uncached.
*/

-void
+boolean_t
pmap_zero_page_uncached(pa)
paddr_t pa;
{
+ boolean_t rv = TRUE;
+ int i, *ptr;
+
simple_lock(&pmap_zero_page_lock);
#ifdef DIAGNOSTIC
if (*zero_pte)
@@ -2131,7 +2134,22 @@
-memset(zerop, 0, NBPG); /* zero */
+
+ for (i = 0, ptr = (int *) zerop; i < PAGE_SIZE / sizeof(int); i++) {
+ if (whichqs != 0) {
+ /*
+ * A process has become ready. Abort now,
+ * so we don't keep it waiting while we
+ * do slow memory access to finish this
+ * page.
+ */
+ rv = FALSE;
+ break;
+ }
+ *ptr++ = 0;
+ }
+
*zero_pte = 0; /* zap! */
pmap_update_pg((vaddr_t)zerop); /* flush TLB */
simple_unlock(&pmap_zero_page_lock);
+ return (rv);
}

/*

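The point of the i386 hunk above: zero the page a word at a time
through the uncached mapping, polling whichqs so the idle loop yields
the CPU as soon as a process becomes runnable. Stripped of the pmap
details, the shape is this (sketch only; `runnable_pending' stands in
for the kernel's whichqs):

typedef int boolean_t;
#define TRUE	1
#define FALSE	0

extern volatile unsigned long runnable_pending;	/* stand-in for whichqs */

boolean_t
zero_page_abortable(int *ptr, unsigned int nwords)
{
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		if (runnable_pending != 0)
			return (FALSE);	/* abort; caller requeues the page */
		*ptr++ = 0;
	}
	return (TRUE);			/* page fully zeroed */
}

A FALSE return feeds uvm_pageidlezero() below: the page goes back on
the PGFL_UNKNOWN queue without PG_ZERO set, and uvmexp.zeroaborts is
bumped.
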
--- arch/i386/include/pmap.h.orig Sun Oct 14 23:09:50 2001
+++ arch/i386/include/pmap.h Sun Oct 14 23:15:10 2001
@@ -413,7 +413,7 @@
/*
* Do idle page zero'ing uncached to avoid polluting the cache.
*/
-void pmap_zero_page_uncached __P((paddr_t));
+boolean_t pmap_zero_page_uncached __P((paddr_t));
#define PMAP_PAGEIDLEZERO(pa) pmap_zero_page_uncached((pa))

/*

--- uvm/uvm_extern.h.orig Sun Oct 14 21:10:52 2001
+++ uvm/uvm_extern.h Sun Oct 14 23:27:55 2001
@@ -299,6 +299,13 @@
/* kernel memory objects: managed by uvm_km_kmemalloc() only! */
struct uvm_object *kmem_object;
struct uvm_object *mb_object;
+
+ /* page coloring and idle zeroing */
+ int ncolors; /* number of page color buckets: must be a power of 2 */
+ int colormask; /* color bucket mask */
+ int colorhit; /* pagealloc where we got optimal color */
+ int colormiss; /* pagealloc where we didn't */
+ int zeroaborts; /* number of times page zeroing was aborted */
};

#ifdef _KERNEL

--- uvm/uvm.h.orig Sun Oct 14 22:39:29 2001
+++ uvm/uvm.h Sun Oct 14 23:27:55 2001
@@ -76,6 +76,7 @@
/* vm_page related parameters */
/* vm_page queues */
struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
+ int page_free_nextcolor; /* next color to allocate from */
struct pglist page_active; /* allocated pages, in use */
struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */

--- uvm/uvm_map.c.orig Sun Oct 14 22:12:01 2001
+++ uvm/uvm_map.c Sun Oct 14 23:27:55 2001
@@ -3376,8 +3376,10 @@
/* cross-verify page queue */
if (pg->pqflags & PQ_FREE) {
int fl = uvm_page_lookup_freelist(pg);
- pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
- PGFL_ZEROS : PGFL_UNKNOWN];
+ int color = VM_PGCOLOR_BUCKET(pg);
+
+ pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
+ ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
}
else if (pg->pqflags & PQ_INACTIVE)
pgl = (pg->pqflags & PQ_SWAPBACKED) ?

--- uvm/uvm_page.c.orig Sun Oct 14 21:13:58 2001
+++ uvm/uvm_page.c Sun Oct 14 23:27:55 2001
@@ -123,6 +123,16 @@
static struct pglist uvm_bootbucket;

/*
+ * we allocate an initial number of page colors in uvm_page_init(),
+ * and remember them. we may re-color pages as cache sizes are
+ * discovered during the autoconfiguration phase. but we can never
+ * free the initial set of buckets, since they are allocated using
+ * uvm_pageboot_alloc().
+ */
+
+static boolean_t have_recolored_pages;
+
+/*
* local prototypes
*/

@@ -206,6 +216,18 @@

}

+static void
+uvm_page_init_buckets(struct pgfreelist *pgfl)
+{
+ int color, i;
+
+ for (color = 0; color < uvmexp.ncolors; color++) {
+ for (i = 0; i < PGFL_NQUEUES; i++) {
+ TAILQ_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
+ }
+ }
+}
+
/*
* uvm_page_init: init the page system. called from uvm_init().
*
@@ -216,19 +238,18 @@
uvm_page_init(kvm_startp, kvm_endp)
vaddr_t *kvm_startp, *kvm_endp;
{
- vsize_t freepages, pagecount, n;
+ vsize_t freepages, pagecount, bucketcount, n;
+ struct pgflbucket *bucketarray;
vm_page_t pagearray;
int lcv, i;
paddr_t paddr;


/*
- * step 1: init the page queues and page queue locks
+ * step 1: init the page queues and page queue locks, except
+ * the free list; we allocate that later (with the initial
+ * vm_page structures).
*/
- for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
- for (i = 0; i < PGFL_NQUEUES; i++)
- TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
- }
TAILQ_INIT(&uvm.page_active);
TAILQ_INIT(&uvm.page_inactive_swp);
TAILQ_INIT(&uvm.page_inactive_obj);
@@ -274,6 +295,14 @@
freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

/*
+ * let MD code initialize the number of colors, or default
+ * to 1 color if MD code doesn't care.
+ */
+ if (uvmexp.ncolors == 0)
+ uvmexp.ncolors = 1;
+ uvmexp.colormask = uvmexp.ncolors - 1;
+
+ /*
* we now know we have (PAGE_SIZE * freepages) bytes of memory we can
* use. for each page of memory we use we need a vm_page structure.
* thus, the total number of pages we can use is the total size of
@@ -282,11 +311,22 @@
* truncation errors (since we can only allocate in terms of whole
* pages).
*/
-
+
+ bucketcount = uvmexp.ncolors * VM_NFREELIST;
pagecount = ((freepages + 1) << PAGE_SHIFT) /
(PAGE_SIZE + sizeof(struct vm_page));
+
+ bucketarray = (void *)uvm_pageboot_alloc(
+ bucketcount *
+ sizeof(struct pgflbucket));
pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
sizeof(struct vm_page));
+
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ uvm.page_free[lcv].pgfl_buckets =
+ (bucketarray + (lcv * uvmexp.ncolors));
+ uvm_page_init_buckets(&uvm.page_free[lcv]);
+ }
memset(pagearray, 0, pagecount * sizeof(struct vm_page));

/*
@@ -295,14 +335,8 @@
*/

for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-
n = vm_physmem[lcv].end - vm_physmem[lcv].start;
- if (n > pagecount) {
- printf("uvm_page_init: lost %ld page(s) in init\n",
- (long)(n - pagecount));
- panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */
- /* n = pagecount; */
- }
+
/* set up page array pointers */
vm_physmem[lcv].pgs = pagearray;
pagearray += n;
@@ -313,6 +347,7 @@
paddr = ptoa(vm_physmem[lcv].start);
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
vm_physmem[lcv].pgs[i].phys_addr = paddr;
+
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
atop(paddr) <= vm_physmem[lcv].avail_end) {
uvmexp.npages++;
@@ -321,6 +356,7 @@
}
}
}
+
/*
* step 5: pass up the values of virtual_space_start and
* virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
@@ -346,10 +382,6 @@
/*
* step 8: determine if we should zero pages in the idle
* loop.
- *
- * XXXJRT - might consider zero'ing up to the target *now*,
- * but that could take an awfully long time if you
- * have a lot of memory.
*/
uvm.page_idle_zero = vm_page_zero_enable;

@@ -649,7 +681,7 @@
paddr_t paddr;
npages = end - start; /* # of pages */
MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
- M_VMPAGE, M_NOWAIT);
+ M_VMPAGE, M_NOWAIT);
if (pgs == NULL) {
printf("uvm_page_physload: can not malloc vm_page "
"structs for segment\n");
@@ -832,6 +864,76 @@
return;
}

+/*
+ * uvm_page_recolor: Recolor the pages if the new bucket count is
+ * larger than the old one.
+ */
+
+void
+uvm_page_recolor(int newncolors)
+{
+ struct pgflbucket *bucketarray, *oldbucketarray;
+ struct pgfreelist pgfl;
+ struct vm_page *pg;
+ vsize_t bucketcount;
+ int s, lcv, color, i, ocolors;
+
+ if (newncolors <= uvmexp.ncolors)
+ return;
+
+ bucketcount = newncolors * VM_NFREELIST;
+ bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
+ M_VMPAGE, M_NOWAIT);
+ if (bucketarray == NULL) {
+ printf("WARNING: unable to allocate %ld page color buckets\n",
+ (long) bucketcount);
+ return;
+ }
+
+ s = uvm_lock_fpageq();
+
+ /* Make sure we should still do this. */
+ if (newncolors <= uvmexp.ncolors) {
+ uvm_unlock_fpageq(s);
+ free(bucketarray, M_VMPAGE);
+ return;
+ }
+
+ oldbucketarray = uvm.page_free[0].pgfl_buckets;
+ ocolors = uvmexp.ncolors;
+
+ uvmexp.ncolors = newncolors;
+ uvmexp.colormask = uvmexp.ncolors - 1;
+
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
+ uvm_page_init_buckets(&pgfl);
+ for (color = 0; color < ocolors; color++) {
+ for (i = 0; i < PGFL_NQUEUES; i++) {
+ while ((pg = TAILQ_FIRST(&uvm.page_free[
+ lcv].pgfl_buckets[color].pgfl_queues[i]))
+ != NULL) {
+ TAILQ_REMOVE(&uvm.page_free[
+ lcv].pgfl_buckets[
+ color].pgfl_queues[i], pg, pageq);
+ TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
+ VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
+ i], pg, pageq);
+ }
+ }
+ }
+ uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
+ }
+
+ if (have_recolored_pages) {
+ uvm_unlock_fpageq(s);
+ free(oldbucketarray, M_VMPAGE);
+ return;
+ }
+
+ have_recolored_pages = TRUE;
+ uvm_unlock_fpageq(s);
+}

#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

@@ -863,6 +965,49 @@
#endif

/*
+ * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
+ */
+
+static __inline struct vm_page *
+uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
+ unsigned int *trycolorp)
+{
+ struct pglist *freeq;
+ struct vm_page *pg;
+ int color, trycolor = *trycolorp;
+
+ color = trycolor;
+ do {
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
+ goto gotit;
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
+ goto gotit;
+ color = (color + 1) & uvmexp.colormask;
+ } while (color != trycolor);
+
+ return (NULL);
+
+ gotit:
+ TAILQ_REMOVE(freeq, pg, pageq);
+ uvmexp.free--;
+
+ /* update zero'd page count */
+ if (pg->flags & PG_ZERO)
+ uvmexp.zeropages--;
+
+ if (color == trycolor)
+ uvmexp.colorhit++;
+ else {
+ uvmexp.colormiss++;
+ *trycolorp = color;
+ }
+
+ return (pg);
+}
+
+/*
* uvm_pagealloc_strat: allocate vm_page from a particular free list.
*
* => return null if no pages free
@@ -888,19 +1033,29 @@
struct vm_anon *anon;
int strat, free_list;
{
- int lcv, try1, try2, s, zeroit = 0;
+ int lcv, try1, try2, s, zeroit = 0, color;
struct vm_page *pg;
- struct pglist *freeq;
- struct pgfreelist *pgfl;
boolean_t use_reserve;

#ifdef DIAGNOSTIC
- /* sanity check */
- if (obj && anon)
+ /* sanity checks */
+ if (obj != NULL && anon != NULL)
panic("uvm_pagealloc: obj and anon != NULL");
+ if (off != trunc_page(off))
+ panic("uvm_pagealloc: offset not page-aligned");
#endif

s = uvm_lock_fpageq(); /* lock free page queue */
+
+ /*
+ * This implements a global round-robin page coloring
+ * algorithm.
+ *
+ * XXXJRT: Should we make the `nextcolor' per-cpu?
+ * XXXJRT: What about virtually-indexed caches?
+ */
+
+ color = uvm.page_free_nextcolor;

/*
* check to see if we need to generate some free pages waking
@@ -949,11 +1104,9 @@
case UVM_PGA_STRAT_NORMAL:
/* Check all freelists in descending priority order. */
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
- pgfl = &uvm.page_free[lcv];
- if ((pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try1]))) != NULL ||
- (pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try2]))) != NULL)
+ pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
+ try1, try2, &color);
+ if (pg != NULL)
goto gotit;
}

@@ -968,11 +1121,10 @@
panic("uvm_pagealloc_strat: bad free list %d",
free_list);
#endif
- pgfl = &uvm.page_free[free_list];
- if ((pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try1]))) != NULL ||
- (pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try2]))) != NULL)
+ /* Attempt to allocate from the specified free list. */
+ pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
+ try1, try2, &color);
+ if (pg != NULL)
goto gotit;

/* Fall back, if possible. */
@@ -990,14 +1142,15 @@
}

gotit:
- TAILQ_REMOVE(freeq, pg, pageq);
- uvmexp.free--;
-
- /* update zero'd page count */
- if (pg->flags & PG_ZERO)
- uvmexp.zeropages--;

/*
+ * we now know which color we actually allocated from; set
+ * the next color accordingly.
+ */
+
+ uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;
+
+ /*
* update allocation statistics and remember if we have to
* zero the page
*/
@@ -1139,13 +1292,18 @@
*/

void uvm_pagefree(pg)
-
-struct vm_page *pg;
-
+ struct vm_page *pg;
{
int s;
int saved_loan_count = pg->loan_count;

+#ifdef DEBUG
+ if (pg->uobject == (void *)0xdeadbeef &&
+ pg->uanon == (void *)0xdeadbeef) {
+ panic("uvm_pagefree: freeing free page %p\n", pg);
+ }
+#endif
+
/*
* if the page was an object page (and thus "TABLED"), remove it
* from the object.
@@ -1234,7 +1392,8 @@

s = uvm_lock_fpageq();
TAILQ_INSERT_TAIL(&uvm.page_free[
- uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
+ uvm_page_lookup_freelist(pg)].pgfl_buckets[
+ VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
pg->pqflags = PQ_FREE;
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
@@ -1291,7 +1450,8 @@
/*
* uvm_pageidlezero: zero free pages while the system is idle.
*
- * => we do at least one iteration per call, if we are below the target.
+ * => try to complete one color bucket at a time, to reduce our impact
+ * on the CPU cache.
* => we loop until we either reach the target or whichqs indicates that
* there is a process ready to run.
*/
@@ -1300,61 +1460,68 @@
{
struct vm_page *pg;
struct pgfreelist *pgfl;
- int free_list, s;
-
- printf("uvm_pageidlezero\n");
+ int free_list, s, firstbucket;
+ static int nextbucket;

+ s = uvm_lock_fpageq();
+ firstbucket = nextbucket;
do {
- s = uvm_lock_fpageq();
-
- if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
- uvm.page_idle_zero = FALSE;
+ if (whichqs != 0) {
uvm_unlock_fpageq(s);
return;
}
-
- for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
- pgfl = &uvm.page_free[free_list];
- if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
- PGFL_UNKNOWN])) != NULL)
- break;
- }
-
- if (pg == NULL) {
- /*
- * No non-zero'd pages; don't bother trying again
- * until we know we have non-zero'd pages free.
- */
+ if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
uvm.page_idle_zero = FALSE;
uvm_unlock_fpageq(s);
return;
}
-
- TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
- uvmexp.free--;
- uvm_unlock_fpageq(s);
-
+ for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
+ pgfl = &uvm.page_free[free_list];
+ while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
+ if (whichqs != 0) {
+ uvm_unlock_fpageq(s);
+ return;
+ }
+
+ TAILQ_REMOVE(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_UNKNOWN],
+ pg, pageq);
+ uvmexp.free--;
+ uvm_unlock_fpageq(s);
#ifdef PMAP_PAGEIDLEZERO
- PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg));
-#else
- /*
- * XXX This will toast the cache unless the pmap_zero_page()
- * XXX implementation does uncached access.
- */
- pmap_zero_page(VM_PAGE_TO_PHYS(pg));
-#endif
- pg->flags |= PG_ZERO;
+ if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {

- s = uvm_lock_fpageq();
- TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
- uvmexp.free++;
- uvmexp.zeropages++;
- uvm_unlock_fpageq(s);
-#if 0
- } while (whichqs == 0);
+ /*
+ * The machine-dependent code detected
+ * some reason for us to abort zeroing
+ * pages, probably because there is a
+ * process now ready to run.
+ */
+
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[
+ PGFL_UNKNOWN], pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeroaborts++;
+ uvm_unlock_fpageq(s);
+ return;
+ }
#else
- } while (1); /* XXX work around lack of page coloring */
-#endif
-
- printf("uvm_pageidlezero: preempted\n");
+ pmap_zero_page(VM_PAGE_TO_PHYS(pg));
+#endif /* PMAP_PAGEIDLEZERO */
+ pg->flags |= PG_ZERO;
+
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_ZEROS],
+ pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeropages++;
+ }
+ }
+ nextbucket = (nextbucket + 1) & uvmexp.colormask;
+ } while (nextbucket != firstbucket);
+ uvm_unlock_fpageq(s);
}

--- uvm/uvm_page.h.orig Sun Oct 14 23:00:08 2001
+++ uvm/uvm_page.h Sun Oct 14 23:27:55 2001
@@ -95,6 +95,12 @@
#define UVM_PAGEZERO_TARGET (uvmexp.free)

/*
+ * Compute the page color bucket for a given page.
+ */
+#define VM_PGCOLOR_BUCKET(pg) \
+ (atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
+
+/*
* handle inline options
*/

@@ -116,6 +122,7 @@
boolean_t uvm_page_physget __P((paddr_t *));
#endif
void uvm_page_rehash __P((void));
+void uvm_page_recolor __P((int));
void uvm_pageidlezero __P((void));

PAGE_INLINE int uvm_lock_fpageq __P((void));

--- uvm/uvm_pglist.c.orig Sun Oct 14 22:42:11 2001
+++ uvm/uvm_pglist.c Sun Oct 14 23:27:55 2001
@@ -94,7 +94,7 @@
paddr_t try, idxpa, lastidxpa;
int psi;
struct vm_page *pgs;
- int s, tryidx, idx, pgflidx, end, error, free_list;
+ int s, tryidx, idx, pgflidx, end, error, free_list, color;
vm_page_t m;
u_long pagemask;
#ifdef DEBUG
@@ -209,10 +209,11 @@
while (idx < end) {
m = &pgs[idx];
free_list = uvm_page_lookup_freelist(m);
+ color = VM_PGCOLOR_BUCKET(m);
pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
- free_list].pgfl_queues[pgflidx]);
+ free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == m)
@@ -221,8 +222,8 @@
if (tp == NULL)
panic("uvm_pglistalloc: page not on freelist");
#endif
- TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_queues[pgflidx],
- m, pageq);
+ TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
+ color].pgfl_queues[pgflidx], m, pageq);
uvmexp.free--;
if (m->flags & PG_ZERO)
uvmexp.zeropages--;
@@ -239,12 +240,9 @@
error = 0;

out:
- uvm_unlock_fpageq(s);
-
/*
* check to see if we need to generate some free pages waking
* the pagedaemon.
- * XXX: we read uvm.free without locking
*/

if (uvmexp.free < uvmexp.freemin ||
@@ -252,6 +250,8 @@
uvmexp.inactive < uvmexp.inactarg))
wakeup(&uvm.pagedaemon);

+ uvm_unlock_fpageq(s);
+
return (error);
}

@@ -280,9 +280,9 @@
#endif
TAILQ_REMOVE(list, m, pageq);
m->pqflags = PQ_FREE;
- TAILQ_INSERT_TAIL(&uvm.page_free[
- uvm_page_lookup_freelist(m)].pgfl_queues[PGFL_UNKNOWN],
- m, pageq);
+ TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(m)].
+ pgfl_buckets[VM_PGCOLOR_BUCKET(m)].
+ pgfl_queues[PGFL_UNKNOWN], m, pageq);
uvmexp.free++;
if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
uvm.page_idle_zero = vm_page_zero_enable;

--- uvm/uvm_pglist.h.orig Sun Oct 14 22:59:06 2001
+++ uvm/uvm_pglist.h Sun Oct 14 23:27:55 2001
@@ -54,8 +54,12 @@
#define PGFL_ZEROS 1
#define PGFL_NQUEUES 2

-struct pgfreelist {
+struct pgflbucket {
struct pglist pgfl_queues[PGFL_NQUEUES];
+};
+
+struct pgfreelist {
+ struct pgflbucket *pgfl_buckets;
};

#endif /* _PGLIST_H_ */

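With the uvm_pglist.h change above, free-page indexing becomes a
three-level affair. Schematically (all names from the patch):

	uvm.page_free[freelist]		struct pgfreelist
	  .pgfl_buckets[color]		struct pgflbucket, uvmexp.ncolors of them
	    .pgfl_queues[queue]		struct pglist, PGFL_ZEROS or PGFL_UNKNOWN

so a free page pg sits on:

	uvm.page_free[uvm_page_lookup_freelist(pg)].
	    pgfl_buckets[VM_PGCOLOR_BUCKET(pg)].
	    pgfl_queues[(pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN]
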
--- arch/sparc64/sparc64/pmap.c.orig Sun Oct 14 23:46:33 2001
+++ arch/sparc64/sparc64/pmap.c Sun Oct 14 23:46:41 2001
@@ -619,9 +619,7 @@
* set machine page size
*/
uvmexp.pagesize = NBPG;
-#ifdef notyet
uvmexp.ncolors = pmap_calculate_colors();
-#endif
uvm_setpagesize();

/*

--- sys/malloc.h.orig Sun Oct 14 22:34:42 2001
+++ sys/malloc.h Sun Oct 14 22:35:03 2001
@@ -152,6 +152,7 @@
#define M_DIRREM 90 /* Directory entry deleted */
#define M_VMPBUCKET 91 /* VM page buckets */
#define M_VMSWAP 92 /* VM swap structures */
+#define M_VMPAGE 93 /* VM page structures */

#define M_RAIDFRAME 97 /* Raidframe data */
#define M_UVMAMAP 98 /* UVM amap and related */

--- arch/i386/i386/machdep.c.orig Mon Oct 15 00:04:27 2001
+++ arch/i386/i386/machdep.c Mon Oct 15 00:23:30 2001
@@ -1250,6 +1250,9 @@
((*token) ? "\"" : ""), ((*token) ? token : ""),
((*token) ? "\" " : ""), classnames[class]);
}
+
+ if (cachesize > 0)
+ uvm_page_recolor(atop(cachesize));

/* configure the CPU if needed */
if (cpu_setup != NULL)
@@ -2088,6 +2091,13 @@

consinit(); /* XXX SHOULD NOT BE DONE HERE */
/* XXX here, until we can use bios for printfs */
+
+ /*
+ * Start with 2 color bins -- this is just a guess to get us
+ * started. We'll recolor when we determine the largest cache
+ * sizes on the system.
+ */
+ uvmexp.ncolors = 2;

/* call pmap initialization to make new kernel address space */
pmap_bootstrap((vm_offset_t)atdevbase + IOM_SIZE);

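On choosing the color count: the i386 machdep.c hunk just uses
atop(cachesize), i.e. the cache size in pages, which is the right
number of colors for a direct-mapped, physically-indexed cache. For a
set-associative cache the usual figure divides out the associativity
first; a sketch (calc_colors() is mine, not in the patch):

/*
 * Pages whose physical addresses differ by (cache_size / assoc) map
 * to the same cache sets, so that distance, in pages, is the natural
 * number of colors. The result is a power of two if the inputs are.
 */
int
calc_colors(unsigned long cache_size, int assoc, unsigned long page_size)
{
	return ((int)(cache_size / assoc / page_size));
}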