/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <sys/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;

/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT NCPU
static uint_t pcf_fanout = 1; /* Will get changed at boot time */
static uint_t pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to).  This is done to prevent a drain condition
 * from happening.  This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
 * example of this shows up with device interrupts.  The dma buffer is allocated
 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
 * When the memory is returned by the interrupt thread, the pcf_count will be
 * incremented based on the cpu servicing the interrupt.
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))
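
/*
 * For illustration only: a minimal sketch (with a hypothetical name, not
 * one of this file's interfaces) of how a single page is taken from the
 * pcf pools.  Start at the bucket PCF_INDEX() picks, take one lock at a
 * time, and wrap around the array, per the rules described above; the
 * real pcf_decrement_bucket() declared below is more involved.
 */
static int
example_pcf_take_one_page(void)
{
	struct pcf *p = &pcf[PCF_INDEX()];
	uint_t i;

	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		if (p->pcf_count > 0) {
			p->pcf_count--;	/* one page accounted for */
			mutex_exit(&p->pcf_lock);
			return (1);
		}
		mutex_exit(&p->pcf_lock);
		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;	/* wrap to the front */
	}
	return (0);	/* every bucket was empty */
}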

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

#endif

static inline struct page *
find_page(struct vmobject *obj, uoff_t off)
{
	struct page key = {
		.p_offset = off,
	};
	struct page *page;

	page = avl_find(&obj->tree, &key, NULL);

#ifdef VM_STATS
	if (page != NULL)
		pagecnt.pc_find_hit++;
	else
		pagecnt.pc_find_miss++;
#endif

	return (page);
}

#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests.  If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(struct page *, struct vmobject *, uoff_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);

void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first-time setting of pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations.  To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}
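
/*
 * Worked example (illustrative, assuming 4K pages): if availrmem was
 * 524288 pages (2GB) when the delta was computed, the default is
 * 524288 / 25 + btop(4 * 1024 * 1024) = 20971 + 1024 = 21995 pages,
 * i.e. roughly 86MB of memory that may not be locked down.
 */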

void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	/*
	 * Arbitrarily limit the max page_get request
	 * to 1/2 of the page structs we have.
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (uoff_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned.  We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
struct page *
page_lookup(struct vmobject *obj, uoff_t off, se_t se)
{
	return (page_lookup_create(obj, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL.
 */
struct page *
page_lookup_create(
	struct vmobject *obj,
	uoff_t off,
	se_t se,
	struct page *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		es;

	ASSERT(!VMOBJECT_LOCKED(obj));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	vmobject_lock(obj);
top:
	pp = find_page(obj, off);

	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;

		VM_STAT_ADD(page_lookup_cnt[4]);
		if (!page_lock_es(pp, se, obj, P_RECLAIM, es)) {
			VM_STAT_ADD(page_lookup_cnt[5]);
			goto top;
		}

		VM_STAT_ADD(page_lookup_cnt[6]);

		vmobject_unlock(obj);

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.  Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		if (!page_hashin(newpp, obj, off, true)) {
			ASSERT(VMOBJECT_LOCKED(obj));
			panic("page_lookup_create: hashin failed %p %p %llx",
			    newpp, obj, off);
			/*NOTREACHED*/
		}
		ASSERT(VMOBJECT_LOCKED(obj));
		vmobject_unlock(obj);
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		vmobject_unlock(obj);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
struct page *
page_lookup_nowait(struct vmobject *obj, uoff_t off, se_t se)
{
	page_t *pp;

	ASSERT(!VMOBJECT_LOCKED(obj));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	vmobject_lock(obj);
	pp = find_page(obj, off);

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}

	vmobject_unlock(obj);

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
struct page *
page_find(struct vmobject *obj, uoff_t off)
{
	struct page *page;

	ASSERT(!VMOBJECT_LOCKED(obj));
	VM_STAT_ADD(page_find_cnt);

	vmobject_lock(obj);
	page = find_page(obj, off);
	vmobject_unlock(obj);

	ASSERT(page == NULL || PAGE_LOCKED(page) || panicstr);
	return (page);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system.  Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine.
 *
 * Note: This is virtually identical to page_find.  Can we combine them?
 */
struct page *
page_exists(struct vmobject *obj, uoff_t off)
{
	struct page *page;

	ASSERT(!VMOBJECT_LOCKED(obj));
	VM_STAT_ADD(page_exists_cnt);

	vmobject_lock(obj);
	page = find_page(obj, off);
	vmobject_unlock(obj);

	return (page);
}

/*
 * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
 * page_size(szc)) range.  If they exist and ppa is not NULL fill ppa array
 * with these pages locked SHARED.  If necessary reclaim pages from
 * freelist.  Return 1 if contiguous pages exist and 0 otherwise.
 *
 * If we fail to lock pages still return 1 if pages exist and contiguous.
 * But in this case return value is just a hint.  ppa array won't be filled.
 * Caller should initialize ppa[0] as NULL to distinguish return value.
 *
 * Returns 0 if pages don't exist or aren't physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
int
page_exists_physcontig(struct vmobject *obj, uoff_t off, uint_t szc,
    struct page **ppa)
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	uoff_t save_off = off;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(obj != NULL);
	ASSERT(!IS_SWAPFSVP(obj->vnode));
	ASSERT(!VN_ISKAS(obj->vnode));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	vmobject_lock(obj);
	pp = find_page(obj, off);
	vmobject_unlock(obj);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		/*
		 * Also check whether p_pagenum was modified by DR.
		 */
		if (pp->p_szc != pszc || pp->p_vnode != obj->vnode ||
		    pp->p_offset != off || pp->p_pagenum != pfn) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * szc was non zero and vnode and offset matched after we
		 * locked the page it means it can't become free on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc the same as for previous already locked pages
			 * with right identity.  Since this page had correct
			 * szc after we locked it can't get freed or destroyed
			 * and therefore must have the expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != obj->vnode ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop up 4 times across pages to promote page size.
	 * We're extra cautious to promote page size atomically with respect
	 * to everybody else.  But we can probably optimize into 1 loop if
	 * this becomes an issue.
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		/*
		 * Check whether p_pagenum was modified by DR.
		 */
		if (pp->p_pagenum != pfn) {
			page_unlock(pp);
			break;
		}
		if (pp->p_vnode != obj->vnode ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * drop the rest of the locks and return because this page
		 * must be already reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		VERIFY(pp->p_object == obj);
		ASSERT(pp->p_vnode == obj->vnode);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vn_has_cached_data(obj->vnode));
	return (1);
}
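
/*
 * For illustration only (hypothetical caller, not part of this file):
 * using page_exists_physcontig() with the ppa[0] == NULL convention
 * described above, to tell a fully locked result apart from the
 * "exists but could not be locked" hint.
 */
static int
example_check_contig(struct vmobject *obj, uoff_t off, uint_t szc)
{
	struct page *ppa[32];	/* assumes page_get_pagecnt(szc) + 1 <= 32 */
	pgcnt_t i;

	ppa[0] = NULL;		/* make the hint-only return detectable */
	if (!page_exists_physcontig(obj, off, szc, ppa))
		return (0);	/* pages don't exist or aren't contiguous */
	if (ppa[0] == NULL)
		return (1);	/* pages exist, but could not be locked */
	/* ppa[] holds the pages locked SE_SHARED; drop the locks again */
	for (i = 0; ppa[i] != NULL; i++)
		page_unlock(ppa[i]);
	return (1);
}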

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code.  Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(struct vmobject *obj, uoff_t off, uint_t *szc)
{
	page_t *pp;
	int rc = 0;

	ASSERT(!VMOBJECT_LOCKED(obj));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	vmobject_lock(obj);
	pp = find_page(obj, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	vmobject_unlock(obj);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system.  In very few places is a really accurate 'freemem'
 * needed.  To avoid contention of the lock protecting the
 * single freemem, it was spread out into NCPU buckets.  Set_freemem
 * sets freemem to the total of all NCPU buckets.  It is called from
 * clock() on each TICK.
 */
void
set_freemem()
{
	struct pcf *p;
	ulong_t t;
	uint_t i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex.  It's not that
	 * critical if we miss a tick or two.  This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf *p;
	ulong_t t;
	uint_t i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf *p;
	uint_t i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf *p;
	uint_t i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}
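
/*
 * For illustration only (hypothetical caller): the symmetric calling
 * convention above in practice.  Declare the need, give pageout a
 * chance to free memory, then withdraw the request.
 */
static void
example_wait_for_pages(spgcnt_t npages)
{
	page_needfree(npages);
	delay(hz);		/* wait a bit */
	page_needfree(-npages);
}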

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree.  We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist.  However, we can block all PG_WAIT
 * allocations until memory becomes available.  The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t fm;
	uint_t i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Normal priority allocations.
	 */
	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
		return (freemem >= npages + throttlefree);
	}

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and is pageout dispensated
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve.  In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	for (;;) {
		fm = 0;
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		for (i = 0; i < pcf_fanout; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		if (freemem >= npages + tf) {
			mutex_exit(&new_freemem_lock);
			break;
		}
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(pgcnt_t npages, uint_t flags)
{
	pgcnt_t total;
	uint_t i;
	struct pcf *p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	if (pcf_decrement_bucket(npages) ||
	    pcf_decrement_multiple(&total, npages, 0))
		return (1);

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (npages < total).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks.  This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}
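
/*
 * For illustration only (hypothetical caller): page_create_wait()
 * accounts for pages before they are pulled off the freelists, so a
 * caller that cannot use everything it reserved must return the
 * balance with page_create_putback() below, as page_alloc_pages()
 * does on its failure paths.
 */
static int
example_reserve_pages(pgcnt_t npages)
{
	if (!page_create_wait(npages, PG_WAIT))
		return (ENOMEM);	/* nothing was accounted for */
	/* ... try to take npages pages off the freelists here ... */
	/* on failure, give the accounting back: */
	page_create_putback((spgcnt_t)npages);
	return (0);
}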

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf *p;
	pgcnt_t lump;
	uint_t *which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so lets spread
	 * the wealth around.
	 */
	lump = roundup(npages, pcf_fanout) / pcf_fanout;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting.  Another bucket
			 * may have woken it up by now.  If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters.  Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int i;
	struct pcf *p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static struct page *
page_create_get_something(struct vmobject *obj, uoff_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up.  Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists.  This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stage.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists.  This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.  This
	 * could happen over and over.  We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not.  When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool.  This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked.  Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page.  This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__i386) || defined(__amd64)
	flags = page_create_update_flags_x86(flags);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(obj, off, seg, vaddr, PAGESIZE, flags,
		    lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(obj, off, seg, vaddr, flags,
			    lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize.  Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < pcf_fanout; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with.  The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * we go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
	/*NOTREACHED*/
}

#ifdef DEBUG
uint32_t pg_alloc_pgs_mtbf = 0;
#endif

/*
 * Used for large page support.  It will attempt to allocate
 * a large page(s) off the freelist.
 *
 * Returns nonzero on failure.
 */
int
page_alloc_pages(struct vmobject *obj, struct seg *seg, caddr_t addr,
    struct page **basepp, struct page **ppa, uint_t szc, int anypgsz,
    int pgflags)
{
	pgcnt_t		npgs, curnpgs, totpgs;
	size_t		pgsz;
	page_t		*pplist = NULL, *pp;
	int		err = 0;
	lgrp_t		*lgrp;

	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
	ASSERT(pgflags == 0 || pgflags == PG_LOCAL);

	/*
	 * Check if system heavily prefers local large pages over remote
	 * on systems with multiple lgroups.
	 */
	if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) {
		pgflags = PG_LOCAL;
	}

	VM_STAT_ADD(alloc_pages[0]);

#ifdef DEBUG
	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
		return (ENOMEM);
	}
#endif

	/*
	 * One must be NULL but not both.
	 * And one must be non NULL but not both.
	 */
	ASSERT(basepp != NULL || ppa != NULL);
	ASSERT(basepp == NULL || ppa == NULL);

#if defined(__i386) || defined(__amd64)
	while (page_chk_freelist(szc) == 0) {
		VM_STAT_ADD(alloc_pages[8]);
		if (anypgsz == 0 || --szc == 0)
			return (ENOMEM);
	}
#endif

	pgsz = page_get_pagesize(szc);
	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;

	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);

	(void) page_create_wait(npgs, PG_WAIT);

	while (npgs && szc) {
		lgrp = lgrp_mem_choose(seg, addr, pgsz);
		if (pgflags == PG_LOCAL) {
			pp = page_get_freelist(obj, 0, seg, addr, pgsz, pgflags,
			    lgrp);
			if (pp == NULL) {
				pp = page_get_freelist(obj, 0, seg, addr, pgsz,
				    0, lgrp);
			}
		} else {
			pp = page_get_freelist(obj, 0, seg, addr, pgsz, 0, lgrp);
		}
		if (pp != NULL) {
			VM_STAT_ADD(alloc_pages[1]);
			page_list_concat(&pplist, &pp);
			ASSERT(npgs >= curnpgs);
			npgs -= curnpgs;
		} else if (anypgsz) {
			VM_STAT_ADD(alloc_pages[2]);
			szc--;
			pgsz = page_get_pagesize(szc);
			curnpgs = pgsz >> PAGESHIFT;
		} else {
			VM_STAT_ADD(alloc_pages[3]);
			ASSERT(npgs == totpgs);
			page_create_putback(npgs);
			return (ENOMEM);
		}
	}
	if (szc == 0) {
		VM_STAT_ADD(alloc_pages[4]);
		ASSERT(npgs != 0);
		page_create_putback(npgs);
		err = ENOMEM;
	} else if (basepp != NULL) {
		ASSERT(npgs == 0);
		ASSERT(ppa == NULL);
		*basepp = pplist;
	}

	npgs = totpgs - npgs;
	pp = pplist;

	/*
	 * Clear the free and age bits.  Also if we were passed in a ppa then
	 * fill it in with all the constituent pages from the large page.  But
	 * if we failed to allocate all the pages just free what we got.
	 */
	while (npgs != 0) {
		ASSERT(PP_ISFREE(pp));
		ASSERT(PP_ISAGED(pp));
		if (ppa != NULL || err != 0) {
			if (err == 0) {
				VM_STAT_ADD(alloc_pages[5]);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_sub(&pplist, pp);
				*ppa++ = pp;
				npgs--;
			} else {
				VM_STAT_ADD(alloc_pages[6]);
				ASSERT(pp->p_szc != 0);
				curnpgs = page_get_pagecnt(pp->p_szc);
				page_list_break(&pp, &pplist, curnpgs);
				page_list_add_pages(pp, 0);
				page_create_putback(curnpgs);
				ASSERT(npgs >= curnpgs);
				npgs -= curnpgs;
			}
			pp = pplist;
		} else {
			VM_STAT_ADD(alloc_pages[7]);
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			pp = pp->p_next;
			npgs--;
		}
	}
	return (err);
}
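
/*
 * For illustration only (hypothetical caller): the two output forms of
 * page_alloc_pages().  Exactly one of basepp and ppa may be non-NULL,
 * per the ASSERTs above; basepp returns the large page as a linked
 * list still marked free, while ppa receives each constituent page
 * with its free and age bits cleared.
 */
static int
example_alloc_large(struct vmobject *obj, struct seg *seg, caddr_t addr,
    struct page **ppa, uint_t szc)
{
	/* constituent-page form; no smaller-size fallback, no PG_LOCAL */
	return (page_alloc_pages(obj, seg, addr, NULL, ppa, szc, 0, 0));
}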

/*
 * Get a single large page off of the freelists, and set it up for use.
 * Number of bytes requested must be a supported page size.
 *
 * Note that this call may fail even if there is sufficient
 * memory available or PG_WAIT is set, so the caller must
 * be willing to fall back on page_create_va(), block and retry,
 * or fail the requester.
 */
struct page *
page_create_va_large(struct vmobject *obj, uoff_t off, size_t bytes,
    uint_t flags, struct seg *seg, caddr_t vaddr, void *arg)
{
	pgcnt_t		npages;
	page_t		*pp;
	page_t		*rootpp;
	lgrp_t		*lgrp;
	lgrp_id_t	*lgrpid = (lgrp_id_t *)arg;

	ASSERT(obj != NULL);

	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
	/* but no others */

	ASSERT((flags & PG_EXCL) == PG_EXCL);

	npages = btop(bytes);

	if (!kcage_on || panicstr) {
		/*
		 * Cage is OFF, or we are single threaded in
		 * panic, so make everything a RELOC request.
		 */
		flags &= ~PG_NORELOC;
	}

	/*
	 * Make sure there's adequate physical memory available.
	 * Note: PG_WAIT is ignored here.
	 */
	if (freemem <= throttlefree + npages) {
		VM_STAT_ADD(page_create_large_cnt[1]);
		return (NULL);
	}

	/*
	 * If cage is on, dampen draw from cage when available
	 * cage space is low.
	 */
	if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) &&
	    kcage_freemem < kcage_throttlefree + npages) {

		/*
		 * The cage is on, the caller wants PG_NORELOC
		 * pages and available cage memory is very low.
		 * Call kcage_create_throttle() to attempt to
		 * control demand on the cage.
		 */
		if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
			VM_STAT_ADD(page_create_large_cnt[2]);
			return (NULL);
		}
	}

	if (!pcf_decrement_bucket(npages) &&
	    !pcf_decrement_multiple(NULL, npages, 1)) {
		VM_STAT_ADD(page_create_large_cnt[4]);
		return (NULL);
	}

	/*
	 * This is where this function behaves fundamentally differently
	 * than page_create_va(); since we're intending to map the page
	 * with a single TTE, we have to get it as a physically contiguous
	 * hardware pagesize chunk.  If we can't, we fail.
	 */
	if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
	    LGRP_EXISTS(lgrp_table[*lgrpid]))
		lgrp = lgrp_table[*lgrpid];
	else
		lgrp = lgrp_mem_choose(seg, vaddr, bytes);

	if ((rootpp = page_get_freelist(&kvp.v_object, off, seg, vaddr,
	    bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
		page_create_putback(npages);
		VM_STAT_ADD(page_create_large_cnt[5]);
		return (NULL);
	}

	/*
	 * If we got the page with the wrong mtype give it back.  This is a
	 * workaround for CR 6249718.  When CR 6249718 is fixed we never get
	 * inside the "if" and the workaround becomes just a nop.
	 */
	if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
		page_list_add_pages(rootpp, 0);
		page_create_putback(npages);
		VM_STAT_ADD(page_create_large_cnt[6]);
		return (NULL);
	}

	/*
	 * If satisfying this request has left us with too little
	 * memory, start the wheels turning to get some back.  The
	 * first clause of the test prevents waking up the pageout
	 * daemon in situations where it would decide that there's
	 * nothing to do.
	 */
	if (nscan < desscan && freemem < minfree) {
		cv_signal(&proc_pageout->p_cv);
	}

	pp = rootpp;
	while (npages--) {
		ASSERT(PAGE_EXCL(pp));
		VERIFY(pp->p_object == NULL);
		ASSERT(pp->p_vnode == NULL);
		ASSERT(!hat_page_is_mapped(pp));
		PP_CLRFREE(pp);
		PP_CLRAGED(pp);
		if (!page_hashin(pp, obj, off, false))
			panic("page_create_large: hashin failed: page %p",
			    (void *)pp);
		page_io_lock(pp);
		off += PAGESIZE;
		pp = pp->p_next;
	}

	VM_STAT_ADD(page_create_large_cnt[0]);
	return (rootpp);
}
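
/*
 * For illustration only (hypothetical caller): since this call may
 * fail even when memory is available or PG_WAIT is set, as noted
 * above, a caller typically retries or falls back to page_create_va()
 * and maps the result with small pages.
 */
static struct page *
example_create_large_or_small(struct vmobject *obj, uoff_t off, size_t bytes,
    struct seg *seg, caddr_t vaddr)
{
	struct page *pp;

	pp = page_create_va_large(obj, off, bytes, PG_EXCL, seg, vaddr, NULL);
	if (pp == NULL)
		pp = page_create_va(obj, off, bytes, PG_EXCL | PG_WAIT,
		    seg, vaddr);
	return (pp);
}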

/*
 * Create enough pages for "bytes" worth of data starting at
 * "off" in "obj".
 *
 *	Where flag must be one of:
 *
 *		PG_EXCL:	Exclusive create (fail if any page already
 *				exists in the page cache) which does not
 *				wait for memory to become available.
 *
 *		PG_WAIT:	Non-exclusive create which can wait for
 *				memory to become available.
 *
 *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
 *				(Not Supported)
 *
 * A doubly linked list of pages is returned to the caller.  Each page
 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
 * lock.
 *
 * Unable to change the parameters to page_create() in a minor release,
 * we renamed page_create() to page_create_va(), and changed all known calls
 * from page_create() to page_create_va().
 *
 * We should consider ditching this renaming by replacing all the strings
 * "page_create_va" with "page_create".
 *
 * NOTE: There is a copy of this interface as page_create_io() in
 *	 i86/vm/vm_machdep.c.  Any bugs fixed here should be applied
 *	 there.
 */
2077 struct page *
2078 page_create_va(struct vmobject *obj, uoff_t off, size_t bytes, uint_t flags,
2079 struct seg *seg, caddr_t vaddr)
2081 page_t *plist = NULL;
2082 pgcnt_t npages;
2083 pgcnt_t found_on_free = 0;
2084 pgcnt_t pages_req;
2085 page_t *npp = NULL;
2086 struct pcf *p;
2087 lgrp_t *lgrp;
2089 ASSERT(bytes != 0 && obj != NULL);
2091 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
2092 panic("page_create: invalid flags");
2093 /*NOTREACHED*/
2095 ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2096 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
2097 /* but no others */
2099 pages_req = npages = btopr(bytes);
2101 * Try to see whether request is too large to *ever* be
2102 * satisfied, in order to prevent deadlock. We arbitrarily
2103 * decide to limit maximum size requests to max_page_get.
2105 if (npages >= max_page_get) {
2106 if ((flags & PG_WAIT) == 0) {
2107 return (NULL);
2108 } else {
2109 cmn_err(CE_WARN,
2110 "Request for too much kernel memory "
2111 "(%lu bytes), will hang forever", bytes);
2112 for (;;)
2113 delay(1000000000);
2117 if (!kcage_on || panicstr) {
2119 * Cage is OFF, or we are single threaded in
2120 * panic, so make everything a RELOC request.
2122 flags &= ~PG_NORELOC;
2125 if (freemem <= throttlefree + npages)
2126 if (!page_create_throttle(npages, flags))
2127 return (NULL);
2130 * If cage is on, dampen draw from cage when available
2131 * cage space is low.
2133 if ((flags & PG_NORELOC) &&
2134 kcage_freemem < kcage_throttlefree + npages) {
2137 * The cage is on, the caller wants PG_NORELOC
2138 * pages and available cage memory is very low.
2139 * Call kcage_create_throttle() to attempt to
2140 * control demand on the cage.
2142 if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
2143 return (NULL);
2146 VM_STAT_ADD(page_create_cnt[0]);
2148 if (!pcf_decrement_bucket(npages)) {
2150 * Have to look harder. If npages is greater than
2151 * one, then we might have to coalesce the counters.
2153 * Go wait. We come back having accounted
2154 * for the memory.
2156 VM_STAT_ADD(page_create_cnt[1]);
2157 if (!page_create_wait(npages, flags)) {
2158 VM_STAT_ADD(page_create_cnt[2]);
2159 return (NULL);
2164 * If satisfying this request has left us with too little
2165 * memory, start the wheels turning to get some back. The
2166 * first clause of the test prevents waking up the pageout
2167 * daemon in situations where it would decide that there's
2168 * nothing to do.
2170 if (nscan < desscan && freemem < minfree) {
2171 cv_signal(&proc_pageout->p_cv);
2175 * Loop around collecting the requested number of pages.
2176 * Most of the time, we have to `create' a new page. With
2177 * this in mind, pull the page off the free list before
2178 * getting the hash lock. This will minimize the hash
2179 * lock hold time, nesting, and the like. If it turns
2180 * out we don't need the page, we put it back at the end.
2182 while (npages--) {
2183 page_t *pp;
2185 top:
2186 ASSERT(!VMOBJECT_LOCKED(obj));
2188 if (npp == NULL) {
2190 * Try to get a page from the freelist (ie,
2191 * a page with no [obj, off] tag). If that
2192 * fails, use the cachelist.
2194 * During the first attempt at both the free
2195 * and cache lists we try for the correct color.
2198 * XXXX - how do we deal with virtually indexed
2199 * caches and colors?
2201 VM_STAT_ADD(page_create_cnt[4]);
2203 * Get lgroup to allocate next page of shared memory
2204 * from and use it to specify where to allocate
2205 * the physical memory
2207 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2208 npp = page_get_freelist(obj, off, seg, vaddr, PAGESIZE,
2209 flags | PG_MATCH_COLOR, lgrp);
2210 if (npp == NULL) {
2211 npp = page_get_cachelist(obj, off, seg, vaddr,
2212 flags | PG_MATCH_COLOR,
2213 lgrp);
2214 if (npp == NULL) {
2215 npp = page_create_get_something(
2216 obj, off, seg, vaddr,
2217 flags & ~PG_MATCH_COLOR);
2220 if (PP_ISAGED(npp) == 0) {
2222 * Since this page came from the
2223 * cachelist, we must destroy the
2224 * old vnode association.
2226 page_hashout(npp, false);
2232 * We own this page!
2234 ASSERT(PAGE_EXCL(npp));
2235 VERIFY(npp->p_object == NULL);
2236 ASSERT(npp->p_vnode == NULL);
2237 ASSERT(!hat_page_is_mapped(npp));
2238 PP_CLRFREE(npp);
2239 PP_CLRAGED(npp);
2242 * Here we have a page in our hot little mitts and are
2243 * just waiting to stuff it on the appropriate lists.
2244 * Get the mutex and check to see if it really does
2245 * not exist.
2247 vmobject_lock(obj);
2248 pp = find_page(obj, off);
2249 if (pp == NULL) {
2250 VM_STAT_ADD(page_create_new);
2251 pp = npp;
2252 npp = NULL;
2253 if (!page_hashin(pp, obj, off, true)) {
2255 * Since we hold the vnode's page cache
2256 * mutex and just searched for this page,
2257 * page_hashin had better not fail. If it
2258 * does, that means some thread did not
2259 * follow the page hash mutex rules. Panic
2260 * now and get it over with. As usual, go
2261 * down holding all the locks.
2263 ASSERT(VMOBJECT_LOCKED(obj));
2264 panic("page_create: "
2265 "hashin failed %p %p %llx", pp, obj, off);
2266 /*NOTREACHED*/
2268 ASSERT(VMOBJECT_LOCKED(obj));
2269 vmobject_unlock(obj);
2272 * Hat layer locking need not be done to set
2273 * the following bits since the page is not hashed
2274 * and was on the free list (i.e., had no mappings).
2276 * Set the reference bit to protect
2277 * against immediate pageout
2279 * XXXmh modify freelist code to set reference
2280 * bit so we don't have to do it here.
2282 page_set_props(pp, P_REF);
2283 found_on_free++;
2284 } else {
2285 VM_STAT_ADD(page_create_exists);
2286 if (flags & PG_EXCL) {
2288 * Found an existing page, and the caller
2289 * wanted all new pages. Undo all of the work
2290 * we have done.
2292 vmobject_unlock(obj);
2293 while (plist != NULL) {
2294 pp = plist;
2295 page_sub(&plist, pp);
2296 page_io_unlock(pp);
2297 /* large pages should not end up here */
2298 ASSERT(pp->p_szc == 0);
2300 VN_DISPOSE(pp, B_INVAL, 0, kcred);
2302 VM_STAT_ADD(page_create_found_one);
2303 goto fail;
2305 ASSERT(flags & PG_WAIT);
2306 if (!page_lock(pp, SE_EXCL, obj, P_NO_RECLAIM)) {
2308 * Start all over again if we blocked trying
2309 * to lock the page.
2311 vmobject_unlock(obj);
2312 VM_STAT_ADD(page_create_page_lock_failed);
2313 goto top;
2315 vmobject_unlock(obj);
2317 if (PP_ISFREE(pp)) {
2318 ASSERT(PP_ISAGED(pp) == 0);
2319 VM_STAT_ADD(pagecnt.pc_get_cache);
2320 page_list_sub(pp, PG_CACHE_LIST);
2321 PP_CLRFREE(pp);
2322 found_on_free++;
2327 * Got a page! It is locked. Acquire the i/o
2328 * lock since we are going to use the p_next and
2329 * p_prev fields to link the requested pages together.
2331 page_io_lock(pp);
2332 page_add(&plist, pp);
2333 plist = plist->p_next;
2334 off += PAGESIZE;
2335 vaddr += PAGESIZE;
2338 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
2339 fail:
2340 if (npp != NULL) {
2342 * Did not need this page after all.
2343 * Put it back on the free list.
2345 VM_STAT_ADD(page_create_putbacks);
2346 PP_SETFREE(npp);
2347 PP_SETAGED(npp);
2348 npp->p_offset = (uoff_t)-1;
2349 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
2350 page_unlock(npp);
2353 ASSERT(pages_req >= found_on_free);
2356 uint_t overshoot = (uint_t)(pages_req - found_on_free);
2358 if (overshoot) {
2359 VM_STAT_ADD(page_create_overshoot);
2360 p = &pcf[PCF_INDEX()];
2361 mutex_enter(&p->pcf_lock);
2362 if (p->pcf_block) {
2363 p->pcf_reserve += overshoot;
2364 } else {
2365 p->pcf_count += overshoot;
2366 if (p->pcf_wait) {
2367 mutex_enter(&new_freemem_lock);
2368 if (freemem_wait) {
2369 cv_signal(&freemem_cv);
2370 p->pcf_wait--;
2371 } else {
2372 p->pcf_wait = 0;
2374 mutex_exit(&new_freemem_lock);
2377 mutex_exit(&p->pcf_lock);
2378 /* freemem is approximate, so this test OK */
2379 if (!p->pcf_block)
2380 freemem += overshoot;
2384 return (plist);
2388 * One or more constituent pages of this large page has been marked
2389 * toxic. Simply demote the large page to PAGESIZE pages and let
2390 * page_free() handle it. This routine should only be called by
2391 * large page free routines (page_free_pages() and page_destroy_pages()).
2392 * All pages are locked SE_EXCL and have already been marked free.
2394 static void
2395 page_free_toxic_pages(page_t *rootpp)
2397 page_t *tpp;
2398 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc);
2399 uint_t szc = rootpp->p_szc;
2401 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
2402 ASSERT(tpp->p_szc == szc);
2403 ASSERT((PAGE_EXCL(tpp) &&
2404 !page_iolock_assert(tpp)) || panicstr);
2405 tpp->p_szc = 0;
2408 while (rootpp != NULL) {
2409 tpp = rootpp;
2410 page_sub(&rootpp, tpp);
2411 ASSERT(PP_ISFREE(tpp));
2412 PP_CLRFREE(tpp);
2413 page_free(tpp, 1);
2418 * Put page on the "free" list.
2419 * The free list is really two lists maintained by
2420 * the PSM of whatever machine we happen to be on.
2422 void
2423 page_free(page_t *pp, int dontneed)
2425 struct pcf *p;
2426 uint_t pcf_index;
2428 ASSERT((PAGE_EXCL(pp) &&
2429 !page_iolock_assert(pp)) || panicstr);
2431 if (PP_ISFREE(pp)) {
2432 panic("page_free: page %p is free", (void *)pp);
2435 if (pp->p_szc != 0) {
2436 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2437 PP_ISKAS(pp)) {
2438 panic("page_free: anon or kernel "
2439 "or no vnode large page %p", (void *)pp);
2441 page_demote_vp_pages(pp);
2442 ASSERT(pp->p_szc == 0);
2446 * The page_struct_lock need not be acquired to examine these
2447 * fields since the page has an "exclusive" lock.
2449 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
2450 pp->p_slckcnt != 0) {
2451 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
2452 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt,
2453 pp->p_cowcnt, pp->p_slckcnt);
2454 /*NOTREACHED*/
2457 ASSERT(!hat_page_getshare(pp));
2459 PP_SETFREE(pp);
2460 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
2461 !hat_ismod(pp));
2462 page_clr_all_props(pp);
2463 ASSERT(!hat_page_getshare(pp));
2466 * Now we add the page to the head of the free list.
2467 * But if this page is associated with a paged vnode
2468 * then we adjust the head forward so that the page is
2469 * effectively at the end of the list.
2471 if (pp->p_vnode == NULL) {
2473 * Page has no identity, put it on the free list.
2475 PP_SETAGED(pp);
2476 pp->p_offset = (uoff_t)-1;
2477 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2478 VM_STAT_ADD(pagecnt.pc_free_free);
2479 } else {
2480 PP_CLRAGED(pp);
2482 if (!dontneed) {
2483 /* move it to the tail of the list */
2484 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2486 VM_STAT_ADD(pagecnt.pc_free_cache);
2487 } else {
2488 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2490 VM_STAT_ADD(pagecnt.pc_free_dontneed);
2493 page_unlock(pp);
2496 * Now do the `freemem' accounting.
2498 pcf_index = PCF_INDEX();
2499 p = &pcf[pcf_index];
2501 mutex_enter(&p->pcf_lock);
2502 if (p->pcf_block) {
2503 p->pcf_reserve += 1;
2504 } else {
2505 p->pcf_count += 1;
2506 if (p->pcf_wait) {
2507 mutex_enter(&new_freemem_lock);
2509 * Check to see if some other thread
2510 * is actually waiting. Another bucket
2511 * may have woken it up by now. If there
2512 * are no waiters, then set our pcf_wait
2513 * count to zero to avoid coming in here
2514 * next time. Also, since only one page
2515 * was put on the free list, just wake
2516 * up one waiter.
2518 if (freemem_wait) {
2519 cv_signal(&freemem_cv);
2520 p->pcf_wait--;
2521 } else {
2522 p->pcf_wait = 0;
2524 mutex_exit(&new_freemem_lock);
2527 mutex_exit(&p->pcf_lock);
2529 /* freemem is approximate, so this test OK */
2530 if (!p->pcf_block)
2531 freemem += 1;
2535 * Put page on the "free" list during initial startup.
2536 * This happens during initial single threaded execution.
2538 void
2539 page_free_at_startup(page_t *pp)
2541 struct pcf *p;
2542 uint_t pcf_index;
2544 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
2545 VM_STAT_ADD(pagecnt.pc_free_free);
2548 * Now do the `freemem' accounting.
2550 pcf_index = PCF_INDEX();
2551 p = &pcf[pcf_index];
2553 ASSERT(p->pcf_block == 0);
2554 ASSERT(p->pcf_wait == 0);
2555 p->pcf_count += 1;
2557 /* freemem is approximate, so this is OK */
2558 freemem += 1;
2561 void
2562 page_free_pages(page_t *pp)
2564 page_t *tpp, *rootpp = NULL;
2565 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
2566 pgcnt_t i;
2567 uint_t szc = pp->p_szc;
2569 VM_STAT_ADD(pagecnt.pc_free_pages);
2571 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2572 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2573 panic("page_free_pages: not root page %p", (void *)pp);
2574 /*NOTREACHED*/
2577 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2578 ASSERT((PAGE_EXCL(tpp) &&
2579 !page_iolock_assert(tpp)) || panicstr);
2580 if (PP_ISFREE(tpp)) {
2581 panic("page_free_pages: page %p is free", (void *)tpp);
2582 /*NOTREACHED*/
2584 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
2585 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) {
2586 panic("page_free_pages %p", (void *)tpp);
2587 /*NOTREACHED*/
2590 ASSERT(!hat_page_getshare(tpp));
2591 VERIFY(tpp->p_object == NULL);
2592 ASSERT(tpp->p_vnode == NULL);
2593 ASSERT(tpp->p_szc == szc);
2595 PP_SETFREE(tpp);
2596 page_clr_all_props(tpp);
2597 PP_SETAGED(tpp);
2598 tpp->p_offset = (uoff_t)-1;
2599 ASSERT(tpp->p_next == tpp);
2600 ASSERT(tpp->p_prev == tpp);
2601 page_list_concat(&rootpp, &tpp);
2603 ASSERT(rootpp == pp);
2605 page_list_add_pages(rootpp, 0);
2606 page_create_putback(pgcnt);
2609 int free_pages = 1;
2612 * This routine attempts to return pages to the cachelist via page_release().
2613 * It does not *have* to be successful in all cases, since the pageout scanner
2614 * will catch any pages it misses. It does need to be fast and not introduce
2615 * too much overhead.
2617 * If a page isn't found on the unlocked sweep of the page_hash bucket, we
2618 * don't lock and retry. This is ok, since the page scanner will eventually
2619 * find any page we miss in free_vp_pages().
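*
* An illustrative sketch (hypothetical caller, not from the original
* comment): a file system done with a range it read ahead might call
*
*	free_vp_pages(&vp->v_object, off, len);
*
* and rely on the pageout scanner to catch anything this call misses.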
2621 void
2622 free_vp_pages(struct vmobject *obj, uoff_t off, size_t len)
2624 page_t *pp;
2625 uoff_t eoff;
2626 extern int swap_in_range(vnode_t *, uoff_t, size_t);
2628 eoff = off + len;
2630 if (free_pages == 0)
2631 return;
2632 if (swap_in_range(obj->vnode, off, len))
2633 return;
2635 for (; off < eoff; off += PAGESIZE) {
2638 * find the page using a fast, but inexact search. It'll be OK
2639 * if a few pages slip through the cracks here.
2641 pp = page_exists(obj, off);
2644 * If we didn't find the page (it may not exist), or if the page
2645 * is free, still appears to be in use (shared), or can't be locked,
2646 * just give up.
2648 if (pp == NULL ||
2649 PP_ISFREE(pp) ||
2650 page_share_cnt(pp) > 0 ||
2651 !page_trylock(pp, SE_EXCL))
2652 continue;
2655 * Once we have locked pp, verify that it's still the
2656 * correct page and not already free
2658 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
2659 if (pp->p_vnode != obj->vnode || pp->p_offset != off ||
2660 PP_ISFREE(pp)) {
2661 page_unlock(pp);
2662 continue;
2666 * try to release the page...
2668 (void) page_release(pp, 1);
2673 * Reclaim the given page from the free list.
2674 * If pp is part of a large page, only the given constituent page is reclaimed
2675 * and the large page it belonged to will be demoted. This can only happen
2676 * if the page is not on the cachelist.
2678 * Returns 1 on success or 0 on failure.
2680 * The page is unlocked if it can't be reclaimed (when freemem == 0).
2681 * If `obj' is non-NULL, its lock will be dropped and re-acquired if
2682 * the routine must wait while freemem is 0.
2684 * As it turns out, boot_getpages() does this. It picks a page,
2685 * based on where OBP mapped in some address, gets its pfn, searches
2686 * the memsegs, locks the page, then pulls it off the free list!
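*
* An illustrative sketch (not from the original comment), assuming the
* caller found `pp' free in `obj' and holds both the object lock and an
* SE_EXCL page lock:
*
*	if (PP_ISFREE(pp) && !page_reclaim(pp, obj))
*		goto retry;	(page lock was dropped; `obj' re-locked)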
2689 page_reclaim(struct page *pp, struct vmobject *obj)
2691 struct pcf *p;
2692 struct cpu *cpup;
2693 int enough;
2694 uint_t i;
2696 ASSERT(obj != NULL ? VMOBJECT_LOCKED(obj) : 1);
2697 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2700 * If `freemem' is 0, we cannot reclaim this page from the
2701 * freelist, so release every lock we might hold: the page,
2702 * and the vnode page lock before blocking.
2704 * The only way `freemem' can become 0 while there are pages
2705 * marked free (have their p->p_free bit set) is when the
2706 * system is low on memory and doing a page_create(). Once
2707 * page_create() starts acquiring pages, it must be guaranteed
2708 * to get all that it needs, since `freemem' was already
2709 * decreased by the requested amount. So, we need to release
2710 * this page, and let page_create() have it.
2712 * Since `freemem' being zero is not supposed to happen, just
2713 * use the usual hash stuff as a starting point. If that bucket
2714 * is empty, then assume the worst, and start at the beginning
2715 * of the pcf array. If we always start at the beginning
2716 * when acquiring more than one pcf lock, there won't be any
2717 * deadlock problems.
2720 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2722 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) {
2723 pcf_acquire_all();
2724 goto page_reclaim_nomem;
2727 enough = pcf_decrement_bucket(1);
2729 if (!enough) {
2730 VM_STAT_ADD(page_reclaim_zero);
2732 * Check again. It's possible that some other thread
2733 * could have been right behind us, and added one
2734 * to a list somewhere. Acquire each of the pcf locks
2735 * until we find a page.
2737 p = pcf;
2738 for (i = 0; i < pcf_fanout; i++) {
2739 mutex_enter(&p->pcf_lock);
2740 if (p->pcf_count >= 1) {
2741 p->pcf_count -= 1;
2743 * freemem is not protected by any lock. Thus,
2744 * we cannot have any assertion containing
2745 * freemem here.
2747 freemem -= 1;
2748 enough = 1;
2749 break;
2751 p++;
2754 if (!enough) {
2755 page_reclaim_nomem:
2757 * We really can't have page `pp'.
2758 * Time for the no-memory dance with
2759 * page_free(). This is just like
2760 * page_create_wait(). Plus the added
2761 * attraction of releasing the vnode page lock.
2762 * Page_unlock() will wakeup any thread
2763 * waiting around for this page.
2765 if (obj != NULL) {
2766 VM_STAT_ADD(page_reclaim_zero_locked);
2767 vmobject_unlock(obj);
2769 page_unlock(pp);
2772 * get this before we drop all the pcf locks.
2774 mutex_enter(&new_freemem_lock);
2776 p = pcf;
2777 for (i = 0; i < pcf_fanout; i++) {
2778 p->pcf_wait++;
2779 mutex_exit(&p->pcf_lock);
2780 p++;
2783 freemem_wait++;
2784 cv_wait(&freemem_cv, &new_freemem_lock);
2785 freemem_wait--;
2787 mutex_exit(&new_freemem_lock);
2789 if (obj != NULL)
2790 vmobject_lock(obj);
2792 return (0);
2796 * The pcf accounting has been done and
2797 * none of the pcf_wait flags were set;
2798 * drop the locks and continue on.
2800 while (p >= pcf) {
2801 mutex_exit(&p->pcf_lock);
2802 p--;
2807 VM_STAT_ADD(pagecnt.pc_reclaim);
2810 * page_list_sub will handle the case where pp is a large page.
2811 * It's possible that the page was promoted while on the freelist
2813 if (PP_ISAGED(pp)) {
2814 page_list_sub(pp, PG_FREE_LIST);
2815 } else {
2816 page_list_sub(pp, PG_CACHE_LIST);
2820 * clear the p_free & p_age bits since this page is no longer
2821 * on the free list. Notice that there is a brief window during
2822 * which a page is marked free but is not on any list.
2824 * Set the reference bit to protect against immediate pageout.
2826 PP_CLRFREE(pp);
2827 PP_CLRAGED(pp);
2828 page_set_props(pp, P_REF);
2830 CPU_STATS_ENTER_K();
2831 cpup = CPU; /* get cpup now that CPU cannot change */
2832 CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
2833 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
2834 CPU_STATS_EXIT_K();
2835 ASSERT(pp->p_szc == 0);
2837 return (1);
2841 * Destroy identity of the page and put it back on
2842 * the page free list. Assumes that the caller has
2843 * acquired the "exclusive" lock on the page.
2845 void
2846 page_destroy(page_t *pp, int dontfree)
2848 ASSERT((PAGE_EXCL(pp) &&
2849 !page_iolock_assert(pp)) || panicstr);
2850 ASSERT(pp->p_slckcnt == 0 || panicstr);
2852 if (pp->p_szc != 0) {
2853 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2854 PP_ISKAS(pp)) {
2855 panic("page_destroy: anon or kernel or no vnode "
2856 "large page %p", (void *)pp);
2858 page_demote_vp_pages(pp);
2859 ASSERT(pp->p_szc == 0);
2863 * Unload translations, if any, then hash out the
2864 * page to erase its identity.
2866 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
2867 page_hashout(pp, false);
2869 if (!dontfree) {
2871 * Acquire the "freemem_lock" for availrmem.
2872 * The page_struct_lock need not be acquired for lckcnt
2873 * and cowcnt since the page has an "exclusive" lock.
2874 * We are doing a modified version of page_pp_unlock here.
2876 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
2877 mutex_enter(&freemem_lock);
2878 if (pp->p_lckcnt != 0) {
2879 availrmem++;
2880 pages_locked--;
2881 pp->p_lckcnt = 0;
2883 if (pp->p_cowcnt != 0) {
2884 availrmem += pp->p_cowcnt;
2885 pages_locked -= pp->p_cowcnt;
2886 pp->p_cowcnt = 0;
2888 mutex_exit(&freemem_lock);
2891 * Put the page on the "free" list.
2893 page_free(pp, 0);
2897 void
2898 page_destroy_pages(page_t *pp)
2901 page_t *tpp, *rootpp = NULL;
2902 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
2903 pgcnt_t i, pglcks = 0;
2904 uint_t szc = pp->p_szc;
2906 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2908 VM_STAT_ADD(pagecnt.pc_destroy_pages);
2910 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2911 panic("page_destroy_pages: not root page %p", (void *)pp);
2912 /*NOTREACHED*/
2915 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2916 ASSERT((PAGE_EXCL(tpp) &&
2917 !page_iolock_assert(tpp)) || panicstr);
2918 ASSERT(tpp->p_slckcnt == 0 || panicstr);
2919 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
2920 page_hashout(tpp, false);
2921 ASSERT(tpp->p_offset == (uoff_t)-1);
2922 if (tpp->p_lckcnt != 0) {
2923 pglcks++;
2924 tpp->p_lckcnt = 0;
2925 } else if (tpp->p_cowcnt != 0) {
2926 pglcks += tpp->p_cowcnt;
2927 tpp->p_cowcnt = 0;
2929 ASSERT(!hat_page_getshare(tpp));
2930 VERIFY(tpp->p_object == NULL);
2931 ASSERT(tpp->p_vnode == NULL);
2932 ASSERT(tpp->p_szc == szc);
2934 PP_SETFREE(tpp);
2935 page_clr_all_props(tpp);
2936 PP_SETAGED(tpp);
2937 ASSERT(tpp->p_next == tpp);
2938 ASSERT(tpp->p_prev == tpp);
2939 page_list_concat(&rootpp, &tpp);
2942 ASSERT(rootpp == pp);
2943 if (pglcks != 0) {
2944 mutex_enter(&freemem_lock);
2945 availrmem += pglcks;
2946 mutex_exit(&freemem_lock);
2949 page_list_add_pages(rootpp, 0);
2950 page_create_putback(pgcnt);
2954 * Similar to page_destroy(), but destroys pages which are
2955 * locked and known to be on the page free list. Since
2956 * the page is known to be free and locked, no one can access
2957 * it.
2959 * Also, the number of free pages does not change.
2961 void
2962 page_destroy_free(page_t *pp)
2964 ASSERT(PAGE_EXCL(pp));
2965 ASSERT(PP_ISFREE(pp));
2966 ASSERT(pp->p_vnode);
2967 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
2968 ASSERT(!hat_page_is_mapped(pp));
2969 ASSERT(PP_ISAGED(pp) == 0);
2970 ASSERT(pp->p_szc == 0);
2972 VM_STAT_ADD(pagecnt.pc_destroy_free);
2973 page_list_sub(pp, PG_CACHE_LIST);
2975 page_hashout(pp, false);
2976 VERIFY(pp->p_object == NULL);
2977 ASSERT(pp->p_vnode == NULL);
2978 ASSERT(pp->p_offset == (uoff_t)-1);
2980 PP_SETAGED(pp);
2981 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2982 page_unlock(pp);
2984 mutex_enter(&new_freemem_lock);
2985 if (freemem_wait) {
2986 cv_signal(&freemem_cv);
2988 mutex_exit(&new_freemem_lock);
2992 * Rename the page "opp" to have an identity specified
2993 * by [vp, off]. If a page already exists with this name
2994 * it is locked and destroyed. Note that the page's
2995 * translations are not unloaded during the rename.
2997 * This routine is used by the anon layer to "steal" the
2998 * original page and is not unlike destroying a page and
2999 * creating a new page using the same page frame.
3001 * XXX -- Could deadlock if caller 1 tries to rename A to B while
3002 * caller 2 tries to rename B to A.
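*
* An illustrative sketch (not from the original comment): the anon layer
* steals `opp' for a new name roughly via
*
*	page_rename(opp, &vp->v_object, off);
*
* with `opp' held SE_EXCL and no I/O lock, as asserted below.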
3004 void
3005 page_rename(struct page *opp, struct vmobject *obj, uoff_t off)
3007 page_t *pp;
3008 int olckcnt = 0;
3009 int ocowcnt = 0;
3011 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
3012 ASSERT(!VMOBJECT_LOCKED(obj));
3013 ASSERT(PP_ISFREE(opp) == 0);
3015 VM_STAT_ADD(page_rename_count);
3018 * CacheFS may call page_rename for a large NFS page
3019 * when both CacheFS and NFS mount points are used
3020 * by applications. Demote this large page before
3021 * renaming it, to ensure that there are no "partial"
3022 * large pages left lying around.
3024 if (opp->p_szc != 0) {
3025 vnode_t *ovp = opp->p_vnode;
3026 ASSERT(ovp != NULL);
3027 ASSERT(!IS_SWAPFSVP(ovp));
3028 ASSERT(!VN_ISKAS(ovp));
3029 page_demote_vp_pages(opp);
3030 ASSERT(opp->p_szc == 0);
3033 page_hashout(opp, false);
3034 PP_CLRAGED(opp);
3036 vmobject_lock(obj);
3037 top:
3039 * Look for an existing page with this name and destroy it if found.
3040 * By holding the page hash lock all the way to the page_hashin()
3041 * call, we are assured that no page can be created with this
3042 * identity. In the case when the phm lock is dropped to undo any
3043 * hat layer mappings, the existing page is held with an "exclusive"
3044 * lock, again preventing another page from being created with
3045 * this identity.
3047 pp = find_page(obj, off);
3048 if (pp != NULL) {
3049 VM_STAT_ADD(page_rename_exists);
3052 * As it turns out, this is one of only two places where
3053 * page_lock() needs to hold the passed in lock in the
3054 * successful case. In all of the others, the lock could
3055 * be dropped as soon as the attempt is made to lock
3056 * the page. It is tempting to add yet another argument,
3057 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3059 if (!page_lock(pp, SE_EXCL, obj, P_RECLAIM)) {
3061 * Went to sleep because the page could not
3062 * be locked. We were woken up when the page
3063 * was unlocked, or when the page was destroyed.
3064 * In either case, `phm' was dropped while we
3065 * slept. Hence we should not just roar through
3066 * this loop.
3068 goto top;
3072 * If an existing page is a large page, then demote
3073 * it to ensure that no "partial" large pages are
3074 * "created" after page_rename. An existing page
3075 * can be a CacheFS page, and can't belong to swapfs.
3077 if (hat_page_is_mapped(pp)) {
3079 * Unload translations. Since we hold the
3080 * exclusive lock on this page, the page
3081 * can not be changed while we drop phm.
3082 * This is also not a lock protocol violation,
3083 * but rather the proper way to do things.
3085 vmobject_unlock(obj);
3086 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3087 if (pp->p_szc != 0) {
3088 ASSERT(!IS_SWAPFSVP(obj->vnode));
3089 ASSERT(!VN_ISKAS(obj->vnode));
3090 page_demote_vp_pages(pp);
3091 ASSERT(pp->p_szc == 0);
3093 vmobject_lock(obj);
3094 } else if (pp->p_szc != 0) {
3095 ASSERT(!IS_SWAPFSVP(obj->vnode));
3096 ASSERT(!VN_ISKAS(obj->vnode));
3097 vmobject_unlock(obj);
3098 page_demote_vp_pages(pp);
3099 ASSERT(pp->p_szc == 0);
3100 vmobject_lock(obj);
3102 page_hashout(pp, true);
3105 * Hash in the page with the new identity.
3107 if (!page_hashin(opp, obj, off, true)) {
3109 * We were holding phm while we searched for [vp, off]
3110 * and only dropped phm if we found and locked a page.
3111 * If we can't create this page now, then something
3112 * is really broken.
3114 panic("page_rename: Can't hash in page: %p", (void *)pp);
3115 /*NOTREACHED*/
3118 ASSERT(VMOBJECT_LOCKED(obj));
3119 vmobject_unlock(obj);
3122 * Now that we have dropped phm, lets get around to finishing up
3123 * with pp.
3125 if (pp != NULL) {
3126 ASSERT(!hat_page_is_mapped(pp));
3127 /* for now large pages should not end up here */
3128 ASSERT(pp->p_szc == 0);
3130 * Save the locks for transfer to the new page and then
3131 * clear them so page_free doesn't think they're important.
3132 * The page_struct_lock need not be acquired for lckcnt and
3133 * cowcnt since the page has an "exclusive" lock.
3135 olckcnt = pp->p_lckcnt;
3136 ocowcnt = pp->p_cowcnt;
3137 pp->p_lckcnt = pp->p_cowcnt = 0;
3140 * Put the page on the "free" list after we drop
3141 * the lock. The less work under the lock the better.
3143 VN_DISPOSE(pp, B_FREE, 0, kcred);
3147 * Transfer the lock count from the old page (if any).
3148 * The page_struct_lock need not be acquired for lckcnt and
3149 * cowcnt since the page has an "exclusive" lock.
3151 opp->p_lckcnt += olckcnt;
3152 opp->p_cowcnt += ocowcnt;
3156 * low level routine to add page `page' to the AVL tree and vnode chains for
3157 * [vp, offset]
3159 * Pages are normally inserted at the start of a vnode's v_object list.
3160 * If the vnode is VMODSORT and the page is modified, it goes at the end.
3161 * This can happen when a modified page is relocated for DR.
3163 * Returns 1 on success and 0 on failure.
3165 static int
3166 page_do_hashin(struct page *page, struct vmobject *obj, uoff_t offset)
3168 avl_index_t where;
3169 page_t **listp;
3171 ASSERT(PAGE_EXCL(page));
3172 ASSERT(obj != NULL);
3173 ASSERT(obj->vnode != NULL);
3174 ASSERT(VMOBJECT_LOCKED(obj));
3177 * Be sure to set these up before the page is inserted into the AVL
3178 * tree. As soon as the page is placed on the list some other
3179 * thread might get confused and wonder how this page could
3180 * possibly hash to this list.
3182 page->p_object = obj;
3183 page->p_vnode = obj->vnode;
3184 page->p_offset = offset;
3187 * record if this page is on a swap vnode
3189 if ((obj->vnode->v_flag & VISSWAP) != 0)
3190 PP_SETSWAP(page);
3193 * Duplicates are not allowed - fail to insert if we already have a
3194 * page with this identity.
3196 if (avl_find(&obj->tree, page, &where) != NULL) {
3197 page->p_object = NULL;
3198 page->p_vnode = NULL;
3199 page->p_offset = (uoff_t)(-1);
3200 return (0);
3203 avl_insert(&obj->tree, page, where);
3206 * Add the page to the vnode's list of pages
3208 if (IS_VMODSORT(obj->vnode) && hat_ismod(page))
3209 vmobject_add_page_tail(obj, page);
3210 else
3211 vmobject_add_page_head(obj, page);
3213 return (1);
3217 * Add page `pp' to both the hash and vp chains for [vp, offset].
3219 * Returns 1 on success and 0 on failure.
3220 * If `locked` is true, we do *not* attempt to lock the vnode's page mutex.
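*
* An illustrative sketch (not from the original comment): a caller that
* already holds the object lock passes `locked' == true:
*
*	vmobject_lock(obj);
*	if (!page_hashin(pp, obj, off, true))
*		panic("duplicate page");
*	vmobject_unlock(obj);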
3223 page_hashin(struct page *pp, struct vmobject *obj, uoff_t offset, bool locked)
3225 int rc;
3227 ASSERT(pp->p_fsdata == 0 || panicstr);
3229 VM_STAT_ADD(hashin_count);
3231 if (!locked) {
3232 VM_STAT_ADD(hashin_not_held);
3233 vmobject_lock(obj);
3236 rc = page_do_hashin(pp, obj, offset);
3238 if (!locked)
3239 vmobject_unlock(obj);
3241 if (rc == 0)
3242 VM_STAT_ADD(hashin_already);
3244 return (rc);
3248 * Remove page `page' from the AVL tree and vnode chains and remove its
3249 * vnode association. All mutexes must be held.
3251 static void
3252 page_do_hashout(page_t *page)
3256 vnode_t *vnode = page->p_vnode;
3258 ASSERT(vnode != NULL);
3259 ASSERT(VMOBJECT_LOCKED(&vnode->v_object));
3261 avl_remove(&vnode->v_object.tree, page);
3263 vmobject_remove_page(&vnode->v_object, page);
3265 page_clr_all_props(page);
3266 PP_CLRSWAP(page);
3267 page->p_object = NULL;
3268 page->p_vnode = NULL;
3269 page->p_offset = (uoff_t)-1;
3270 page->p_fsdata = 0;
3274 * Remove page `page' from the AVL tree and vnode chains and remove vnode
3275 * association.
3277 * When `locked` is true, we do *not* attempt to lock the vnode's page
3278 * mutex.
3280 void
3281 page_hashout(page_t *pp, bool locked)
3283 struct vmobject *obj;
3285 kmutex_t *sep;
3288 ASSERT(pp->p_vnode != NULL);
3289 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
3291 obj = &pp->p_vnode->v_object;
3293 if (!locked) {
3294 VM_STAT_ADD(hashout_not_held);
3295 vmobject_lock(obj);
3298 page_do_hashout(pp);
3300 if (!locked)
3301 vmobject_unlock(obj);
3304 * Wake up processes waiting for this page. The page's
3305 * identity has been changed, and is probably not the
3306 * desired page any longer.
3308 sep = page_se_mutex(pp);
3309 mutex_enter(sep);
3310 pp->p_selock &= ~SE_EWANTED;
3311 if (CV_HAS_WAITERS(&pp->p_cv))
3312 cv_broadcast(&pp->p_cv);
3313 mutex_exit(sep);
3317 * Add the page to the front of a linked list of pages
3318 * using the p_next & p_prev pointers for the list.
3319 * The caller is responsible for protecting the list pointers.
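*
* An illustrative sketch (hypothetical pages pp1 and pp2, not from the
* original comment): building up a private list; the newest page becomes
* the head of the circular list:
*
*	page_t *plist = NULL;
*
*	page_add(&plist, pp1);
*	page_add(&plist, pp2);		(plist now points at pp2)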
3321 void
3322 page_add(page_t **ppp, page_t *pp)
3324 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3326 page_add_common(ppp, pp);
3332 * Common code for page_add() and mach_page_add()
3334 void
3335 page_add_common(page_t **ppp, page_t *pp)
3337 if (*ppp == NULL) {
3338 pp->p_next = pp->p_prev = pp;
3339 } else {
3340 pp->p_next = *ppp;
3341 pp->p_prev = (*ppp)->p_prev;
3342 (*ppp)->p_prev = pp;
3343 pp->p_prev->p_next = pp;
3345 *ppp = pp;
3350 * Remove this page from a linked list of pages
3351 * using the p_next & p_prev pointers for the list.
3353 * The caller is responsible for protecting the list pointers.
3355 void
3356 page_sub(page_t **ppp, page_t *pp)
3358 ASSERT((PP_ISFREE(pp)) ? 1 :
3359 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3361 if (*ppp == NULL || pp == NULL) {
3362 panic("page_sub: bad arg(s): pp %p, *ppp %p",
3363 (void *)pp, (void *)(*ppp));
3364 /*NOTREACHED*/
3367 page_sub_common(ppp, pp);
3372 * Common code for page_sub() and mach_page_sub()
3374 void
3375 page_sub_common(page_t **ppp, page_t *pp)
3377 if (*ppp == pp)
3378 *ppp = pp->p_next; /* go to next page */
3380 if (*ppp == pp)
3381 *ppp = NULL; /* page list is gone */
3382 else {
3383 pp->p_prev->p_next = pp->p_next;
3384 pp->p_next->p_prev = pp->p_prev;
3386 pp->p_prev = pp->p_next = pp; /* make pp a list of one */
3391 * Break page list oppp into two lists with npages in the first list.
3392 * The tail is returned in nppp.
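*
* An illustrative sketch (not from the original comment): splitting off
* the first three pages of a list:
*
*	page_t *head = plist, *tail = NULL;
*
*	page_list_break(&head, &tail, 3);
*	(head: the first 3 pages; tail: the remainder, or NULL)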
3394 void
3395 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
3397 page_t *s1pp = *oppp;
3398 page_t *s2pp;
3399 page_t *e1pp, *e2pp;
3400 long n = 0;
3402 if (s1pp == NULL) {
3403 *nppp = NULL;
3404 return;
3406 if (npages == 0) {
3407 *nppp = s1pp;
3408 *oppp = NULL;
3409 return;
3411 for (n = 0, s2pp = *oppp; n < npages; n++) {
3412 s2pp = s2pp->p_next;
3414 /* Fix head and tail of new lists */
3415 e1pp = s2pp->p_prev;
3416 e2pp = s1pp->p_prev;
3417 s1pp->p_prev = e1pp;
3418 e1pp->p_next = s1pp;
3419 s2pp->p_prev = e2pp;
3420 e2pp->p_next = s2pp;
3422 /* second list empty */
3423 if (s2pp == s1pp) {
3424 *oppp = s1pp;
3425 *nppp = NULL;
3426 } else {
3427 *oppp = s1pp;
3428 *nppp = s2pp;
3433 * Concatenate page list nppp onto the end of list ppp.
3435 void
3436 page_list_concat(page_t **ppp, page_t **nppp)
3438 page_t *s1pp, *s2pp, *e1pp, *e2pp;
3440 if (*nppp == NULL) {
3441 return;
3443 if (*ppp == NULL) {
3444 *ppp = *nppp;
3445 return;
3447 s1pp = *ppp;
3448 e1pp = s1pp->p_prev;
3449 s2pp = *nppp;
3450 e2pp = s2pp->p_prev;
3451 s1pp->p_prev = e2pp;
3452 e2pp->p_next = s1pp;
3453 e1pp->p_next = s2pp;
3454 s2pp->p_prev = e1pp;
3458 * return the next page in the page list
3460 page_t *
3461 page_list_next(page_t *pp)
3463 return (pp->p_next);
3468 * Add the page to the front of the linked list of pages
3469 * using p_list.vnode for the list.
3471 * The caller is responsible for protecting the lists.
3473 void
3474 page_vpadd(page_t **ppp, page_t *pp)
3476 panic("%s should not be used", __func__);
3479 void
3480 page_lpadd(page_t **ppp, page_t *pp)
3482 if (*ppp == NULL) {
3483 pp->p_list.largepg.next = pp->p_list.largepg.prev = pp;
3484 } else {
3485 pp->p_list.largepg.next = *ppp;
3486 pp->p_list.largepg.prev = (*ppp)->p_list.largepg.prev;
3487 (*ppp)->p_list.largepg.prev = pp;
3488 pp->p_list.largepg.prev->p_list.largepg.next = pp;
3490 *ppp = pp;
3494 * Remove this page from the linked list of pages
3495 * using p_list.vnode for the list.
3497 * The caller is responsible for protecting the lists.
3499 void
3500 page_vpsub(page_t **ppp, page_t *pp)
3502 panic("%s should not be used", __func__);
3505 void
3506 page_lpsub(page_t **ppp, page_t *pp)
3508 if (*ppp == NULL || pp == NULL) {
3509 panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3510 (void *)pp, (void *)(*ppp));
3511 /*NOTREACHED*/
3514 if (*ppp == pp)
3515 *ppp = pp->p_list.largepg.next; /* go to next page */
3517 if (*ppp == pp)
3518 *ppp = NULL; /* page list is gone */
3519 else {
3520 pp->p_list.largepg.prev->p_list.largepg.next = pp->p_list.largepg.next;
3521 pp->p_list.largepg.next->p_list.largepg.prev = pp->p_list.largepg.prev;
3523 pp->p_list.largepg.prev = pp->p_list.largepg.next = pp; /* make pp a list of one */
3527 * Lock a physical page into memory "long term". Used to support "lock
3528 * in memory" functions. Accepts the page to be locked, and a cow variable
3529 * to indicate whether the lock will travel to the new page during
3530 * a potential copy-on-write.
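*
* An illustrative sketch (not from the original comment): an mlock-style
* path locks each page it has faulted in and backs out on failure:
*
*	if (!page_pp_lock(pp, 0, 0))
*		return (EAGAIN);	(availrmem too low, or at limit)
*	...
*	page_pp_unlock(pp, 0, 0);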
3533 page_pp_lock(
3534 page_t *pp, /* page to be locked */
3535 int cow, /* cow lock */
3536 int kernel) /* must succeed -- ignore checking */
3538 int r = 0; /* result -- assume failure */
3540 ASSERT(PAGE_LOCKED(pp));
3542 page_struct_lock(pp);
3544 * Acquire the "freemem_lock" for availrmem.
3546 if (cow) {
3547 mutex_enter(&freemem_lock);
3548 if ((availrmem > pages_pp_maximum) &&
3549 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3550 availrmem--;
3551 pages_locked++;
3552 mutex_exit(&freemem_lock);
3553 r = 1;
3554 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3555 cmn_err(CE_WARN,
3556 "COW lock limit reached on pfn 0x%lx",
3557 page_pptonum(pp));
3559 } else
3560 mutex_exit(&freemem_lock);
3561 } else {
3562 if (pp->p_lckcnt) {
3563 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3564 r = 1;
3565 if (++pp->p_lckcnt ==
3566 (ushort_t)PAGE_LOCK_MAXIMUM) {
3567 cmn_err(CE_WARN, "Page lock limit "
3568 "reached on pfn 0x%lx",
3569 page_pptonum(pp));
3572 } else {
3573 if (kernel) {
3574 /* availrmem accounting done by caller */
3575 ++pp->p_lckcnt;
3576 r = 1;
3577 } else {
3578 mutex_enter(&freemem_lock);
3579 if (availrmem > pages_pp_maximum) {
3580 availrmem--;
3581 pages_locked++;
3582 ++pp->p_lckcnt;
3583 r = 1;
3585 mutex_exit(&freemem_lock);
3589 page_struct_unlock(pp);
3590 return (r);
3594 * Decommit a lock on a physical page frame. Account for cow locks if
3595 * appropriate.
3597 void
3598 page_pp_unlock(
3599 page_t *pp, /* page to be unlocked */
3600 int cow, /* expect cow lock */
3601 int kernel) /* this was a kernel lock */
3603 ASSERT(PAGE_LOCKED(pp));
3605 page_struct_lock(pp);
3607 * Acquire the "freemem_lock" for availrmem.
3608 * If cowcnt or lckcnt is already 0, do nothing; i.e., we
3609 * could be called to unlock even if nothing is locked. This could
3610 * happen if locked file pages were truncated (removing the lock)
3611 * and the file was grown again and new pages faulted in; the new
3612 * pages are unlocked but the segment still thinks they're locked.
3614 if (cow) {
3615 if (pp->p_cowcnt) {
3616 mutex_enter(&freemem_lock);
3617 pp->p_cowcnt--;
3618 availrmem++;
3619 pages_locked--;
3620 mutex_exit(&freemem_lock);
3622 } else {
3623 if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3624 if (!kernel) {
3625 mutex_enter(&freemem_lock);
3626 availrmem++;
3627 pages_locked--;
3628 mutex_exit(&freemem_lock);
3632 page_struct_unlock(pp);
3636 * This routine reserves availrmem for npages;
3637 * flags: KM_NOSLEEP or KM_SLEEP
3638 * returns 1 on success or 0 on failure
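*
* An illustrative sketch (not from the original comment): a caller that
* cannot sleep reserves and later releases its pages:
*
*	if (!page_resv(npages, KM_NOSLEEP))
*		return (ENOMEM);
*	...
*	page_unresv(npages);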
3641 page_resv(pgcnt_t npages, uint_t flags)
3643 mutex_enter(&freemem_lock);
3644 while (availrmem < tune.t_minarmem + npages) {
3645 if (flags & KM_NOSLEEP) {
3646 mutex_exit(&freemem_lock);
3647 return (0);
3649 mutex_exit(&freemem_lock);
3650 page_needfree(npages);
3651 kmem_reap();
3652 ddi_msleep(250);
3653 page_needfree(-(spgcnt_t)npages);
3654 mutex_enter(&freemem_lock);
3656 availrmem -= npages;
3657 mutex_exit(&freemem_lock);
3658 return (1);
3662 * This routine unreserves availrmem for npages;
3664 void
3665 page_unresv(pgcnt_t npages)
3667 mutex_enter(&freemem_lock);
3668 availrmem += npages;
3669 mutex_exit(&freemem_lock);
3673 * See Statement at the beginning of segvn_lockop() regarding
3674 * the way we handle cowcnts and lckcnts.
3676 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
3677 * that breaks COW has PROT_WRITE.
3679 * Note that we may also break COW when we are softlocking
3680 * on read access during physio;
3681 * in this softlock case, the vpage may not have PROT_WRITE.
3682 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
3683 * if the vpage doesn't have PROT_WRITE.
3685 * This routine is never called if we are stealing a page
3686 * in anon_private.
3688 * The caller subtracted from availrmem for a read-only mapping;
3689 * if lckcnt is 1, increment availrmem.
3691 void
3692 page_pp_useclaim(
3693 page_t *opp, /* original page frame losing lock */
3694 page_t *npp, /* new page frame gaining lock */
3695 uint_t write_perm) /* set if vpage has PROT_WRITE */
3697 int payback = 0;
3698 int nidx, oidx;
3700 ASSERT(PAGE_LOCKED(opp));
3701 ASSERT(PAGE_LOCKED(npp));
3704 * Since we have two pages we probably have two locks. We need to take
3705 * them in a defined order to avoid deadlocks. It's also possible they
3706 * both hash to the same lock in which case this is a non-issue.
3708 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp));
3709 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp));
3710 if (nidx < oidx) {
3711 page_struct_lock(npp);
3712 page_struct_lock(opp);
3713 } else if (oidx < nidx) {
3714 page_struct_lock(opp);
3715 page_struct_lock(npp);
3716 } else { /* The pages hash to the same lock */
3717 page_struct_lock(npp);
3720 ASSERT(npp->p_cowcnt == 0);
3721 ASSERT(npp->p_lckcnt == 0);
3723 /* Don't use claim if nothing is locked (see page_pp_unlock above) */
3724 if ((write_perm && opp->p_cowcnt != 0) ||
3725 (!write_perm && opp->p_lckcnt != 0)) {
3727 if (write_perm) {
3728 npp->p_cowcnt++;
3729 ASSERT(opp->p_cowcnt != 0);
3730 opp->p_cowcnt--;
3731 } else {
3733 ASSERT(opp->p_lckcnt != 0);
3736 * We didn't need availrmem decremented if p_lckcnt on
3737 * the original page is 1. Here, we are unlocking the
3738 * read-only copy belonging to the original page and
3739 * locking a copy belonging to the new page.
3741 if (opp->p_lckcnt == 1)
3742 payback = 1;
3744 npp->p_lckcnt++;
3745 opp->p_lckcnt--;
3748 if (payback) {
3749 mutex_enter(&freemem_lock);
3750 availrmem++;
3751 pages_useclaim--;
3752 mutex_exit(&freemem_lock);
3755 if (nidx < oidx) {
3756 page_struct_unlock(opp);
3757 page_struct_unlock(npp);
3758 } else if (oidx < nidx) {
3759 page_struct_unlock(npp);
3760 page_struct_unlock(opp);
3761 } else { /* The pages hash to the same lock */
3762 page_struct_unlock(npp);
3767 * Simple claim adjust functions -- used to support changes in
3768 * claims due to changes in access permissions. Used by segvn_setprot().
3771 page_addclaim(page_t *pp)
3773 int r = 0; /* result */
3775 ASSERT(PAGE_LOCKED(pp));
3777 page_struct_lock(pp);
3778 ASSERT(pp->p_lckcnt != 0);
3780 if (pp->p_lckcnt == 1) {
3781 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3782 --pp->p_lckcnt;
3783 r = 1;
3784 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3785 cmn_err(CE_WARN,
3786 "COW lock limit reached on pfn 0x%lx",
3787 page_pptonum(pp));
3790 } else {
3791 mutex_enter(&freemem_lock);
3792 if ((availrmem > pages_pp_maximum) &&
3793 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3794 --availrmem;
3795 ++pages_claimed;
3796 mutex_exit(&freemem_lock);
3797 --pp->p_lckcnt;
3798 r = 1;
3799 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3800 cmn_err(CE_WARN,
3801 "COW lock limit reached on pfn 0x%lx",
3802 page_pptonum(pp));
3804 } else
3805 mutex_exit(&freemem_lock);
3807 page_struct_unlock(pp);
3808 return (r);
3812 page_subclaim(page_t *pp)
3814 int r = 0;
3816 ASSERT(PAGE_LOCKED(pp));
3818 page_struct_lock(pp);
3819 ASSERT(pp->p_cowcnt != 0);
3821 if (pp->p_lckcnt) {
3822 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3823 r = 1;
3825 * for availrmem
3827 mutex_enter(&freemem_lock);
3828 availrmem++;
3829 pages_claimed--;
3830 mutex_exit(&freemem_lock);
3832 pp->p_cowcnt--;
3834 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3835 cmn_err(CE_WARN,
3836 "Page lock limit reached on pfn 0x%lx",
3837 page_pptonum(pp));
3840 } else {
3841 r = 1;
3842 pp->p_cowcnt--;
3843 pp->p_lckcnt++;
3845 page_struct_unlock(pp);
3846 return (r);
3850 * Variant of page_addclaim(), where ppa[] contains the pages of a single large
3851 * page.
3854 page_addclaim_pages(page_t **ppa)
3856 pgcnt_t lckpgs = 0, pg_idx;
3858 VM_STAT_ADD(pagecnt.pc_addclaim_pages);
3861 * Only need to take the page struct lock on the large page root.
3863 page_struct_lock(ppa[0]);
3864 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
3866 ASSERT(PAGE_LOCKED(ppa[pg_idx]));
3867 ASSERT(ppa[pg_idx]->p_lckcnt != 0);
3868 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3869 page_struct_unlock(ppa[0]);
3870 return (0);
3872 if (ppa[pg_idx]->p_lckcnt > 1)
3873 lckpgs++;
3876 if (lckpgs != 0) {
3877 mutex_enter(&freemem_lock);
3878 if (availrmem >= pages_pp_maximum + lckpgs) {
3879 availrmem -= lckpgs;
3880 pages_claimed += lckpgs;
3881 } else {
3882 mutex_exit(&freemem_lock);
3883 page_struct_unlock(ppa[0]);
3884 return (0);
3886 mutex_exit(&freemem_lock);
3889 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
3890 ppa[pg_idx]->p_lckcnt--;
3891 ppa[pg_idx]->p_cowcnt++;
3893 page_struct_unlock(ppa[0]);
3894 return (1);
3898 * Variant of page_subclaim(), where ppa[] contains the pages of a single large
3899 * page.
3902 page_subclaim_pages(page_t **ppa)
3904 pgcnt_t ulckpgs = 0, pg_idx;
3906 VM_STAT_ADD(pagecnt.pc_subclaim_pages);
3909 * Only need to take the page struct lock on the large page root.
3911 page_struct_lock(ppa[0]);
3912 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
3914 ASSERT(PAGE_LOCKED(ppa[pg_idx]));
3915 ASSERT(ppa[pg_idx]->p_cowcnt != 0);
3916 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3917 page_struct_unlock(ppa[0]);
3918 return (0);
3920 if (ppa[pg_idx]->p_lckcnt != 0)
3921 ulckpgs++;
3924 if (ulckpgs != 0) {
3925 mutex_enter(&freemem_lock);
3926 availrmem += ulckpgs;
3927 pages_claimed -= ulckpgs;
3928 mutex_exit(&freemem_lock);
3931 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
3932 ppa[pg_idx]->p_cowcnt--;
3933 ppa[pg_idx]->p_lckcnt++;
3936 page_struct_unlock(ppa[0]);
3937 return (1);
3940 page_t *
3941 page_numtopp(pfn_t pfnum, se_t se)
3943 page_t *pp;
3945 retry:
3946 pp = page_numtopp_nolock(pfnum);
3947 if (pp == NULL) {
3948 return (NULL);
3952 * Acquire the appropriate lock on the page.
3954 while (!page_lock(pp, se, NULL, P_RECLAIM)) {
3955 if (page_pptonum(pp) != pfnum)
3956 goto retry;
3957 continue;
3960 if (page_pptonum(pp) != pfnum) {
3961 page_unlock(pp);
3962 goto retry;
3965 return (pp);
3968 page_t *
3969 page_numtopp_noreclaim(pfn_t pfnum, se_t se)
3971 page_t *pp;
3973 retry:
3974 pp = page_numtopp_nolock(pfnum);
3975 if (pp == NULL) {
3976 return (NULL);
3980 * Acquire the appropriate lock on the page.
3982 while (!page_lock(pp, se, NULL, P_NO_RECLAIM)) {
3983 if (page_pptonum(pp) != pfnum)
3984 goto retry;
3985 continue;
3988 if (page_pptonum(pp) != pfnum) {
3989 page_unlock(pp);
3990 goto retry;
3993 return (pp);
3997 * This routine is like page_numtopp, but will only return page structs
3998 * for pages which are ok for loading into hardware using the page struct.
4000 page_t *
4001 page_numtopp_nowait(pfn_t pfnum, se_t se)
4003 page_t *pp;
4005 retry:
4006 pp = page_numtopp_nolock(pfnum);
4007 if (pp == NULL) {
4008 return (NULL);
4012 * Try to acquire the appropriate lock on the page.
4014 if (PP_ISFREE(pp))
4015 pp = NULL;
4016 else {
4017 if (!page_trylock(pp, se))
4018 pp = NULL;
4019 else {
4020 if (page_pptonum(pp) != pfnum) {
4021 page_unlock(pp);
4022 goto retry;
4024 if (PP_ISFREE(pp)) {
4025 page_unlock(pp);
4026 pp = NULL;
4030 return (pp);
4034 * Returns a count of dirty pages that are in the process
4035 * of being written out. If 'cleanit' is set, try to push the page.
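*
* An illustrative sketch (hypothetical caller, not from the original
* comment): a quiesce path might push dirty pages until none remain:
*
*	while (page_busy(1) != 0)
*		delay(1);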
4037 pgcnt_t
4038 page_busy(int cleanit)
4040 page_t *page0 = page_first();
4041 page_t *pp = page0;
4042 pgcnt_t nppbusy = 0;
4043 uoff_t off;
4045 do {
4046 vnode_t *vp = pp->p_vnode;
4048 * A page is a candidate for syncing if it is:
4050 * (a) On neither the freelist nor the cachelist
4051 * (b) Hashed onto a vnode
4052 * (c) Not a kernel page
4053 * (d) Dirty
4054 * (e) Not part of a swapfile
4055 * (f) Belonging to a real vnode, i.e., with a non-null
4056 * v_vfsp pointer
4057 * (g) Backed by a filesystem which doesn't have a
4058 * stubbed-out sync operation
4060 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
4061 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4062 vfs_can_sync(vp->v_vfsp)) {
4063 nppbusy++;
4065 if (!cleanit)
4066 continue;
4067 if (!page_trylock(pp, SE_EXCL))
4068 continue;
4070 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4071 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4072 !(hat_pagesync(pp,
4073 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
4074 page_unlock(pp);
4075 continue;
4077 off = pp->p_offset;
4078 VN_HOLD(vp);
4079 page_unlock(pp);
4080 (void) fop_putpage(vp, off, PAGESIZE,
4081 B_ASYNC | B_FREE, kcred, NULL);
4082 VN_RELE(vp);
4084 } while ((pp = page_next(pp)) != page0);
4086 return (nppbusy);
4089 void page_invalidate_pages(void);
4092 * callback handler to vm sub-system
4094 * callers must make sure there are no recursive entries to this func.
4096 /*ARGSUSED*/
4097 boolean_t
4098 callb_vm_cpr(void *arg, int code)
4100 if (code == CB_CODE_CPR_CHKPT)
4101 page_invalidate_pages();
4102 return (B_TRUE);
4106 * Invalidate all pages of the system.
4107 * It shouldn't be called until all user page activity has stopped.
4109 void
4110 page_invalidate_pages()
4112 page_t *pp;
4113 page_t *page0;
4114 pgcnt_t nbusypages;
4115 int retry = 0;
4116 const int MAXRETRIES = 4;
4117 top:
4119 * Flush dirty pages and destroy the clean ones.
4121 nbusypages = 0;
4123 pp = page0 = page_first();
4124 do {
4125 struct vnode *vp;
4126 uoff_t offset;
4127 int mod;
4130 * skip the page if it has no vnode, or if it is associated
4131 * with the kernel vnode or prom-allocated kernel memory.
4133 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
4134 continue;
4137 * skip the page if it has already been freed and invalidated.
4139 if (PP_ISFREE(pp) && PP_ISAGED(pp))
4140 continue;
4143 * skip pages that are already locked or can't be "exclusively"
4144 * locked or are already free. After we lock the page, check
4145 * the free and age bits again to be sure it's not destroyed
4146 * yet.
4147 * To achieve max. parallelization, we use page_trylock instead
4148 * of page_lock so that we don't block on individual pages
4149 * while we have thousands of other pages to process.
4151 if (!page_trylock(pp, SE_EXCL)) {
4152 nbusypages++;
4153 continue;
4154 } else if (PP_ISFREE(pp)) {
4155 if (!PP_ISAGED(pp)) {
4156 page_destroy_free(pp);
4157 } else {
4158 page_unlock(pp);
4160 continue;
4163 * Is this page involved in some I/O? shared?
4165 * The page_struct_lock need not be acquired to
4166 * examine these fields since the page has an
4167 * "exclusive" lock.
4169 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4170 page_unlock(pp);
4171 continue;
4174 if (vp->v_type == VCHR) {
4175 panic("vp->v_type == VCHR");
4176 /*NOTREACHED*/
4179 if (!page_try_demote_pages(pp)) {
4180 page_unlock(pp);
4181 continue;
4185 * Check the modified bit. Leave the bits alone in hardware
4186 * (they will be modified if we do the putpage).
4188 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4189 & P_MOD);
4190 if (mod) {
4191 offset = pp->p_offset;
4193 * Hold the vnode before releasing the page lock
4194 * to prevent it from being freed and re-used by
4195 * some other thread.
4197 VN_HOLD(vp);
4198 page_unlock(pp);
4200 * No error return is checked here. Callers such as
4201 * cpr deal with the dirty pages at dump time
4202 * if this putpage fails.
4204 (void) fop_putpage(vp, offset, PAGESIZE, B_INVAL,
4205 kcred, NULL);
4206 VN_RELE(vp);
4207 } else {
4208 VN_DISPOSE(pp, B_INVAL, 0, kcred);
4210 } while ((pp = page_next(pp)) != page0);
4211 if (nbusypages && retry++ < MAXRETRIES) {
4212 delay(1);
4213 goto top;
4218 * Replace the page "old" with the page "new" on the page hash and vnode lists
4220 * the replacement must be done in place, ie the equivalent sequence:
4222 * vp = old->p_vnode;
4223 * off = old->p_offset;
4224 * page_do_hashout(old)
4225 * page_do_hashin(new, obj, off)
4227 * doesn't work, since
4228 * 1) if old is the only page on the vnode, the v_object list has a window
4229 * where it looks empty. This will break file system assumptions.
4230 * and
4231 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_object list.
4233 static void
4234 page_do_relocate_hash(page_t *new, page_t *old)
4237 vnode_t *vp = old->p_vnode;
4238 kmutex_t *sep;
4240 ASSERT(PAGE_EXCL(old));
4241 ASSERT(PAGE_EXCL(new));
4242 ASSERT(vp != NULL);
4243 ASSERT(VMOBJECT_LOCKED(&vp->v_object));
4246 * update new and replace old with new on the page hash list
4248 new->p_object = old->p_object;
4249 new->p_vnode = old->p_vnode;
4250 new->p_offset = old->p_offset;
4252 avl_remove(&vp->v_object.tree, old);
4253 avl_add(&vp->v_object.tree, new);
4255 if ((new->p_vnode->v_flag & VISSWAP) != 0)
4256 PP_SETSWAP(new);
4259 * replace old with new on the vnode's page list
4261 list_insert_before(&vp->v_object.list, old, new);
4262 list_remove(&vp->v_object.list, old);
4265 * clear out the old page
4267 old->p_object = NULL;
4268 old->p_vnode = NULL;
4269 PP_CLRSWAP(old);
4270 old->p_offset = (uoff_t)-1;
4271 page_clr_all_props(old);
4274 * Wake up processes waiting for this page. The page's
4275 * identity has been changed, and is probably not the
4276 * desired page any longer.
4278 sep = page_se_mutex(old);
4279 mutex_enter(sep);
4280 old->p_selock &= ~SE_EWANTED;
4281 if (CV_HAS_WAITERS(&old->p_cv))
4282 cv_broadcast(&old->p_cv);
4283 mutex_exit(sep);
4287 * This function moves the identity of page "pp_old" to page "pp_new".
4288 * Both pages must be locked on entry. "pp_new" is free, has no identity,
4289 * and need not be hashed out from anywhere.
4291 void
4292 page_relocate_hash(page_t *pp_new, page_t *pp_old)
4294 vnode_t *vp = pp_old->p_vnode;
4295 uoff_t off = pp_old->p_offset;
4298 * Rehash two pages
4300 ASSERT(PAGE_EXCL(pp_old));
4301 ASSERT(PAGE_EXCL(pp_new));
4302 ASSERT(vp != NULL);
4303 VERIFY(pp_new->p_object == NULL);
4304 ASSERT(pp_new->p_vnode == NULL);
4306 vmobject_lock(&vp->v_object);
4308 page_do_relocate_hash(pp_new, pp_old);
4309 pp_new->p_fsdata = pp_old->p_fsdata;
4310 pp_old->p_fsdata = 0;
4312 vmobject_unlock(&vp->v_object);
4315 * The page_struct_lock need not be acquired for lckcnt and
4316 * cowcnt since the page has an "exclusive" lock.
4318 ASSERT(pp_new->p_lckcnt == 0);
4319 ASSERT(pp_new->p_cowcnt == 0);
4320 pp_new->p_lckcnt = pp_old->p_lckcnt;
4321 pp_new->p_cowcnt = pp_old->p_cowcnt;
4322 pp_old->p_lckcnt = pp_old->p_cowcnt = 0;
4326 * Helper routine used to lock all remaining members of a
4327 * large page. The caller is responsible for passing in a locked
4328 * pp. If pp is a large page, then it succeeds in locking all the
4329 * remaining constituent pages or it returns with only the
4330 * original page locked.
4332 * Returns 1 on success, 0 on failure.
4334 * If success is returned this routine guarantees p_szc for all constituent
4335 * pages of a large page pp belongs to can't change. To achieve this we
4336 * recheck szc of pp after locking all constituent pages and retry if szc
4337 * changed (it could only decrease). Since hat_page_demote() needs an EXCL
4338 * lock on one of constituent pages it can't be running after all constituent
4339 * pages are locked. hat_page_demote() with a lock on a constituent page
4340 * outside of this large page (i.e. pp belonged to a larger large page) is
4341 * already done with all constituent pages of pp since the root's p_szc is
4342 * changed last. Therefore no need to synchronize with hat_page_demote() that
4343 * locked a constituent page outside of pp's current large page.
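*
* An illustrative sketch (not from the original comment), with `pp'
* already locked by the caller:
*
*	if (group_page_trylock(pp, SE_EXCL)) {
*		(all constituent pages are now locked)
*		group_page_unlock(pp);	(unlocks all but pp itself)
*	}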
4345 #ifdef DEBUG
4346 uint32_t gpg_trylock_mtbf = 0;
4347 #endif
4350 group_page_trylock(page_t *pp, se_t se)
4352 page_t *tpp;
4353 pgcnt_t npgs, i, j;
4354 uint_t pszc = pp->p_szc;
4356 #ifdef DEBUG
4357 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
4358 return (0);
4360 #endif
4362 if (pp != PP_GROUPLEADER(pp, pszc)) {
4363 return (0);
4366 retry:
4367 ASSERT(PAGE_LOCKED_SE(pp, se));
4368 ASSERT(!PP_ISFREE(pp));
4369 if (pszc == 0) {
4370 return (1);
4372 npgs = page_get_pagecnt(pszc);
4373 tpp = pp + 1;
4374 for (i = 1; i < npgs; i++, tpp++) {
4375 if (!page_trylock(tpp, se)) {
4376 tpp = pp + 1;
4377 for (j = 1; j < i; j++, tpp++) {
4378 page_unlock(tpp);
4380 return (0);
4383 if (pp->p_szc != pszc) {
4384 ASSERT(pp->p_szc < pszc);
4385 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) &&
4386 !IS_SWAPFSVP(pp->p_vnode));
4387 tpp = pp + 1;
4388 for (i = 1; i < npgs; i++, tpp++) {
4389 page_unlock(tpp);
4391 pszc = pp->p_szc;
4392 goto retry;
4394 return (1);
4397 void
4398 group_page_unlock(page_t *pp)
4400 page_t *tpp;
4401 pgcnt_t npgs, i;
4403 ASSERT(PAGE_LOCKED(pp));
4404 ASSERT(!PP_ISFREE(pp));
4405 ASSERT(pp == PP_PAGEROOT(pp));
4406 npgs = page_get_pagecnt(pp->p_szc);
4407 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
4408 page_unlock(tpp);
4413 * returns
4414 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages
4415 * ERANGE : this is not a base page
4416 * EBUSY : failure to get locks on the page/pages
4417 * ENOMEM : failure to obtain replacement pages
4418 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
4419 * EIO : An error occurred while trying to copy the page data
4421 * Return with all constituent members of target and replacement
4422 * SE_EXCL locked. It is the caller's responsibility to drop the
4423 * locks.
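*
* An illustrative sketch (not from the original comment), with `targ'
* held SE_EXCL and no replacement page pre-allocated:
*
*	page_t *repl = NULL;
*	spgcnt_t nreloc;
*
*	if (do_page_relocate(&targ, &repl, 1, &nreloc, NULL) == 0) {
*		(targ is now the old-frame list, repl the new pages;
*		unlock both lists when done)
*	}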
4426 do_page_relocate(
4427 page_t **target,
4428 page_t **replacement,
4429 int grouplock,
4430 spgcnt_t *nrelocp,
4431 lgrp_t *lgrp)
4433 page_t *first_repl;
4434 page_t *repl;
4435 page_t *targ;
4436 page_t *pl = NULL;
4437 uint_t ppattr;
4438 pfn_t pfn, repl_pfn;
4439 uint_t szc;
4440 spgcnt_t npgs, i;
4441 int repl_contig = 0;
4442 uint_t flags = 0;
4443 spgcnt_t dofree = 0;
4445 *nrelocp = 0;
4449 * If this is not a base page,
4450 * just return with 0 pages relocated.
4452 targ = *target;
4453 ASSERT(PAGE_EXCL(targ));
4454 ASSERT(!PP_ISFREE(targ));
4455 szc = targ->p_szc;
4456 ASSERT(szc < mmu_page_sizes);
4457 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4458 pfn = targ->p_pagenum;
4459 if (pfn != PFN_BASE(pfn, szc)) {
4460 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
4461 return (ERANGE);
4464 if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
4465 repl_pfn = repl->p_pagenum;
4466 if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
4467 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
4468 return (ERANGE);
4470 repl_contig = 1;
4474 * We must lock all members of this large page or we cannot
4475 * relocate any part of it.
4477 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
4478 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
4479 return (EBUSY);
4483 * reread szc it could have been decreased before
4484 * group_page_trylock() was done.
4486 szc = targ->p_szc;
4487 ASSERT(szc < mmu_page_sizes);
4488 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4489 ASSERT(pfn == PFN_BASE(pfn, szc));
4491 npgs = page_get_pagecnt(targ->p_szc);
4493 if (repl == NULL) {
4494 dofree = npgs; /* Size of target page in MMU pages */
4495 if (!page_create_wait(dofree, 0)) {
4496 if (grouplock != 0) {
4497 group_page_unlock(targ);
4499 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4500 return (ENOMEM);
4504 * seg kmem pages require that the target and replacement
4505 * page be the same pagesize.
4507 flags = (VN_ISKAS(targ->p_vnode)) ? PGR_SAMESZC : 0;
4508 repl = page_get_replacement_page(targ, lgrp, flags);
4509 if (repl == NULL) {
4510 if (grouplock != 0) {
4511 group_page_unlock(targ);
4513 page_create_putback(dofree);
4514 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4515 return (ENOMEM);
4518 #ifdef DEBUG
4519 else {
4520 ASSERT(PAGE_LOCKED(repl));
4522 #endif /* DEBUG */
4525 first_repl = repl;
4527 for (i = 0; i < npgs; i++) {
4528 ASSERT(PAGE_EXCL(targ));
4529 ASSERT(targ->p_slckcnt == 0);
4530 ASSERT(repl->p_slckcnt == 0);
4532 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);
4534 ASSERT(hat_page_getshare(targ) == 0);
4535 ASSERT(!PP_ISFREE(targ));
4536 ASSERT(targ->p_pagenum == (pfn + i));
4537 ASSERT(repl_contig == 0 ||
4538 repl->p_pagenum == (repl_pfn + i));
4541 * Copy the page contents and attributes then
4542 * relocate the page in the page hash.
4544 if (ppcopy(targ, repl) == 0) {
4545 targ = *target;
4546 repl = first_repl;
4547 VM_STAT_ADD(vmm_vmstats.ppr_copyfail);
4548 if (grouplock != 0) {
4549 group_page_unlock(targ);
4551 if (dofree) {
4552 *replacement = NULL;
4553 page_free_replacement_page(repl);
4554 page_create_putback(dofree);
4556 return (EIO);
4559 targ++;
4560 if (repl_contig != 0) {
4561 repl++;
4562 } else {
4563 repl = repl->p_next;
4567 repl = first_repl;
4568 targ = *target;
4570 for (i = 0; i < npgs; i++) {
4571 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
4572 page_clr_all_props(repl);
4573 page_set_props(repl, ppattr);
4574 page_relocate_hash(repl, targ);
4576 ASSERT(hat_page_getshare(targ) == 0);
4577 ASSERT(hat_page_getshare(repl) == 0);
4579 * Now clear the props on targ, after the
4580 * page_relocate_hash(), they no longer
4581 * have any meaning.
4583 page_clr_all_props(targ);
4584 ASSERT(targ->p_next == targ);
4585 ASSERT(targ->p_prev == targ);
4586 page_list_concat(&pl, &targ);
4588 targ++;
4589 if (repl_contig != 0) {
4590 repl++;
4591 } else {
4592 repl = repl->p_next;
4595 /* assert that we have come full circle with repl */
4596 ASSERT(repl_contig == 1 || first_repl == repl);
4598 *target = pl;
4599 if (*replacement == NULL) {
4600 ASSERT(first_repl == repl);
4601 *replacement = repl;
4603 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
4604 *nrelocp = npgs;
4605 return (0);
4608 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
4611 page_relocate(
4612 page_t **target,
4613 page_t **replacement,
4614 int grouplock,
4615 int freetarget,
4616 spgcnt_t *nrelocp,
4617 lgrp_t *lgrp)
4619 spgcnt_t ret;
4621 /* do_page_relocate returns 0 on success or errno value */
4622 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
4624 if (ret != 0 || freetarget == 0) {
4625 return (ret);
4627 if (*nrelocp == 1) {
4628 ASSERT(*target != NULL);
4629 page_free(*target, 1);
4630 } else {
4631 page_t *tpp = *target;
4632 uint_t szc = tpp->p_szc;
4633 pgcnt_t npgs = page_get_pagecnt(szc);
4634 ASSERT(npgs > 1);
4635 ASSERT(szc != 0);
4636 do {
4637 ASSERT(PAGE_EXCL(tpp));
4638 ASSERT(!hat_page_is_mapped(tpp));
4639 ASSERT(tpp->p_szc == szc);
4640 PP_SETFREE(tpp);
4641 PP_SETAGED(tpp);
4642 npgs--;
4643 } while ((tpp = tpp->p_next) != *target);
4644 ASSERT(npgs == 0);
4645 page_list_add_pages(*target, 0);
4646 npgs = page_get_pagecnt(szc);
4647 page_create_putback(npgs);
4649 return (ret);
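/*
 * Example (illustrative sketch): the common page_relocate() call with no
 * preallocated replacement and freetarget set, so the routine both picks
 * the replacement and frees the old page. "targ" is assumed to be the
 * SE_EXCL locked, non-free root page; the caller must unlock the
 * replacement pages when done.
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	if (page_relocate(&targ, &repl, 1, 1, &nreloc, NULL) == 0) {
 *		// nreloc PAGESIZE pages now hang off repl, all SE_EXCL
 *		// locked; drop each lock once the new pages are in use.
 *	}
 */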
4653 * it is up to the caller to deal with pcf accounting.
4655 void
4656 page_free_replacement_page(page_t *pplist)
4658 page_t *pp;
4660 while (pplist != NULL) {
4662 * pp_targ is a linked list.
4664 pp = pplist;
4665 if (pp->p_szc == 0) {
4666 page_sub(&pplist, pp);
4667 page_clr_all_props(pp);
4668 PP_SETFREE(pp);
4669 PP_SETAGED(pp);
4670 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
4671 page_unlock(pp);
4672 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
4673 } else {
4674 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
4675 page_t *tpp;
4676 page_list_break(&pp, &pplist, curnpgs);
4677 tpp = pp;
4678 do {
4679 ASSERT(PAGE_EXCL(tpp));
4680 ASSERT(!hat_page_is_mapped(tpp));
4681 page_clr_all_props(tpp);
4682 PP_SETFREE(tpp);
4683 PP_SETAGED(tpp);
4684 } while ((tpp = tpp->p_next) != pp);
4685 page_list_add_pages(pp, 0);
4686 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
4692 * Relocate target to non-relocatable replacement page.
4695 page_relocate_cage(page_t **target, page_t **replacement)
4697 page_t *tpp, *rpp;
4698 spgcnt_t pgcnt, npgs;
4699 int result;
4701 tpp = *target;
4703 ASSERT(PAGE_EXCL(tpp));
4704 ASSERT(tpp->p_szc == 0);
4706 pgcnt = btop(page_get_pagesize(tpp->p_szc));
4708 do {
4709 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
4710 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
4711 if (rpp == NULL) {
4712 page_create_putback(pgcnt);
4713 kcage_cageout_wakeup();
4715 } while (rpp == NULL);
4717 ASSERT(PP_ISNORELOC(rpp));
4719 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
4721 if (result == 0) {
4722 *replacement = rpp;
4723 if (pgcnt != npgs)
4724 panic("page_relocate_cage: partial relocation");
4727 return (result);
4731 * Release the page lock on a page, place on cachelist
4732 * tail if no longer mapped. Caller can let us know if
4733 * the page is known to be clean.
4736 page_release(page_t *pp, int checkmod)
4738 int status;
4740 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
4741 (pp->p_vnode != NULL));
4743 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
4744 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
4745 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
4746 !hat_page_is_mapped(pp)) {
4749 * If page is modified, unlock it
4751 * (p_nrm & P_MOD) bit has the latest stuff because:
4752 * (1) We found that this page doesn't have any mappings
4753 * _after_ holding SE_EXCL and
4754 * (2) We didn't drop SE_EXCL lock after the check in (1)
4756 if (checkmod && hat_ismod(pp)) {
4757 page_unlock(pp);
4758 status = PGREL_MOD;
4759 } else {
4760 VN_DISPOSE(pp, B_FREE, 0, kcred);
4761 status = PGREL_CLEAN;
4763 } else {
4764 page_unlock(pp);
4765 status = PGREL_NOTREL;
4767 return (status);
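/*
 * Example (illustrative sketch): handling the three page_release() return
 * values after finishing with a page that may no longer be needed.
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		// page was unmapped and clean; it went to the cachelist
 *		break;
 *	case PGREL_MOD:
 *		// page was modified; it was only unlocked, not freed
 *		break;
 *	case PGREL_NOTREL:
 *		// page still mapped, locked, or COW; only unlocked
 *		break;
 *	}
 */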
4771 * Given a constituent page, try to demote the large page on the freelist.
4773 * Returns nonzero if the page could be demoted successfully. Returns with
4774 * the constituent page still locked.
4777 page_try_demote_free_pages(page_t *pp)
4779 page_t *rootpp = pp;
4780 pfn_t pfn = page_pptonum(pp);
4781 spgcnt_t npgs;
4782 uint_t szc = pp->p_szc;
4784 ASSERT(PP_ISFREE(pp));
4785 ASSERT(PAGE_EXCL(pp));
4788 * Adjust rootpp and lock it, if `pp' is not the base
4789 * constituent page.
4791 npgs = page_get_pagecnt(pp->p_szc);
4792 if (npgs == 1) {
4793 return (0);
4796 if (!IS_P2ALIGNED(pfn, npgs)) {
4797 pfn = P2ALIGN(pfn, npgs);
4798 rootpp = page_numtopp_nolock(pfn);
4801 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
4802 return (0);
4805 if (rootpp->p_szc != szc) {
4806 if (pp != rootpp)
4807 page_unlock(rootpp);
4808 return (0);
4811 page_demote_free_pages(rootpp);
4813 if (pp != rootpp)
4814 page_unlock(rootpp);
4816 ASSERT(PP_ISFREE(pp));
4817 ASSERT(PAGE_EXCL(pp));
4818 return (1);
4822 * Given a constituent page, try to demote the large page.
4824 * Returns nonzero if the page could be demoted successfully. Returns with
4825 * the constituent page still locked.
4828 page_try_demote_pages(page_t *pp)
4830 page_t *tpp, *rootpp = pp;
4831 pfn_t pfn = page_pptonum(pp);
4832 spgcnt_t i, npgs;
4833 uint_t szc = pp->p_szc;
4834 vnode_t *vp = pp->p_vnode;
4836 ASSERT(PAGE_EXCL(pp));
4838 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);
4840 if (pp->p_szc == 0) {
4841 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
4842 return (1);
4845 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) {
4846 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
4847 page_demote_vp_pages(pp);
4848 ASSERT(pp->p_szc == 0);
4849 return (1);
4853 * Adjust rootpp if passed in is not the base
4854 * constituent page.
4856 npgs = page_get_pagecnt(pp->p_szc);
4857 ASSERT(npgs > 1);
4858 if (!IS_P2ALIGNED(pfn, npgs)) {
4859 pfn = P2ALIGN(pfn, npgs);
4860 rootpp = page_numtopp_nolock(pfn);
4861 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
4862 ASSERT(rootpp->p_vnode != NULL);
4863 ASSERT(rootpp->p_szc == szc);
4867 * We can't demote kernel pages since we can't hat_unload()
4868 * the mappings.
4870 if (VN_ISKAS(rootpp->p_vnode))
4871 return (0);
4874 * Attempt to lock all constituent pages except the page passed
4875 * in since it's already locked.
4877 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
4878 ASSERT(!PP_ISFREE(tpp));
4879 ASSERT(tpp->p_vnode != NULL);
4881 if (tpp != pp && !page_trylock(tpp, SE_EXCL))
4882 break;
4883 ASSERT(tpp->p_szc == rootpp->p_szc);
4884 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
4888 * If we failed to lock them all then unlock what we have
4889 * locked so far and bail.
4891 if (i < npgs) {
4892 tpp = rootpp;
4893 while (i-- > 0) {
4894 if (tpp != pp)
4895 page_unlock(tpp);
4896 tpp++;
4898 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
4899 return (0);
4902 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
4903 ASSERT(PAGE_EXCL(tpp));
4904 ASSERT(tpp->p_slckcnt == 0);
4905 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
4906 tpp->p_szc = 0;
4910 * Unlock all pages except the page passed in.
4912 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
4913 ASSERT(!hat_page_is_mapped(tpp));
4914 if (tpp != pp)
4915 page_unlock(tpp);
4918 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
4919 return (1);
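/*
 * Example (illustrative sketch): a caller holding one constituent page
 * SE_EXCL that needs pp to be a PAGESIZE page before proceeding. On both
 * success and failure pp itself stays locked.
 *
 *	if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
 *		// couldn't lock all constituents; retry or bail
 *	} else {
 *		// pp->p_szc is now 0
 *	}
 */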
4923 * Called by page_free() and page_destroy() to demote the page size code
4924 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
4925 * p_szc on free list, neither can we just clear p_szc of a single page_t
4926 * within a large page since it will break other code that relies on p_szc
4927 * being the same for all page_t's of a large page). Anonymous pages should
4928 * never end up here because anon_map_getpages() cannot deal with p_szc
4929 * changes after a single constituent page is locked. While anonymous or
4930 * kernel large pages are demoted or freed an entire large page at a time
4931 * with all constituent pages locked EXCL, for file system pages we
4932 * have to be able to demote a large page (i.e. decrease all constituent pages'
4933 * p_szc) with just an EXCL lock on one of the constituent pages. The reason
4934 * we can easily deal with anonymous page demotion an entire large page at a
4935 * time is that those operations originate at the address space level and concern
4936 * the entire large page region, with actual demotion only done when pages are
4937 * not shared with any other processes (therefore we can always get EXCL lock
4938 * on all anonymous constituent pages after clearing segment page
4939 * cache). However file system pages can be truncated or invalidated at a
4940 * PAGESIZE level from the file system side and end up in page_free() or
4941 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed
4942 * and therefore pageout should be able to demote a large page by EXCL locking
4943 * any constituent page that is not under SOFTLOCK). In those cases we cannot
4944 * rely on being able to lock EXCL all constituent pages.
4946 * To prevent szc changes on file system pages one has to lock all constituent
4947 * pages at least SHARED (or call page_szc_lock()). The only subsystem that
4948 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
4949 * prevent szc changes is hat layer that uses its own page level mlist
4950 * locks. hat assumes that szc doesn't change after mlist lock for a page is
4951 * taken. Therefore we need to change szc under hat level locks if we only
4952 * have an EXCL lock on a single constituent page and hat still references any
4953 * of constituent pages. (Note we can't "ignore" hat layer by simply
4954 * hat_pageunload() all constituent pages without having EXCL locks on all of
4955 * constituent pages). We use hat_page_demote() call to safely demote szc of
4956 * all constituent pages under hat locks when we only have an EXCL lock on one
4957 * of constituent pages.
4959 * This routine calls page_szc_lock() before calling hat_page_demote() to
4960 * allow segvn in one special case not to lock all constituent pages SHARED
4961 * before calling hat_memload_array() that relies on p_szc not changing even
4962 * before hat level mlist lock is taken. In that case segvn uses
4963 * page_szc_lock() to prevent hat_page_demote() changing p_szc values.
4965 * Anonymous or kernel page demotion still has to lock all pages exclusively
4966 * and do hat_pageunload() on all constituent pages before demoting the page
4967 * therefore there's no need for anonymous or kernel page demotion to use
4968 * hat_page_demote() mechanism.
4970 * hat_page_demote() removes all large mappings that map pp and then decreases
4971 * p_szc starting from the last constituent page of the large page. Working
4972 * from the tail of a large page in decreasing pfn order allows one looking at
4973 * the root page to know that hat_page_demote() is done for root's szc area.
4974 * e.g. if a root page has szc 1 one knows it only has to lock all constituent
4975 * pages within szc 1 area to prevent szc changes because hat_page_demote()
4976 * that started on this page when it had szc > 1 is done for this szc 1 area.
4978 * We are guaranteed that all constituent pages of pp's large page belong to
4979 * the same vnode with the consecutive offsets increasing in the direction of
4980 * the pfn i.e. the identity of constituent pages can't change until their
4981 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
4982 * large mappings to pp even though we don't lock any constituent page except
4983 * pp (i.e. we won't unload e.g. kernel locked page).
4985 static void
4986 page_demote_vp_pages(page_t *pp)
4988 kmutex_t *mtx;
4990 ASSERT(PAGE_EXCL(pp));
4991 ASSERT(!PP_ISFREE(pp));
4992 ASSERT(pp->p_vnode != NULL);
4993 ASSERT(!IS_SWAPFSVP(pp->p_vnode));
4994 ASSERT(!PP_ISKAS(pp));
4996 VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
4998 mtx = page_szc_lock(pp);
4999 if (mtx != NULL) {
5000 hat_page_demote(pp);
5001 mutex_exit(mtx);
5003 ASSERT(pp->p_szc == 0);
5007 * Mark any existing pages for migration in the given range
5009 void
5010 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
5011 struct anon_map *amp, ulong_t anon_index, struct vmobject *obj,
5012 uoff_t objoff, int rflag)
5014 struct anon *ap;
5015 struct vmobject *curobj;
5016 lgrp_t *from;
5017 pgcnt_t nlocked;
5018 uoff_t off;
5019 pfn_t pfn;
5020 size_t pgsz;
5021 size_t segpgsz;
5022 pgcnt_t pages;
5023 uint_t pszc;
5024 page_t *pp0, *pp;
5025 caddr_t va;
5026 ulong_t an_idx;
5027 anon_sync_obj_t cookie;
5029 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5032 * Don't do anything if we don't need to do lgroup optimizations
5033 * on this system
5035 if (!lgrp_optimizations())
5036 return;
5039 * Align address and length to (potentially large) page boundary
5041 segpgsz = page_get_pagesize(seg->s_szc);
5042 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
5043 if (rflag)
5044 len = P2ROUNDUP(len, segpgsz);
5047 * Do one (large) page at a time
5049 va = addr;
5050 while (va < addr + len) {
5052 * Lookup (root) page for vnode and offset corresponding to
5053 * this virtual address
5054 * Try anonmap first since there may be copy-on-write
5055 * pages, but initialize object pointer and offset using
5056 * arguments just in case there isn't an amp.
5058 curobj = obj;
5059 off = objoff + va - seg->s_base;
5060 if (amp) {
5061 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5062 an_idx = anon_index + seg_page(seg, va);
5063 anon_array_enter(amp, an_idx, &cookie);
5064 ap = anon_get_ptr(amp->ahp, an_idx);
5065 if (ap) {
5066 struct vnode *vn;
5068 swap_xlate(ap, &vn, &off);
5070 curobj = (vn != NULL) ? &vn->v_object : NULL;
5072 anon_array_exit(&cookie);
5073 ANON_LOCK_EXIT(&amp->a_rwlock);
5076 pp = NULL;
5077 if (curobj)
5078 pp = page_lookup(curobj, off, SE_SHARED);
5081 * If there isn't a page at this virtual address,
5082 * skip to next page
5084 if (pp == NULL) {
5085 va += PAGESIZE;
5086 continue;
5090 * Figure out which lgroup this page is in for kstats
5092 pfn = page_pptonum(pp);
5093 from = lgrp_pfn_to_lgrp(pfn);
5096 * Get page size, and round up and skip to next page boundary
5097 * if unaligned address
5099 pszc = pp->p_szc;
5100 pgsz = page_get_pagesize(pszc);
5101 pages = btop(pgsz);
5102 if (!IS_P2ALIGNED(va, pgsz) ||
5103 !IS_P2ALIGNED(pfn, pages) ||
5104 pgsz > segpgsz) {
5105 pgsz = MIN(pgsz, segpgsz);
5106 page_unlock(pp);
5107 pages = btop(P2END((uintptr_t)va, pgsz) -
5108 (uintptr_t)va);
5109 va = (caddr_t)P2END((uintptr_t)va, pgsz);
5110 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages);
5111 continue;
5115 * Upgrade to exclusive lock on page
5117 if (!page_tryupgrade(pp)) {
5118 page_unlock(pp);
5119 va += pgsz;
5120 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5121 btop(pgsz));
5122 continue;
5125 pp0 = pp++;
5126 nlocked = 1;
5129 * Lock constituent pages if this is large page
5131 if (pages > 1) {
5133 * Lock all constituents except root page, since it
5134 * should be locked already.
5136 for (; nlocked < pages; nlocked++) {
5137 if (!page_trylock(pp, SE_EXCL)) {
5138 break;
5140 if (PP_ISFREE(pp) ||
5141 pp->p_szc != pszc) {
5143 * hat_page_demote() raced in with us.
5145 ASSERT(!IS_SWAPFSVP(curobj->vnode));
5146 page_unlock(pp);
5147 break;
5149 pp++;
5154 * If all constituent pages couldn't be locked,
5155 * unlock pages locked so far and skip to next page.
5157 if (nlocked < pages) {
5158 while (pp0 < pp) {
5159 page_unlock(pp0++);
5161 va += pgsz;
5162 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5163 btop(pgsz));
5164 continue;
5168 * hat_page_demote() can no longer happen
5169 * since last cons page had the right p_szc after
5170 * all cons pages were locked. All cons pages
5171 * should now have the same p_szc.
5175 * All constituent pages locked successfully, so mark
5176 * large page for migration and unload the mappings of
5177 * constituent pages, so a fault will occur on any part of the
5178 * large page
5180 PP_SETMIGRATE(pp0);
5181 while (pp0 < pp) {
5182 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD);
5183 ASSERT(hat_page_getshare(pp0) == 0);
5184 page_unlock(pp0++);
5186 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);
5188 va += pgsz;
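/*
 * Example (illustrative sketch): the intended pairing of page_mark_migrate()
 * above with page_migrate() below. A segment driver first marks a range,
 * then, on a later fault with the pages looked up and locked in ppa[],
 * migrates whatever was marked. The variable names are hypothetical.
 *
 *	page_mark_migrate(seg, addr, len, amp, an_idx, obj, off, 1);
 *	// ... a fault occurs because the mappings were unloaded ...
 *	page_migrate(seg, addr, ppa, npages);
 */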
5193 * Migrate any pages that have been marked for migration in the given range
5195 void
5196 page_migrate(
5197 struct seg *seg,
5198 caddr_t addr,
5199 page_t **ppa,
5200 pgcnt_t npages)
5202 lgrp_t *from;
5203 lgrp_t *to;
5204 page_t *newpp;
5205 page_t *pp;
5206 pfn_t pfn;
5207 size_t pgsz;
5208 spgcnt_t page_cnt;
5209 spgcnt_t i;
5210 uint_t pszc;
5212 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5214 while (npages > 0) {
5215 pp = *ppa;
5216 pszc = pp->p_szc;
5217 pgsz = page_get_pagesize(pszc);
5218 page_cnt = btop(pgsz);
5221 * Check to see whether this page is marked for migration
5223 * Assume that root page of large page is marked for
5224 * migration and none of the other constituent pages
5225 * are marked. This really simplifies clearing the
5226 * migrate bit by not having to clear it from each
5227 * constituent page.
5229 * note we don't want to relocate an entire large page if
5230 * someone is only using one subpage.
5232 if (npages < page_cnt)
5233 break;
5236 * Is it marked for migration?
5238 if (!PP_ISMIGRATE(pp))
5239 goto next;
5242 * Determine lgroups that page is being migrated between
5244 pfn = page_pptonum(pp);
5245 if (!IS_P2ALIGNED(pfn, page_cnt)) {
5246 break;
5248 from = lgrp_pfn_to_lgrp(pfn);
5249 to = lgrp_mem_choose(seg, addr, pgsz);
5252 * Need to get exclusive lock's to migrate
5254 for (i = 0; i < page_cnt; i++) {
5255 ASSERT(PAGE_LOCKED(ppa[i]));
5256 if (page_pptonum(ppa[i]) != pfn + i ||
5257 ppa[i]->p_szc != pszc) {
5258 break;
5260 if (!page_tryupgrade(ppa[i])) {
5261 lgrp_stat_add(from->lgrp_id,
5262 LGRP_PM_FAIL_LOCK_PGS,
5263 page_cnt);
5264 break;
5268 * Check to see whether we are trying to migrate
5269 * page to lgroup where it is allocated already.
5270 * If so, clear the migrate bit and skip to next
5271 * page.
5273 if (i == 0 && to == from) {
5274 PP_CLRMIGRATE(ppa[0]);
5275 page_downgrade(ppa[0]);
5276 goto next;
5281 * If all constituent pages couldn't be locked,
5282 * unlock pages locked so far and skip to next page.
5284 if (i != page_cnt) {
5285 while (--i != -1) {
5286 page_downgrade(ppa[i]);
5288 goto next;
5291 (void) page_create_wait(page_cnt, PG_WAIT);
5292 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
5293 if (newpp == NULL) {
5294 page_create_putback(page_cnt);
5295 for (i = 0; i < page_cnt; i++) {
5296 page_downgrade(ppa[i]);
5298 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
5299 page_cnt);
5300 goto next;
5302 ASSERT(newpp->p_szc == pszc);
5304 * Clear migrate bit and relocate page
5306 PP_CLRMIGRATE(pp);
5307 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
5308 panic("page_migrate: page_relocate failed");
5310 ASSERT(page_cnt * PAGESIZE == pgsz);
5313 * Keep stats for number of pages migrated from and to
5314 * each lgroup
5316 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
5317 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
5319 * update the page_t array we were passed in and
5320 * unlink constituent pages of a large page.
5322 for (i = 0; i < page_cnt; ++i, ++pp) {
5323 ASSERT(PAGE_EXCL(newpp));
5324 ASSERT(newpp->p_szc == pszc);
5325 ppa[i] = newpp;
5326 pp = newpp;
5327 page_sub(&newpp, pp);
5328 page_downgrade(pp);
5330 ASSERT(newpp == NULL);
5331 next:
5332 addr += pgsz;
5333 ppa += page_cnt;
5334 npages -= page_cnt;
5338 uint_t page_reclaim_maxcnt = 60; /* max total iterations */
5339 uint_t page_reclaim_nofree_maxcnt = 3; /* max iterations without progress */
5341 * Reclaim/reserve availrmem for npages.
5342 * If there is not enough memory start reaping seg, kmem caches.
5343 * Start pageout scanner (via page_needfree()).
5344 * Exit after ~page_reclaim_maxcnt seconds regardless of how much memory has been released.
5345 * Note: There is no guarantee that any availrmem will be freed as
5346 * this memory typically is locked (kernel heap) or reserved for swap.
5347 * Also due to memory fragmentation kmem allocator may not be able
5348 * to free any memory (single user allocated buffer will prevent
5349 * freeing slab or a page).
5352 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
5354 int i = 0;
5355 int i_nofree = 0;
5356 int ret = 0;
5357 pgcnt_t deficit;
5358 pgcnt_t old_availrmem = 0;
5360 mutex_enter(&freemem_lock);
5361 while (availrmem < tune.t_minarmem + npages + epages &&
5362 i++ < page_reclaim_maxcnt) {
5363 /* ensure we made some progress in the last few iterations */
5364 if (old_availrmem < availrmem) {
5365 old_availrmem = availrmem;
5366 i_nofree = 0;
5367 } else if (i_nofree++ >= page_reclaim_nofree_maxcnt) {
5368 break;
5371 deficit = tune.t_minarmem + npages + epages - availrmem;
5372 mutex_exit(&freemem_lock);
5373 page_needfree(deficit);
5374 kmem_reap();
5375 ddi_sleep(1);
5376 page_needfree(-(spgcnt_t)deficit);
5377 mutex_enter(&freemem_lock);
5380 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
5381 availrmem -= npages;
5382 ret = 1;
5385 mutex_exit(&freemem_lock);
5387 return (ret);
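/*
 * Example (illustrative sketch): reserving availrmem for an allocation of
 * npages plus epages of slop. With adjust set, a nonzero return means
 * availrmem was already debited by npages and must be credited back once
 * the memory is no longer needed.
 *
 *	if (page_reclaim_mem(npages, epages, 1)) {
 *		// ... use the reserved memory ...
 *		mutex_enter(&freemem_lock);
 *		availrmem += npages;
 *		mutex_exit(&freemem_lock);
 *	}
 */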
5391 * Search the memory segments to locate the desired page. Within a
5392 * segment, pages increase linearly with one page structure per
5393 * physical page frame (size PAGESIZE). The search begins
5394 * with the segment that was accessed last, to take advantage of locality.
5395 * If the hint misses, we start from the beginning of the sorted memseg list
5400 * Some data structures for pfn to pp lookup.
5402 ulong_t mhash_per_slot;
5403 struct memseg *memseg_hash[N_MEM_SLOTS];
5405 page_t *
5406 page_numtopp_nolock(pfn_t pfnum)
5408 struct memseg *seg;
5409 page_t *pp;
5410 vm_cpu_data_t *vc;
5413 * We need to disable kernel preemption while referencing the
5414 * cpu_vm_data field in order to prevent us from being switched to
5415 * another cpu and trying to reference it after it has been freed.
5416 * This will keep us on cpu and prevent it from being removed while
5417 * we are still on it.
5419 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5420 * which is being reused by DR, which will flush those references
5421 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5423 kpreempt_disable();
5424 vc = CPU->cpu_vm_data;
5425 ASSERT(vc != NULL);
5427 MEMSEG_STAT_INCR(nsearch);
5429 /* Try last winner first */
5430 if (((seg = vc->vc_pnum_memseg) != NULL) &&
5431 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5432 MEMSEG_STAT_INCR(nlastwon);
5433 pp = seg->pages + (pfnum - seg->pages_base);
5434 if (pp->p_pagenum == pfnum) {
5435 kpreempt_enable();
5436 return ((page_t *)pp);
5440 /* Else Try hash */
5441 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5442 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5443 MEMSEG_STAT_INCR(nhashwon);
5444 vc->vc_pnum_memseg = seg;
5445 pp = seg->pages + (pfnum - seg->pages_base);
5446 if (pp->p_pagenum == pfnum) {
5447 kpreempt_enable();
5448 return ((page_t *)pp);
5452 /* Else Brute force */
5453 for (seg = memsegs; seg != NULL; seg = seg->next) {
5454 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5455 vc->vc_pnum_memseg = seg;
5456 pp = seg->pages + (pfnum - seg->pages_base);
5457 if (pp->p_pagenum == pfnum) {
5458 kpreempt_enable();
5459 return ((page_t *)pp);
5463 vc->vc_pnum_memseg = NULL;
5464 kpreempt_enable();
5465 MEMSEG_STAT_INCR(nnotfound);
5466 return (NULL);
5470 struct memseg *
5471 page_numtomemseg_nolock(pfn_t pfnum)
5473 struct memseg *seg;
5474 page_t *pp;
5477 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5478 * which is being reused by DR, which will flush those references
5479 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5481 kpreempt_disable();
5482 /* Try hash */
5483 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5484 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5485 pp = seg->pages + (pfnum - seg->pages_base);
5486 if (pp->p_pagenum == pfnum) {
5487 kpreempt_enable();
5488 return (seg);
5492 /* Else Brute force */
5493 for (seg = memsegs; seg != NULL; seg = seg->next) {
5494 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5495 pp = seg->pages + (pfnum - seg->pages_base);
5496 if (pp->p_pagenum == pfnum) {
5497 kpreempt_enable();
5498 return (seg);
5502 kpreempt_enable();
5503 return (NULL);
5507 * Given a page and a count return the page struct that is
5508 * n structs away from the current one in the global page
5509 * list.
5511 * This function wraps to the first page upon
5512 * reaching the end of the memseg list.
5514 page_t *
5515 page_nextn(page_t *pp, ulong_t n)
5517 struct memseg *seg;
5518 page_t *ppn;
5519 vm_cpu_data_t *vc;
5522 * We need to disable kernel preemption while referencing the
5523 * cpu_vm_data field in order to prevent us from being switched to
5524 * another cpu and trying to reference it after it has been freed.
5525 * This will keep us on cpu and prevent it from being removed while
5526 * we are still on it.
5528 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5529 * which is being reused by DR, which will flush those references
5530 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5532 kpreempt_disable();
5533 vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
5535 ASSERT(vc != NULL);
5537 if (((seg = vc->vc_pnext_memseg) == NULL) ||
5538 (seg->pages_base == seg->pages_end) ||
5539 !(pp >= seg->pages && pp < seg->epages)) {
5541 for (seg = memsegs; seg; seg = seg->next) {
5542 if (pp >= seg->pages && pp < seg->epages)
5543 break;
5546 if (seg == NULL) {
5547 /* Memory delete got in, return something valid. */
5548 /* TODO: fix me. */
5549 seg = memsegs;
5550 pp = seg->pages;
5554 /* check for wraparound - possible if n is large */
5555 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
5556 n -= seg->epages - pp;
5557 seg = seg->next;
5558 if (seg == NULL)
5559 seg = memsegs;
5560 pp = seg->pages;
5562 vc->vc_pnext_memseg = seg;
5563 kpreempt_enable();
5564 return (ppn);
5568 * Initialize for a loop using page_next_scan_large().
5570 page_t *
5571 page_next_scan_init(void **cookie)
5573 ASSERT(cookie != NULL);
5574 *cookie = (void *)memsegs;
5575 return ((page_t *)memsegs->pages);
5579 * Return the next page in a scan of page_t's, assuming we want
5580 * to skip over sub-pages within larger page sizes.
5582 * The cookie is used to keep track of the current memseg.
5584 page_t *
5585 page_next_scan_large(
5586 page_t *pp,
5587 ulong_t *n,
5588 void **cookie)
5590 struct memseg *seg = (struct memseg *)*cookie;
5591 page_t *new_pp;
5592 ulong_t cnt;
5593 pfn_t pfn;
5597 * get the count of page_t's to skip based on the page size
5599 ASSERT(pp != NULL);
5600 if (pp->p_szc == 0) {
5601 cnt = 1;
5602 } else {
5603 pfn = page_pptonum(pp);
5604 cnt = page_get_pagecnt(pp->p_szc);
5605 cnt -= pfn & (cnt - 1);
5607 *n += cnt;
5608 new_pp = pp + cnt;
5611 * Catch if we went past the end of the current memory segment. If so,
5612 * just move to the next segment with pages.
5614 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) {
5615 do {
5616 seg = seg->next;
5617 if (seg == NULL)
5618 seg = memsegs;
5619 } while (seg->pages_base == seg->pages_end);
5620 new_pp = seg->pages;
5621 *cookie = (void *)seg;
5624 return (new_pp);
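/*
 * Example (illustrative sketch): visiting every large page root (or every
 * PAGESIZE page where p_szc is 0) exactly once. "n" accumulates the number
 * of PAGESIZE pages skipped so the loop can terminate after one pass.
 *
 *	void *cookie;
 *	ulong_t n = 0;
 *	page_t *pp = page_next_scan_init(&cookie);
 *
 *	while (n < total_pages) {
 *		// ... examine pp ...
 *		pp = page_next_scan_large(pp, &n, &cookie);
 *	}
 */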
5629 * Returns next page in list. Note: this function wraps
5630 * to the first page in the list upon reaching the end
5631 * of the list. Callers should be aware of this fact.
5634 /* We should change this to be a #define */
5636 page_t *
5637 page_next(page_t *pp)
5639 return (page_nextn(pp, 1));
5642 page_t *
5643 page_first()
5645 return ((page_t *)memsegs->pages);
5650 * This routine is called at boot with the initial memory configuration
5651 * and when memory is added or removed.
5653 void
5654 build_pfn_hash()
5656 pfn_t cur;
5657 pgcnt_t index;
5658 struct memseg *pseg;
5659 int i;
5662 * Clear memseg_hash array.
5663 * Since memory add/delete is designed to operate concurrently
5664 * with normal operation, the hash rebuild must be able to run
5665 * concurrently with page_numtopp_nolock(). To support this
5666 * functionality, assignments to memseg_hash array members must
5667 * be done atomically.
5669 * NOTE: bzero() does not currently guarantee this for kernel
5670 * threads, and cannot be used here.
5672 for (i = 0; i < N_MEM_SLOTS; i++)
5673 memseg_hash[i] = NULL;
5675 hat_kpm_mseghash_clear(N_MEM_SLOTS);
5678 * Physmax is the last valid pfn.
5680 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
5681 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
5682 index = MEMSEG_PFN_HASH(pseg->pages_base);
5683 cur = pseg->pages_base;
5684 do {
5685 if (index >= N_MEM_SLOTS)
5686 index = MEMSEG_PFN_HASH(cur);
5688 if (memseg_hash[index] == NULL ||
5689 memseg_hash[index]->pages_base > pseg->pages_base) {
5690 memseg_hash[index] = pseg;
5691 hat_kpm_mseghash_update(index, pseg);
5693 cur += mhash_per_slot;
5694 index++;
5695 } while (cur < pseg->pages_end);
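/*
 * Worked example (illustrative; assumes MEMSEG_PFN_HASH(pfn) is essentially
 * pfn / mhash_per_slot truncated to the table size): each of the
 * N_MEM_SLOTS hash slots covers mhash_per_slot consecutive pfns, so with
 * physmax + 1 == 2^20 pages and mhash_per_slot == 2^20 / N_MEM_SLOTS,
 * pfn p lands in slot p / mhash_per_slot. The loop above seeds each slot
 * a memseg overlaps with the lowest-based memseg covering that slot.
 */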
5700 * Return the pagenum for the pp
5702 pfn_t
5703 page_pptonum(page_t *pp)
5705 return (pp->p_pagenum);
5709 * interface to the referenced and modified etc bits
5710 * in the PSM part of the page struct
5711 * when no locking is desired.
5713 void
5714 page_set_props(page_t *pp, uint_t flags)
5716 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
5717 pp->p_nrm |= (uchar_t)flags;
5720 void
5721 page_clr_all_props(page_t *pp)
5723 pp->p_nrm = 0;
5727 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
5730 page_clear_lck_cow(page_t *pp, int adjust)
5732 int f_amount;
5734 ASSERT(PAGE_EXCL(pp));
5737 * The page_struct_lock need not be acquired here since
5738 * we require the caller hold the page exclusively locked.
5740 f_amount = 0;
5741 if (pp->p_lckcnt) {
5742 f_amount = 1;
5743 pp->p_lckcnt = 0;
5745 if (pp->p_cowcnt) {
5746 f_amount += pp->p_cowcnt;
5747 pp->p_cowcnt = 0;
5750 if (adjust && f_amount) {
5751 mutex_enter(&freemem_lock);
5752 availrmem += f_amount;
5753 mutex_exit(&freemem_lock);
5756 return (f_amount);
5760 * The following function is called from free_vp_pages()
5761 * for an inexact estimate of a newly free'd page...
5763 ulong_t
5764 page_share_cnt(page_t *pp)
5766 return (hat_page_getshare(pp));
5770 page_isshared(page_t *pp)
5772 return (hat_page_checkshare(pp, 1));
5776 page_isfree(page_t *pp)
5778 return (PP_ISFREE(pp));
5782 page_isref(page_t *pp)
5784 return (hat_page_getattr(pp, P_REF));
5788 page_ismod(page_t *pp)
5790 return (hat_page_getattr(pp, P_MOD));
5794 * The following code all currently relates to the page capture logic:
5796 * This logic is used for cases where there is a desire to claim a certain
5797 * physical page in the system for the caller. As it may not be possible
5798 * to capture the page immediately, the p_toxic bits are used in the page
5799 * structure to indicate that someone wants to capture this page. When the
5800 * page gets unlocked, the toxic flag will be noted and an attempt to capture
5801 * the page will be made. If it is successful, the original caller's callback
5802 * will be called with the page to do with it what they please.
5804 * There is also an async thread which wakes up to attempt to capture
5805 * pages occasionally which have the capture bit set. All of the pages which
5806 * need to be captured asynchronously have been inserted into the
5807 * page_capture_hash and thus this thread walks that hash list. Items in the
5808 * hash have an expiration time so this thread handles that as well by removing
5809 * the item from the hash if it has expired.
5811 * Some important things to note are:
5812 * - if the PR_CAPTURE bit is set on a page, then the page is in the
5813 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
5814 * to set and clear this bit, and while the lock is held is the only time
5815 * you can add or remove an entry from the hash.
5816 * - the PR_CAPTURE bit can only be set and cleared while holding the
5817 * page_capture_hash_head.pchh_mutex
5818 * - the t_flag field of the thread struct is used with the T_CAPTURING
5819 * flag to prevent recursion while dealing with large pages.
5820 * - pages which need to be retired never expire on the page_capture_hash.
5823 static void page_capture_thread(void);
5824 static kthread_t *pc_thread_id;
5825 kcondvar_t pc_cv;
5826 static kmutex_t pc_thread_mutex;
5827 static clock_t pc_thread_shortwait;
5828 static clock_t pc_thread_longwait;
5829 static int pc_thread_retry;
5831 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];
5833 /* Note that this is a circular linked list */
5834 typedef struct page_capture_hash_bucket {
5835 page_t *pp;
5836 uchar_t szc;
5837 uchar_t pri;
5838 uint_t flags;
5839 clock_t expires; /* lbolt at which this request expires. */
5840 void *datap; /* Cached data passed in for callback */
5841 struct page_capture_hash_bucket *next;
5842 struct page_capture_hash_bucket *prev;
5843 } page_capture_hash_bucket_t;
5845 #define PC_PRI_HI 0 /* capture now */
5846 #define PC_PRI_LO 1 /* capture later */
5847 #define PC_NUM_PRI 2
5849 #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)
5853 * Each hash bucket will have its own mutex and two lists, which are:
5854 * active (0): represents requests which have not been processed by
5855 * the page_capture async thread yet.
5856 * walked (1): represents requests which have been processed by the
5857 * page_capture async thread within its given walk of this bucket.
5859 * These are all needed so that we can synchronize all async page_capture
5860 * events. When the async thread moves to a new bucket, it will append the
5861 * walked list to the active list and walk each item one at a time, moving it
5862 * from the active list to the walked list. Thus if there is an async request
5863 * outstanding for a given page, it will always be in one of the two lists.
5864 * New requests will always be added to the active list.
5865 * If we were not able to capture a page before the request expired, we'd free
5866 * up the request structure which would indicate to page_capture that there is
5867 * no longer a need for the given page, and clear the PR_CAPTURE flag if
5868 * possible.
5870 typedef struct page_capture_hash_head {
5871 kmutex_t pchh_mutex;
5872 uint_t num_pages[PC_NUM_PRI];
5873 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
5874 } page_capture_hash_head_t;
5876 #ifdef DEBUG
5877 #define NUM_PAGE_CAPTURE_BUCKETS 4
5878 #else
5879 #define NUM_PAGE_CAPTURE_BUCKETS 64
5880 #endif
5882 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];
5884 /* for now use a very simple hash based upon the size of a page struct */
5885 #define PAGE_CAPTURE_HASH(pp) \
5886 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))
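/*
 * Worked example (illustrative): since sizeof (page_t) is on the order of
 * 2^7 bytes, shifting the page_t address right by 7 makes neighboring page
 * structures tend to land in different buckets. E.g. two page_t's 128
 * bytes apart differ by 1 after the shift and thus (modulo the bucket
 * count) hash to adjacent buckets.
 */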
5888 extern pgcnt_t swapfs_minfree;
5890 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
5893 * a callback function is required for page capture requests.
5895 void
5896 page_capture_register_callback(uint_t index, clock_t duration,
5897 int (*cb_func)(page_t *, void *, uint_t))
5899 ASSERT(pc_cb[index].cb_active == 0);
5900 ASSERT(cb_func != NULL);
5901 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
5902 pc_cb[index].duration = duration;
5903 pc_cb[index].cb_func = cb_func;
5904 pc_cb[index].cb_active = 1;
5905 rw_exit(&pc_cb[index].cb_rwlock);
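/*
 * Example (illustrative sketch): a consumer registering a capture callback
 * at its assigned index. PC_MYINDEX and my_capture_cb() are hypothetical;
 * duration is in lbolt ticks (or -1 for requests that never expire).
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		// pp arrives SE_EXCL locked and clean; consume it, or
 *		// page_free(pp, 1) and return nonzero on failure
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(PC_MYINDEX, 10 * hz, my_capture_cb);
 *	// ...
 *	page_capture_unregister_callback(PC_MYINDEX);
 */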
5908 void
5909 page_capture_unregister_callback(uint_t index)
5911 int i, j;
5912 struct page_capture_hash_bucket *bp1;
5913 struct page_capture_hash_bucket *bp2;
5914 struct page_capture_hash_bucket *head = NULL;
5915 uint_t flags = (1 << index);
5917 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
5918 ASSERT(pc_cb[index].cb_active == 1);
5919 pc_cb[index].duration = 0; /* Paranoia */
5920 pc_cb[index].cb_func = NULL; /* Paranoia */
5921 pc_cb[index].cb_active = 0;
5922 rw_exit(&pc_cb[index].cb_rwlock);
5925 * Just move all the entries to a private list which we can walk
5926 * through without the need to hold any locks.
5927 * No more requests can get added to the hash lists for this consumer
5928 * as the cb_active field for the callback has been cleared.
5930 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
5931 mutex_enter(&page_capture_hash[i].pchh_mutex);
5932 for (j = 0; j < 2; j++) {
5933 bp1 = page_capture_hash[i].lists[j].next;
5934 /* walk through all but first (sentinel) element */
5935 while (bp1 != &page_capture_hash[i].lists[j]) {
5936 bp2 = bp1;
5937 if (bp2->flags & flags) {
5938 bp1 = bp2->next;
5939 bp1->prev = bp2->prev;
5940 bp2->prev->next = bp1;
5941 bp2->next = head;
5942 head = bp2;
5944 * Clear the PR_CAPTURE bit as we
5945 * hold appropriate locks here.
5947 page_clrtoxic(head->pp, PR_CAPTURE);
5948 page_capture_hash[i].
5949 num_pages[bp2->pri]--;
5950 continue;
5952 bp1 = bp1->next;
5955 mutex_exit(&page_capture_hash[i].pchh_mutex);
5958 while (head != NULL) {
5959 bp1 = head;
5960 head = head->next;
5961 kmem_free(bp1, sizeof (*bp1));
5967 * Find pp in the active list and move it to the walked list if it
5968 * exists.
5969 * Note that most often pp should be at the front of the active list
5970 * as it is currently used and thus there is no other sort of optimization
5971 * being done here as this is a linked list data structure.
5972 * Returns 1 on successful move or 0 if page could not be found.
5974 static int
5975 page_capture_move_to_walked(page_t *pp)
5977 page_capture_hash_bucket_t *bp;
5978 int index;
5980 index = PAGE_CAPTURE_HASH(pp);
5982 mutex_enter(&page_capture_hash[index].pchh_mutex);
5983 bp = page_capture_hash[index].lists[0].next;
5984 while (bp != &page_capture_hash[index].lists[0]) {
5985 if (bp->pp == pp) {
5986 /* Remove from old list */
5987 bp->next->prev = bp->prev;
5988 bp->prev->next = bp->next;
5990 /* Add to new list */
5991 bp->next = page_capture_hash[index].lists[1].next;
5992 bp->prev = &page_capture_hash[index].lists[1];
5993 page_capture_hash[index].lists[1].next = bp;
5994 bp->next->prev = bp;
5997 * There is a small probability of a page on a free
5998 * list being retired while being allocated
5999 * and before P_RAF is set on it. The page may
6000 * end up marked as a high priority request instead
6001 * of a low priority request.
6002 * If a P_RAF page is not marked as a low priority request,
6003 * change it to a low priority request.
6005 page_capture_hash[index].num_pages[bp->pri]--;
6006 bp->pri = PAGE_CAPTURE_PRIO(pp);
6007 page_capture_hash[index].num_pages[bp->pri]++;
6008 mutex_exit(&page_capture_hash[index].pchh_mutex);
6009 return (1);
6011 bp = bp->next;
6013 mutex_exit(&page_capture_hash[index].pchh_mutex);
6014 return (0);
6018 * Add a new entry to the page capture hash. The only case where a new
6019 * entry is not added is when the page capture consumer is no longer registered.
6020 * In this case, we'll silently not add the page to the hash. We know that
6021 * page retire will always be registered for the case where we are currently
6022 * unretiring a page and thus there are no conflicts.
6024 static void
6025 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
6027 page_capture_hash_bucket_t *bp1;
6028 page_capture_hash_bucket_t *bp2;
6029 int index;
6030 int cb_index;
6031 int i;
6032 uchar_t pri;
6033 #ifdef DEBUG
6034 page_capture_hash_bucket_t *tp1;
6035 int l;
6036 #endif
6038 ASSERT(!(flags & CAPTURE_ASYNC));
6040 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);
6042 bp1->pp = pp;
6043 bp1->szc = szc;
6044 bp1->flags = flags;
6045 bp1->datap = datap;
6047 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
6048 if ((flags >> cb_index) & 1) {
6049 break;
6053 ASSERT(cb_index != PC_NUM_CALLBACKS);
6055 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
6056 if (pc_cb[cb_index].cb_active) {
6057 if (pc_cb[cb_index].duration == -1) {
6058 bp1->expires = (clock_t)-1;
6059 } else {
6060 bp1->expires = ddi_get_lbolt() +
6061 pc_cb[cb_index].duration;
6063 } else {
6064 /* There's no callback registered so don't add to the hash */
6065 rw_exit(&pc_cb[cb_index].cb_rwlock);
6066 kmem_free(bp1, sizeof (*bp1));
6067 return;
6070 index = PAGE_CAPTURE_HASH(pp);
6073 * Only allow capture flag to be modified under this mutex.
6074 * Prevents multiple entries for same page getting added.
6076 mutex_enter(&page_capture_hash[index].pchh_mutex);
6079 * if not already on the hash, set capture bit and add to the hash
6081 if (!(pp->p_toxic & PR_CAPTURE)) {
6082 #ifdef DEBUG
6083 /* Check for duplicate entries */
6084 for (l = 0; l < 2; l++) {
6085 tp1 = page_capture_hash[index].lists[l].next;
6086 while (tp1 != &page_capture_hash[index].lists[l]) {
6087 if (tp1->pp == pp) {
6088 panic("page pp 0x%p already on hash "
6089 "at 0x%p\n",
6090 (void *)pp, (void *)tp1);
6092 tp1 = tp1->next;
6096 #endif
6097 page_settoxic(pp, PR_CAPTURE);
6098 pri = PAGE_CAPTURE_PRIO(pp);
6099 bp1->pri = pri;
6100 bp1->next = page_capture_hash[index].lists[0].next;
6101 bp1->prev = &page_capture_hash[index].lists[0];
6102 bp1->next->prev = bp1;
6103 page_capture_hash[index].lists[0].next = bp1;
6104 page_capture_hash[index].num_pages[pri]++;
6105 if (flags & CAPTURE_RETIRE) {
6106 page_retire_incr_pend_count(datap);
6108 mutex_exit(&page_capture_hash[index].pchh_mutex);
6109 rw_exit(&pc_cb[cb_index].cb_rwlock);
6110 cv_signal(&pc_cv);
6111 return;
6115 * A page retire request will replace any other request.
6116 * A second physmem request which is for a different process than
6117 * the currently registered one will be dropped as there is
6118 * no way to hold the private data for both calls.
6119 * In the future, once there are more callers, this will have to
6120 * be worked out better as there needs to be private storage for
6121 * at least each type of caller (maybe have datap be an array of
6122 * void *'s so that we can index based upon the caller's index).
6125 /* walk hash list to update expire time */
6126 for (i = 0; i < 2; i++) {
6127 bp2 = page_capture_hash[index].lists[i].next;
6128 while (bp2 != &page_capture_hash[index].lists[i]) {
6129 if (bp2->pp == pp) {
6130 if (flags & CAPTURE_RETIRE) {
6131 if (!(bp2->flags & CAPTURE_RETIRE)) {
6132 page_retire_incr_pend_count(
6133 datap);
6134 bp2->flags = flags;
6135 bp2->expires = bp1->expires;
6136 bp2->datap = datap;
6138 } else {
6139 ASSERT(flags & CAPTURE_PHYSMEM);
6140 if (!(bp2->flags & CAPTURE_RETIRE) &&
6141 (datap == bp2->datap)) {
6142 bp2->expires = bp1->expires;
6145 mutex_exit(&page_capture_hash[index].
6146 pchh_mutex);
6147 rw_exit(&pc_cb[cb_index].cb_rwlock);
6148 kmem_free(bp1, sizeof (*bp1));
6149 return;
6151 bp2 = bp2->next;
6156 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes
6157 * and thus it either has to be set or not set and can't change
6158 * while holding the mutex above.
6160 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
6161 (void *)pp);
6165 * We have a page in our hands, let's try to make it ours by turning
6166 * it into a clean page like it had just come off the freelists.
6168 * Returns 0 on success, with the page still EXCL locked.
6169 * On failure, the page will be unlocked, and returns EAGAIN
6171 static int
6172 page_capture_clean_page(page_t *pp)
6174 page_t *newpp;
6175 int skip_unlock = 0;
6176 spgcnt_t count;
6177 page_t *tpp;
6178 int ret = 0;
6179 int extra;
6181 ASSERT(PAGE_EXCL(pp));
6182 ASSERT(!PP_RETIRED(pp));
6183 ASSERT(curthread->t_flag & T_CAPTURING);
6185 if (PP_ISFREE(pp)) {
6186 if (!page_reclaim(pp, NULL)) {
6187 skip_unlock = 1;
6188 ret = EAGAIN;
6189 goto cleanup;
6191 ASSERT(pp->p_szc == 0);
6192 if (pp->p_vnode != NULL) {
6194 * Since this page came from the
6195 * cachelist, we must destroy the
6196 * old vnode association.
6198 page_hashout(pp, false);
6200 goto cleanup;
6204 * If we know page_relocate will fail, skip it
6205 * It could still fail due to a UE on another page but we
6206 * can't do anything about that.
6208 if (pp->p_toxic & PR_UE) {
6209 goto skip_relocate;
6213 * It's possible that pages cannot have a vnode as fsflush comes
6214 * through and cleans up these pages. It's ugly but that's how it is.
6216 if (pp->p_vnode == NULL) {
6217 goto skip_relocate;
6221 * Page was not free, so let's try to relocate it.
6222 * page_relocate only works with root pages, so if this is not a root
6223 * page, we need to demote it to try and relocate it.
6224 * Unfortunately this is the best we can do right now.
6226 newpp = NULL;
6227 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
6228 if (page_try_demote_pages(pp) == 0) {
6229 ret = EAGAIN;
6230 goto cleanup;
6233 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
6234 if (ret == 0) {
6235 page_t *npp;
6236 /* unlock the new page(s) */
6237 while (count-- > 0) {
6238 ASSERT(newpp != NULL);
6239 npp = newpp;
6240 page_sub(&newpp, npp);
6241 page_unlock(npp);
6243 ASSERT(newpp == NULL);
6245 * Check to see if the page we have is too large.
6246 * If so, demote it freeing up the extra pages.
6248 if (pp->p_szc > 0) {
6249 /* For now demote extra pages to szc == 0 */
6250 extra = page_get_pagecnt(pp->p_szc) - 1;
6251 while (extra > 0) {
6252 tpp = pp->p_next;
6253 page_sub(&pp, tpp);
6254 tpp->p_szc = 0;
6255 page_free(tpp, 1);
6256 extra--;
6258 /* Make sure to set our page to szc 0 as well */
6259 ASSERT(pp->p_next == pp && pp->p_prev == pp);
6260 pp->p_szc = 0;
6262 goto cleanup;
6263 } else if (ret == EIO) {
6264 ret = EAGAIN;
6265 goto cleanup;
6266 } else {
6268 * Need to reset return type as we failed to relocate the page
6269 * but that does not mean that some of the next steps will not
6270 * work.
6272 ret = 0;
6275 skip_relocate:
6277 if (pp->p_szc > 0) {
6278 if (page_try_demote_pages(pp) == 0) {
6279 ret = EAGAIN;
6280 goto cleanup;
6284 ASSERT(pp->p_szc == 0);
6286 if (hat_ismod(pp)) {
6287 ret = EAGAIN;
6288 goto cleanup;
6290 if (PP_ISKAS(pp)) {
6291 ret = EAGAIN;
6292 goto cleanup;
6294 if (pp->p_lckcnt || pp->p_cowcnt) {
6295 ret = EAGAIN;
6296 goto cleanup;
6299 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
6300 ASSERT(!hat_page_is_mapped(pp));
6302 if (hat_ismod(pp)) {
6304 * This is a semi-odd case as the page is now modified but not
6305 * mapped as we just unloaded the mappings above.
6307 ret = EAGAIN;
6308 goto cleanup;
6310 if (pp->p_vnode != NULL) {
6311 page_hashout(pp, false);
6315 * At this point, the page should be in a clean state and
6316 * we can do whatever we want with it.
6319 cleanup:
6320 if (ret != 0) {
6321 if (!skip_unlock) {
6322 page_unlock(pp);
6324 } else {
6325 ASSERT(pp->p_szc == 0);
6326 ASSERT(PAGE_EXCL(pp));
6328 pp->p_next = pp;
6329 pp->p_prev = pp;
6331 return (ret);
6335 * Various callers of page_trycapture() can have different restrictions upon
6336 * what memory they have access to.
6337 * Returns 0 on success, with the following error codes on failure:
6338 * EPERM - The requested page is long term locked, and thus repeated
6339 * requests to capture this page will likely fail.
6340 * ENOMEM - There was not enough free memory in the system to safely
6341 * map the requested page.
6342 * ENOENT - The requested page was inside the kernel cage, and the
6343 * PHYSMEM_CAGE flag was not set.
6346 page_capture_pre_checks(page_t *pp, uint_t flags)
6348 ASSERT(pp != NULL);
6350 if (PP_ISKAS(pp)) {
6351 return (EPERM);
6354 /* only physmem currently has the restrictions checked below */
6355 if (!(flags & CAPTURE_PHYSMEM)) {
6356 return (0);
6359 if (availrmem < swapfs_minfree) {
6361 * We won't try to capture this page as we are
6362 * running low on memory.
6364 return (ENOMEM);
6366 return (0);
6370 * Once we have a page in our mitts, go ahead and complete the capture
6371 * operation.
6372 * Returns 1 on failure where page is no longer needed
6373 * Returns 0 on success
6374 * Returns -1 if there was a transient failure.
6375 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
6378 page_capture_take_action(page_t *pp, uint_t flags, void *datap)
6380 int cb_index;
6381 int ret = 0;
6382 page_capture_hash_bucket_t *bp1;
6383 page_capture_hash_bucket_t *bp2;
6384 int index;
6385 int found = 0;
6386 int i;
6388 ASSERT(PAGE_EXCL(pp));
6389 ASSERT(curthread->t_flag & T_CAPTURING);
6391 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
6392 if ((flags >> cb_index) & 1) {
6393 break;
6396 ASSERT(cb_index < PC_NUM_CALLBACKS);
6399 * Remove the entry from the page_capture hash, but don't free it yet
6400 * as we may need to put it back.
6401 * Since we own the page at this point in time, we should find it
6402 * in the hash if this is an ASYNC call. If we don't it's likely
6403 * that the page_capture_async() thread decided that this request
6404 * had expired, in which case we just continue on.
6406 if (flags & CAPTURE_ASYNC) {
6408 index = PAGE_CAPTURE_HASH(pp);
6410 mutex_enter(&page_capture_hash[index].pchh_mutex);
6411 for (i = 0; i < 2 && !found; i++) {
6412 bp1 = page_capture_hash[index].lists[i].next;
6413 while (bp1 != &page_capture_hash[index].lists[i]) {
6414 if (bp1->pp == pp) {
6415 bp1->next->prev = bp1->prev;
6416 bp1->prev->next = bp1->next;
6417 page_capture_hash[index].
6418 num_pages[bp1->pri]--;
6419 page_clrtoxic(pp, PR_CAPTURE);
6420 found = 1;
6421 break;
6423 bp1 = bp1->next;
6426 mutex_exit(&page_capture_hash[index].pchh_mutex);
6429 /* Synchronize with the unregister func. */
6430 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
6431 if (!pc_cb[cb_index].cb_active) {
6432 page_free(pp, 1);
6433 rw_exit(&pc_cb[cb_index].cb_rwlock);
6434 if (found) {
6435 kmem_free(bp1, sizeof (*bp1));
6437 return (1);
6441 * We need to remove the entry from the page capture hash and turn off
6442 * the PR_CAPTURE bit before calling the callback. We'll need to cache
6443 * the entry here, and then based upon the return value, cleanup
6444 * appropriately or re-add it to the hash, making sure that someone else
6445 * hasn't already done so.
6446 * It should be rare for the callback to fail and thus it's ok for
6447 * the failure path to be a bit complicated as the success path is
6448 * cleaner and the locking rules are easier to follow.
6451 ret = pc_cb[cb_index].cb_func(pp, datap, flags);
6453 rw_exit(&pc_cb[cb_index].cb_rwlock);
6456 * If this was an ASYNC request, we need to cleanup the hash if the
6457 * callback was successful or if the request was no longer valid.
6458 * For non-ASYNC requests, we return failure to map and the caller
6459 * will take care of adding the request to the hash.
6460 * Note also that the callback itself is responsible for the page
6461 * at this point in time in terms of locking ... The most common
6462 * case for the failure path should just be a page_free.
6464 if (ret >= 0) {
6465 if (found) {
6466 if (bp1->flags & CAPTURE_RETIRE) {
6467 page_retire_decr_pend_count(datap);
6469 kmem_free(bp1, sizeof (*bp1));
6471 return (ret);
6473 if (!found) {
6474 return (ret);
6477 ASSERT(flags & CAPTURE_ASYNC);
6480 * Check for expiration time first as we can just free it up if it's
6481 * expired.
6483 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
6484 kmem_free(bp1, sizeof (*bp1));
6485 return (ret);
6489 * The callback failed and there used to be an entry in the hash for
6490 * this page, so we need to add it back to the hash.
6492 mutex_enter(&page_capture_hash[index].pchh_mutex);
6493 if (!(pp->p_toxic & PR_CAPTURE)) {
6494 /* just add bp1 back to head of walked list */
6495 page_settoxic(pp, PR_CAPTURE);
6496 bp1->next = page_capture_hash[index].lists[1].next;
6497 bp1->prev = &page_capture_hash[index].lists[1];
6498 bp1->next->prev = bp1;
6499 bp1->pri = PAGE_CAPTURE_PRIO(pp);
6500 page_capture_hash[index].lists[1].next = bp1;
6501 page_capture_hash[index].num_pages[bp1->pri]++;
6502 mutex_exit(&page_capture_hash[index].pchh_mutex);
6503 return (ret);
6507 * Otherwise there was a new capture request added to the list.
6508 * Need to make sure that our original data is represented if
6509 * appropriate.
6511 for (i = 0; i < 2; i++) {
6512 bp2 = page_capture_hash[index].lists[i].next;
6513 while (bp2 != &page_capture_hash[index].lists[i]) {
6514 if (bp2->pp == pp) {
6515 if (bp1->flags & CAPTURE_RETIRE) {
6516 if (!(bp2->flags & CAPTURE_RETIRE)) {
6517 bp2->szc = bp1->szc;
6518 bp2->flags = bp1->flags;
6519 bp2->expires = bp1->expires;
6520 bp2->datap = bp1->datap;
6522 } else {
6523 ASSERT(bp1->flags & CAPTURE_PHYSMEM);
6524 if (!(bp2->flags & CAPTURE_RETIRE)) {
6525 bp2->szc = bp1->szc;
6526 bp2->flags = bp1->flags;
6527 bp2->expires = bp1->expires;
6528 bp2->datap = bp1->datap;
6531 page_capture_hash[index].num_pages[bp2->pri]--;
6532 bp2->pri = PAGE_CAPTURE_PRIO(pp);
6533 page_capture_hash[index].num_pages[bp2->pri]++;
6534 mutex_exit(&page_capture_hash[index].
6535 pchh_mutex);
6536 kmem_free(bp1, sizeof (*bp1));
6537 return (ret);
6539 bp2 = bp2->next;
6542 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
6543 /*NOTREACHED*/
6547 * Try to capture the given page for the caller specified in the flags
6548 * parameter. The page will either be captured and handed over to the
6549 * appropriate callback, or will be queued up in the page capture hash
6550 * to be captured asynchronously.
6551 * If the current request is due to an async capture, the page must be
6552 * exclusively locked before calling this function.
6553 * Currently szc must be 0 but in the future this should be expandable to
6554 * other page sizes.
6555 * Returns 0 on success, with the following error codes on failure:
6556 * EPERM - The requested page is long term locked, and thus repeated
6557 * requests to capture this page will likely fail.
6558 * ENOMEM - There was not enough free memory in the system to safely
6559 * map the requested page.
6560 * ENOENT - The requested page was inside the kernel cage, and the
6561 * CAPTURE_GET_CAGE flag was not set.
6562 * EAGAIN - The requested page could not be captured at this point in
6563 * time but future requests will likely work.
6564 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
6565 * was not set.
static int
page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;
	int cb_index;

	if (flags & CAPTURE_ASYNC) {
		ASSERT(PAGE_EXCL(pp));
		goto async;
	}

	/* Make sure there's enough availrmem ... */
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);
		ret = EAGAIN;
		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					ret = 0;
					goto own_page;
				}
			} else {
				return (EBUSY);
			}
		}
		page_capture_add_hash(pp, szc, flags, datap);
		return (ret);
	}

async:
	ASSERT(PAGE_EXCL(pp));

	/* For physmem async requests, make sure availrmem is still sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so let's add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

own_page:
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}
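
/*
 * For illustration only: a synchronous physmem client that has registered
 * its callback might drive this code through the wrapper below, treating
 * EPERM as a permanent failure and EAGAIN as retryable (in most EAGAIN
 * cases the request has also been queued for the async capture thread):
 *
 *	int err = page_trycapture(pp, 0, CAPTURE_PHYSMEM, datap);
 *	if (err == EAGAIN)
 *		... back off and retry, or wait for the async capture ...
 */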

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING; /* clearing works as we know it's set */
	return (ret);
}
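
/*
 * T_CAPTURING is set for the duration of the capture attempt so that
 * page_unlock() calls made from within the capture path itself can detect
 * the recursion and avoid triggering another capture of the same page.
 */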

/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again, as there
	 * are locations in the code where we unlock a page while holding
	 * the mutex, which can lead to the page being captured and
	 * eventually ending up here.  As we may be hashing out the old page
	 * and hashing into the retire vnode, we need to make sure we don't
	 * own them.  Other callbacks that do hash operations also need to
	 * make sure that before they hash in to a vnode they do not
	 * currently own the vphm mutex, otherwise there will be a panic.
	 */
	if (VMOBJECT_LOCKED(&retired_pages.v_object)) {
		page_unlock_nocapture(pp);
		return;
	}
	if (pp->p_vnode != NULL && VMOBJECT_LOCKED(&pp->p_vnode->v_object)) {
		page_unlock_nocapture(pp);
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	mp = &page_capture_hash[index].pchh_mutex;
	mutex_enter(mp);
	for (i = 0; i < 2; i++) {
		bp = page_capture_hash[index].lists[i].next;
		while (bp != &page_capture_hash[index].lists[i]) {
			if (bp->pp == pp) {
				szc = bp->szc;
				flags = bp->flags | CAPTURE_ASYNC;
				datap = bp->datap;
				mutex_exit(mp);
				(void) page_trycapture(pp, szc, flags, datap);
				return;
			}
			bp = bp->next;
		}
	}

	/* Failed to find page in hash so clear flags and unlock it. */
	page_clrtoxic(pp, PR_CAPTURE);
	page_unlock(pp);

	mutex_exit(mp);
}

void
page_capture_init()
{
	int i;
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		page_capture_hash[i].lists[0].next =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[0].prev =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[1].next =
		    &page_capture_hash[i].lists[1];
		page_capture_hash[i].lists[1].prev =
		    &page_capture_hash[i].lists[1];
	}

	pc_thread_shortwait = 23 * hz;
	pc_thread_longwait = 1201 * hz;
	pc_thread_retry = 3;
	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
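
/*
 * With hz ticks per second, the defaults above give the capture thread a
 * short wait of 23 seconds between retries when high priority pages are
 * pending, and a long wait of 1201 seconds (just over 20 minutes) when
 * there is nothing urgent to do.
 */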

/*
 * It is necessary to scrub any failing pages prior to reboot in order to
 * prevent a latent error trap from occurring on the next boot.
 */
void
page_retire_mdboot()
{
	page_t *pp;
	int i, j;
	page_capture_hash_bucket_t *bp;
	uchar_t pri;

	/* walk lists looking for pages to scrub */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0) {
				break;
			}
		}
		if (pri == PC_NUM_PRI)
			continue;

		mutex_enter(&page_capture_hash[i].pchh_mutex);

		for (j = 0; j < 2; j++) {
			bp = page_capture_hash[i].lists[j].next;
			while (bp != &page_capture_hash[i].lists[j]) {
				pp = bp->pp;
				if (PP_TOXIC(pp)) {
					if (page_trylock(pp, SE_EXCL)) {
						PP_CLRFREE(pp);
						pagescrub(pp, 0, PAGESIZE);
						page_unlock(pp);
					}
				}
				bp = bp->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}

/*
 * Walk the page_capture_hash trying to capture pages and also clean up old
 * entries which have expired.
 */
void
page_capture_async()
{
	page_t *pp;
	int i;
	int ret;
	page_capture_hash_bucket_t *bp1, *bp2;
	uint_t szc;
	uint_t flags;
	void *datap;
	uchar_t pri;

	/* If there are outstanding pages to be captured, get to work */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0)
				break;
		}
		if (pri == PC_NUM_PRI)
			continue;

		/* Append list 1 to list 0 and then walk through list 0 */
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		bp1 = &page_capture_hash[i].lists[1];
		bp2 = bp1->next;
		if (bp1 != bp2) {
			bp1->prev->next = page_capture_hash[i].lists[0].next;
			bp2->prev = &page_capture_hash[i].lists[0];
			page_capture_hash[i].lists[0].next->prev = bp1->prev;
			page_capture_hash[i].lists[0].next = bp2;
			bp1->next = bp1;
			bp1->prev = bp1;
		}

		/* list[1] will be empty now */

		bp1 = page_capture_hash[i].lists[0].next;
		while (bp1 != &page_capture_hash[i].lists[0]) {
			/* Check expiration time */
			if ((ddi_get_lbolt() > bp1->expires &&
			    bp1->expires != -1) ||
			    page_deleted(bp1->pp)) {
				page_capture_hash[i].lists[0].next = bp1->next;
				bp1->next->prev =
				    &page_capture_hash[i].lists[0];
				page_capture_hash[i].num_pages[bp1->pri]--;

				/*
				 * We can safely remove the PR_CAPTURE bit
				 * without holding the EXCL lock on the page
				 * as the PR_CAPTURE bit requires that the
				 * page_capture_hash[].pchh_mutex be held
				 * to modify it.
				 */
				page_clrtoxic(bp1->pp, PR_CAPTURE);
				mutex_exit(&page_capture_hash[i].pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				mutex_enter(&page_capture_hash[i].pchh_mutex);

				/*
				 * The list could have changed while the
				 * mutex was dropped, so restart at the head.
				 */
				bp1 = page_capture_hash[i].lists[0].next;
				continue;
			}
			pp = bp1->pp;
			szc = bp1->szc;
			flags = bp1->flags;
			datap = bp1->datap;
			mutex_exit(&page_capture_hash[i].pchh_mutex);
			if (page_trylock(pp, SE_EXCL)) {
				ret = page_trycapture(pp, szc,
				    flags | CAPTURE_ASYNC, datap);
			} else {
				ret = 1;	/* move to walked hash */
			}

			if (ret != 0) {
				/* Move to walked hash */
				(void) page_capture_move_to_walked(pp);
			}
			mutex_enter(&page_capture_hash[i].pchh_mutex);
			bp1 = page_capture_hash[i].lists[0].next;
		}

		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}
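
/*
 * A note on the list splice above: because each bucket list is circular
 * with a sentinel node, moving the entire contents of lists[1] to the
 * head of lists[0] is a constant-time pointer exchange, after which
 * lists[1] points back at itself and is empty.
 */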

/*
 * This function is called by the page_capture_thread, and is needed
 * in order to initiate aio cleanup, so that pages used in aio
 * will be unlocked and subsequently retired by page_capture_thread.
 */
static int
do_aio_cleanup(void)
{
	proc_t *procp;
	int (*aio_cleanup_dr_delete_memory)(proc_t *);
	int cleaned = 0;

	if (modload("sys", "kaio") == -1) {
		cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
		return (0);
	}
	/*
	 * We use the aio_cleanup_dr_delete_memory function to
	 * initiate the actual clean up; this function will wake
	 * up the per-process aio_cleanup_thread.
	 */
	aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
	if (aio_cleanup_dr_delete_memory == NULL) {
		cmn_err(CE_WARN,
		    "aio_cleanup_dr_delete_memory not found in kaio");
		return (0);
	}
	mutex_enter(&pidlock);
	for (procp = practive; (procp != NULL); procp = procp->p_next) {
		mutex_enter(&procp->p_lock);
		if (procp->p_aio != NULL) {
			/* clean up proc's outstanding kaio */
			cleaned += (*aio_cleanup_dr_delete_memory)(procp);
		}
		mutex_exit(&procp->p_lock);
	}
	mutex_exit(&pidlock);
	return (cleaned);
}

/*
 * Helper function for page_capture_thread.
 */
static void
page_capture_handle_outstanding(void)
{
	int ntry;

	/* Reap pages before attempting to capture pages */
	kmem_reap();

	if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
	    hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
		/*
		 * Note: Purging only for platforms that support
		 * ISM hat_pageunload() - mainly SPARC.  On x86/x64
		 * platforms ISM pages are SE_SHARED locked until destroyed.
		 */

		/* disable and purge seg_pcache */
		(void) seg_p_disable();
		for (ntry = 0; ntry < pc_thread_retry; ntry++) {
			if (!page_retire_pend_count())
				break;
			if (do_aio_cleanup()) {
				/*
				 * allow the apps' cleanup threads
				 * to run
				 */
				delay(pc_thread_shortwait);
			}
			page_capture_async();
		}
		/* reenable seg_pcache */
		seg_p_enable();

		/* completed what can be done.  break out */
		return;
	}

	/*
	 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
	 * and then attempt to capture.
	 */
	seg_preap();
	page_capture_async();
}

/*
 * The page_capture_thread loops forever, looking to see if there are
 * pages still waiting to be captured.
 */
static void
page_capture_thread(void)
{
	callb_cpr_t c;
	int i;
	int high_pri_pages;
	int low_pri_pages;
	clock_t timeout;

	CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");

	mutex_enter(&pc_thread_mutex);
	for (;;) {
		high_pri_pages = 0;
		low_pri_pages = 0;
		for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
			high_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_HI];
			low_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_LO];
		}

		timeout = pc_thread_longwait;
		if (high_pri_pages != 0) {
			timeout = pc_thread_shortwait;
			page_capture_handle_outstanding();
		} else if (low_pri_pages != 0) {
			page_capture_async();
		}
		CALLB_CPR_SAFE_BEGIN(&c);
		(void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
		    timeout, TR_CLOCK_TICK);
		CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
	}
	/*NOTREACHED*/
}
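
/*
 * The loop above is priority driven: pending high priority pages get the
 * full page_capture_handle_outstanding() treatment and a short sleep
 * (pc_thread_shortwait) before the next pass, low priority pages get a
 * lighter page_capture_async() walk, and an empty hash lets the thread
 * sleep for pc_thread_longwait, until either the timeout expires or
 * pc_cv is signalled.
 */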

/*
 * Attempt to locate a bucket that has enough pages to satisfy the request.
 * The initial check is done without the lock to avoid unneeded contention.
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in a bucket.
 */
static int
pcf_decrement_bucket(pgcnt_t npages)
{
	struct pcf *p;
	struct pcf *q;
	int i;

	p = &pcf[PCF_INDEX()];
	q = &pcf[pcf_fanout];
	for (i = 0; i < pcf_fanout; i++) {
		if (p->pcf_count > npages) {
			/*
			 * a good one to try.
			 */
			mutex_enter(&p->pcf_lock);
			if (p->pcf_count > npages) {
				p->pcf_count -= (uint_t)npages;
				/*
				 * freemem is not protected by any lock.
				 * Thus, we cannot have any assertion
				 * containing freemem here.
				 */
				freemem -= npages;
				mutex_exit(&p->pcf_lock);
				return (1);
			}
			mutex_exit(&p->pcf_lock);
		}
		p++;
		if (p >= q) {
			p = pcf;
		}
	}
	return (0);
}
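
/*
 * The pcf array fans the free page count out across pcf_fanout buckets so
 * that concurrent allocators rarely contend on a single lock.  Note the
 * double check of pcf_count above: the first test is done without the
 * bucket lock as a cheap filter, and is repeated under pcf_lock before
 * the count is actually decremented.
 */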

/*
 * Arguments:
 *	pcftotal_ret:	If the value is not NULL and we have walked all the
 *			buckets but did not find enough pages then it will
 *			be set to the total number of pages in all the pcf
 *			buckets.
 *	npages:		Is the number of pages we have been requested to
 *			find.
 *	unlock:		If set to 0 we will leave the buckets locked if the
 *			requested number of pages are not found.
 *
 * Go and try to satisfy the page request from any number of buckets.
 * This can be a very expensive operation as we have to lock the buckets
 * we are checking (and keep them locked), starting at bucket 0.
 *
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in the buckets.
 */
static int
pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
{
	struct pcf *p;
	pgcnt_t pcftotal;
	int i;

	p = pcf;
	/* try to collect pages from several pcf bins */
	for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		pcftotal += p->pcf_count;
		if (pcftotal >= npages) {
			/*
			 * Wow!  There are enough pages laying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock. So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;
			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}
	if (unlock) {
		/* failed to collect pages - release the locks */
		while (--p >= pcf) {
			mutex_exit(&p->pcf_lock);
		}
	}
	if (pcftotal_ret != NULL)
		*pcftotal_ret = pcftotal;
	return (0);
}
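
/*
 * When unlock is 0 and the request cannot be satisfied, every pcf_lock is
 * left held; the caller is then responsible for dropping the locks once it
 * has finished examining or adjusting the per-bucket counts.
 */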

static int
vmobject_cmp(const void *va, const void *vb)
{
	const page_t *a = va;
	const page_t *b = vb;

	if (a->p_offset > b->p_offset)
		return (1);
	if (a->p_offset < b->p_offset)
		return (-1);
	return (0);
}
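
/*
 * vmobject_cmp() orders pages by p_offset and returns the -1/0/1 result
 * that AVL tree comparators are required to produce; the per-object AVL
 * tree built on it below serves as an index for offset-based page lookups.
 */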

void
vmobject_init(struct vmobject *obj, struct vnode *vnode)
{
	avl_create(&obj->tree, vmobject_cmp, sizeof (struct page),
	    offsetof(struct page, p_object_node));
	list_create(&obj->list, sizeof (struct page),
	    offsetof(struct page, p_list.vnode));
	mutex_init(&obj->lock, NULL, MUTEX_DEFAULT, NULL);

	obj->vnode = vnode;
}

void
vmobject_fini(struct vmobject *obj)
{
	mutex_destroy(&obj->lock);
	list_destroy(&obj->list);
	avl_destroy(&obj->tree);
}
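
/*
 * vmobject_fini() tears the object down in the reverse order of
 * vmobject_init().  The tree and list are expected to be empty by the
 * time it runs; avl_destroy() and list_destroy() do not free any
 * entries that might remain.
 */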