4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/types.h>
27 #include <sys/cmn_err.h>
30 #include <sys/systm.h>
31 #include <sys/machsystm.h> /* for page_freelist_coalesce() */
32 #include <sys/errno.h>
33 #include <sys/memnode.h>
34 #include <sys/memlist.h>
35 #include <sys/memlist_impl.h>
36 #include <sys/tuneable.h>
39 #include <sys/debug.h>
41 #include <sys/callb.h>
42 #include <sys/memlist_plat.h> /* for installed_top_size() */
43 #include <sys/condvar_impl.h> /* for CV_HAS_WAITERS() */
44 #include <sys/dumphdr.h> /* for dump_resize() */
45 #include <sys/atomic.h> /* for use in stats collection */
46 #include <sys/rwlock.h>
47 #include <sys/cpuvar.h>
48 #include <vm/seg_kmem.h>
49 #include <vm/seg_kpm.h>
51 #include <vm/vm_dep.h>
52 #define SUNDDI_IMPL /* so sunddi.h will not redefine splx() et al */
53 #include <sys/sunddi.h>
54 #include <sys/mem_config.h>
55 #include <sys/mem_cage.h>
58 #include <sys/modctl.h>
60 extern struct memlist
*phys_avail
;
62 extern uint_t
page_ctrs_adjust(int);
63 void page_ctrs_cleanup(void);
64 static void kphysm_setup_post_add(pgcnt_t
);
65 static int kphysm_setup_pre_del(pgcnt_t
);
66 static void kphysm_setup_post_del(pgcnt_t
, int);
68 static int kphysm_split_memseg(pfn_t base
, pgcnt_t npgs
);
70 static int delspan_reserve(pfn_t
, pgcnt_t
);
71 static void delspan_unreserve(pfn_t
, pgcnt_t
);
73 kmutex_t memseg_lists_lock
;
74 struct memseg
*memseg_va_avail
;
75 struct memseg
*memseg_alloc(void);
76 static struct memseg
*memseg_delete_junk
;
77 static struct memseg
*memseg_edit_junk
;
78 void memseg_remap_init(void);
79 static void memseg_remap_to_dummy(struct memseg
*);
80 static void kphysm_addmem_error_undospan(pfn_t
, pgcnt_t
);
81 static struct memseg
*memseg_reuse(pgcnt_t
);
83 static struct kmem_cache
*memseg_cache
;
86 * Interfaces to manage externally allocated
87 * page_t memory (metadata) for a memseg.
89 #pragma weak memseg_alloc_meta
90 #pragma weak memseg_free_meta
91 #pragma weak memseg_get_metapfn
92 #pragma weak memseg_remap_meta
94 extern int ppvm_enable
;
95 extern page_t
*ppvm_base
;
96 extern int memseg_alloc_meta(pfn_t
, pgcnt_t
, void **, pgcnt_t
*);
97 extern void memseg_free_meta(void *, pgcnt_t
);
98 extern pfn_t
memseg_get_metapfn(void *, pgcnt_t
);
99 extern void memseg_remap_meta(struct memseg
*);
100 static int memseg_is_dynamic(struct memseg
*);
101 static int memseg_includes_meta(struct memseg
*);
102 pfn_t
memseg_get_start(struct memseg
*);
103 static void memseg_cpu_vm_flush(void);
105 int meta_alloc_enable
;
108 static int memseg_debug
;
109 #define MEMSEG_DEBUG(args...) if (memseg_debug) printf(args)
111 #define MEMSEG_DEBUG(...)
115 * Add a chunk of memory to the system.
116 * base: starting PAGESIZE page of new memory.
117 * npgs: length in PAGESIZE pages.
119 * Adding mem this way doesn't increase the size of the hash tables;
120 * growing them would be too hard. This should be OK, but adding memory
121 * dynamically most likely means more hash misses, since the tables will
122 * be smaller than they otherwise would be.
125 kphysm_add_memory_dynamic(pfn_t base
, pgcnt_t npgs
)
128 page_t
*opp
, *oepp
, *segpp
;
132 pfn_t pt_base
= base
;
145 void *metabase
= (void *)base
;
147 offset_t kpm_pages_off
;
150 "?kphysm_add_memory_dynamic: adding %ldK at 0x%" PRIx64
"\n",
151 npgs
<< (PAGESHIFT
- 10), (uint64_t)base
<< PAGESHIFT
);
154 * Add this span in the delete list to prevent interactions.
156 if (!delspan_reserve(base
, npgs
)) {
157 return (KPHYSM_ESPAN
);
160 * Check to see if any of the memory span has been added
161 * by trying an add to the installed memory list. This
162 * forms the interlocking process for add.
165 memlist_write_lock();
167 mlret
= memlist_add_span((uint64_t)(pt_base
) << PAGESHIFT
,
168 (uint64_t)(tpgs
) << PAGESHIFT
, &phys_install
);
170 if (mlret
== MEML_SPANOP_OK
)
171 installed_top_size(phys_install
, &physmax
, &physinstalled
);
173 memlist_write_unlock();
175 if (mlret
!= MEML_SPANOP_OK
) {
176 if (mlret
== MEML_SPANOP_EALLOC
) {
177 delspan_unreserve(pt_base
, tpgs
);
178 return (KPHYSM_ERESOURCE
);
179 } else if (mlret
== MEML_SPANOP_ESPAN
) {
180 delspan_unreserve(pt_base
, tpgs
);
181 return (KPHYSM_ESPAN
);
183 delspan_unreserve(pt_base
, tpgs
);
184 return (KPHYSM_ERESOURCE
);
188 if (meta_alloc_enable
) {
190 * Allocate the page_t's from existing memory;
191 * if that fails, allocate from the incoming memory.
193 rv
= memseg_alloc_meta(base
, npgs
, &metabase
, &metapgs
);
194 if (rv
== KPHYSM_OK
) {
196 ASSERT(btopr(npgs
* sizeof (page_t
)) <= metapgs
);
203 * We store the page_t's for this new memory in the first
204 * few pages of the chunk. Here, we go and get'em ...
208 * The expression after the '-' gives the number of pages
209 * that will fit in the new memory based on a requirement
210 * of (PAGESIZE + sizeof (page_t)) bytes per page.
212 metapgs
= npgs
- (((uint64_t)(npgs
) << PAGESHIFT
) /
213 (PAGESIZE
+ sizeof (page_t
)));
218 ASSERT(btopr(npgs
* sizeof (page_t
)) <= metapgs
);
220 exhausted
= (metapgs
== 0 || npgs
== 0);
222 if (kpm_enable
&& !exhausted
) {
223 pgcnt_t start
, end
, nkpmpgs_prelim
;
227 * A viable kpm large page mapping must not overlap two
228 * dynamic memsegs. Therefore the total size is checked
229 * to be at least kpm_pgsz and also whether start and end
230 * points are at least kpm_pgsz aligned.
232 if (ptokpmp(tpgs
) < 1 || pmodkpmp(pt_base
) ||
233 pmodkpmp(base
+ npgs
)) {
235 kphysm_addmem_error_undospan(pt_base
, tpgs
);
238 * There is no specific error code for violating
239 * kpm granularity constraints.
241 return (KPHYSM_ENOTVIABLE
);
244 start
= kpmptop(ptokpmp(base
));
245 end
= kpmptop(ptokpmp(base
+ npgs
));
246 nkpmpgs_prelim
= ptokpmp(end
- start
);
247 ptsz
= npgs
* sizeof (page_t
);
248 metapgs
= btopr(ptsz
+ nkpmpgs_prelim
* KPMPAGE_T_SZ
);
249 exhausted
= (tpgs
<= metapgs
);
251 npgs
= tpgs
- metapgs
;
252 base
= pt_base
+ metapgs
;
255 start
= kpmptop(ptokpmp(base
));
256 nkpmpgs
= ptokpmp(end
- start
);
257 kpm_pages_off
= ptsz
+
258 (nkpmpgs_prelim
- nkpmpgs
) * KPMPAGE_T_SZ
;
263 * Is memory area supplied too small?
266 kphysm_addmem_error_undospan(pt_base
, tpgs
);
268 * There is no specific error code for 'too small'.
270 return (KPHYSM_ERESOURCE
);
275 * We may re-use a previously allocated VA space for the page_ts
276 * eventually, but we need to initialize and lock the pages first.
280 * Get an address in the kernel address map, map
281 * the page_t pages and see if we can touch them.
284 mapva
= vmem_alloc(heap_arena
, ptob(metapgs
), VM_NOSLEEP
);
286 cmn_err(CE_WARN
, "kphysm_add_memory_dynamic:"
287 " Can't allocate VA for page_ts");
290 memseg_free_meta(metabase
, metapgs
);
291 kphysm_addmem_error_undospan(pt_base
, tpgs
);
293 return (KPHYSM_ERESOURCE
);
297 if (physmax
< (pt_base
+ tpgs
))
298 physmax
= (pt_base
+ tpgs
);
301 * In the remapping code we map one page at a time so we must do
302 * the same here to match mapping sizes.
306 for (pnum
= 0; pnum
< metapgs
; pnum
++) {
308 pfn
= memseg_get_metapfn(metabase
, (pgcnt_t
)pnum
);
309 hat_devload(kas
.a_hat
, vaddr
, ptob(1), pfn
,
310 PROT_READ
| PROT_WRITE
,
311 HAT_LOAD
| HAT_LOAD_LOCK
| HAT_LOAD_NOCONSIST
);
316 if (ddi_peek32((dev_info_t
*)NULL
,
317 (int32_t *)pp
, (int32_t *)0) == DDI_FAILURE
) {
319 cmn_err(CE_WARN
, "kphysm_add_memory_dynamic:"
320 " Can't access pp array at 0x%p [phys 0x%lx]",
321 (void *)pp
, pt_base
);
323 hat_unload(kas
.a_hat
, (caddr_t
)pp
, ptob(metapgs
),
324 HAT_UNLOAD_UNMAP
|HAT_UNLOAD_UNLOCK
);
326 vmem_free(heap_arena
, mapva
, ptob(metapgs
));
328 memseg_free_meta(metabase
, metapgs
);
329 kphysm_addmem_error_undospan(pt_base
, tpgs
);
331 return (KPHYSM_EFAULT
);
335 * Add this memory slice to its memory node translation.
337 * Note that right now, each node may have only one slice;
338 * this may change with COD or in larger SSM systems with
339 * nested latency groups, so we must not assume that the
340 * node does not yet exist.
342 * Note that there may be multiple memory nodes associated with
343 * a single lgrp node on x86 systems.
345 pnum
= pt_base
+ tpgs
- 1;
346 mem_node_add_range(pt_base
, pnum
);
349 * Allocate or resize page counters as necessary to accommodate
350 * the increase in memory pages.
352 mnode
= PFN_2_MEM_NODE(pnum
);
353 PAGE_CTRS_ADJUST(base
, npgs
, rv
);
356 mem_node_del_range(pt_base
, pnum
);
358 /* cleanup the page counters */
361 hat_unload(kas
.a_hat
, (caddr_t
)pp
, ptob(metapgs
),
362 HAT_UNLOAD_UNMAP
|HAT_UNLOAD_UNLOCK
);
364 vmem_free(heap_arena
, mapva
, ptob(metapgs
));
366 memseg_free_meta(metabase
, metapgs
);
367 kphysm_addmem_error_undospan(pt_base
, tpgs
);
369 return (KPHYSM_ERESOURCE
);
373 * Update the phys_avail memory list.
374 * The phys_install list was done at the start.
377 memlist_write_lock();
379 mlret
= memlist_add_span((uint64_t)(base
) << PAGESHIFT
,
380 (uint64_t)(npgs
) << PAGESHIFT
, &phys_avail
);
381 ASSERT(mlret
== MEML_SPANOP_OK
);
383 memlist_write_unlock();
385 /* See if we can find a memseg to re-use. */
387 seg
= memseg_reuse(0);
388 reuse
= 1; /* force unmapping of temp mapva */
389 flags
= MEMSEG_DYNAMIC
| MEMSEG_META_ALLOC
;
391 * There is a 1:1 fixed relationship between a pfn
392 * and a page_t VA. The pfn is used as an index into
393 * the ppvm_base page_t table in order to calculate
394 * the page_t base address for a given pfn range.
396 segpp
= ppvm_base
+ base
;
398 seg
= memseg_reuse(metapgs
);
399 reuse
= (seg
!= NULL
);
400 flags
= MEMSEG_DYNAMIC
| MEMSEG_META_INCL
;
405 * Initialize the memseg structure representing this memory
406 * and add it to the existing list of memsegs. Do some basic
407 * initialization and add the memory to the system.
408 * In order to prevent lock deadlocks, the add_physmem()
409 * code is repeated here, but split into several stages.
411 * If a memseg is reused, invalidate memseg pointers in
412 * all cpu vm caches. We need to do this this since the check
413 * pp >= seg->pages && pp < seg->epages
414 * used in various places is not atomic and so the first compare
415 * can happen before reuse and the second compare after reuse.
416 * The invalidation ensures that a memseg is not deferenced while
417 * it's page/pfn pointers are changing.
420 seg
= memseg_alloc();
422 seg
->msegflags
= flags
;
423 MEMSEG_DEBUG("memseg_get: alloc seg=0x%p, pages=0x%p",
424 (void *)seg
, (void *)(seg
->pages
));
427 ASSERT(seg
->msegflags
== flags
);
428 ASSERT(seg
->pages_base
== seg
->pages_end
);
429 MEMSEG_DEBUG("memseg_get: reuse seg=0x%p, pages=0x%p",
430 (void *)seg
, (void *)(seg
->pages
));
432 memseg_cpu_vm_flush();
437 seg
->epages
= seg
->pages
+ npgs
;
438 seg
->pages_base
= base
;
439 seg
->pages_end
= base
+ npgs
;
442 * Initialize metadata. The page_ts are set to locked state
445 bzero((caddr_t
)pp
, ptob(metapgs
));
447 pfn
= seg
->pages_base
;
448 /* Save the original pp base in case we reuse a memseg. */
451 for (pp
= opp
; pp
< oepp
; pp
++) {
454 page_iolock_init(pp
);
455 while (!page_lock(pp
, SE_EXCL
, (kmutex_t
*)NULL
, P_RECLAIM
))
457 pp
->p_offset
= (u_offset_t
)-1;
461 /* Remap our page_ts to the re-used memseg VA space. */
463 vaddr
= (caddr_t
)seg
->pages
;
464 for (pnum
= 0; pnum
< metapgs
; pnum
++) {
466 pfn
= memseg_get_metapfn(metabase
,
468 hat_devload(kas
.a_hat
, vaddr
, ptob(1), pfn
,
469 PROT_READ
| PROT_WRITE
,
470 HAT_LOAD_REMAP
| HAT_LOAD
| HAT_LOAD_NOCONSIST
);
475 hat_unload(kas
.a_hat
, (caddr_t
)opp
, ptob(metapgs
),
476 HAT_UNLOAD_UNMAP
|HAT_UNLOAD_UNLOCK
);
478 vmem_free(heap_arena
, mapva
, ptob(metapgs
));
481 hat_kpm_addmem_mseg_update(seg
, nkpmpgs
, kpm_pages_off
);
486 * The new memseg is inserted at the beginning of the list.
487 * Not only does this save searching for the tail, but in the
488 * case of a re-used memseg, it solves the problem of what
489 * happens if some process has still got a pointer to the
490 * memseg and follows the next pointer to continue traversing
494 hat_kpm_addmem_mseg_insert(seg
);
499 hat_kpm_addmem_memsegs_update(seg
);
508 * Recalculate the paging parameters now total_pages has changed.
509 * This will also cause the clock hands to be reset before next use.
515 PLCNT_MODIFY_MAX(seg
->pages_base
, (long)npgs
);
518 * Free the pages outside the lock to avoid locking loops.
520 for (pp
= seg
->pages
; pp
< seg
->epages
; pp
++) {
525 * Now that we've updated the appropriate memory lists we
526 * need to reset a number of globals, since we've increased memory.
527 * Several have already been updated for us as noted above. The
528 * globals we're interested in at this point are:
529 * physmax - highest page frame number.
530 * physinstalled - number of pages currently installed (done earlier)
531 * maxmem - max free pages in the system
532 * physmem - physical memory pages available
533 * availrmem - real memory available
536 mutex_enter(&freemem_lock
);
540 availrmem_initial
+= npgs
;
542 mutex_exit(&freemem_lock
);
546 page_freelist_coalesce_all(mnode
);
548 kphysm_setup_post_add(npgs
);
550 cmn_err(CE_CONT
, "?kphysm_add_memory_dynamic: mem = %ldK "
552 physinstalled
<< (PAGESHIFT
- 10),
553 (uint64_t)physinstalled
<< PAGESHIFT
);
555 avmem
= (uint64_t)freemem
<< PAGESHIFT
;
556 cmn_err(CE_CONT
, "?kphysm_add_memory_dynamic: "
557 "avail mem = %" PRId64
"\n", avmem
);
560 * Update lgroup generation number on single lgroup systems
563 lgrp_config(LGRP_CONFIG_GEN_UPDATE
, 0, 0);
566 * Inform DDI of update
568 ddi_mem_update((uint64_t)(pt_base
) << PAGESHIFT
,
569 (uint64_t)(tpgs
) << PAGESHIFT
);
571 delspan_unreserve(pt_base
, tpgs
);
573 return (KPHYSM_OK
); /* Successfully added system memory */
577 * There are various error conditions in kphysm_add_memory_dynamic()
578 * which require a rollback of already changed global state.
581 kphysm_addmem_error_undospan(pfn_t pt_base
, pgcnt_t tpgs
)
585 /* Unreserve memory span. */
586 memlist_write_lock();
588 mlret
= memlist_delete_span(
589 (uint64_t)(pt_base
) << PAGESHIFT
,
590 (uint64_t)(tpgs
) << PAGESHIFT
, &phys_install
);
592 ASSERT(mlret
== MEML_SPANOP_OK
);
593 phys_install_has_changed();
594 installed_top_size(phys_install
, &physmax
, &physinstalled
);
596 memlist_write_unlock();
597 delspan_unreserve(pt_base
, tpgs
);
601 * Only return an available memseg of exactly the right size
602 * if size is required.
603 * When the meta data area has it's own virtual address space
604 * we will need to manage this more carefully and do best fit
605 * allocations, possibly splitting an available area.
608 memseg_reuse(pgcnt_t metapgs
)
611 struct memseg
**segpp
, *seg
;
613 mutex_enter(&memseg_lists_lock
);
615 segpp
= &memseg_va_avail
;
616 for (; (seg
= *segpp
) != NULL
; segpp
= &seg
->lnext
) {
620 * Make sure we are reusing the right segment type.
622 type
= metapgs
? MEMSEG_META_INCL
: MEMSEG_META_ALLOC
;
624 if ((seg
->msegflags
& (MEMSEG_META_INCL
| MEMSEG_META_ALLOC
))
629 end
= hat_kpm_mseg_reuse(seg
);
631 end
= (caddr_t
)seg
->epages
;
634 * Check for the right size if it is provided.
636 if (!metapgs
|| btopr(end
- (caddr_t
)seg
->pages
) == metapgs
) {
642 mutex_exit(&memseg_lists_lock
);
647 static uint_t handle_gen
;
650 struct memdelspan
*mds_next
;
654 uint_t
*mds_bitmap_retired
;
657 #define NBPBMW (sizeof (uint_t) * NBBY)
658 #define MDS_BITMAPBYTES(MDSP) \
659 ((((MDSP)->mds_npgs + NBPBMW - 1) / NBPBMW) * sizeof (uint_t))
661 struct transit_list
{
662 struct transit_list
*trl_next
;
663 struct memdelspan
*trl_spans
;
667 struct transit_list_head
{
669 struct transit_list
*trh_head
;
672 static struct transit_list_head transit_list_head
;
675 static void transit_list_collect(struct mem_handle
*, int);
676 static void transit_list_insert(struct transit_list
*);
677 static void transit_list_remove(struct transit_list
*);
680 #define MEM_DEL_STATS
684 static int mem_del_stat_print
= 0;
685 struct mem_del_stat
{
698 uint_t first_notfree
;
708 uint64_t nticks_total
;
709 uint64_t nticks_pgrp
;
719 * The stat values are only incremented in the delete thread
720 * so no locking or atomic required.
722 #define MDSTAT_INCR(MHP, FLD) (MHP)->mh_delstat.FLD++
723 #define MDSTAT_TOTAL(MHP, ntck) ((MHP)->mh_delstat.nticks_total += (ntck))
724 #define MDSTAT_PGRP(MHP, ntck) ((MHP)->mh_delstat.nticks_pgrp += (ntck))
725 static void mem_del_stat_print_func(struct mem_handle
*);
726 #define MDSTAT_PRINT(MHP) mem_del_stat_print_func((MHP))
727 #else /* MEM_DEL_STATS */
728 #define MDSTAT_INCR(MHP, FLD)
729 #define MDSTAT_TOTAL(MHP, ntck)
730 #define MDSTAT_PGRP(MHP, ntck)
731 #define MDSTAT_PRINT(MHP)
732 #endif /* MEM_DEL_STATS */
734 typedef enum mhnd_state
{MHND_FREE
= 0, MHND_INIT
, MHND_STARTING
,
735 MHND_RUNNING
, MHND_DONE
, MHND_RELEASE
} mhnd_state_t
;
738 * mh_mutex must be taken to examine or change mh_exthandle and mh_state.
739 * The mutex may not be required for other fields, dependent on mh_state.
743 struct mem_handle
*mh_next
;
744 memhandle_t mh_exthandle
;
745 mhnd_state_t mh_state
;
746 struct transit_list mh_transit
;
747 pgcnt_t mh_phys_pages
;
749 pgcnt_t mh_hold_todo
;
750 void (*mh_delete_complete
)(void *, int error
);
751 void *mh_delete_complete_arg
;
752 volatile uint_t mh_cancel
;
753 volatile uint_t mh_dr_aio_cleanup_cancel
;
754 volatile uint_t mh_aio_cleanup_done
;
756 kthread_id_t mh_thread_id
;
757 page_t
*mh_deleted
; /* link through p_next */
759 struct mem_del_stat mh_delstat
;
760 #endif /* MEM_DEL_STATS */
763 static struct mem_handle
*mem_handle_head
;
764 static kmutex_t mem_handle_list_mutex
;
766 static struct mem_handle
*
767 kphysm_allocate_mem_handle()
769 struct mem_handle
*mhp
;
771 mhp
= kmem_zalloc(sizeof (struct mem_handle
), KM_SLEEP
);
772 mutex_init(&mhp
->mh_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
773 mutex_enter(&mem_handle_list_mutex
);
774 mutex_enter(&mhp
->mh_mutex
);
775 /* handle_gen is protected by list mutex. */
776 mhp
->mh_exthandle
= (memhandle_t
)(uintptr_t)(++handle_gen
);
777 mhp
->mh_next
= mem_handle_head
;
778 mem_handle_head
= mhp
;
779 mutex_exit(&mem_handle_list_mutex
);
785 kphysm_free_mem_handle(struct mem_handle
*mhp
)
787 struct mem_handle
**mhpp
;
789 ASSERT(mutex_owned(&mhp
->mh_mutex
));
790 ASSERT(mhp
->mh_state
== MHND_FREE
);
792 * Exit the mutex to preserve locking order. This is OK
793 * here as once in the FREE state, the handle cannot
794 * be found by a lookup.
796 mutex_exit(&mhp
->mh_mutex
);
798 mutex_enter(&mem_handle_list_mutex
);
799 mhpp
= &mem_handle_head
;
800 while (*mhpp
!= NULL
&& *mhpp
!= mhp
)
801 mhpp
= &(*mhpp
)->mh_next
;
802 ASSERT(*mhpp
== mhp
);
804 * No need to lock the handle (mh_mutex) as only
805 * mh_next changing and this is the only thread that
806 * can be referncing mhp.
808 *mhpp
= mhp
->mh_next
;
809 mutex_exit(&mem_handle_list_mutex
);
811 mutex_destroy(&mhp
->mh_mutex
);
812 kmem_free(mhp
, sizeof (struct mem_handle
));
816 * This function finds the internal mem_handle corresponding to an
817 * external handle and returns it with the mh_mutex held.
819 static struct mem_handle
*
820 kphysm_lookup_mem_handle(memhandle_t handle
)
822 struct mem_handle
*mhp
;
824 mutex_enter(&mem_handle_list_mutex
);
825 for (mhp
= mem_handle_head
; mhp
!= NULL
; mhp
= mhp
->mh_next
) {
826 if (mhp
->mh_exthandle
== handle
) {
827 mutex_enter(&mhp
->mh_mutex
);
829 * The state of the handle could have been changed
830 * by kphysm_del_release() while waiting for mh_mutex.
832 if (mhp
->mh_state
== MHND_FREE
) {
833 mutex_exit(&mhp
->mh_mutex
);
839 mutex_exit(&mem_handle_list_mutex
);
844 kphysm_del_gethandle(memhandle_t
*xmhp
)
846 struct mem_handle
*mhp
;
848 mhp
= kphysm_allocate_mem_handle();
850 * The handle is allocated using KM_SLEEP, so cannot fail.
851 * If the implementation is changed, the correct error to return
852 * here would be KPHYSM_ENOHANDLES.
854 ASSERT(mhp
->mh_state
== MHND_FREE
);
855 mhp
->mh_state
= MHND_INIT
;
856 *xmhp
= mhp
->mh_exthandle
;
857 mutex_exit(&mhp
->mh_mutex
);
862 overlapping(pfn_t b1
, pgcnt_t l1
, pfn_t b2
, pgcnt_t l2
)
869 return (!(b2
>= e1
|| b1
>= e2
));
872 static int can_remove_pgs(pgcnt_t
);
874 static struct memdelspan
*
875 span_to_install(pfn_t base
, pgcnt_t npgs
)
877 struct memdelspan
*mdsp
;
878 struct memdelspan
*mdsp_new
;
879 uint64_t address
, size
, thislen
;
884 address
= (uint64_t)base
<< PAGESHIFT
;
885 size
= (uint64_t)npgs
<< PAGESHIFT
;
888 for (mlp
= phys_install
; mlp
!= NULL
; mlp
= mlp
->ml_next
) {
889 if (address
>= (mlp
->ml_address
+ mlp
->ml_size
))
891 if ((address
+ size
) > mlp
->ml_address
)
899 if (address
< mlp
->ml_address
) {
900 size
-= (mlp
->ml_address
- address
);
901 address
= mlp
->ml_address
;
903 ASSERT(address
>= mlp
->ml_address
);
904 if ((address
+ size
) >
905 (mlp
->ml_address
+ mlp
->ml_size
)) {
907 mlp
->ml_size
- (address
- mlp
->ml_address
);
912 memlist_read_unlock();
913 /* TODO: phys_install could change now */
916 mdsp
= kmem_zalloc(sizeof (struct memdelspan
), KM_SLEEP
);
917 mdsp
->mds_base
= btop(address
);
918 mdsp
->mds_npgs
= btop(thislen
);
919 mdsp
->mds_next
= mdsp_new
;
928 free_delspans(struct memdelspan
*mdsp
)
930 struct memdelspan
*amdsp
;
932 while ((amdsp
= mdsp
) != NULL
) {
933 mdsp
= amdsp
->mds_next
;
934 kmem_free(amdsp
, sizeof (struct memdelspan
));
939 * Concatenate lists. No list ordering is required.
943 delspan_concat(struct memdelspan
**mdspp
, struct memdelspan
*mdsp
)
945 while (*mdspp
!= NULL
)
946 mdspp
= &(*mdspp
)->mds_next
;
952 * Given a new list of delspans, check there is no overlap with
953 * all existing span activity (add or delete) and then concatenate
954 * the new spans to the given list.
955 * Return 1 for OK, 0 if overlapping.
959 struct transit_list
*my_tlp
,
960 struct memdelspan
*mdsp_new
)
962 struct transit_list_head
*trh
;
963 struct transit_list
*tlp
;
966 trh
= &transit_list_head
;
968 ASSERT(my_tlp
!= NULL
);
969 ASSERT(mdsp_new
!= NULL
);
972 mutex_enter(&trh
->trh_lock
);
973 /* ASSERT(my_tlp->trl_spans == NULL || tlp_in_list(trh, my_tlp)); */
974 for (tlp
= trh
->trh_head
; tlp
!= NULL
; tlp
= tlp
->trl_next
) {
975 struct memdelspan
*mdsp
;
977 for (mdsp
= tlp
->trl_spans
; mdsp
!= NULL
;
978 mdsp
= mdsp
->mds_next
) {
979 struct memdelspan
*nmdsp
;
981 for (nmdsp
= mdsp_new
; nmdsp
!= NULL
;
982 nmdsp
= nmdsp
->mds_next
) {
983 if (overlapping(mdsp
->mds_base
, mdsp
->mds_npgs
,
984 nmdsp
->mds_base
, nmdsp
->mds_npgs
)) {
993 if (my_tlp
->trl_spans
== NULL
)
994 transit_list_insert(my_tlp
);
995 delspan_concat(&my_tlp
->trl_spans
, mdsp_new
);
997 mutex_exit(&trh
->trh_lock
);
1003 struct transit_list
*my_tlp
,
1007 struct transit_list_head
*trh
;
1008 struct memdelspan
*mdsp
;
1010 trh
= &transit_list_head
;
1012 ASSERT(my_tlp
!= NULL
);
1014 mutex_enter(&trh
->trh_lock
);
1015 if ((mdsp
= my_tlp
->trl_spans
) != NULL
) {
1017 my_tlp
->trl_spans
= NULL
;
1018 free_delspans(mdsp
);
1019 transit_list_remove(my_tlp
);
1021 struct memdelspan
**prv
;
1023 prv
= &my_tlp
->trl_spans
;
1024 while (mdsp
!= NULL
) {
1027 p_end
= mdsp
->mds_base
+ mdsp
->mds_npgs
;
1028 if (mdsp
->mds_base
>= base
&&
1029 p_end
<= (base
+ npgs
)) {
1030 *prv
= mdsp
->mds_next
;
1031 mdsp
->mds_next
= NULL
;
1032 free_delspans(mdsp
);
1034 prv
= &mdsp
->mds_next
;
1038 if (my_tlp
->trl_spans
== NULL
)
1039 transit_list_remove(my_tlp
);
1042 mutex_exit(&trh
->trh_lock
);
1046 * Reserve interface for add to stop delete before add finished.
1047 * This list is only accessed through the delspan_insert/remove
1048 * functions and so is fully protected by the mutex in struct transit_list.
1051 static struct transit_list reserve_transit
;
1054 delspan_reserve(pfn_t base
, pgcnt_t npgs
)
1056 struct memdelspan
*mdsp
;
1059 mdsp
= kmem_zalloc(sizeof (struct memdelspan
), KM_SLEEP
);
1060 mdsp
->mds_base
= base
;
1061 mdsp
->mds_npgs
= npgs
;
1062 if ((ret
= delspan_insert(&reserve_transit
, mdsp
)) == 0) {
1063 free_delspans(mdsp
);
1069 delspan_unreserve(pfn_t base
, pgcnt_t npgs
)
1071 delspan_remove(&reserve_transit
, base
, npgs
);
1075 * Return whether memseg was created by kphysm_add_memory_dynamic().
1078 memseg_is_dynamic(struct memseg
*seg
)
1080 return (seg
->msegflags
& MEMSEG_DYNAMIC
);
1089 struct mem_handle
*mhp
;
1091 struct memdelspan
*mdsp
;
1092 struct memdelspan
*mdsp_new
;
1093 pgcnt_t phys_pages
, vm_pages
;
1098 mhp
= kphysm_lookup_mem_handle(handle
);
1100 return (KPHYSM_EHANDLE
);
1102 if (mhp
->mh_state
!= MHND_INIT
) {
1103 mutex_exit(&mhp
->mh_mutex
);
1104 return (KPHYSM_ESEQUENCE
);
1108 * Intersect the span with the installed memory list (phys_install).
1110 mdsp_new
= span_to_install(base
, npgs
);
1111 if (mdsp_new
== NULL
) {
1113 * No physical memory in this range. Is this an
1114 * error? If an attempt to start the delete is made
1115 * for OK returns from del_span such as this, start will
1117 * Could return KPHYSM_ENOWORK.
1120 * It is assumed that there are no error returns
1121 * from span_to_install() due to kmem_alloc failure.
1123 mutex_exit(&mhp
->mh_mutex
);
1127 * Does this span overlap an existing span?
1129 if (delspan_insert(&mhp
->mh_transit
, mdsp_new
) == 0) {
1131 * Differentiate between already on list for this handle
1132 * (KPHYSM_EDUP) and busy elsewhere (KPHYSM_EBUSY).
1135 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
1136 mdsp
= mdsp
->mds_next
) {
1137 if (overlapping(mdsp
->mds_base
, mdsp
->mds_npgs
,
1143 mutex_exit(&mhp
->mh_mutex
);
1144 free_delspans(mdsp_new
);
1148 * At this point the spans in mdsp_new have been inserted into the
1149 * list of spans for this handle and thereby to the global list of
1150 * spans being processed. Each of these spans must now be checked
1151 * for relocatability. As a side-effect segments in the memseg list
1154 * Note that mdsp_new can no longer be used as it is now part of
1155 * a larger list. Select elements of this larger list based
1162 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
1163 mdsp
= mdsp
->mds_next
) {
1164 pgcnt_t pages_checked
;
1166 if (!overlapping(mdsp
->mds_base
, mdsp
->mds_npgs
, base
, npgs
)) {
1169 p_end
= mdsp
->mds_base
+ mdsp
->mds_npgs
;
1171 * The pages_checked count is a hack. All pages should be
1172 * checked for relocatability. Those not covered by memsegs
1173 * should be tested with arch_kphysm_del_span_ok().
1176 for (seg
= memsegs
; seg
; seg
= seg
->next
) {
1179 if (seg
->pages_base
>= p_end
||
1180 seg
->pages_end
<= mdsp
->mds_base
) {
1181 /* Span and memseg don't overlap. */
1184 mseg_start
= memseg_get_start(seg
);
1185 /* Check that segment is suitable for delete. */
1186 if (memseg_includes_meta(seg
)) {
1188 * Check that this segment is completely
1191 if (mseg_start
< mdsp
->mds_base
||
1192 seg
->pages_end
> p_end
) {
1196 pages_checked
+= seg
->pages_end
- mseg_start
;
1199 * If this segment is larger than the span,
1200 * try to split it. After the split, it
1201 * is necessary to restart.
1203 if (seg
->pages_base
< mdsp
->mds_base
||
1204 seg
->pages_end
> p_end
) {
1209 /* Split required. */
1210 if (mdsp
->mds_base
< seg
->pages_base
)
1211 abase
= seg
->pages_base
;
1213 abase
= mdsp
->mds_base
;
1214 if (p_end
> seg
->pages_end
)
1215 anpgs
= seg
->pages_end
- abase
;
1217 anpgs
= p_end
- abase
;
1218 s_ret
= kphysm_split_memseg(abase
,
1222 ret
= KPHYSM_ERESOURCE
;
1228 seg
->pages_end
- seg
->pages_base
;
1231 * The memseg is wholly within the delete span.
1232 * The individual pages can now be checked.
1235 for (pp
= seg
->pages
; pp
< seg
->epages
; pp
++) {
1236 if (PP_ISNORELOC(pp
)) {
1237 ret
= KPHYSM_ENONRELOC
;
1241 if (ret
!= KPHYSM_OK
) {
1244 phys_pages
+= (seg
->pages_end
- mseg_start
);
1245 vm_pages
+= MSEG_NPAGES(seg
);
1247 if (ret
!= KPHYSM_OK
)
1249 if (pages_checked
!= mdsp
->mds_npgs
) {
1250 ret
= KPHYSM_ENONRELOC
;
1255 if (ret
== KPHYSM_OK
) {
1256 mhp
->mh_phys_pages
+= phys_pages
;
1257 mhp
->mh_vm_pages
+= vm_pages
;
1260 * Keep holding the mh_mutex to prevent it going away.
1262 delspan_remove(&mhp
->mh_transit
, base
, npgs
);
1264 mutex_exit(&mhp
->mh_mutex
);
1269 kphysm_del_span_query(
1274 struct memdelspan
*mdsp
;
1275 struct memdelspan
*mdsp_new
;
1276 int done_first_nonreloc
;
1278 mqp
->phys_pages
= 0;
1280 mqp
->nonrelocatable
= 0;
1281 mqp
->first_nonrelocatable
= 0;
1282 mqp
->last_nonrelocatable
= 0;
1284 mdsp_new
= span_to_install(base
, npgs
);
1286 * It is OK to proceed here if mdsp_new == NULL.
1288 done_first_nonreloc
= 0;
1289 for (mdsp
= mdsp_new
; mdsp
!= NULL
; mdsp
= mdsp
->mds_next
) {
1293 mqp
->phys_pages
+= mdsp
->mds_npgs
;
1294 sbase
= mdsp
->mds_base
;
1295 snpgs
= mdsp
->mds_npgs
;
1296 while (snpgs
!= 0) {
1297 struct memseg
*lseg
, *seg
;
1302 p_end
= sbase
+ snpgs
;
1304 * Find the lowest addressed memseg that starts
1305 * after sbase and account for it.
1306 * This is to catch dynamic memsegs whose start
1310 for (lseg
= memsegs
; lseg
!= NULL
; lseg
= lseg
->next
) {
1311 if ((lseg
->pages_base
>= sbase
) ||
1312 (lseg
->pages_base
< p_end
&&
1313 lseg
->pages_end
> sbase
)) {
1315 seg
->pages_base
> lseg
->pages_base
)
1320 mseg_start
= memseg_get_start(seg
);
1322 * Now have the full extent of the memseg so
1323 * do the range check.
1325 if (mseg_start
>= p_end
||
1326 seg
->pages_end
<= sbase
) {
1327 /* Span does not overlap memseg. */
1332 * Account for gap either before the segment if
1333 * there is one or to the end of the span.
1335 if (seg
== NULL
|| mseg_start
> sbase
) {
1338 a_end
= (seg
== NULL
) ? p_end
: mseg_start
;
1340 * Check with arch layer for relocatability.
1342 if (arch_kphysm_del_span_ok(sbase
,
1345 * No non-relocatble pages in this
1346 * area, avoid the fine-grained
1349 snpgs
-= (a_end
- sbase
);
1352 while (sbase
< a_end
) {
1353 if (!arch_kphysm_del_span_ok(sbase
,
1355 mqp
->nonrelocatable
++;
1356 if (!done_first_nonreloc
) {
1358 first_nonrelocatable
1360 done_first_nonreloc
= 1;
1362 mqp
->last_nonrelocatable
=
1370 ASSERT(mseg_start
<= sbase
);
1371 if (seg
->pages_base
!= mseg_start
&&
1372 seg
->pages_base
> sbase
) {
1376 * Skip the page_t area of a
1379 skip_pgs
= seg
->pages_base
- sbase
;
1380 if (snpgs
<= skip_pgs
) {
1389 ASSERT(seg
->pages_base
<= sbase
);
1391 * The individual pages can now be checked.
1393 for (pp
= seg
->pages
+
1394 (sbase
- seg
->pages_base
);
1395 snpgs
!= 0 && pp
< seg
->epages
; pp
++) {
1397 if (PP_ISNORELOC(pp
)) {
1398 mqp
->nonrelocatable
++;
1399 if (!done_first_nonreloc
) {
1401 first_nonrelocatable
1403 done_first_nonreloc
= 1;
1405 mqp
->last_nonrelocatable
=
1415 free_delspans(mdsp_new
);
1421 * This release function can be called at any stage as follows:
1422 * _gethandle only called
1423 * _span(s) only called
1424 * _start called but failed
1425 * delete thread exited
1428 kphysm_del_release(memhandle_t handle
)
1430 struct mem_handle
*mhp
;
1432 mhp
= kphysm_lookup_mem_handle(handle
);
1434 return (KPHYSM_EHANDLE
);
1436 switch (mhp
->mh_state
) {
1439 mutex_exit(&mhp
->mh_mutex
);
1440 return (KPHYSM_ENOTFINISHED
);
1442 ASSERT(mhp
->mh_state
!= MHND_FREE
);
1443 mutex_exit(&mhp
->mh_mutex
);
1444 return (KPHYSM_EHANDLE
);
1450 mutex_exit(&mhp
->mh_mutex
);
1451 return (KPHYSM_ESEQUENCE
);
1454 cmn_err(CE_WARN
, "kphysm_del_release(0x%p) state corrupt %d",
1455 (void *)mhp
, mhp
->mh_state
);
1457 mutex_exit(&mhp
->mh_mutex
);
1458 return (KPHYSM_EHANDLE
);
1461 * Set state so that we can wait if necessary.
1462 * Also this means that we have read/write access to all
1463 * fields except mh_exthandle and mh_state.
1465 mhp
->mh_state
= MHND_RELEASE
;
1467 * The mem_handle cannot be de-allocated by any other operation
1468 * now, so no need to hold mh_mutex.
1470 mutex_exit(&mhp
->mh_mutex
);
1472 delspan_remove(&mhp
->mh_transit
, 0, 0);
1473 mhp
->mh_phys_pages
= 0;
1474 mhp
->mh_vm_pages
= 0;
1475 mhp
->mh_hold_todo
= 0;
1476 mhp
->mh_delete_complete
= NULL
;
1477 mhp
->mh_delete_complete_arg
= NULL
;
1480 mutex_enter(&mhp
->mh_mutex
);
1481 ASSERT(mhp
->mh_state
== MHND_RELEASE
);
1482 mhp
->mh_state
= MHND_FREE
;
1484 kphysm_free_mem_handle(mhp
);
1490 * This cancel function can only be called with the thread running.
1493 kphysm_del_cancel(memhandle_t handle
)
1495 struct mem_handle
*mhp
;
1497 mhp
= kphysm_lookup_mem_handle(handle
);
1499 return (KPHYSM_EHANDLE
);
1501 if (mhp
->mh_state
!= MHND_STARTING
&& mhp
->mh_state
!= MHND_RUNNING
) {
1502 mutex_exit(&mhp
->mh_mutex
);
1503 return (KPHYSM_ENOTRUNNING
);
1506 * Set the cancel flag and wake the delete thread up.
1507 * The thread may be waiting on I/O, so the effect of the cancel
1510 if (mhp
->mh_cancel
== 0) {
1511 mhp
->mh_cancel
= KPHYSM_ECANCELLED
;
1512 cv_signal(&mhp
->mh_cv
);
1514 mutex_exit(&mhp
->mh_mutex
);
1521 memdelstat_t
*mdstp
)
1523 struct mem_handle
*mhp
;
1525 mhp
= kphysm_lookup_mem_handle(handle
);
1527 return (KPHYSM_EHANDLE
);
1530 * Calling kphysm_del_status() is allowed before the delete
1531 * is started to allow for status display.
1533 if (mhp
->mh_state
!= MHND_INIT
&& mhp
->mh_state
!= MHND_STARTING
&&
1534 mhp
->mh_state
!= MHND_RUNNING
) {
1535 mutex_exit(&mhp
->mh_mutex
);
1536 return (KPHYSM_ENOTRUNNING
);
1538 mdstp
->phys_pages
= mhp
->mh_phys_pages
;
1539 mdstp
->managed
= mhp
->mh_vm_pages
;
1540 mdstp
->collected
= mhp
->mh_vm_pages
- mhp
->mh_hold_todo
;
1541 mutex_exit(&mhp
->mh_mutex
);
1545 static int mem_delete_additional_pages
= 100;
1548 can_remove_pgs(pgcnt_t npgs
)
1551 * If all pageable pages were paged out, freemem would
1552 * equal availrmem. There is a minimum requirement for
1555 if ((availrmem
- (tune
.t_minarmem
+ mem_delete_additional_pages
))
1558 /* TODO: check swap space, etc. */
1563 get_availrmem(pgcnt_t npgs
)
1567 mutex_enter(&freemem_lock
);
1568 ret
= can_remove_pgs(npgs
);
1571 mutex_exit(&freemem_lock
);
1576 put_availrmem(pgcnt_t npgs
)
1578 mutex_enter(&freemem_lock
);
1580 mutex_exit(&freemem_lock
);
1583 #define FREEMEM_INCR 100
1584 static pgcnt_t freemem_incr
= FREEMEM_INCR
;
1585 #define DEL_FREE_WAIT_FRAC 4
1586 #define DEL_FREE_WAIT_TICKS ((hz+DEL_FREE_WAIT_FRAC-1)/DEL_FREE_WAIT_FRAC)
1588 #define DEL_BUSY_WAIT_FRAC 20
1589 #define DEL_BUSY_WAIT_TICKS ((hz+DEL_BUSY_WAIT_FRAC-1)/DEL_BUSY_WAIT_FRAC)
1591 static void kphysm_del_cleanup(struct mem_handle
*);
1593 static void page_delete_collect(page_t
*, struct mem_handle
*);
1596 delthr_get_freemem(struct mem_handle
*mhp
)
1601 ASSERT(MUTEX_HELD(&mhp
->mh_mutex
));
1603 MDSTAT_INCR(mhp
, need_free
);
1605 * Get up to freemem_incr pages.
1607 free_get
= freemem_incr
;
1608 if (free_get
> mhp
->mh_hold_todo
)
1609 free_get
= mhp
->mh_hold_todo
;
1611 * Take free_get pages away from freemem,
1612 * waiting if necessary.
1615 while (!mhp
->mh_cancel
) {
1616 mutex_exit(&mhp
->mh_mutex
);
1617 MDSTAT_INCR(mhp
, free_loop
);
1619 * Duplicate test from page_create_throttle()
1620 * but don't override with !PG_WAIT.
1622 if (freemem
< (free_get
+ throttlefree
)) {
1623 MDSTAT_INCR(mhp
, free_low
);
1626 ret
= page_create_wait(free_get
, 0);
1629 MDSTAT_INCR(mhp
, free_failed
);
1633 mutex_enter(&mhp
->mh_mutex
);
1638 * Put pressure on pageout.
1640 page_needfree(free_get
);
1641 cv_signal(&proc_pageout
->p_cv
);
1643 mutex_enter(&mhp
->mh_mutex
);
1644 (void) cv_reltimedwait(&mhp
->mh_cv
, &mhp
->mh_mutex
,
1645 DEL_FREE_WAIT_TICKS
, TR_CLOCK_TICK
);
1646 mutex_exit(&mhp
->mh_mutex
);
1647 page_needfree(-(spgcnt_t
)free_get
);
1649 mutex_enter(&mhp
->mh_mutex
);
1654 #define DR_AIO_CLEANUP_DELAY 25000 /* 0.025secs, in usec */
1655 #define DR_AIO_CLEANUP_MAXLOOPS_NODELAY 100
1657 * This function is run as a helper thread for delete_memory_thread.
1658 * It is needed in order to force kaio cleanup, so that pages used in kaio
1659 * will be unlocked and subsequently relocated by delete_memory_thread.
1660 * The address of the delete_memory_threads's mem_handle is passed in to
1661 * this thread function, and is used to set the mh_aio_cleanup_done member
1662 * prior to calling thread_exit().
1665 dr_aio_cleanup_thread(caddr_t amhp
)
1668 int (*aio_cleanup_dr_delete_memory
)(proc_t
*);
1671 struct mem_handle
*mhp
;
1672 volatile uint_t
*pcancel
;
1674 mhp
= (struct mem_handle
*)amhp
;
1675 ASSERT(mhp
!= NULL
);
1676 pcancel
= &mhp
->mh_dr_aio_cleanup_cancel
;
1677 if (modload("sys", "kaio") == -1) {
1678 mhp
->mh_aio_cleanup_done
= 1;
1679 cmn_err(CE_WARN
, "dr_aio_cleanup_thread: cannot load kaio");
1682 aio_cleanup_dr_delete_memory
= (int (*)(proc_t
*))
1683 modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
1684 if (aio_cleanup_dr_delete_memory
== NULL
) {
1685 mhp
->mh_aio_cleanup_done
= 1;
1687 "aio_cleanup_dr_delete_memory not found in kaio");
1692 mutex_enter(&pidlock
);
1693 for (procp
= practive
; (*pcancel
== 0) && (procp
!= NULL
);
1694 procp
= procp
->p_next
) {
1695 mutex_enter(&procp
->p_lock
);
1696 if (procp
->p_aio
!= NULL
) {
1697 /* cleanup proc's outstanding kaio */
1699 (*aio_cleanup_dr_delete_memory
)(procp
);
1701 mutex_exit(&procp
->p_lock
);
1703 mutex_exit(&pidlock
);
1704 if ((*pcancel
== 0) &&
1705 (!cleaned
|| (++n
== DR_AIO_CLEANUP_MAXLOOPS_NODELAY
))) {
1706 /* delay a bit before retrying all procs again */
1707 delay(drv_usectohz(DR_AIO_CLEANUP_DELAY
));
1710 } while (*pcancel
== 0);
1711 mhp
->mh_aio_cleanup_done
= 1;
1716 delete_memory_thread(caddr_t amhp
)
1718 struct mem_handle
*mhp
;
1719 struct memdelspan
*mdsp
;
1720 callb_cpr_t cprinfo
;
1722 spgcnt_t freemem_left
;
1723 void (*del_complete_funcp
)(void *, int error
);
1724 void *del_complete_arg
;
1729 #ifdef MEM_DEL_STATS
1730 uint64_t start_total
, ntick_total
;
1731 uint64_t start_pgrp
, ntick_pgrp
;
1732 #endif /* MEM_DEL_STATS */
1734 mhp
= (struct mem_handle
*)amhp
;
1736 #ifdef MEM_DEL_STATS
1737 start_total
= ddi_get_lbolt();
1738 #endif /* MEM_DEL_STATS */
1740 CALLB_CPR_INIT(&cprinfo
, &mhp
->mh_mutex
,
1741 callb_generic_cpr
, "memdel");
1743 mutex_enter(&mhp
->mh_mutex
);
1744 ASSERT(mhp
->mh_state
== MHND_STARTING
);
1746 mhp
->mh_state
= MHND_RUNNING
;
1747 mhp
->mh_thread_id
= curthread
;
1749 mhp
->mh_hold_todo
= mhp
->mh_vm_pages
;
1750 mutex_exit(&mhp
->mh_mutex
);
1752 /* Allocate the remap pages now, if necessary. */
1753 memseg_remap_init();
1756 * Subtract from availrmem now if possible as availrmem
1757 * may not be available by the end of the delete.
1759 if (!get_availrmem(mhp
->mh_vm_pages
)) {
1760 comp_code
= KPHYSM_ENOTVIABLE
;
1761 mutex_enter(&mhp
->mh_mutex
);
1765 ret
= kphysm_setup_pre_del(mhp
->mh_vm_pages
);
1767 mutex_enter(&mhp
->mh_mutex
);
1770 mhp
->mh_cancel
= KPHYSM_EREFUSED
;
1774 transit_list_collect(mhp
, 1);
1776 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
1777 mdsp
= mdsp
->mds_next
) {
1778 ASSERT(mdsp
->mds_bitmap
== NULL
);
1779 mdsp
->mds_bitmap
= kmem_zalloc(MDS_BITMAPBYTES(mdsp
), KM_SLEEP
);
1780 mdsp
->mds_bitmap_retired
= kmem_zalloc(MDS_BITMAPBYTES(mdsp
),
1787 * Start dr_aio_cleanup_thread, which periodically iterates
1788 * through the process list and invokes aio cleanup. This
1789 * is needed in order to avoid a deadly embrace between the
1790 * delete_memory_thread (waiting on writer lock for page, with the
1791 * exclusive-wanted bit set), kaio read request threads (waiting for a
1792 * reader lock on the same page that is wanted by the
1793 * delete_memory_thread), and threads waiting for kaio completion
1794 * (blocked on spt_amp->lock).
1796 mhp
->mh_dr_aio_cleanup_cancel
= 0;
1797 mhp
->mh_aio_cleanup_done
= 0;
1798 (void) thread_create(NULL
, 0, dr_aio_cleanup_thread
,
1799 (caddr_t
)mhp
, 0, &p0
, TS_RUN
, maxclsyspri
- 1);
1800 while ((mhp
->mh_hold_todo
!= 0) && (mhp
->mh_cancel
== 0)) {
1803 MDSTAT_INCR(mhp
, nloop
);
1805 for (mdsp
= mhp
->mh_transit
.trl_spans
; (mdsp
!= NULL
) &&
1806 (mhp
->mh_cancel
== 0); mdsp
= mdsp
->mds_next
) {
1809 p_end
= mdsp
->mds_base
+ mdsp
->mds_npgs
;
1810 for (pfn
= mdsp
->mds_base
; (pfn
< p_end
) &&
1811 (mhp
->mh_cancel
== 0); pfn
++) {
1812 page_t
*pp
, *tpp
, *tpp_targ
;
1819 bit
= pfn
- mdsp
->mds_base
;
1820 if ((mdsp
->mds_bitmap
[bit
/ NBPBMW
] &
1821 (1 << (bit
% NBPBMW
))) != 0) {
1822 MDSTAT_INCR(mhp
, already_done
);
1825 if (freemem_left
== 0) {
1826 freemem_left
+= delthr_get_freemem(mhp
);
1827 if (freemem_left
== 0)
1832 * Release mh_mutex - some of this
1833 * stuff takes some time (eg PUTPAGE).
1836 mutex_exit(&mhp
->mh_mutex
);
1837 MDSTAT_INCR(mhp
, ncheck
);
1839 pp
= page_numtopp_nolock(pfn
);
1842 * Not covered by a page_t - will
1843 * be dealt with elsewhere.
1845 MDSTAT_INCR(mhp
, nopaget
);
1846 mutex_enter(&mhp
->mh_mutex
);
1847 mdsp
->mds_bitmap
[bit
/ NBPBMW
] |=
1848 (1 << (bit
% NBPBMW
));
1852 if (!page_try_reclaim_lock(pp
, SE_EXCL
,
1853 SE_EXCL_WANTED
| SE_RETIRED
)) {
1855 * Page in use elsewhere. Skip it.
1857 MDSTAT_INCR(mhp
, lockfail
);
1858 mutex_enter(&mhp
->mh_mutex
);
1862 * See if the cage expanded into the delete.
1863 * This can happen as we have to allow the
1866 if (PP_ISNORELOC(pp
)) {
1868 mutex_enter(&mhp
->mh_mutex
);
1869 mhp
->mh_cancel
= KPHYSM_ENONRELOC
;
1872 if (PP_RETIRED(pp
)) {
1874 * Page has been retired and is
1875 * not part of the cage so we
1876 * can now do the accounting for
1879 MDSTAT_INCR(mhp
, retired
);
1880 mutex_enter(&mhp
->mh_mutex
);
1881 mdsp
->mds_bitmap
[bit
/ NBPBMW
]
1882 |= (1 << (bit
% NBPBMW
));
1883 mdsp
->mds_bitmap_retired
[bit
/
1885 (1 << (bit
% NBPBMW
));
1886 mhp
->mh_hold_todo
--;
1889 ASSERT(freemem_left
!= 0);
1890 if (PP_ISFREE(pp
)) {
1892 * Like page_reclaim() only 'freemem'
1893 * processing is already done.
1895 MDSTAT_INCR(mhp
, nfree
);
1897 if (PP_ISAGED(pp
)) {
1907 mutex_enter(&mhp
->mh_mutex
);
1908 page_delete_collect(pp
, mhp
);
1909 mdsp
->mds_bitmap
[bit
/ NBPBMW
] |=
1910 (1 << (bit
% NBPBMW
));
1914 ASSERT(pp
->p_vnode
!= NULL
);
1916 MDSTAT_INCR(mhp
, first_notfree
);
1918 mutex_enter(&mhp
->mh_mutex
);
1922 * Keep stats on pages encountered that
1923 * are marked for retirement.
1926 MDSTAT_INCR(mhp
, toxic
);
1927 } else if (PP_PR_REQ(pp
)) {
1928 MDSTAT_INCR(mhp
, failing
);
1931 * In certain cases below, special exceptions
1932 * are made for pages that are toxic. This
1933 * is because the current meaning of toxic
1934 * is that an uncorrectable error has been
1935 * previously associated with the page.
1937 if (pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0) {
1938 if (!PP_TOXIC(pp
)) {
1940 * Must relocate locked in
1943 #ifdef MEM_DEL_STATS
1944 start_pgrp
= ddi_get_lbolt();
1945 #endif /* MEM_DEL_STATS */
1947 * Lock all constituent pages
1948 * of a large page to ensure
1949 * that p_szc won't change.
1951 if (!group_page_trylock(pp
,
1960 MDSTAT_INCR(mhp
, npplocked
);
1962 page_get_replacement_page(
1964 if (pp_targ
!= NULL
) {
1965 #ifdef MEM_DEL_STATS
1970 #endif /* MEM_DEL_STATS */
1977 group_page_unlock(pp
);
1979 #ifdef MEM_DEL_STATS
1981 (uint64_t)ddi_get_lbolt() -
1983 #endif /* MEM_DEL_STATS */
1984 MDSTAT_PGRP(mhp
, ntick_pgrp
);
1985 MDSTAT_INCR(mhp
, nnorepl
);
1986 mutex_enter(&mhp
->mh_mutex
);
1990 * Cannot do anything about
1991 * this page because it is
1994 MDSTAT_INCR(mhp
, npplkdtoxic
);
1996 mutex_enter(&mhp
->mh_mutex
);
2001 * Unload the mappings and check if mod bit
2004 ASSERT(!PP_ISKAS(pp
));
2005 (void) hat_pageunload(pp
, HAT_FORCE_PGUNLOAD
);
2006 mod
= hat_ismod(pp
);
2008 #ifdef MEM_DEL_STATS
2009 start_pgrp
= ddi_get_lbolt();
2010 #endif /* MEM_DEL_STATS */
2011 if (mod
&& !PP_TOXIC(pp
)) {
2013 * Lock all constituent pages
2014 * of a large page to ensure
2015 * that p_szc won't change.
2017 if (!group_page_trylock(pp
, SE_EXCL
)) {
2018 MDSTAT_INCR(mhp
, gptlmodfail
);
2020 mutex_enter(&mhp
->mh_mutex
);
2023 pp_targ
= page_get_replacement_page(pp
,
2025 if (pp_targ
!= NULL
) {
2026 MDSTAT_INCR(mhp
, nmodreloc
);
2027 #ifdef MEM_DEL_STATS
2029 (uint64_t)ddi_get_lbolt() -
2031 #endif /* MEM_DEL_STATS */
2032 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2035 group_page_unlock(pp
);
2038 if (!page_try_demote_pages(pp
)) {
2039 MDSTAT_INCR(mhp
, demotefail
);
2041 #ifdef MEM_DEL_STATS
2042 ntick_pgrp
= (uint64_t)ddi_get_lbolt() -
2044 #endif /* MEM_DEL_STATS */
2045 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2046 mutex_enter(&mhp
->mh_mutex
);
2051 * Regular 'page-out'.
2054 MDSTAT_INCR(mhp
, ndestroy
);
2055 page_destroy(pp
, 1);
2057 * page_destroy was called with
2058 * dontfree. As long as p_lckcnt
2059 * and p_cowcnt are both zero, the
2060 * only additional action of
2061 * page_destroy with !dontfree is to
2062 * call page_free, so we can collect
2066 #ifdef MEM_DEL_STATS
2067 ntick_pgrp
= (uint64_t)ddi_get_lbolt() -
2069 #endif /* MEM_DEL_STATS */
2070 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2071 mutex_enter(&mhp
->mh_mutex
);
2072 page_delete_collect(pp
, mhp
);
2073 mdsp
->mds_bitmap
[bit
/ NBPBMW
] |=
2074 (1 << (bit
% NBPBMW
));
2078 * The page is toxic and the mod bit is
2079 * set, we cannot do anything here to deal
2084 #ifdef MEM_DEL_STATS
2085 ntick_pgrp
= (uint64_t)ddi_get_lbolt() -
2087 #endif /* MEM_DEL_STATS */
2088 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2089 MDSTAT_INCR(mhp
, modtoxic
);
2090 mutex_enter(&mhp
->mh_mutex
);
2093 MDSTAT_INCR(mhp
, nputpage
);
2095 offset
= pp
->p_offset
;
2098 (void) VOP_PUTPAGE(vp
, offset
, PAGESIZE
,
2099 B_INVAL
|B_FORCE
, kcred
, NULL
);
2101 #ifdef MEM_DEL_STATS
2102 ntick_pgrp
= (uint64_t)ddi_get_lbolt() -
2104 #endif /* MEM_DEL_STATS */
2105 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2107 * Try to get the page back immediately
2108 * so that it can be collected.
2110 pp
= page_numtopp_nolock(pfn
);
2112 MDSTAT_INCR(mhp
, nnoreclaim
);
2114 * This should not happen as this
2115 * thread is deleting the page.
2116 * If this code is generalized, this
2117 * becomes a reality.
2121 "delete_memory_thread(0x%p) "
2122 "pfn 0x%lx has no page_t",
2125 mutex_enter(&mhp
->mh_mutex
);
2128 if (page_try_reclaim_lock(pp
, SE_EXCL
,
2129 SE_EXCL_WANTED
| SE_RETIRED
)) {
2130 if (PP_ISFREE(pp
)) {
2131 goto free_page_collect
;
2135 MDSTAT_INCR(mhp
, nnoreclaim
);
2136 mutex_enter(&mhp
->mh_mutex
);
2141 * Got some freemem and a target
2142 * page, so move the data to avoid
2143 * I/O and lock problems.
2145 ASSERT(!page_iolock_assert(pp
));
2146 MDSTAT_INCR(mhp
, nreloc
);
2148 * page_relocate() will return pgcnt: the
2149 * number of consecutive pages relocated.
2150 * If it is successful, pp will be a
2151 * linked list of the page structs that
2152 * were relocated. If page_relocate() is
2153 * unsuccessful, pp will be unmodified.
2155 #ifdef MEM_DEL_STATS
2156 start_pgrp
= ddi_get_lbolt();
2157 #endif /* MEM_DEL_STATS */
2158 result
= page_relocate(&pp
, &pp_targ
, 0, 0,
2160 #ifdef MEM_DEL_STATS
2161 ntick_pgrp
= (uint64_t)ddi_get_lbolt() -
2163 #endif /* MEM_DEL_STATS */
2164 MDSTAT_PGRP(mhp
, ntick_pgrp
);
2166 MDSTAT_INCR(mhp
, nrelocfail
);
2168 * We did not succeed. We need
2169 * to give the pp_targ pages back.
2170 * page_free(pp_targ, 1) without
2171 * the freemem accounting.
2173 group_page_unlock(pp
);
2174 page_free_replacement_page(pp_targ
);
2176 mutex_enter(&mhp
->mh_mutex
);
2181 * We will then collect pgcnt pages.
2184 mutex_enter(&mhp
->mh_mutex
);
2186 * We need to make sure freemem_left is
2189 while ((freemem_left
< pgcnt
) &&
2190 (!mhp
->mh_cancel
)) {
2192 delthr_get_freemem(mhp
);
2196 * Do not proceed if mh_cancel is set.
2198 if (mhp
->mh_cancel
) {
2199 while (pp_targ
!= NULL
) {
2201 * Unlink and unlock each page.
2204 page_sub(&pp_targ
, tpp_targ
);
2205 page_unlock(tpp_targ
);
2208 * We need to give the pp pages back.
2209 * page_free(pp, 1) without the
2210 * freemem accounting.
2212 page_free_replacement_page(pp
);
2216 /* Now remove pgcnt from freemem_left */
2217 freemem_left
-= pgcnt
;
2218 ASSERT(freemem_left
>= 0);
2220 while (pp
!= NULL
) {
2222 * pp and pp_targ were passed back as
2223 * a linked list of pages.
2224 * Unlink and unlock each page.
2227 page_sub(&pp_targ
, tpp_targ
);
2228 page_unlock(tpp_targ
);
2230 * The original page is now free
2231 * so remove it from the linked
2232 * list and collect it.
2236 pfn
= page_pptonum(tpp
);
2238 ASSERT(PAGE_EXCL(tpp
));
2239 ASSERT(tpp
->p_vnode
== NULL
);
2240 ASSERT(!hat_page_is_mapped(tpp
));
2241 ASSERT(tpp
->p_szc
== szc
);
2243 page_delete_collect(tpp
, mhp
);
2244 bit
= pfn
- mdsp
->mds_base
;
2245 mdsp
->mds_bitmap
[bit
/ NBPBMW
] |=
2246 (1 << (bit
% NBPBMW
));
2248 ASSERT(pp_targ
== NULL
);
2252 if ((mhp
->mh_cancel
== 0) && (mhp
->mh_hold_todo
!= 0) &&
2255 * This code is needed as we cannot wait
2256 * for a page to be locked OR the delete to
2257 * be cancelled. Also, we must delay so
2258 * that other threads get a chance to run
2259 * on our cpu, otherwise page locks may be
2260 * held indefinitely by those threads.
2262 MDSTAT_INCR(mhp
, ndelay
);
2263 CALLB_CPR_SAFE_BEGIN(&cprinfo
);
2264 (void) cv_reltimedwait(&mhp
->mh_cv
, &mhp
->mh_mutex
,
2265 DEL_BUSY_WAIT_TICKS
, TR_CLOCK_TICK
);
2266 CALLB_CPR_SAFE_END(&cprinfo
, &mhp
->mh_mutex
);
2269 /* stop the dr aio cleanup thread */
2270 mhp
->mh_dr_aio_cleanup_cancel
= 1;
2271 transit_list_collect(mhp
, 0);
2272 if (freemem_left
!= 0) {
2273 /* Return any surplus. */
2274 page_create_putback(freemem_left
);
2277 #ifdef MEM_DEL_STATS
2278 ntick_total
= (uint64_t)ddi_get_lbolt() - start_total
;
2279 #endif /* MEM_DEL_STATS */
2280 MDSTAT_TOTAL(mhp
, ntick_total
);
2284 * If the memory delete was cancelled, exclusive-wanted bits must
2285 * be cleared. If there are retired pages being deleted, they need
2288 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
2289 mdsp
= mdsp
->mds_next
) {
2292 p_end
= mdsp
->mds_base
+ mdsp
->mds_npgs
;
2293 for (pfn
= mdsp
->mds_base
; pfn
< p_end
; pfn
++) {
2297 bit
= pfn
- mdsp
->mds_base
;
2298 if (mhp
->mh_cancel
) {
2299 pp
= page_numtopp_nolock(pfn
);
2301 if ((mdsp
->mds_bitmap
[bit
/ NBPBMW
] &
2302 (1 << (bit
% NBPBMW
))) == 0) {
2303 page_lock_clr_exclwanted(pp
);
2309 if ((mdsp
->mds_bitmap_retired
[bit
/ NBPBMW
] &
2310 (1 << (bit
% NBPBMW
))) != 0) {
2311 /* do we already have pp? */
2313 pp
= page_numtopp_nolock(pfn
);
2316 ASSERT(PP_RETIRED(pp
));
2317 if (mhp
->mh_cancel
!= 0) {
2320 * To satisfy ASSERT below in
2323 mhp
->mh_hold_todo
++;
2325 (void) page_unretire_pp(pp
,
2332 * Free retired page bitmap and collected page bitmap
2334 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
2335 mdsp
= mdsp
->mds_next
) {
2336 ASSERT(mdsp
->mds_bitmap_retired
!= NULL
);
2337 kmem_free(mdsp
->mds_bitmap_retired
, MDS_BITMAPBYTES(mdsp
));
2338 mdsp
->mds_bitmap_retired
= NULL
; /* Paranoia. */
2339 ASSERT(mdsp
->mds_bitmap
!= NULL
);
2340 kmem_free(mdsp
->mds_bitmap
, MDS_BITMAPBYTES(mdsp
));
2341 mdsp
->mds_bitmap
= NULL
; /* Paranoia. */
2344 /* wait for our dr aio cancel thread to exit */
2345 while (!(mhp
->mh_aio_cleanup_done
)) {
2346 CALLB_CPR_SAFE_BEGIN(&cprinfo
);
2347 delay(drv_usectohz(DR_AIO_CLEANUP_DELAY
));
2348 CALLB_CPR_SAFE_END(&cprinfo
, &mhp
->mh_mutex
);
2351 if (mhp
->mh_cancel
!= 0) {
2354 comp_code
= mhp
->mh_cancel
;
2356 * Go through list of deleted pages (mh_deleted) freeing
2359 while ((pp
= mhp
->mh_deleted
) != NULL
) {
2360 mhp
->mh_deleted
= pp
->p_next
;
2361 mhp
->mh_hold_todo
++;
2362 mutex_exit(&mhp
->mh_mutex
);
2363 /* Restore p_next. */
2364 pp
->p_next
= pp
->p_prev
;
2365 if (PP_ISFREE(pp
)) {
2371 mutex_enter(&mhp
->mh_mutex
);
2373 ASSERT(mhp
->mh_hold_todo
== mhp
->mh_vm_pages
);
2375 mutex_exit(&mhp
->mh_mutex
);
2376 put_availrmem(mhp
->mh_vm_pages
);
2377 mutex_enter(&mhp
->mh_mutex
);
2383 * All the pages are no longer in use and are exclusively locked.
2386 mhp
->mh_deleted
= NULL
;
2388 kphysm_del_cleanup(mhp
);
2391 * mem_node_del_range needs to be after kphysm_del_cleanup so
2392 * that the mem_node_config[] will remain intact for the cleanup.
2394 for (mdsp
= mhp
->mh_transit
.trl_spans
; mdsp
!= NULL
;
2395 mdsp
= mdsp
->mds_next
) {
2396 mem_node_del_range(mdsp
->mds_base
,
2397 mdsp
->mds_base
+ mdsp
->mds_npgs
- 1);
2399 /* cleanup the page counters */
2400 page_ctrs_cleanup();
2402 comp_code
= KPHYSM_OK
;
2405 mutex_exit(&mhp
->mh_mutex
);
2406 kphysm_setup_post_del(mhp
->mh_vm_pages
,
2407 (comp_code
== KPHYSM_OK
) ? 0 : 1);
2408 mutex_enter(&mhp
->mh_mutex
);
2411 /* mhp->mh_mutex exited by CALLB_CPR_EXIT() */
2412 mhp
->mh_state
= MHND_DONE
;
2413 del_complete_funcp
= mhp
->mh_delete_complete
;
2414 del_complete_arg
= mhp
->mh_delete_complete_arg
;
2415 CALLB_CPR_EXIT(&cprinfo
);
2416 (*del_complete_funcp
)(del_complete_arg
, comp_code
);
2422 * Start the delete of the memory from the system.
2427 void (*complete
)(void *, int),
2430 struct mem_handle
*mhp
;
2432 mhp
= kphysm_lookup_mem_handle(handle
);
2434 return (KPHYSM_EHANDLE
);
2436 switch (mhp
->mh_state
) {
2438 ASSERT(mhp
->mh_state
!= MHND_FREE
);
2439 mutex_exit(&mhp
->mh_mutex
);
2440 return (KPHYSM_EHANDLE
);
2445 mutex_exit(&mhp
->mh_mutex
);
2446 return (KPHYSM_ESEQUENCE
);
2448 mutex_exit(&mhp
->mh_mutex
);
2449 return (KPHYSM_ESEQUENCE
);
2451 mutex_exit(&mhp
->mh_mutex
);
2452 return (KPHYSM_ESEQUENCE
);
2455 cmn_err(CE_WARN
, "kphysm_del_start(0x%p) state corrupt %d",
2456 (void *)mhp
, mhp
->mh_state
);
2458 mutex_exit(&mhp
->mh_mutex
);
2459 return (KPHYSM_EHANDLE
);
2462 if (mhp
->mh_transit
.trl_spans
== NULL
) {
2463 mutex_exit(&mhp
->mh_mutex
);
2464 return (KPHYSM_ENOWORK
);
2467 ASSERT(complete
!= NULL
);
2468 mhp
->mh_delete_complete
= complete
;
2469 mhp
->mh_delete_complete_arg
= complete_arg
;
2470 mhp
->mh_state
= MHND_STARTING
;
2472 * Release the mutex in case thread_create sleeps.
2474 mutex_exit(&mhp
->mh_mutex
);
2477 * The "obvious" process for this thread is pageout (proc_pageout)
2478 * but this gives the thread too much power over freemem
2479 * which results in freemem starvation.
2481 (void) thread_create(NULL
, 0, delete_memory_thread
, mhp
, 0, &p0
,
2482 TS_RUN
, maxclsyspri
- 1);
2487 static kmutex_t pp_dummy_lock
; /* Protects init. of pp_dummy. */
2488 static caddr_t pp_dummy
;
2489 static pgcnt_t pp_dummy_npages
;
2490 static pfn_t
*pp_dummy_pfn
; /* Array of dummy pfns. */
2493 memseg_remap_init_pages(page_t
*pages
, page_t
*epages
)
2497 for (pp
= pages
; pp
< epages
; pp
++) {
2498 pp
->p_pagenum
= PFN_INVALID
; /* XXXX */
2499 pp
->p_offset
= (u_offset_t
)-1;
2500 page_iolock_init(pp
);
2501 while (!page_lock(pp
, SE_EXCL
, (kmutex_t
*)NULL
, P_RECLAIM
))
2503 page_lock_delete(pp
);
2510 mutex_enter(&pp_dummy_lock
);
2511 if (pp_dummy
== NULL
) {
2516 * dpages starts off as the size of the structure and
2517 * ends up as the minimum number of pages that will
2518 * hold a whole number of page_t structures.
2520 dpages
= sizeof (page_t
);
2521 ASSERT(dpages
!= 0);
2522 ASSERT(dpages
<= MMU_PAGESIZE
);
2524 while ((dpages
& 1) == 0)
2527 pp_dummy_npages
= dpages
;
2529 * Allocate pp_dummy pages directly from static_arena,
2530 * since these are whole page allocations and are
2531 * referenced by physical address. This also has the
2532 * nice fringe benefit of hiding the memory from
2533 * ::findleaks since it doesn't deal well with allocated
2534 * kernel heap memory that doesn't have any mappings.
2536 pp_dummy
= vmem_xalloc(static_arena
, ptob(pp_dummy_npages
),
2537 PAGESIZE
, 0, 0, NULL
, NULL
, VM_SLEEP
);
2538 bzero(pp_dummy
, ptob(pp_dummy_npages
));
2539 ASSERT(((uintptr_t)pp_dummy
& MMU_PAGEOFFSET
) == 0);
2540 pp_dummy_pfn
= kmem_alloc(sizeof (*pp_dummy_pfn
) *
2541 pp_dummy_npages
, KM_SLEEP
);
2542 for (i
= 0; i
< pp_dummy_npages
; i
++) {
2543 pp_dummy_pfn
[i
] = hat_getpfnum(kas
.a_hat
,
2544 &pp_dummy
[MMU_PAGESIZE
* i
]);
2545 ASSERT(pp_dummy_pfn
[i
] != PFN_INVALID
);
2548 * Initialize the page_t's to a known 'deleted' state
2549 * that matches the state of deleted pages.
2551 memseg_remap_init_pages((page_t
*)pp_dummy
,
2552 (page_t
*)(pp_dummy
+ ptob(pp_dummy_npages
)));
2553 /* Remove kmem mappings for the pages for safety. */
2554 hat_unload(kas
.a_hat
, pp_dummy
, ptob(pp_dummy_npages
),
2556 /* Leave pp_dummy pointer set as flag that init is done. */
2558 mutex_exit(&pp_dummy_lock
);
/*
 * Remap a page-aligned range of page_t's to dummy pages.
 */
void
remap_to_dummy(caddr_t va, pgcnt_t metapgs)
{
	int phase;

	ASSERT(IS_P2ALIGNED((uint64_t)(uintptr_t)va, PAGESIZE));

	/*
	 * We may start remapping at a non-zero page offset
	 * within the dummy pages since the low/high ends
	 * of the outgoing pp's could be shared by other
	 * memsegs (see memseg_remap_meta).
	 */
	phase = btop((uint64_t)(uintptr_t)va) % pp_dummy_npages;
	/*CONSTCOND*/
	ASSERT(PAGESIZE % sizeof (page_t) || phase == 0);

	while (metapgs != 0) {
		pgcnt_t n;
		int i, j;

		n = pp_dummy_npages;
		if (n > metapgs)
			n = metapgs;
		for (i = 0; i < n; i++) {
			j = (i + phase) % pp_dummy_npages;
			hat_devload(kas.a_hat, va, ptob(1), pp_dummy_pfn[j],
			    PROT_READ,
			    HAT_LOAD | HAT_LOAD_NOCONSIST |
			    HAT_LOAD_REMAP);
			va += ptob(1);
		}

		metapgs -= n;
	}
}
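/*
 * Illustrative note (not from the original file): the phase term keeps the
 * dummy pfn assignment a pure function of the virtual page being remapped.
 * For example, with pp_dummy_npages == 15, a va whose page index is 47 gets
 * phase 47 % 15 == 2, so its first page maps pp_dummy_pfn[2], the next
 * pp_dummy_pfn[3], and so on, wrapping modulo 15.  Two memsegs that share a
 * boundary metadata page therefore remap it to the same dummy pfn.
 */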
static void
memseg_remap_to_dummy(struct memseg *seg)
{
	caddr_t pp;
	pgcnt_t metapgs;

	ASSERT(memseg_is_dynamic(seg));
	ASSERT(pp_dummy != NULL);

	if (!memseg_includes_meta(seg)) {
		memseg_remap_meta(seg);
		return;
	}

	pp = (caddr_t)seg->pages;
	metapgs = seg->pages_base - memseg_get_start(seg);
	ASSERT(metapgs != 0);

	seg->pages_end = seg->pages_base;

	remap_to_dummy(pp, metapgs);
}
/*
 * Transition all the deleted pages to the deleted state so that
 * page_lock will not wait. The page_lock_delete call will
 * also wake up any waiters.
 */
static void
memseg_lock_delete_all(struct memseg *seg)
{
	page_t *pp;

	for (pp = seg->pages; pp < seg->epages; pp++) {
		pp->p_pagenum = PFN_INVALID;	/* XXXX */
		page_lock_delete(pp);
	}
}
static void
kphysm_del_cleanup(struct mem_handle *mhp)
{
	struct memdelspan	*mdsp;
	struct memseg		*seg;
	struct memseg		**segpp;
	struct memseg		*seglist;
	pfn_t			p_end;
	uint64_t		avmem;
	pgcnt_t			avpgs;
	pgcnt_t			npgs;

	avpgs = mhp->mh_vm_pages;

	memsegs_lock(1);

	/*
	 * remove from main segment list.
	 */
	npgs = 0;
	seglist = NULL;
	for (mdsp = mhp->mh_transit.trl_spans; mdsp != NULL;
	    mdsp = mdsp->mds_next) {
		p_end = mdsp->mds_base + mdsp->mds_npgs;
		for (segpp = &memsegs; (seg = *segpp) != NULL; ) {
			if (seg->pages_base >= p_end ||
			    seg->pages_end <= mdsp->mds_base) {
				/* Span and memseg don't overlap. */
				segpp = &((*segpp)->next);
				continue;
			}
			ASSERT(seg->pages_base >= mdsp->mds_base);
			ASSERT(seg->pages_end <= p_end);

			PLCNT_MODIFY_MAX(seg->pages_base,
			    seg->pages_base - seg->pages_end);

			/* Hide the memseg from future scans. */
			hat_kpm_delmem_mseg_update(seg, segpp);
			*segpp = seg->next;
			membar_producer();	/* TODO: Needed? */
			npgs += MSEG_NPAGES(seg);

			/*
			 * Leave the deleted segment's next pointer intact
			 * in case a memsegs scanning loop is walking this
			 * segment concurrently.
			 */
			seg->lnext = seglist;
			seglist = seg;
		}
	}

	build_pfn_hash();

	ASSERT(npgs < total_pages);
	total_pages -= npgs;

	/*
	 * Recalculate the paging parameters now total_pages has changed.
	 * This will also cause the clock hands to be reset before next use.
	 */
	setupclock(1);

	memsegs_unlock(1);

	mutex_exit(&mhp->mh_mutex);

	while ((seg = seglist) != NULL) {
		pfn_t mseg_start;
		pfn_t mseg_base, mseg_end;
		pgcnt_t mseg_npgs;
		int mlret;

		seglist = seg->lnext;

		/*
		 * Put the page_t's into the deleted state to stop
		 * cv_wait()s on the pages. When we remap, the dummy
		 * page_t's will be in the same state.
		 */
		memseg_lock_delete_all(seg);
		/*
		 * Collect up information based on pages_base and pages_end
		 * early so that we can flag early that the memseg has been
		 * deleted by setting pages_end == pages_base.
		 */
		mseg_base = seg->pages_base;
		mseg_end = seg->pages_end;
		mseg_npgs = MSEG_NPAGES(seg);
		mseg_start = memseg_get_start(seg);

		if (memseg_is_dynamic(seg)) {
			/* Remap the meta data to our special dummy area. */
			memseg_remap_to_dummy(seg);

			mutex_enter(&memseg_lists_lock);
			seg->lnext = memseg_va_avail;
			memseg_va_avail = seg;
			mutex_exit(&memseg_lists_lock);
		} else {
			/*
			 * For memory whose page_ts were allocated
			 * at boot, we need to find a new use for
			 * the page_t memory.
			 * For the moment, just leak it.
			 * (It is held in the memseg_delete_junk list.)
			 */
			seg->pages_end = seg->pages_base;

			mutex_enter(&memseg_lists_lock);
			seg->lnext = memseg_delete_junk;
			memseg_delete_junk = seg;
			mutex_exit(&memseg_lists_lock);
		}

		/* Must not use seg now as it could be re-used. */

		memlist_write_lock();

		mlret = memlist_delete_span(
		    (uint64_t)(mseg_base) << PAGESHIFT,
		    (uint64_t)(mseg_npgs) << PAGESHIFT,
		    &phys_avail);
		ASSERT(mlret == MEML_SPANOP_OK);

		mlret = memlist_delete_span(
		    (uint64_t)(mseg_start) << PAGESHIFT,
		    (uint64_t)(mseg_end - mseg_start) <<
		    PAGESHIFT,
		    &phys_install);
		ASSERT(mlret == MEML_SPANOP_OK);
		phys_install_has_changed();

		memlist_write_unlock();
	}

	memlist_read_lock();
	installed_top_size(phys_install, &physmax, &physinstalled);
	memlist_read_unlock();

	mutex_enter(&freemem_lock);
	maxmem -= avpgs;
	physmem -= avpgs;
	/* availrmem is adjusted during the delete. */
	availrmem_initial -= avpgs;

	mutex_exit(&freemem_lock);

	dump_resize();

	cmn_err(CE_CONT, "?kphysm_delete: mem = %ldK "
	    "(0x%" PRIx64 ")\n",
	    physinstalled << (PAGESHIFT - 10),
	    (uint64_t)physinstalled << PAGESHIFT);

	avmem = (uint64_t)freemem << PAGESHIFT;
	cmn_err(CE_CONT, "?kphysm_delete: "
	    "avail mem = %" PRId64 "\n", avmem);

	/*
	 * Update lgroup generation number on single lgroup systems
	 */
	if (nlgrps == 1)
		lgrp_config(LGRP_CONFIG_GEN_UPDATE, 0, 0);

	/* Successfully deleted system memory */
	mutex_enter(&mhp->mh_mutex);
}
static uint_t mdel_nullvp_waiter;

static void
page_delete_collect(
	page_t *pp,
	struct mem_handle *mhp)
{
	if (pp->p_vnode) {
		page_hashout(pp, (kmutex_t *)NULL);
		/* do not do PP_SETAGED(pp); */
	} else {
		kmutex_t *sep;

		sep = page_se_mutex(pp);
		mutex_enter(sep);
		if (CV_HAS_WAITERS(&pp->p_cv)) {
			mdel_nullvp_waiter++;
			cv_broadcast(&pp->p_cv);
		}
		mutex_exit(sep);
	}
	ASSERT(pp->p_next == pp->p_prev);
	ASSERT(pp->p_next == NULL || pp->p_next == pp);
	pp->p_next = mhp->mh_deleted;
	mhp->mh_deleted = pp;
	ASSERT(mhp->mh_hold_todo != 0);
	mhp->mh_hold_todo--;
}
static void
transit_list_collect(struct mem_handle *mhp, int v)
{
	struct transit_list_head *trh;

	trh = &transit_list_head;
	mutex_enter(&trh->trh_lock);
	mhp->mh_transit.trl_collect = v;
	mutex_exit(&trh->trh_lock);
}
static void
transit_list_insert(struct transit_list *tlp)
{
	struct transit_list_head *trh;

	trh = &transit_list_head;
	ASSERT(MUTEX_HELD(&trh->trh_lock));
	tlp->trl_next = trh->trh_head;
	trh->trh_head = tlp;
}
static void
transit_list_remove(struct transit_list *tlp)
{
	struct transit_list_head *trh;
	struct transit_list **tlpp;

	trh = &transit_list_head;
	tlpp = &trh->trh_head;
	ASSERT(MUTEX_HELD(&trh->trh_lock));
	while (*tlpp != NULL && *tlpp != tlp)
		tlpp = &(*tlpp)->trl_next;
	ASSERT(*tlpp != NULL);
	if (*tlpp == tlp) {
		*tlpp = tlp->trl_next;
		tlp->trl_next = NULL;
	}
}
static struct transit_list *
pfnum_to_transit_list(struct transit_list_head *trh, pfn_t pfnum)
{
	struct transit_list *tlp;

	for (tlp = trh->trh_head; tlp != NULL; tlp = tlp->trl_next) {
		struct memdelspan *mdsp;

		for (mdsp = tlp->trl_spans; mdsp != NULL;
		    mdsp = mdsp->mds_next) {
			if (pfnum >= mdsp->mds_base &&
			    pfnum < (mdsp->mds_base + mdsp->mds_npgs)) {
				return (tlp);
			}
		}
	}
	return (NULL);
}
int
pfn_is_being_deleted(pfn_t pfnum)
{
	struct transit_list_head *trh;
	struct transit_list *tlp;
	int ret;

	trh = &transit_list_head;
	if (trh->trh_head == NULL)
		return (0);

	mutex_enter(&trh->trh_lock);
	tlp = pfnum_to_transit_list(trh, pfnum);
	ret = (tlp != NULL && tlp->trl_collect);
	mutex_exit(&trh->trh_lock);

	return (ret);
}
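/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * must not establish new translations to memory that is mid-delete can test
 * the pfn first, e.g.
 *
 *	if (pfn_is_being_deleted(pfnum))
 *		return (EAGAIN);	(hypothetical caller policy)
 *
 * The quick trh_head == NULL test above keeps this cheap in the common case
 * where no delete is in progress.
 */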
#ifdef MEM_DEL_STATS

static void
mem_del_stat_print_func(struct mem_handle *mhp)
{
	uint64_t tmp;

	if (mem_del_stat_print) {
		printf("memory delete loop %x/%x, statistics%s\n",
		    (uint_t)mhp->mh_transit.trl_spans->mds_base,
		    (uint_t)mhp->mh_transit.trl_spans->mds_npgs,
		    (mhp->mh_cancel ? " (cancelled)" : ""));
		printf("\t%8u nloop\n", mhp->mh_delstat.nloop);
		printf("\t%8u need_free\n", mhp->mh_delstat.need_free);
		printf("\t%8u free_loop\n", mhp->mh_delstat.free_loop);
		printf("\t%8u free_low\n", mhp->mh_delstat.free_low);
		printf("\t%8u free_failed\n", mhp->mh_delstat.free_failed);
		printf("\t%8u ncheck\n", mhp->mh_delstat.ncheck);
		printf("\t%8u nopaget\n", mhp->mh_delstat.nopaget);
		printf("\t%8u lockfail\n", mhp->mh_delstat.lockfail);
		printf("\t%8u nfree\n", mhp->mh_delstat.nfree);
		printf("\t%8u nreloc\n", mhp->mh_delstat.nreloc);
		printf("\t%8u nrelocfail\n", mhp->mh_delstat.nrelocfail);
		printf("\t%8u already_done\n", mhp->mh_delstat.already_done);
		printf("\t%8u first_notfree\n", mhp->mh_delstat.first_notfree);
		printf("\t%8u npplocked\n", mhp->mh_delstat.npplocked);
		printf("\t%8u nlockreloc\n", mhp->mh_delstat.nlockreloc);
		printf("\t%8u nnorepl\n", mhp->mh_delstat.nnorepl);
		printf("\t%8u nmodreloc\n", mhp->mh_delstat.nmodreloc);
		printf("\t%8u ndestroy\n", mhp->mh_delstat.ndestroy);
		printf("\t%8u nputpage\n", mhp->mh_delstat.nputpage);
		printf("\t%8u nnoreclaim\n", mhp->mh_delstat.nnoreclaim);
		printf("\t%8u ndelay\n", mhp->mh_delstat.ndelay);
		printf("\t%8u demotefail\n", mhp->mh_delstat.demotefail);
		printf("\t%8u retired\n", mhp->mh_delstat.retired);
		printf("\t%8u toxic\n", mhp->mh_delstat.toxic);
		printf("\t%8u failing\n", mhp->mh_delstat.failing);
		printf("\t%8u modtoxic\n", mhp->mh_delstat.modtoxic);
		printf("\t%8u npplkdtoxic\n", mhp->mh_delstat.npplkdtoxic);
		printf("\t%8u gptlmodfail\n", mhp->mh_delstat.gptlmodfail);
		printf("\t%8u gptllckfail\n", mhp->mh_delstat.gptllckfail);
		tmp = mhp->mh_delstat.nticks_total / hz;  /* seconds */
		printf(
		    "\t%"PRIu64" nticks_total - %"PRIu64" min %"PRIu64" sec\n",
		    mhp->mh_delstat.nticks_total, tmp / 60, tmp % 60);

		tmp = mhp->mh_delstat.nticks_pgrp / hz;  /* seconds */
		printf(
		    "\t%"PRIu64" nticks_pgrp - %"PRIu64" min %"PRIu64" sec\n",
		    mhp->mh_delstat.nticks_pgrp, tmp / 60, tmp % 60);
	}
}
#endif /* MEM_DEL_STATS */
struct mem_callback {
	kphysm_setup_vector_t	*vec;
	void			*arg;
};

#define	NMEMCALLBACKS		100

static struct mem_callback mem_callbacks[NMEMCALLBACKS];
static uint_t nmemcallbacks;
static krwlock_t mem_callback_rwlock;
int
kphysm_setup_func_register(kphysm_setup_vector_t *vec, void *arg)
{
	uint_t i, found;

	/*
	 * This test will become more complicated when the version must
	 * change.
	 */
	if (vec->version != KPHYSM_SETUP_VECTOR_VERSION)
		return (EINVAL);

	if (vec->post_add == NULL || vec->pre_del == NULL ||
	    vec->post_del == NULL)
		return (EINVAL);

	rw_enter(&mem_callback_rwlock, RW_WRITER);
	for (i = 0, found = 0; i < nmemcallbacks; i++) {
		if (mem_callbacks[i].vec == NULL && found == 0)
			found = i + 1;
		if (mem_callbacks[i].vec == vec &&
		    mem_callbacks[i].arg == arg) {
#ifdef DEBUG
			/* Catch this in DEBUG kernels. */
			cmn_err(CE_WARN, "kphysm_setup_func_register"
			    "(0x%p, 0x%p) duplicate registration from 0x%p",
			    (void *)vec, arg, (void *)caller());
#endif /* DEBUG */
			rw_exit(&mem_callback_rwlock);
			return (EEXIST);
		}
	}
	if (found != 0) {
		i = found - 1;
	} else {
		ASSERT(nmemcallbacks < NMEMCALLBACKS);
		if (nmemcallbacks == NMEMCALLBACKS) {
			rw_exit(&mem_callback_rwlock);
			return (ENOMEM);
		}
		i = nmemcallbacks++;
	}
	mem_callbacks[i].vec = vec;
	mem_callbacks[i].arg = arg;
	rw_exit(&mem_callback_rwlock);

	return (0);
}
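/*
 * Illustrative sketch (not part of the original source): a client that sizes
 * its caches from physmem would register callbacks along these lines.  The
 * my_* names and my_arg are hypothetical.
 *
 *	static kphysm_setup_vector_t my_vec = {
 *		KPHYSM_SETUP_VECTOR_VERSION,
 *		my_post_add,
 *		my_pre_del,
 *		my_post_del
 *	};
 *
 *	if (kphysm_setup_func_register(&my_vec, my_arg) != 0)
 *		... handle the nonzero errno-style return ...
 *
 * and undo the registration with
 * kphysm_setup_func_unregister(&my_vec, my_arg) before unloading.
 */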
void
kphysm_setup_func_unregister(kphysm_setup_vector_t *vec, void *arg)
{
	uint_t i;

	rw_enter(&mem_callback_rwlock, RW_WRITER);
	for (i = 0; i < nmemcallbacks; i++) {
		if (mem_callbacks[i].vec == vec &&
		    mem_callbacks[i].arg == arg) {
			mem_callbacks[i].vec = NULL;
			mem_callbacks[i].arg = NULL;
			if (i == (nmemcallbacks - 1))
				nmemcallbacks--;
			break;
		}
	}
	rw_exit(&mem_callback_rwlock);
}
static void
kphysm_setup_post_add(pgcnt_t delta_pages)
{
	uint_t i;

	rw_enter(&mem_callback_rwlock, RW_READER);
	for (i = 0; i < nmemcallbacks; i++) {
		if (mem_callbacks[i].vec != NULL) {
			(*mem_callbacks[i].vec->post_add)
			    (mem_callbacks[i].arg, delta_pages);
		}
	}
	rw_exit(&mem_callback_rwlock);
}
/*
 * Note the locking between pre_del and post_del: The reader lock is held
 * between the two calls to stop the set of functions from changing.
 */

static int
kphysm_setup_pre_del(pgcnt_t delta_pages)
{
	uint_t i;
	int ret;
	int aret;

	ret = 0;
	rw_enter(&mem_callback_rwlock, RW_READER);
	for (i = 0; i < nmemcallbacks; i++) {
		if (mem_callbacks[i].vec != NULL) {
			aret = (*mem_callbacks[i].vec->pre_del)
			    (mem_callbacks[i].arg, delta_pages);
			ret |= aret;
		}
	}

	return (ret);
}
static void
kphysm_setup_post_del(pgcnt_t delta_pages, int cancelled)
{
	uint_t i;

	for (i = 0; i < nmemcallbacks; i++) {
		if (mem_callbacks[i].vec != NULL) {
			(*mem_callbacks[i].vec->post_del)
			    (mem_callbacks[i].arg, delta_pages, cancelled);
		}
	}
	rw_exit(&mem_callback_rwlock);
}
static int
kphysm_split_memseg(
	pfn_t base,
	pgcnt_t npgs)
{
	struct memseg *seg;
	struct memseg **segpp;
	pgcnt_t size_low, size_high;
	struct memseg *seg_low, *seg_mid, *seg_high;

	/*
	 * Lock the memsegs list against other updates now
	 */
	memsegs_lock(1);

	/*
	 * Find boot time memseg that wholly covers this area.
	 */

	/* First find the memseg with page 'base' in it. */
	for (segpp = &memsegs; (seg = *segpp) != NULL;
	    segpp = &((*segpp)->next)) {
		if (base >= seg->pages_base && base < seg->pages_end)
			break;
	}
	if (seg == NULL) {
		memsegs_unlock(1);
		return (0);
	}
	if (memseg_includes_meta(seg)) {
		memsegs_unlock(1);
		return (0);
	}
	if ((base + npgs) > seg->pages_end) {
		memsegs_unlock(1);
		return (0);
	}

	/*
	 * Work out the size of the two segments that will
	 * surround the new segment, one for low address
	 * and one for high.
	 */
	ASSERT(base >= seg->pages_base);
	size_low = base - seg->pages_base;
	ASSERT(seg->pages_end >= (base + npgs));
	size_high = seg->pages_end - (base + npgs);

	/*
	 * Sanity check.
	 */
	if ((size_low + size_high) == 0) {
		memsegs_unlock(1);
		return (0);
	}

	/*
	 * Allocate the new structures. The old memseg will not be freed
	 * as there may be a reference to it.
	 */
	seg_low = NULL;
	seg_high = NULL;

	if (size_low != 0)
		seg_low = memseg_alloc();

	seg_mid = memseg_alloc();

	if (size_high != 0)
		seg_high = memseg_alloc();

	/*
	 * All allocation done now.
	 */
	if (size_low != 0) {
		seg_low->pages = seg->pages;
		seg_low->epages = seg_low->pages + size_low;
		seg_low->pages_base = seg->pages_base;
		seg_low->pages_end = seg_low->pages_base + size_low;
		seg_low->next = seg_mid;
		seg_low->msegflags = seg->msegflags;
	}
	if (size_high != 0) {
		seg_high->pages = seg->epages - size_high;
		seg_high->epages = seg_high->pages + size_high;
		seg_high->pages_base = seg->pages_end - size_high;
		seg_high->pages_end = seg_high->pages_base + size_high;
		seg_high->next = seg->next;
		seg_high->msegflags = seg->msegflags;
	}

	seg_mid->pages = seg->pages + size_low;
	seg_mid->pages_base = seg->pages_base + size_low;
	seg_mid->epages = seg->epages - size_high;
	seg_mid->pages_end = seg->pages_end - size_high;
	seg_mid->next = (seg_high != NULL) ? seg_high : seg->next;
	seg_mid->msegflags = seg->msegflags;

	/*
	 * Update hat_kpm specific info of all involved memsegs and
	 * allow hat_kpm specific global chain updates.
	 */
	hat_kpm_split_mseg_update(seg, segpp, seg_low, seg_mid, seg_high);

	/*
	 * At this point we have two equivalent memseg sub-chains,
	 * seg and seg_low/seg_mid/seg_high, which both chain on to
	 * the same place in the global chain. By re-writing the pointer
	 * in the previous element we switch atomically from using the old
	 * (seg) to the new.
	 */
	*segpp = (seg_low != NULL) ? seg_low : seg_mid;

	membar_enter();

	build_pfn_hash();
	memsegs_unlock(1);

	/*
	 * We leave the old segment, 'seg', intact as there may be
	 * references to it. Also, as the value of total_pages has not
	 * changed and the memsegs list is effectively the same when
	 * accessed via the old or the new pointer, we do not have to
	 * cause pageout_scanner() to re-evaluate its hand pointers.
	 *
	 * We currently do not re-use or reclaim the page_t memory.
	 * If we do, then this may have to change.
	 */
	mutex_enter(&memseg_lists_lock);
	seg->lnext = memseg_edit_junk;
	memseg_edit_junk = seg;
	mutex_exit(&memseg_lists_lock);

	return (1);
}
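/*
 * Worked example (illustrative only): splitting [base, base + npgs) out of a
 * boot-time memseg covering pfns [0x10000, 0x20000) with base = 0x14000 and
 * npgs = 0x4000 gives
 *
 *	size_low  = 0x14000 - 0x10000 = 0x4000   (seg_low:  [0x10000, 0x14000))
 *	size_high = 0x20000 - 0x18000 = 0x8000   (seg_high: [0x18000, 0x20000))
 *
 * with seg_mid covering [0x14000, 0x18000).  The page_t's are carved from the
 * original seg->pages array at the same offsets, so no page_t memory moves;
 * only the memseg chain is rewritten.
 */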
/*
 * The sfmmu hat layer (e.g.) accesses some parts of the memseg
 * structure using physical addresses. Therefore a kmem_cache is
 * used with KMC_NOHASH to avoid page crossings within a memseg
 * structure. KMC_NOHASH requires that no external (outside of
 * slab) information is allowed. This, in turn, implies that the
 * cache's slabsize must be exactly a single page, since per-slab
 * information (e.g. the freelist for the slab) is kept at the
 * end of the slab, where it is easy to locate. Should be changed
 * when a more obvious kmem_cache interface/flag will become
 * available.
 */
void
mem_config_init()
{
	memseg_cache = kmem_cache_create("memseg_cache", sizeof (struct memseg),
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
}
struct memseg *
memseg_alloc(void)
{
	struct memseg *seg;

	seg = kmem_cache_alloc(memseg_cache, KM_SLEEP);
	bzero(seg, sizeof (struct memseg));

	return (seg);
}
/*
 * Return whether the page_t memory for this memseg
 * is included in the memseg itself.
 */
static int
memseg_includes_meta(struct memseg *seg)
{
	return (seg->msegflags & MEMSEG_META_INCL);
}
pfn_t
memseg_get_start(struct memseg *seg)
{
	pfn_t pt_start;

	if (memseg_includes_meta(seg)) {
		pt_start = hat_getpfnum(kas.a_hat, (caddr_t)seg->pages);

		/* Meta data is required to be at the beginning */
		ASSERT(pt_start < seg->pages_base);
	} else
		pt_start = seg->pages_base;

	return (pt_start);
}
/*
 * Invalidate memseg pointers in cpu private vm data caches.
 */
static void
memseg_cpu_vm_flush(void)
{
	cpu_t *cp;
	vm_cpu_data_t *vc;

	mutex_enter(&cpu_lock);
	pause_cpus(NULL, NULL);

	cp = cpu_list;
	do {
		vc = cp->cpu_vm_data;
		vc->vc_pnum_memseg = NULL;
		vc->vc_pnext_memseg = NULL;

	} while ((cp = cp->cpu_next) != cpu_list);

	start_cpus();
	mutex_exit(&cpu_lock);
}