/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */
/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/dumphdr.h>

#include <vm/seg_vn.h>

#include <sys/project.h>
#include <sys/shm_impl.h>

/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (FAULT_TMP_PAGES_NUM entry) array and uses
 * it if it can.  In the rare case when this page list is not large enough,
 * it goes and gets a large enough array from kmem.
 */
#define	FAULT_TMP_PAGES_NUM	0x8
#define	FAULT_TMP_PAGES_SZ	ptob(FAULT_TMP_PAGES_NUM)
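/*
 * Illustrative sketch only (not code from this file): the comment above
 * describes the intended pattern.  A fault-path caller keeps a small
 * preallocated array and falls back to kmem only when more than
 * FAULT_TMP_PAGES_NUM pages are needed.  The names plist, pl and npages
 * below are hypothetical.
 *
 *	page_t *plist[FAULT_TMP_PAGES_NUM];
 *	page_t **pl = plist;
 *
 *	if (npages > FAULT_TMP_PAGES_NUM)
 *		pl = kmem_alloc(npages * sizeof (page_t *), KM_SLEEP);
 *	...
 *	if (pl != plist)
 *		kmem_free(pl, npages * sizeof (page_t *));
 */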
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static uoff_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
const struct seg_ops segvn_ops = {
	.dup		= segvn_dup,
	.unmap		= segvn_unmap,
	.free		= segvn_free,
	.fault		= segvn_fault,
	.faulta		= segvn_faulta,
	.setprot	= segvn_setprot,
	.checkprot	= segvn_checkprot,
	.kluster	= segvn_kluster,
	.sync		= segvn_sync,
	.incore		= segvn_incore,
	.lockop		= segvn_lockop,
	.getprot	= segvn_getprot,
	.getoffset	= segvn_getoffset,
	.gettype	= segvn_gettype,
	.getvp		= segvn_getvp,
	.advise		= segvn_advise,
	.dump		= segvn_dump,
	.pagelock	= segvn_pagelock,
	.setpagesize	= segvn_setpagesize,
	.getmemid	= segvn_getmemid,
	.getpolicy	= segvn_getpolicy,
	.inherit	= segvn_inherit,
};
/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
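/*
 * Usage sketch (hedged, not code from this file): callers elsewhere in the
 * kernel typically hand one of the argsp pointers above to as_map() together
 * with segvn_create to build a zero-fill-on-demand segment, roughly:
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * as, addr, len and error here are the caller's own variables.
 */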
#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, uoff_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    uoff_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, uoff_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, uoff_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;
#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t	fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */
#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}
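/*
 * Worked example (illustrative, numbers assumed): with a 4M large page size,
 * addr = 0x1003000 and len = 0x2000, the macro rounds lpgaddr down to
 * 0x1000000 and lpgeaddr up to 0x1400000, i.e. the smallest pgsz-aligned
 * region covering [addr, addr + len).
 */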
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}
/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;
/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replicas are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amps are looked up in the svntr_hashtab hash
 * table with vp,off,size,szc used as a key. Text replication segments are
 * read only MAP_PRIVATE|MAP_TEXT segments that map a vnode. Replication is
 * achieved by forcing COW faults from vnode to amp and mapping amp pages
 * instead of vnode pages. A replication amp is assigned to a segment when it
 * gets its first pagefault. To handle main thread lgroup rehoming,
 * segvn_trasync_thread periodically rechecks whether the process still maps
 * an amp local to the main thread. If not, the async thread forces the
 * process to remap to an amp in the new home lgroup of the main thread. The
 * current text replication implementation only provides the benefit to
 * workloads that do most of their work in the main thread of a process or
 * whose threads all run in the same lgroup. To extend the text replication
 * benefit to other types of multithreaded workloads, further work would be
 * needed in the hat layer to allow the same virtual address in the same hat
 * to simultaneously map different physical addresses (i.e. page table
 * replication would be needed).
 *
 * amp pages are used instead of vnode pages as long as the segment has a very
 * simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens, such as protection is changed, a real COW fault happens, pagesize
 * is changed, MC_LOCK is requested or the segment is partially unmapped, we
 * turn off text replication by converting the segment back to a vnode only
 * segment (unmap the segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab. Processes that are launched after the file is already
 * changed can't use the replicas created prior to the file change. To
 * implement this functionality hash entries are timestamped. A replica can
 * only be used if the current file modification time is the same as the
 * timestamp saved when the hash entry was created. However timestamps alone
 * are not sufficient to detect file modification via mmap(MAP_SHARED)
 * mappings, so we deal with file changes via MAP_SHARED mappings differently.
 * When writable MAP_SHARED mappings are created to vnodes marked as
 * executable we mark all existing replicas for this vnode as not usable for
 * future text mappings. And we don't create new replicas for files that
 * currently have potentially writable MAP_SHARED mappings (i.e.
 * vn_is_mapped(V_WRITE) is true).
 */
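/*
 * Added note (summarizing state already used throughout this file): a
 * segment's svd->tr_state moves between SEGVN_TR_OFF (no replication),
 * SEGVN_TR_INIT (candidate chosen at segvn_create() time, replication amp
 * not yet attached) and SEGVN_TR_ON (replication amp attached); any of the
 * "complicated" events described above drops the segment back to
 * SEGVN_TR_OFF.
 */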
#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)

size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int	segvn_disable_textrepl = 1;
size_t	textrepl_size_thresh = (size_t)-1;
size_t	segvn_textrepl_bytes = 0;
size_t	segvn_textrepl_max_bytes = 0;
clock_t	segvn_update_textrepl_interval = 0;
int	segvn_update_tr_time = 10;
int	segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);
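/*
 * Hedged note tying the factor above to segvn_init() below: the text
 * replication byte budget is computed there as
 * ptob(physmem) / segvn_textrepl_max_bytes_factor, so the default factor of
 * 20 caps replicated text at roughly 5% of physical memory.
 */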
/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions there.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}
#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = fop_pageio(vp, NULL,
			    0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
static int
segvn_create(struct seg *seg, void *argsp)
{
	extern lgrp_mem_policy_t lgrp_mem_default_policy;
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	struct anon_map *amp;
	lgrp_mem_policy_t mpolicy = lgrp_mem_default_policy;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
	    (a->amp != NULL && a->type == MAP_PRIVATE) ||
	    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
		a->szc = 0;
	} else {
		if (a->szc > segvn_maxpgszc)
			a->szc = segvn_maxpgszc;
		pgsz = page_get_pagesize(a->szc);
		if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
		    !IS_P2ALIGNED(seg->s_size, pgsz)) {
			a->szc = 0;
		} else if (a->vp != NULL) {
			if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
				/*
				 * hat_page_demote() is not supported
				 * on these pages.
				 */
				a->szc = 0;
			} else if (map_addr_vacalign_check(seg->s_base,
			    a->offset & PAGEMASK)) {
				a->szc = 0;
			}
		} else if (a->amp != NULL) {
			pgcnt_t anum = btopr(a->offset);
			pgcnt_t pgcnt = page_get_pagecnt(a->szc);
			if (!IS_P2ALIGNED(anum, pgcnt)) {
				a->szc = 0;
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	crhold(cred = CRED());

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = fop_addmap(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error != 0) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
	}

	/*
	 * svntr_hashtab will be NULL if we support shared regions.
	 */
	trok = ((a->flags & MAP_TEXT) &&
	    (seg->s_size > textrepl_size_thresh ||
	    (a->flags & _MAP_TEXTREPL)) &&
	    lgrp_optimizations() && svntr_hashtab != NULL &&
	    a->type == MAP_PRIVATE && swresv == 0 &&
	    !(a->flags & MAP_NORESERVE) &&
	    seg->s_as != &kas && a->vp->v_type == VREG);

	ASSERT(!trok || !use_rgn);

	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;

	/*
	 * Anonymous mappings have no backing file so the offset is
	 * meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->maxprot = a->maxprot;
	svd->advice = MADV_NORMAL;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet, allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will insure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}
/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = nvpage + npages1 + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments just decrement the anon
	 * map's refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			ASSERT(amp1 == NULL);
			anon_release(amp2->ahp, btop(amp2->size));
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			svd2->amp = NULL;	/* needed for seg_free */
		}
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}

	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;	/* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}
/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(seg1, seg2, a, swresv)
	struct seg *seg1, *seg2;
	struct segvn_crargs *a;
	size_t swresv;
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;

		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}
/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;

		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}
/*
 * Duplicate all the pages in the segment. This may break COW sharing for a
 * given page. If the page is marked with inherit zero set, then instead of
 * duplicating the page, we zero the page.
 */
static int
segvn_dup_pages(struct seg *seg, struct seg *newseg)
{
	int error;
	uint_t prot;
	page_t *pp;
	struct anon *ap, *newap;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
	ulong_t old_idx = svd->anon_index;
	ulong_t new_idx = 0;

	i = btopr(seg->s_size);

	/*
	 * XXX break cow sharing using PAGESIZE
	 * pages. They will be relocated into larger
	 * pages at fault time.
	 */
	while (i-- > 0) {
		if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
			struct vpage *vpp;

			vpp = &svd->vpage[seg_page(seg, addr)];

			/*
			 * prot need not be computed below 'cause anon_private
			 * is going to ignore it anyway as child doesn't inherit
			 * pagelock from parent.
			 */
			prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;

			/*
			 * Check whether we should zero this or dup it.
			 */
			if (svd->svn_inz == SEGVN_INZ_ALL ||
			    (svd->svn_inz == SEGVN_INZ_VPP &&
			    VPP_ISINHZERO(vpp))) {
				pp = anon_zero(newseg, addr, &newap,
				    newsvd->cred);
			} else {
				page_t *anon_pl[1+1];
				uint_t vpprot;

				error = anon_getpage(&ap, &vpprot, anon_pl,
				    PAGESIZE, seg, addr, S_READ, svd->cred);
				if (error != 0)
					return (error);

				pp = anon_private(&newap, newseg, addr, prot,
				    anon_pl[0], 0, newsvd->cred);
			}

			(void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
			    ANON_SLEEP);
		}
	}
static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	size_t len;
	int error = 0;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dieing during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->svn_inz = svd->svn_inz;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
			    svd->svn_inz == SEGVN_INZ_ALL ||
			    svd->svn_inz == SEGVN_INZ_VPP);

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 *
			 * In addition, if any pages have been marked that they
			 * should be inherited as zero, then we immediately go
			 * ahead and break COW and zero them. In the case of a
			 * softlocked page that should be inherited zero, we
			 * break COW and just get a zero page.
			 */
			if (svd->softlockcnt ||
			    svd->svn_inz != SEGVN_INZ_NONE) {
				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (svd->softlockcnt && reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
				}

				error = segvn_dup_pages(seg, newseg);
				if (error != 0) {
					newsvd->vpage = NULL;
					goto out;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = fop_addmap(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}
/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, uoff_t r_objoff)
{
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(&vp->v_object, off, len);
}

/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg		*seg = cb->hcb_data;
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(&svd->vp->v_object, svd->offset + off, len);
}
/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}
static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);

		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
		}
	}

	/* Inform the vnode of the unmapping. */
	if (svd->vp != NULL) {
		error = fop_delmap(svd->vp,
		    (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
		    seg->s_as, addr, len, svd->prot, svd->maxprot,
		    svd->type, svd->cred, NULL);

		if (error == EAGAIN)
			return (error);
	}

	/*
	 * Remove any page locks set through this mapping.
	 * If text replication is not off no page locks could have been
	 * established via this mapping.
	 */
	if (svd->tr_state == SEGVN_TR_OFF) {
		(void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
	}

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->amp == NULL);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		ASSERT(svd->type == MAP_PRIVATE);
		hat_leave_region(seg->s_as->a_hat, svd->rcookie,
		    HAT_REGION_TEXT);
		svd->rcookie = HAT_INVALID_REGION_COOKIE;
	} else if (svd->tr_state == SEGVN_TR_ON) {
		ASSERT(svd->amp != NULL);
		ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
		segvn_textunrepl(seg, 1);
		ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
	} else {
		if (svd->tr_state != SEGVN_TR_OFF) {
			ASSERT(svd->tr_state == SEGVN_TR_INIT);
			svd->tr_state = SEGVN_TR_OFF;
		}
		/*
		 * Unload any hardware translations in the range to be taken
		 * out. Use a callback to invoke free_vp_pages() effectively.
		 */
		if (svd->vp != NULL && free_pages != 0) {
			callback.hcb_data = seg;
			callback.hcb_function = segvn_hat_unload_callback;
			cbp = &callback;
		}
		hat_unload_callback(seg->s_as->a_hat, addr, len,
		    HAT_UNLOAD_UNMAP, cbp);

		if (svd->type == MAP_SHARED && svd->vp != NULL &&
		    (svd->vp->v_flag & VVMEXEC) &&
		    ((svd->prot & PROT_WRITE) || svd->pageprot)) {
			segvn_inval_trcache(svd->vp);
		}
	}

	/*
	 * Check for entire segment
	 */
	if (addr == seg->s_base && len == seg->s_size) {
		seg_free(seg);
		return (0);
	}

	opages = seg_pages(seg);
	dpages = btop(len);
	npages = opages - dpages;
	amp = svd->amp;
	ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);

	/*
	 * Check for beginning of segment
	 */
	if (addr == seg->s_base) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(&ovpage[dpages], svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
				}
				/*
				 * Free up now unused parts of anon_map array.
				 */
				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    svd->anon_index, len,
						    seg->s_szc);
					} else {
						anon_free(amp->ahp,
						    svd->anon_index, len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    svd->anon_index, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index += dpages;
		}
		if (svd->vp != NULL)
			svd->offset += len;

		if (svd->flags & MAP_NORESERVE) {
			if (amp) {
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			}
		} else {
			if (svd->pageswap) {
				oswresv = svd->swresv;
				svd->swresv =
				    segvn_count_swap_by_vpages(seg);
				ASSERT(oswresv >= svd->swresv);
				unlen = oswresv - svd->swresv;
			} else {
				ASSERT(svd->swresv == seg->s_size);
			}
			anon_unresv_zone(unlen,
			    seg->s_as->a_proc->p_zone);
		}

		return (0);
	}

	/*
	 * Check for end of segment
	 */
	if (addr + len == seg->s_base + seg->s_size) {
		if (svd->vpage != NULL) {
			size_t nbytes;
			struct vpage *ovpage;

			ovpage = svd->vpage;	/* keep pointer to vpage */

			nbytes = vpgtob(npages);
			svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
			bcopy(ovpage, svd->vpage, nbytes);

			/* free up old vpage */
			kmem_free(ovpage, vpgtob(opages));
		}
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
				/*
				 * Free up now unused parts of anon_map array.
				 */
				ulong_t an_idx = svd->anon_index + npages;

				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				if (svd->type == MAP_SHARED) {
					ASSERT(amp->refcnt == 1);
					ASSERT(svd->softlockcnt == 0);
				}

				if (amp->a_szc == seg->s_szc) {
					if (seg->s_szc != 0) {
						anon_free_pages(amp->ahp,
						    an_idx, len, seg->s_szc);
					} else {
						anon_free(amp->ahp, an_idx,
						    len);
					}
				} else {
					ASSERT(svd->type == MAP_SHARED);
					ASSERT(amp->a_szc > seg->s_szc);
					anon_shmap_free_pages(amp,
					    an_idx, len);
				}

				/*
				 * Unreserve swap space for the
				 * unmapped chunk of this segment in
				 * case it's MAP_SHARED
				 */
				if (svd->type == MAP_SHARED) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}

		if (svd->flags & MAP_NORESERVE) {
			if (amp) {
				oswresv = svd->swresv;
				svd->swresv = ptob(anon_pages(amp->ahp,
				    svd->anon_index, npages));
				anon_unresv_zone(oswresv - svd->swresv,
				    seg->s_as->a_proc->p_zone);
				if (SEG_IS_PARTIAL_RESV(seg))
					seg->s_as->a_resvsize -= oswresv -
					    svd->swresv;
			}
		} else {
			if (svd->pageswap) {
				oswresv = svd->swresv;
				svd->swresv =
				    segvn_count_swap_by_vpages(seg);
				ASSERT(oswresv >= svd->swresv);
				unlen = oswresv - svd->swresv;
			} else {
				ASSERT(svd->swresv == seg->s_size);
			}
			anon_unresv_zone(unlen,
			    seg->s_as->a_proc->p_zone);
		}

		return (0);
	}

	/*
	 * The section to go is in the middle of the segment,
	 * have to make it into two segments.  nseg is made for
	 * the high end while seg is cut down at the low end.
	 */
	nbase = addr + len;				/* new seg base */
	nsize = (seg->s_base + seg->s_size) - nbase;	/* new seg size */
	seg->s_size = addr - seg->s_base;		/* shrink old seg */
	nseg = seg_alloc(seg->s_as, nbase, nsize);
	if (nseg == NULL) {
		panic("segvn_unmap seg_alloc");
	}
	nseg->s_ops = seg->s_ops;
	nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
	nseg->s_data = (void *)nsvd;
	nseg->s_szc = seg->s_szc;
	*nsvd = *svd;
	nsvd->seg = nseg;
	nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
	nsvd->softlockcnt = 0;
	nsvd->softlockcnt_sbase = 0;
	nsvd->softlockcnt_send = 0;
	nsvd->svn_inz = svd->svn_inz;
	ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);

	if (svd->vp != NULL) {
		VN_HOLD(nsvd->vp);
		if (nsvd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, nsvd->vp);
	}
	crhold(svd->cred);

	if (svd->vpage == NULL) {
		nsvd->vpage = NULL;
	} else {
		/* need to split vpage into two arrays */
		size_t nbytes;
		struct vpage *ovpage;

		ovpage = svd->vpage;		/* keep pointer to vpage */

		npages = seg_pages(seg);	/* seg has shrunk */
		nbytes = vpgtob(npages);
		svd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(ovpage, svd->vpage, nbytes);

		npages = seg_pages(nseg);
		nbytes = vpgtob(npages);
		nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);

		bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);

		/* free up old vpage */
		kmem_free(ovpage, vpgtob(opages));
	}

	if (amp == NULL) {
		nsvd->anon_index = 0;
	} else {
		/*
		 * Need to create a new anon map for the new segment.
		 * We'll also allocate a new smaller array for the old
		 * smaller segment to save space.
		 */
		opages = btop((uintptr_t)(addr - seg->s_base));
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
			/*
			 * Free up now unused parts of anon_map array.
			 */
			ulong_t an_idx = svd->anon_index + opages;
;
2282 * Shared anon map is no longer in use. Before
2283 * freeing its pages purge all entries from
2284 * pcache that belong to this amp.
2286 if (svd
->type
== MAP_SHARED
) {
2287 ASSERT(amp
->refcnt
== 1);
2288 ASSERT(svd
->softlockcnt
== 0);
2292 if (amp
->a_szc
== seg
->s_szc
) {
2293 if (seg
->s_szc
!= 0) {
2294 anon_free_pages(amp
->ahp
, an_idx
, len
,
2297 anon_free(amp
->ahp
, an_idx
,
2301 ASSERT(svd
->type
== MAP_SHARED
);
2302 ASSERT(amp
->a_szc
> seg
->s_szc
);
2303 anon_shmap_free_pages(amp
, an_idx
, len
);
2307 * Unreserve swap space for the
2308 * unmapped chunk of this segment in
2309 * case it's MAP_SHARED
2311 if (svd
->type
== MAP_SHARED
) {
2312 anon_unresv_zone(len
,
2313 seg
->s_as
->a_proc
->p_zone
);
2317 nsvd
->anon_index
= svd
->anon_index
+
2318 btop((uintptr_t)(nseg
->s_base
- seg
->s_base
));
2319 if (svd
->type
== MAP_SHARED
) {
2323 struct anon_map
*namp
;
2324 struct anon_hdr
*nahp
;
2326 ASSERT(svd
->type
== MAP_PRIVATE
);
2327 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
2328 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
2329 namp
->a_szc
= seg
->s_szc
;
2330 (void) anon_copy_ptr(amp
->ahp
, svd
->anon_index
, nahp
,
2331 0, btop(seg
->s_size
), ANON_SLEEP
);
2332 (void) anon_copy_ptr(amp
->ahp
, nsvd
->anon_index
,
2333 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
2334 anon_release(amp
->ahp
, btop(amp
->size
));
2335 svd
->anon_index
= 0;
2336 nsvd
->anon_index
= 0;
2338 amp
->size
= seg
->s_size
;
2341 ANON_LOCK_EXIT(&
->a_rwlock
);
2344 if (svd
->flags
& MAP_NORESERVE
) {
2346 oswresv
= svd
->swresv
;
2347 svd
->swresv
= ptob(anon_pages(amp
->ahp
,
2348 svd
->anon_index
, btop(seg
->s_size
)));
2349 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
2350 nsvd
->anon_index
, btop(nseg
->s_size
)));
2351 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2352 anon_unresv_zone(oswresv
- (svd
->swresv
+ nsvd
->swresv
),
2353 seg
->s_as
->a_proc
->p_zone
);
2354 if (SEG_IS_PARTIAL_RESV(seg
))
2355 seg
->s_as
->a_resvsize
-= oswresv
-
2356 (svd
->swresv
+ nsvd
->swresv
);
2360 if (svd
->pageswap
) {
2361 oswresv
= svd
->swresv
;
2362 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
2363 nsvd
->swresv
= segvn_count_swap_by_vpages(nseg
);
2364 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
2365 unlen
= oswresv
- (svd
->swresv
+ nsvd
->swresv
);
2367 if (seg
->s_size
+ nseg
->s_size
+ len
!=
2369 panic("segvn_unmap: cannot split "
2370 "swap reservation");
2373 svd
->swresv
= seg
->s_size
;
2374 nsvd
->swresv
= nseg
->s_size
;
2377 anon_unresv_zone(unlen
,
2378 seg
->s_as
->a_proc
->p_zone
);
2382 return (0); /* I'm glad that's all over with! */
static void
segvn_free(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t npages = seg_pages(seg);
	struct anon_map *amp;
	size_t len;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
	ASSERT(svd->tr_state == SEGVN_TR_OFF);

	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

	/*
	 * Be sure to unlock pages. XXX Why do things get free'ed instead
	 * of cleaned up?
	 */
	(void) segvn_lockop(seg, seg->s_base, seg->s_size,
	    0, MC_UNLOCK, NULL, 0);

	/*
	 * Deallocate the vpage and anon pointers if necessary and possible.
	 */
	if (svd->vpage != NULL) {
		kmem_free(svd->vpage, vpgtob(npages));
		svd->vpage = NULL;
	}
	if ((amp = svd->amp) != NULL) {
		/*
		 * If there are no more references to this anon_map
		 * structure, then deallocate the structure after freeing
		 * up all the anon slot pointers that we can.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		ASSERT(amp->a_szc >= seg->s_szc);
		if (--amp->refcnt == 0) {
			if (svd->type == MAP_PRIVATE) {
				/*
				 * Private - we only need to anon_free
				 * the part that this segment refers to.
				 */
				if (seg->s_szc != 0) {
					anon_free_pages(amp->ahp,
					    svd->anon_index, seg->s_size,
					    seg->s_szc);
				} else {
					anon_free(amp->ahp, svd->anon_index,
					    seg->s_size);
				}
			} else {
				/*
				 * Shared anon map is no longer in use. Before
				 * freeing its pages purge all entries from
				 * pcache that belong to this amp.
				 */
				ASSERT(svd->softlockcnt == 0);
				anonmap_purge(amp);

				/*
				 * Shared - anon_free the entire
				 * anon_map's worth of stuff and
				 * release any swap reservation.
				 */
				if (amp->a_szc != 0) {
					anon_shmap_free_pages(amp, 0,
					    amp->size);
				} else {
					anon_free(amp->ahp, 0, amp->size);
				}
				if ((len = amp->swresv) != 0) {
					anon_unresv_zone(len,
					    seg->s_as->a_proc->p_zone);
				}
			}
			svd->amp = NULL;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			anonmap_free(amp);
		} else if (svd->type == MAP_PRIVATE) {
			/*
			 * We had a private mapping which still has
			 * a held anon_map so just free up all the
			 * anon slot pointers that we were using.
			 */
			if (seg->s_szc != 0) {
				anon_free_pages(amp->ahp, svd->anon_index,
				    seg->s_size, seg->s_szc);
			} else {
				anon_free(amp->ahp, svd->anon_index,
				    seg->s_size);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Release swap reservation.
	 */
	if ((len = svd->swresv) != 0) {
		anon_unresv_zone(svd->swresv,
		    seg->s_as->a_proc->p_zone);
		if (SEG_IS_PARTIAL_RESV(seg))
			seg->s_as->a_resvsize -= svd->swresv;
		svd->swresv = 0;
	}
	/*
	 * Release claim on vnode, credentials, and finally free the
	 * private data.
	 */
	if (svd->vp != NULL) {
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_fini(NULL, svd->vp);
		VN_RELE(svd->vp);
		svd->vp = NULL;
	}
	crfree(svd->cred);
	svd->pageprot = 0;
	svd->pageadvice = 0;
	svd->pageswap = 0;
	svd->cred = NULL;

	/*
	 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
	 * still working with this segment without holding as lock (in case
	 * it's called by pcache async thread).
	 */
	ASSERT(svd->softlockcnt == 0);
	mutex_enter(&svd->segfree_syncmtx);
	mutex_exit(&svd->segfree_syncmtx);

	seg->s_data = NULL;
	kmem_cache_free(segvn_cache, svd);
}
/*
 * Do a F_SOFTUNLOCK call over the range requested.  The range must have
 * already been F_SOFTLOCK'ed.
 * Caller must always match addr and len of a softunlock with a previous
 * softlock with exactly the same addr and len.
 */
static void
segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	uoff_t offset;
	ulong_t anon_index;
	struct anon_map *amp;
	struct anon *ap = NULL;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if ((amp = svd->amp) != NULL)
		anon_index = svd->anon_index + seg_page(seg, addr);

	if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
	} else {
		hat_unlock(seg->s_as->a_hat, addr, len);
	}
	for (adr = addr; adr < addr + len; adr += PAGESIZE) {
		if (amp != NULL) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			if ((ap = anon_get_ptr(amp->ahp, anon_index++))
			    != NULL) {
				swap_xlate(ap, &vp, &offset);
			} else {
				vp = svd->vp;
				offset = svd->offset +
				    (uintptr_t)(adr - seg->s_base);
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
		} else {
			vp = svd->vp;
			offset = svd->offset +
			    (uintptr_t)(adr - seg->s_base);
		}

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it is locked.
		 */
		pp = page_find(&vp->v_object, offset);
		if (pp == NULL) {
			panic(
			    "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
			    (void *)adr, (void *)ap, (void *)vp, offset);
			/*NOTREACHED*/
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE,
				    P_REF | P_MOD);
		} else if (rw != S_OTHER) {
			hat_setref(pp);
			if (seg->s_as->a_vbits)
				hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
		}
		page_unlock(pp);
	}
	ASSERT(svd->softlockcnt >= btop(len));
	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 * softunlocks.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
}
#define	PAGE_HANDLED	((page_t *)-1)

/*
 * Release all the pages in the NULL terminated ppp list
 * which haven't already been converted to PAGE_HANDLED.
 */
static void
segvn_pagelist_rele(page_t **ppp)
{
	for (; *ppp != NULL; ppp++) {
		if (*ppp != PAGE_HANDLED)
			page_unlock(*ppp);
	}
}

static int stealcow = 1;

/*
 * Workaround for viking chip bug.  See bug id 1220902.
 * To fix this down in pagefault() would require importing so
 * much as and segvn code as to be unmaintainable.
 */
int enable_mbit_wa = 0;
2642 * Handles all the dirty work of getting the right
2643 * anonymous pages and loading up the translations.
2644 * This routine is called only from segvn_fault()
2645 * when looping over the range of addresses requested.
2647 * The basic algorithm here is:
2648 * If this is an anon_zero case
2649 * Call anon_zero to allocate page
2650 * Load up translation
2653 * If this is an anon page
2654 * Use anon_getpage to get the page
2656 * Find page in pl[] list passed in
2659 * Load up the translation to the page
2662 * Call anon_private to handle cow
2663 * Load up (writable) translation to new page
2667 struct hat
*hat
, /* the hat to use for mapping */
2668 struct seg
*seg
, /* seg_vn of interest */
2669 caddr_t addr
, /* address in as */
2670 uoff_t off
, /* offset in vp */
2671 struct vpage
*vpage
, /* pointer to vpage for vp, off */
2672 page_t
*pl
[], /* object source page pointer */
2673 uint_t vpprot
, /* access allowed to object pages */
2674 enum fault_type type
, /* type of fault */
2675 enum seg_rw rw
, /* type of access at fault */
2676 int brkcow
) /* we may need to break cow */
2678 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
2680 uint_t pageflags
= 0;
2681 page_t
*anon_pl
[1 + 1];
2682 page_t
*opp
= NULL
; /* original page */
2689 struct anon
*ap
, *oldap
;
2690 struct anon_map
*amp
;
2691 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
2693 anon_sync_obj_t cookie
;
2695 if (svd
->flags
& MAP_TEXT
) {
2696 hat_flag
|= HAT_LOAD_TEXT
;
2699 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
2700 ASSERT(seg
->s_szc
== 0);
2701 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
2704 * Initialize protection value for this page.
2705 * If we have per page protection values check it now.
2707 if (svd
->pageprot
) {
2712 protchk
= PROT_READ
;
2715 protchk
= PROT_WRITE
;
2718 protchk
= PROT_EXEC
;
2722 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
2726 prot
= VPP_PROT(vpage
);
2727 if ((prot
& protchk
) == 0)
2728 return (FC_PROT
); /* illegal access type */
2733 if (type
== F_SOFTLOCK
) {
2734 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt
);
2738 * Always acquire the anon array lock to prevent 2 threads from
2739 * allocating separate anon slots for the same "addr".
2742 if ((amp
= svd
->amp
) != NULL
) {
2743 ASSERT(RW_READ_HELD(&
->a_rwlock
));
2744 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
2745 anon_array_enter(amp
, anon_index
, &cookie
);
2749 if (svd
->vp
== NULL
&& amp
!= NULL
) {
2750 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
) {
2752 * Allocate a (normally) writable anonymous page of
2753 * zeroes. If no advance reservations, reserve now.
2755 if (svd
->flags
& MAP_NORESERVE
) {
2756 if (anon_resv_zone(ptob(1),
2757 seg
->s_as
->a_proc
->p_zone
)) {
2758 atomic_add_long(&svd
->swresv
, ptob(1));
2759 atomic_add_long(&seg
->s_as
->a_resvsize
,
2766 if ((pp
= anon_zero(seg
, addr
, &ap
,
2767 svd
->cred
)) == NULL
) {
2769 goto out
; /* out of swap space */
2772 * Re-acquire the anon_map lock and
2773 * initialize the anon array entry.
2775 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
,
2778 ASSERT(pp
->p_szc
== 0);
2781 * Handle pages that have been marked for migration
2783 if (lgrp_optimizations())
2784 page_migrate(seg
, addr
, &pp
, 1);
2786 if (enable_mbit_wa
) {
2789 else if (!hat_ismod(pp
))
2790 prot
&= ~PROT_WRITE
;
2793 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2794 * with MC_LOCKAS, MCL_FUTURE) and this is a
2795 * MAP_NORESERVE segment, we may need to
2796 * permanently lock the page as it is being faulted
2797 * for the first time. The following text applies
2798 * only to MAP_NORESERVE segments:
2800 * As per memcntl(2), if this segment was created
2801 * after MCL_FUTURE was applied (a "future"
2802 * segment), its pages must be locked. If this
2803 * segment existed at MCL_FUTURE application (a
2804 * "past" segment), the interface is unclear.
2806 * We decide to lock only if vpage is present:
2808 * - "future" segments will have a vpage array (see
2809 * as_map), and so will be locked as required
2811 * - "past" segments may not have a vpage array,
2812 * depending on whether events (such as
2813 * mprotect) have occurred. Locking if vpage
2814 * exists will preserve legacy behavior. Not
2815 * locking if vpage is absent, will not break
2816 * the interface or legacy behavior. Note that
2817 * allocating vpage here if it's absent requires
2818 * upgrading the segvn reader lock, the cost of
2819 * which does not seem worthwhile.
2821 * Usually testing and setting VPP_ISPPLOCK and
2822 * VPP_SETPPLOCK requires holding the segvn lock as
2823 * writer, but in this case all readers are
2824 * serializing on the anon array lock.
2826 if (AS_ISPGLCK(seg
->s_as
) && vpage
!= NULL
&&
2827 (svd
->flags
& MAP_NORESERVE
) &&
2828 !VPP_ISPPLOCK(vpage
)) {
2829 proc_t
*p
= seg
->s_as
->a_proc
;
2830 ASSERT(svd
->type
== MAP_PRIVATE
);
2831 mutex_enter(&p
->p_lock
);
2832 if (rctl_incr_locked_mem(p
, NULL
, PAGESIZE
,
2834 claim
= VPP_PROT(vpage
) & PROT_WRITE
;
2835 if (page_pp_lock(pp
, claim
, 0)) {
2836 VPP_SETPPLOCK(vpage
);
2838 rctl_decr_locked_mem(p
, NULL
,
2842 mutex_exit(&p
->p_lock
);
2845 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2846 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
2848 if (!(hat_flag
& HAT_LOAD_LOCK
))
2851 anon_array_exit(&cookie
);
2857 * Obtain the page structure via anon_getpage() if it is
2858 * a private copy of an object (the result of a previous
2862 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) != NULL
) {
2863 err
= anon_getpage(&ap
, &vpprot
, anon_pl
, PAGESIZE
,
2864 seg
, addr
, rw
, svd
->cred
);
2868 if (svd
->type
== MAP_SHARED
) {
2870 * If this is a shared mapping to an
2871 * anon_map, then ignore the write
2872 * permissions returned by anon_getpage().
2873 * They apply to the private mappings
2876 vpprot
|= PROT_WRITE
;
2883 * Search the pl[] list passed in if it is from the
2884 * original object (i.e., not a private copy).
2888 * Find original page. We must be bringing it in
2889 * from the list in pl[].
2891 for (ppp
= pl
; (opp
= *ppp
) != NULL
; ppp
++) {
2892 if (opp
== PAGE_HANDLED
)
2894 VERIFY(opp
->p_object
== &svd
->vp
->v_object
); /* XXX */
2895 ASSERT(opp
->p_vnode
== svd
->vp
); /* XXX */
2896 if (opp
->p_offset
== off
)
2900 panic("segvn_faultpage not found");
2903 *ppp
= PAGE_HANDLED
;
2907 ASSERT(PAGE_LOCKED(opp
));
2910 * The fault is treated as a copy-on-write fault if a
2911 * write occurs on a private segment and the object
2912 * page (i.e., mapping) is write protected. We assume
2913 * that fatal protection checks have already been made.
2917 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
2918 cow
= !(vpprot
& PROT_WRITE
);
2919 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
2921 * If we are doing text replication COW on first touch.
2923 ASSERT(amp
!= NULL
);
2924 ASSERT(svd
->vp
!= NULL
);
2925 ASSERT(rw
!= S_WRITE
);
2932 * If not a copy-on-write case load the translation
2938 * Handle pages that have been marked for migration
2940 if (lgrp_optimizations())
2941 page_migrate(seg
, addr
, &opp
, 1);
2943 if (IS_VMODSORT(opp
->p_vnode
) || enable_mbit_wa
) {
2946 else if (rw
!= S_OTHER
&& !hat_ismod(opp
))
2947 prot
&= ~PROT_WRITE
;
2950 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
2951 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
2952 ASSERT(amp
== NULL
||
2953 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2954 hat_memload_region(hat
, addr
, opp
, prot
& vpprot
, hat_flag
,
2957 if (!(hat_flag
& HAT_LOAD_LOCK
))
2961 anon_array_exit(&cookie
);
2966 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
2970 ASSERT(amp
!= NULL
&& anon_lock
);
2973 * Steal the page only if it isn't a private page
2974 * since stealing a private page is not worth the effort.
2976 if ((ap
= anon_get_ptr(amp
->ahp
, anon_index
)) == NULL
)
2980 * Steal the original page if the following conditions are true:
2982 * We are low on memory, the page is not private, page is not large,
2983 * not shared, not modified, not `locked' or if we have it `locked'
2984 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2985 * that the page is not shared) and if it doesn't have any
2986 * translations. page_struct_lock isn't needed to look at p_cowcnt
2987 * and p_lckcnt because we first get exclusive lock on page.
2989 (void) hat_pagesync(opp
, HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
);
2991 if (stealcow
&& freemem
< minfree
&& steal
&& opp
->p_szc
== 0 &&
2992 page_tryupgrade(opp
) && !hat_ismod(opp
) &&
2993 ((opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 0) ||
2994 (opp
->p_lckcnt
== 0 && opp
->p_cowcnt
== 1 &&
2995 vpage
!= NULL
&& VPP_ISPPLOCK(vpage
)))) {
2997 * Check if this page has other translations
2998 * after unloading our translation.
3000 if (hat_page_is_mapped(opp
)) {
3001 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3002 hat_unload(seg
->s_as
->a_hat
, addr
, PAGESIZE
,
3007 * hat_unload() might sync back someone else's recent
3008 * modification, so check again.
3010 if (!hat_ismod(opp
) && !hat_page_is_mapped(opp
))
3011 pageflags
|= STEAL_PAGE
;
3015 * If we have a vpage pointer, see if it indicates that we have
3016 * ``locked'' the page we map -- if so, tell anon_private to
3017 * transfer the locking resource to the new page.
3019 * See Statement at the beginning of segvn_lockop regarding
3020 * the way lockcnts/cowcnts are handled during COW.
3023 if (vpage
!= NULL
&& VPP_ISPPLOCK(vpage
))
3024 pageflags
|= LOCK_PAGE
;
3027 * Allocate a private page and perform the copy.
3028 * For MAP_NORESERVE reserve swap space now, unless this
3029 * is a cow fault on an existing anon page in which case
3030 * MAP_NORESERVE will have made advance reservations.
3032 if ((svd
->flags
& MAP_NORESERVE
) && (ap
== NULL
)) {
3033 if (anon_resv_zone(ptob(1), seg
->s_as
->a_proc
->p_zone
)) {
3034 atomic_add_long(&svd
->swresv
, ptob(1));
3035 atomic_add_long(&seg
->s_as
->a_resvsize
, ptob(1));
3043 pp
= anon_private(&ap
, seg
, addr
, prot
, opp
, pageflags
, svd
->cred
);
3045 err
= ENOMEM
; /* out of swap space */
3050 * If we copied away from an anonymous page, then
3051 * we are one step closer to freeing up an anon slot.
3053 * NOTE: The original anon slot must be released while
3054 * holding the "anon_map" lock. This is necessary to prevent
3055 * other threads from obtaining a pointer to the anon slot
3056 * which may be freed if its "refcnt" is 1.
3061 (void) anon_set_ptr(amp
->ahp
, anon_index
, ap
, ANON_SLEEP
);
3064 * Handle pages that have been marked for migration
3066 if (lgrp_optimizations())
3067 page_migrate(seg
, addr
, &pp
, 1);
3069 ASSERT(pp
->p_szc
== 0);
3071 ASSERT(!IS_VMODSORT(pp
->p_vnode
));
3072 if (enable_mbit_wa
) {
3075 else if (!hat_ismod(pp
))
3076 prot
&= ~PROT_WRITE
;
3079 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
3080 hat_memload(hat
, addr
, pp
, prot
, hat_flag
);
3082 if (!(hat_flag
& HAT_LOAD_LOCK
))
3086 anon_array_exit(&cookie
);
3090 anon_array_exit(&cookie
);
3092 if (type
== F_SOFTLOCK
) {
3093 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt
);
3095 return (FC_MAKE_ERR(err
));
/*
 * relocate a bunch of smaller targ pages into one large repl page. all targ
 * pages must be complete pages smaller than replacement pages.
 * it's assumed that no page's szc can change since they are all PAGESIZE or
 * complete large pages locked SHARED.
 */
static void
segvn_relocate_pages(page_t **targ, page_t *replacement)
{
	page_t *pp;
	pgcnt_t repl_npgs, curnpgs;
	pgcnt_t i;
	uint_t repl_szc = replacement->p_szc;
	page_t *first_repl = replacement;
	page_t *repl;
	spgcnt_t npgs;

	VM_STAT_ADD(segvnvmstats.relocatepages[0]);

	ASSERT(repl_szc != 0);
	npgs = repl_npgs = page_get_pagecnt(repl_szc);

	i = 0;
	while (repl_npgs) {
		spgcnt_t nreloc;
		int err;

		ASSERT(replacement != NULL);
		pp = targ[i];
		ASSERT(pp->p_szc < repl_szc);
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		curnpgs = page_get_pagecnt(pp->p_szc);
		if (curnpgs == 1) {
			VM_STAT_ADD(segvnvmstats.relocatepages[1]);
			repl = replacement;
			page_sub(&replacement, repl);
			ASSERT(PAGE_EXCL(repl));
			ASSERT(!PP_ISFREE(repl));
			ASSERT(repl->p_szc == repl_szc);
		} else {
			page_t *repl_savepp;
			int j;

			VM_STAT_ADD(segvnvmstats.relocatepages[2]);
			repl_savepp = replacement;
			for (j = 0; j < curnpgs; j++) {
				repl = replacement;
				page_sub(&replacement, repl);
				ASSERT(PAGE_EXCL(repl));
				ASSERT(!PP_ISFREE(repl));
				ASSERT(repl->p_szc == repl_szc);
				ASSERT(page_pptonum(targ[i + j]) ==
				    page_pptonum(targ[i]) + j);
			}
			repl = repl_savepp;
			ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
		}
		err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
		if (err || nreloc != curnpgs) {
			panic("segvn_relocate_pages: "
			    "page_relocate failed err=%d curnpgs=%ld "
			    "nreloc=%ld", err, curnpgs, nreloc);
		}
		ASSERT(curnpgs <= repl_npgs);
		repl_npgs -= curnpgs;
		i += curnpgs;
	}
	ASSERT(replacement == NULL);

	repl = first_repl;
	repl_npgs = npgs;
	for (i = 0; i < repl_npgs; i++) {
		ASSERT(PAGE_EXCL(repl));
		ASSERT(!PP_ISFREE(repl));
		targ[i] = repl;
		page_downgrade(targ[i]);
		repl++;
	}
}
3178 * Check if all pages in ppa array are complete smaller than szc pages and
3179 * their roots will still be aligned relative to their current size if the
3180 * entire ppa array is relocated into one szc page. If these conditions are
3183 * If all pages are properly aligned attempt to upgrade their locks
3184 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3185 * upgrdfail was set to 0 by caller.
3187 * Return 1 if all pages are aligned and locked exclusively.
3189 * If all pages in ppa array happen to be physically contiguous to make one
3190 * szc page and all exclusive locks are successfully obtained promote the page
3191 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3194 segvn_full_szcpages(page_t
**ppa
, uint_t szc
, int *upgrdfail
, uint_t
*pszc
)
3198 pgcnt_t totnpgs
= page_get_pagecnt(szc
);
3209 VM_STAT_ADD(segvnvmstats
.fullszcpages
[0]);
3211 for (i
= 0; i
< totnpgs
; i
++) {
3213 ASSERT(PAGE_SHARED(pp
));
3214 ASSERT(!PP_ISFREE(pp
));
3215 pfn
= page_pptonum(pp
);
3217 if (!IS_P2ALIGNED(pfn
, totnpgs
)) {
3222 } else if (contig
&& pfn
!= first_pfn
+ i
) {
3225 if (pp
->p_szc
== 0) {
3227 VM_STAT_ADD(segvnvmstats
.fullszcpages
[1]);
3231 if ((curszc
= pp
->p_szc
) >= szc
) {
3232 VM_STAT_ADD(segvnvmstats
.fullszcpages
[2]);
3237 * p_szc changed means we don't have all pages
3238 * locked. return failure.
3240 VM_STAT_ADD(segvnvmstats
.fullszcpages
[3]);
3243 curnpgs
= page_get_pagecnt(curszc
);
3244 if (!IS_P2ALIGNED(pfn
, curnpgs
) ||
3245 !IS_P2ALIGNED(i
, curnpgs
)) {
3246 VM_STAT_ADD(segvnvmstats
.fullszcpages
[4]);
3252 VM_STAT_ADD(segvnvmstats
.fullszcpages
[5]);
3253 if (pp
->p_szc
!= curszc
) {
3254 VM_STAT_ADD(segvnvmstats
.fullszcpages
[6]);
3257 if (pfn
- 1 != page_pptonum(ppa
[i
- 1])) {
3258 panic("segvn_full_szcpages: "
3259 "large page not physically contiguous");
3261 if (P2PHASE(pfn
, curnpgs
) == curnpgs
- 1) {
3267 for (i
= 0; i
< totnpgs
; i
++) {
3268 ASSERT(ppa
[i
]->p_szc
< szc
);
3269 if (!page_tryupgrade(ppa
[i
])) {
3270 for (j
= 0; j
< i
; j
++) {
3271 page_downgrade(ppa
[j
]);
3273 *pszc
= ppa
[i
]->p_szc
;
3275 VM_STAT_ADD(segvnvmstats
.fullszcpages
[7]);
3281 * When a page is put a free cachelist its szc is set to 0. if file
3282 * system reclaimed pages from cachelist targ pages will be physically
3283 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3284 * pages without any relocations.
3285 * To avoid any hat issues with previous small mappings
3286 * hat_pageunload() the target pages first.
3289 VM_STAT_ADD(segvnvmstats
.fullszcpages
[8]);
3290 for (i
= 0; i
< totnpgs
; i
++) {
3291 (void) hat_pageunload(ppa
[i
], HAT_FORCE_PGUNLOAD
);
3293 for (i
= 0; i
< totnpgs
; i
++) {
3294 ppa
[i
]->p_szc
= szc
;
3296 for (i
= 0; i
< totnpgs
; i
++) {
3297 ASSERT(PAGE_EXCL(ppa
[i
]));
3298 page_downgrade(ppa
[i
]);
3304 VM_STAT_ADD(segvnvmstats
.fullszcpages
[9]);
3309 * Create physically contiguous pages for [vp, off] - [vp, off +
3310 * page_size(szc)) range and for private segment return them in ppa array.
3311 * Pages are created either via IO or relocations.
3313 * Return 1 on success and 0 on failure.
3315 * If physically contiguous pages already exist for this range return 1 without
3316 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3317 * array wasn't filled. In this case caller fills ppa array via fop_getpage().
3321 segvn_fill_vp_pages(struct segvn_data
*svd
, vnode_t
*vp
, uoff_t off
,
3322 uint_t szc
, page_t
**ppa
, page_t
**ppplist
, uint_t
*ret_pszc
,
3326 page_t
*pplist
= *ppplist
;
3327 size_t pgsz
= page_get_pagesize(szc
);
3328 pgcnt_t pages
= btop(pgsz
);
3329 ulong_t start_off
= off
;
3330 uoff_t eoff
= off
+ pgsz
;
3332 uoff_t io_off
= off
;
3334 page_t
*io_pplist
= NULL
;
3335 page_t
*done_pplist
= NULL
;
3344 page_t
*targ_pplist
= NULL
;
3345 page_t
*repl_pplist
= NULL
;
3351 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[0]);
3354 ASSERT(pplist
->p_szc
== szc
);
3357 * downsize will be set to 1 only if we fail to lock pages. this will
3358 * allow subsequent faults to try to relocate the page again. If we
3359 * fail due to misalignment don't downsize and let the caller map the
3360 * whole region with small mappings to avoid more faults into the area
3361 * where we can't get large pages anyway.
3365 while (off
< eoff
) {
3367 ASSERT(newpp
!= NULL
);
3368 ASSERT(PAGE_EXCL(newpp
));
3369 ASSERT(!PP_ISFREE(newpp
));
3371 * we pass NULL for nrelocp to page_lookup_create()
3372 * so that it doesn't relocate. We relocate here
3373 * later only after we make sure we can lock all
3374 * pages in the range we handle and they are all
3377 pp
= page_lookup_create(&vp
->v_object
, off
, SE_SHARED
, newpp
,
3380 ASSERT(!PP_ISFREE(pp
));
3381 VERIFY(pp
->p_object
== &vp
->v_object
);
3382 ASSERT(pp
->p_vnode
== vp
);
3383 ASSERT(pp
->p_offset
== off
);
3385 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[1]);
3386 page_sub(&pplist
, pp
);
3387 ASSERT(PAGE_EXCL(pp
));
3388 ASSERT(page_iolock_assert(pp
));
3389 page_list_concat(&io_pplist
, &pp
);
3393 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[2]);
3394 pfn
= page_pptonum(pp
);
3396 if (pszc
>= szc
&& targ_pplist
== NULL
&& io_pplist
== NULL
&&
3397 IS_P2ALIGNED(pfn
, pages
)) {
3398 ASSERT(repl_pplist
== NULL
);
3399 ASSERT(done_pplist
== NULL
);
3400 ASSERT(pplist
== *ppplist
);
3402 page_free_replacement_page(pplist
);
3403 page_create_putback(pages
);
3405 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[3]);
3410 segvn_faultvnmpss_align_err1
++;
3413 ppages
= page_get_pagecnt(pszc
);
3414 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3417 * sizing down to pszc won't help.
3420 segvn_faultvnmpss_align_err2
++;
3423 pfn
= page_pptonum(newpp
);
3424 if (!IS_P2ALIGNED(pfn
, ppages
)) {
3427 * sizing down to pszc won't help.
3430 segvn_faultvnmpss_align_err3
++;
3433 if (!PAGE_EXCL(pp
)) {
3434 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[4]);
3437 *ret_pszc
= pp
->p_szc
;
3441 if (io_pplist
!= NULL
) {
3442 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[5]);
3443 io_len
= off
- io_off
;
3445 * Some file systems like NFS don't check EOF
3446 * conditions in fop_pageio(). Check it here
3447 * now that pages are locked SE_EXCL. Any file
3448 * truncation will wait until the pages are
3449 * unlocked so no need to worry that file will
3450 * be truncated after we check its size here.
3451 * XXX fix NFS to remove this check.
3453 va
.va_mask
= AT_SIZE
;
3454 if (fop_getattr(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
)) {
3455 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[6]);
3456 page_unlock(targpp
);
3459 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3460 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[7]);
3463 page_unlock(targpp
);
3466 io_err
= fop_pageio(vp
, io_pplist
, io_off
, io_len
,
3467 B_READ
, svd
->cred
, NULL
);
3469 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[8]);
3470 page_unlock(targpp
);
3471 if (io_err
== EDEADLK
) {
3472 segvn_vmpss_pageio_deadlk_err
++;
3477 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[9]);
3478 while (io_pplist
!= NULL
) {
3480 page_sub(&io_pplist
, pp
);
3481 ASSERT(page_iolock_assert(pp
));
3483 pgidx
= (pp
->p_offset
- start_off
) >>
3485 ASSERT(pgidx
< pages
);
3487 page_list_concat(&done_pplist
, &pp
);
3491 ASSERT(PAGE_EXCL(pp
));
3492 ASSERT(pp
->p_szc
<= pszc
);
3493 if (pszc
!= 0 && !group_page_trylock(pp
, SE_EXCL
)) {
3494 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[10]);
3497 *ret_pszc
= pp
->p_szc
;
3500 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[11]);
	 * page szc could have changed before the entire group was
3503 * locked. reread page szc.
3506 ppages
= page_get_pagecnt(pszc
);
3508 /* link just the roots */
3509 page_list_concat(&targ_pplist
, &pp
);
3510 page_sub(&pplist
, newpp
);
3511 page_list_concat(&repl_pplist
, &newpp
);
3513 while (--ppages
!= 0) {
3515 page_sub(&pplist
, newpp
);
3520 if (io_pplist
!= NULL
) {
3521 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[12]);
3522 io_len
= eoff
- io_off
;
3523 va
.va_mask
= AT_SIZE
;
3524 if (fop_getattr(vp
, &va
, ATTR_HINT
, svd
->cred
, NULL
) != 0) {
3525 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[13]);
3528 if (btopr(va
.va_size
) < btopr(io_off
+ io_len
)) {
3529 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[14]);
3534 io_err
= fop_pageio(vp
, io_pplist
, io_off
, io_len
,
3535 B_READ
, svd
->cred
, NULL
);
3537 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[15]);
3538 if (io_err
== EDEADLK
) {
3539 segvn_vmpss_pageio_deadlk_err
++;
3544 while (io_pplist
!= NULL
) {
3546 page_sub(&io_pplist
, pp
);
3547 ASSERT(page_iolock_assert(pp
));
3549 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3550 ASSERT(pgidx
< pages
);
3555 * we're now bound to succeed or panic.
3556 * remove pages from done_pplist. it's not needed anymore.
3558 while (done_pplist
!= NULL
) {
3560 page_sub(&done_pplist
, pp
);
3562 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[16]);
3563 ASSERT(pplist
== NULL
);
3565 while (targ_pplist
!= NULL
) {
3567 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[17]);
3568 ASSERT(repl_pplist
);
3570 page_sub(&targ_pplist
, pp
);
3571 pgidx
= (pp
->p_offset
- start_off
) >> PAGESHIFT
;
3572 newpp
= repl_pplist
;
3573 page_sub(&repl_pplist
, newpp
);
3575 pfn
= page_pptonum(pp
);
3577 ppages
= page_get_pagecnt(pszc
);
3578 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3579 pfn
= page_pptonum(newpp
);
3580 ASSERT(IS_P2ALIGNED(pfn
, ppages
));
3581 ASSERT(P2PHASE(pfn
, pages
) == pgidx
);
3584 ret
= page_relocate(&pp
, &newpp
, 0, 1, &nreloc
, NULL
);
3585 if (ret
!= 0 || nreloc
== 0) {
3586 panic("segvn_fill_vp_pages: "
3587 "page_relocate failed");
3590 while (nreloc
-- != 0) {
3591 ASSERT(PAGE_EXCL(pp
));
3592 VERIFY(pp
->p_object
== &vp
->v_object
);
3593 ASSERT(pp
->p_vnode
== vp
);
3595 ((pp
->p_offset
- start_off
) >> PAGESHIFT
));
3601 if (svd
->type
== MAP_PRIVATE
) {
3602 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[18]);
3603 for (i
= 0; i
< pages
; i
++) {
3604 ASSERT(ppa
[i
] != NULL
);
3605 ASSERT(PAGE_EXCL(ppa
[i
]));
3606 VERIFY(ppa
[i
]->p_object
== &vp
->v_object
);
3607 ASSERT(ppa
[i
]->p_vnode
== vp
);
3608 ASSERT(ppa
[i
]->p_offset
==
3609 start_off
+ (i
<< PAGESHIFT
));
3610 page_downgrade(ppa
[i
]);
3614 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[19]);
3616 * the caller will still call fop_getpage() for shared segments
3617 * to check FS write permissions. For private segments we map
3618 * file read only anyway. so no fop_getpage is needed.
3620 for (i
= 0; i
< pages
; i
++) {
3621 ASSERT(ppa
[i
] != NULL
);
3622 ASSERT(PAGE_EXCL(ppa
[i
]));
3623 VERIFY(ppa
[i
]->p_object
== &vp
->v_object
);
3624 ASSERT(ppa
[i
]->p_vnode
== vp
);
3625 ASSERT(ppa
[i
]->p_offset
==
3626 start_off
+ (i
<< PAGESHIFT
));
3627 page_unlock(ppa
[i
]);
3635 * Do the cleanup. Unlock target pages we didn't relocate. They are
3636 * linked on targ_pplist by root pages. reassemble unused replacement
3637 * and io pages back to pplist.
3639 if (io_pplist
!= NULL
) {
3640 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[20]);
3643 VERIFY(pp
->p_object
== &vp
->v_object
);
3644 ASSERT(pp
->p_vnode
== vp
);
3645 ASSERT(pp
->p_offset
== io_off
);
3646 ASSERT(page_iolock_assert(pp
));
3648 page_hashout(pp
, false);
3650 } while ((pp
= pp
->p_next
) != io_pplist
);
3651 page_list_concat(&io_pplist
, &pplist
);
3655 while (targ_pplist
!= NULL
) {
3656 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[21]);
3658 ASSERT(PAGE_EXCL(pp
));
3659 page_sub(&targ_pplist
, pp
);
3662 ppages
= page_get_pagecnt(pszc
);
3663 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3666 group_page_unlock(pp
);
3672 ASSERT(PAGE_EXCL(pp
));
3673 ASSERT(pp
->p_szc
== szc
);
3674 page_sub(&repl_pplist
, pp
);
3676 ASSERT(IS_P2ALIGNED(page_pptonum(pp
), ppages
));
3678 /* relink replacement page */
3679 page_list_concat(&tmp_pplist
, &pp
);
3680 while (--ppages
!= 0) {
3681 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[22]);
3683 ASSERT(PAGE_EXCL(pp
));
3684 ASSERT(pp
->p_szc
== szc
);
3685 page_list_concat(&tmp_pplist
, &pp
);
3688 if (tmp_pplist
!= NULL
) {
3689 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[23]);
3690 page_list_concat(&tmp_pplist
, &pplist
);
3691 pplist
= tmp_pplist
;
3694 * at this point all pages are either on done_pplist or
3695 * pplist. They can't be all on done_pplist otherwise
3696 * we'd've been done.
3698 ASSERT(pplist
!= NULL
);
3700 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[24]);
3703 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[25]);
3704 ASSERT(pp
->p_szc
== szc
);
3705 ASSERT(PAGE_EXCL(pp
));
3706 VERIFY(pp
->p_object
!= &vp
->v_object
);
3707 ASSERT(pp
->p_vnode
!= vp
);
3709 } while ((pp
= pp
->p_next
) != pplist
);
3713 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[26]);
3714 ASSERT(pp
->p_szc
== szc
);
3715 ASSERT(PAGE_EXCL(pp
));
3716 VERIFY(pp
->p_object
== &vp
->v_object
);
3717 ASSERT(pp
->p_vnode
== vp
);
3719 } while ((pp
= pp
->p_next
) != done_pplist
);
3721 while (pplist
!= NULL
) {
3722 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[27]);
3724 page_sub(&pplist
, pp
);
3728 while (done_pplist
!= NULL
) {
3729 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[28]);
3731 page_sub(&done_pplist
, pp
);
3737 ASSERT(pplist
== *ppplist
);
3739 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[29]);
3741 * don't downsize on io error.
3742 * see if vop_getpage succeeds.
3743 * pplist may still be used in this case
3748 VM_STAT_ADD(segvnvmstats
.fill_vp_pages
[30]);
3749 page_free_replacement_page(pplist
);
3750 page_create_putback(pages
);
int segvn_anypgsz = 0;

#define	SEGVN_RESTORE_SOFTLOCK_VP(type, pages)				\
	if ((type) == F_SOFTLOCK) {					\
		atomic_add_long((ulong_t *)&(svd)->softlockcnt,		\
		    -(pages));						\
	}

#define	SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot)		\
	if (IS_VMODSORT((ppa)[0]->p_vnode)) {				\
		if ((rw) == S_WRITE) {					\
			for (i = 0; i < (pages); i++) {			\
				VERIFY((ppa)[i]->p_object ==		\
				    (ppa)[i]->p_object);		\
				ASSERT((ppa)[i]->p_vnode ==		\
				    (ppa)[0]->p_vnode);			\
				hat_setmod((ppa)[i]);			\
			}						\
		} else if ((rw) != S_OTHER &&				\
		    ((prot) & (vpprot) & PROT_WRITE)) {			\
			for (i = 0; i < (pages); i++) {			\
				VERIFY((ppa)[i]->p_object ==		\
				    (ppa)[i]->p_object);		\
				ASSERT((ppa)[i]->p_vnode ==		\
				    (ppa)[0]->p_vnode);			\
				if (!hat_ismod((ppa)[i])) {		\
					prot &= ~PROT_WRITE;		\
					break;				\
				}					\
			}						\
		}							\
	}

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)					\
	VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
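
/*
 * Roughly: SEGVN_RESTORE_SOFTLOCK_VP() backs out the softlockcnt bump taken
 * for an F_SOFTLOCK fault when the large-page fault path has to bail out,
 * and SEGVN_UPDATE_MODBITS() keeps mod-bit bookkeeping consistent for
 * VMODSORT vnodes: on S_WRITE it marks every constituent page modified,
 * otherwise it strips PROT_WRITE from the mapping if any constituent page
 * is still clean, so a later write faults and sets the mod bit.
 */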
3792 segvn_fault_vnodepages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
3793 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
3794 caddr_t eaddr
, int brkcow
)
3796 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
3797 struct anon_map
*amp
= svd
->amp
;
3798 uchar_t segtype
= svd
->type
;
3799 uint_t szc
= seg
->s_szc
;
3800 size_t pgsz
= page_get_pagesize(szc
);
3801 size_t maxpgsz
= pgsz
;
3802 pgcnt_t pages
= btop(pgsz
);
3803 pgcnt_t maxpages
= pages
;
3804 size_t ppasize
= (pages
+ 1) * sizeof (page_t
*);
3805 caddr_t a
= lpgaddr
;
3806 caddr_t maxlpgeaddr
= lpgeaddr
;
3807 uoff_t off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
3808 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
3809 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
3810 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
3811 vnode_t
*vp
= svd
->vp
;
3816 faultcode_t err
= 0;
3818 int vop_size_err
= 0;
3819 uint_t protchk
, prot
, vpprot
;
3821 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
3822 anon_sync_obj_t an_cookie
;
3824 int alloc_failed
= 0;
3831 int segvn_anypgsz_vnode
= 0; /* for now map vnode with 2 page sizes */
3832 int tron
= (svd
->tr_state
== SEGVN_TR_ON
);
3836 ASSERT(brkcow
== 0 || amp
!= NULL
);
3837 ASSERT(tron
== 0 || amp
!= NULL
);
3838 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
3839 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
3840 ASSERT(type
!= F_SOFTUNLOCK
);
3841 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
3842 ASSERT(amp
== NULL
|| IS_P2ALIGNED(aindx
, maxpages
));
3843 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
3844 ASSERT(seg
->s_szc
< NBBY
* sizeof (int));
3845 ASSERT(type
!= F_SOFTLOCK
|| lpgeaddr
- a
== maxpgsz
);
3846 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
3848 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltvnpages
[0]);
3849 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltvnpages
[1]);
3851 if (svd
->flags
& MAP_TEXT
) {
3852 hat_flag
|= HAT_LOAD_TEXT
;
3855 if (svd
->pageprot
) {
3858 protchk
= PROT_READ
;
3861 protchk
= PROT_WRITE
;
3864 protchk
= PROT_EXEC
;
3868 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
3873 /* caller has already done segment level protection check. */
3876 if (rw
== S_WRITE
&& segtype
== MAP_PRIVATE
) {
3877 SEGVN_VMSTAT_FLTVNPAGES(2);
3883 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
3885 VM_STAT_COND_ADD(amp
!= NULL
, segvnvmstats
.fltvnpages
[3]);
3889 for (; a
< lpgeaddr
; a
+= pgsz
, off
+= pgsz
, aindx
+= pages
) {
3891 while (szc
< seg
->s_szc
) {
3894 tszc
= segvn_anypgsz_vnode
? szc
+ 1 :
3896 ppgsz
= page_get_pagesize(tszc
);
3897 if (!IS_P2ALIGNED(a
, ppgsz
) ||
3898 ((alloc_failed
>> tszc
) & 0x1)) {
3901 SEGVN_VMSTAT_FLTVNPAGES(4);
3905 e
= P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
3906 lpgeaddr
= (caddr_t
)e
;
3911 if (IS_P2ALIGNED(a
, maxpgsz
) && amp
!= NULL
) {
3912 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
3913 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
3914 anon_array_enter(amp
, aindx
, &an_cookie
);
3915 if (anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
3916 SEGVN_VMSTAT_FLTVNPAGES(5);
3917 ASSERT(anon_pages(amp
->ahp
, aindx
,
3918 maxpages
) == maxpages
);
3919 anon_array_exit(&an_cookie
);
3920 ANON_LOCK_EXIT(&
->a_rwlock
);
3921 err
= segvn_fault_anonpages(hat
, seg
,
3922 a
, a
+ maxpgsz
, type
, rw
,
3924 MIN(a
+ maxpgsz
, eaddr
), brkcow
);
3926 SEGVN_VMSTAT_FLTVNPAGES(6);
3929 if (szc
< seg
->s_szc
) {
3933 lpgeaddr
= maxlpgeaddr
;
3937 ASSERT(anon_pages(amp
->ahp
, aindx
,
3939 SEGVN_VMSTAT_FLTVNPAGES(7);
3940 anon_array_exit(&an_cookie
);
3941 ANON_LOCK_EXIT(&
->a_rwlock
);
3944 ASSERT(!brkcow
|| IS_P2ALIGNED(a
, maxpgsz
));
3945 ASSERT(!tron
|| IS_P2ALIGNED(a
, maxpgsz
));
3947 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
3948 ASSERT(vpage
!= NULL
);
3949 prot
= VPP_PROT(vpage
);
3950 ASSERT(sameprot(seg
, a
, maxpgsz
));
3951 if ((prot
& protchk
) == 0) {
3952 SEGVN_VMSTAT_FLTVNPAGES(8);
3957 if (type
== F_SOFTLOCK
) {
3958 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
3965 if (!brkcow
&& !tron
&& szc
&&
3966 !page_exists_physcontig(&vp
->v_object
, off
, szc
,
3967 segtype
== MAP_PRIVATE
? ppa
: NULL
)) {
3968 SEGVN_VMSTAT_FLTVNPAGES(9);
3969 if (page_alloc_pages(&vp
->v_object
, seg
, a
,
3970 &pplist
, NULL
, szc
, 0, 0) &&
3971 type
!= F_SOFTLOCK
) {
3972 SEGVN_VMSTAT_FLTVNPAGES(10);
3975 alloc_failed
|= (1 << szc
);
3978 if (pplist
!= NULL
&&
3979 vp
->v_mpssdata
== SEGVN_PAGEIO
) {
3981 SEGVN_VMSTAT_FLTVNPAGES(11);
3982 physcontig
= segvn_fill_vp_pages(svd
,
3983 vp
, off
, szc
, ppa
, &pplist
,
3985 ASSERT(!physcontig
|| pplist
== NULL
);
3986 if (!physcontig
&& downsize
&&
3987 type
!= F_SOFTLOCK
) {
3988 ASSERT(pplist
== NULL
);
3989 SEGVN_VMSTAT_FLTVNPAGES(12);
3993 ASSERT(!physcontig
||
3994 segtype
== MAP_PRIVATE
||
3996 if (physcontig
&& ppa
[0] == NULL
) {
4000 } else if (!brkcow
&& !tron
&& szc
&& ppa
[0] != NULL
) {
4001 SEGVN_VMSTAT_FLTVNPAGES(13);
4002 ASSERT(segtype
== MAP_PRIVATE
);
4007 SEGVN_VMSTAT_FLTVNPAGES(14);
4009 ierr
= fop_getpage(vp
, (offset_t
)off
, pgsz
,
4010 &vpprot
, ppa
, pgsz
, seg
, a
, arw
,
4014 for (i
= 0; i
< pages
; i
++) {
4015 ASSERT(PAGE_LOCKED(ppa
[i
]));
4016 ASSERT(!PP_ISFREE(ppa
[i
]));
4017 VERIFY(ppa
[i
]->p_object
==
4019 ASSERT(ppa
[i
]->p_vnode
== vp
);
4020 ASSERT(ppa
[i
]->p_offset
==
4021 off
+ (i
<< PAGESHIFT
));
4025 if (segtype
== MAP_PRIVATE
) {
4026 SEGVN_VMSTAT_FLTVNPAGES(15);
4027 vpprot
&= ~PROT_WRITE
;
4030 ASSERT(segtype
== MAP_PRIVATE
);
4031 SEGVN_VMSTAT_FLTVNPAGES(16);
4032 vpprot
= PROT_ALL
& ~PROT_WRITE
;
4037 SEGVN_VMSTAT_FLTVNPAGES(17);
4038 if (pplist
!= NULL
) {
4039 SEGVN_VMSTAT_FLTVNPAGES(18);
4040 page_free_replacement_page(pplist
);
4041 page_create_putback(pages
);
4043 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4044 if (a
+ pgsz
<= eaddr
) {
4045 SEGVN_VMSTAT_FLTVNPAGES(19);
4046 err
= FC_MAKE_ERR(ierr
);
4049 va
.va_mask
= AT_SIZE
;
4050 if (fop_getattr(vp
, &va
, 0, svd
->cred
, NULL
)) {
4051 SEGVN_VMSTAT_FLTVNPAGES(20);
4052 err
= FC_MAKE_ERR(EIO
);
4055 if (btopr(va
.va_size
) >= btopr(off
+ pgsz
)) {
4056 SEGVN_VMSTAT_FLTVNPAGES(21);
4057 err
= FC_MAKE_ERR(ierr
);
4060 if (btopr(va
.va_size
) <
4061 btopr(off
+ (eaddr
- a
))) {
4062 SEGVN_VMSTAT_FLTVNPAGES(22);
4063 err
= FC_MAKE_ERR(ierr
);
4066 if (brkcow
|| tron
|| type
== F_SOFTLOCK
) {
4067 /* can't reduce map area */
4068 SEGVN_VMSTAT_FLTVNPAGES(23);
4072 SEGVN_VMSTAT_FLTVNPAGES(24);
4080 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4081 anon_array_enter(amp
, aindx
, &an_cookie
);
4084 anon_get_ptr(amp
->ahp
, aindx
) != NULL
) {
4085 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4087 SEGVN_VMSTAT_FLTVNPAGES(25);
4088 ASSERT(anon_pages(amp
->ahp
, taindx
,
4089 maxpages
) == maxpages
);
4090 for (i
= 0; i
< pages
; i
++) {
4091 page_unlock(ppa
[i
]);
4093 anon_array_exit(&an_cookie
);
4094 ANON_LOCK_EXIT(&
->a_rwlock
);
4095 if (pplist
!= NULL
) {
4096 page_free_replacement_page(pplist
);
4097 page_create_putback(pages
);
4099 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4100 if (szc
< seg
->s_szc
) {
4101 SEGVN_VMSTAT_FLTVNPAGES(26);
4103 * For private segments SOFTLOCK
4104 * either always breaks cow (any rw
4105 * type except S_READ_NOCOW) or
4106 * address space is locked as writer
4107 * (S_READ_NOCOW case) and anon slots
4108 * can't show up on second check.
4109 * Therefore if we are here for
4110 * SOFTLOCK case it must be a cow
4111 * break but cow break never reduces
4112 * szc. text replication (tron) in
4113 * this case works as cow break.
4114 * Thus the assert below.
4116 ASSERT(!brkcow
&& !tron
&&
4117 type
!= F_SOFTLOCK
);
4122 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4127 ulong_t taindx
= P2ALIGN(aindx
, maxpages
);
4128 ASSERT(!anon_pages(amp
->ahp
, taindx
, maxpages
));
4132 if (brkcow
|| tron
) {
4133 ASSERT(amp
!= NULL
);
4134 ASSERT(pplist
== NULL
);
4135 ASSERT(szc
== seg
->s_szc
);
4136 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4137 ASSERT(IS_P2ALIGNED(aindx
, maxpages
));
4138 SEGVN_VMSTAT_FLTVNPAGES(27);
4139 ierr
= anon_map_privatepages(amp
, aindx
, szc
,
4140 seg
, a
, prot
, ppa
, vpage
, segvn_anypgsz
,
4141 tron
? PG_LOCAL
: 0, svd
->cred
);
4143 SEGVN_VMSTAT_FLTVNPAGES(28);
4144 anon_array_exit(&an_cookie
);
4145 ANON_LOCK_EXIT(&
->a_rwlock
);
4146 SEGVN_RESTORE_SOFTLOCK_VP(type
, pages
);
4147 err
= FC_MAKE_ERR(ierr
);
4151 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4153 * p_szc can't be changed for locked
4156 ASSERT(svd
->rcookie
==
4157 HAT_INVALID_REGION_COOKIE
);
4158 hat_memload_array(hat
, a
, pgsz
, ppa
, prot
,
4161 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4162 SEGVN_VMSTAT_FLTVNPAGES(29);
4163 for (i
= 0; i
< pages
; i
++) {
4164 page_unlock(ppa
[i
]);
4167 anon_array_exit(&an_cookie
);
4168 ANON_LOCK_EXIT(&
->a_rwlock
);
4172 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
||
4173 (!svd
->pageprot
&& svd
->prot
== (prot
& vpprot
)));
4175 pfn
= page_pptonum(ppa
[0]);
4177 * hat_page_demote() needs an SE_EXCL lock on one of
4178 * constituent page_t's and it decreases root's p_szc
4179 * last. This means if root's p_szc is equal szc and
4180 * all its constituent pages are locked
4181 * hat_page_demote() that could have changed p_szc to
4182 * szc is already done and no new have page_demote()
4183 * can start for this large page.
4187 * we need to make sure same mapping size is used for
4188 * the same address range if there's a possibility the
	 * address is already mapped because hat layer panics
4190 * when translation is loaded for the range already
4191 * mapped with a different page size. We achieve it
4192 * by always using largest page size possible subject
4193 * to the constraints of page size, segment page size
4194 * and page alignment. Since mappings are invalidated
4195 * when those constraints change and make it
4196 * impossible to use previously used mapping size no
4197 * mapping size conflicts should happen.
4201 if ((pszc
= ppa
[0]->p_szc
) == szc
&&
4202 IS_P2ALIGNED(pfn
, pages
)) {
4204 SEGVN_VMSTAT_FLTVNPAGES(30);
4206 for (i
= 0; i
< pages
; i
++) {
4207 ASSERT(PAGE_LOCKED(ppa
[i
]));
4208 ASSERT(!PP_ISFREE(ppa
[i
]));
4209 ASSERT(page_pptonum(ppa
[i
]) ==
4211 ASSERT(ppa
[i
]->p_szc
== szc
);
4212 VERIFY(ppa
[i
]->p_object
== &vp
->v_object
);
4213 ASSERT(ppa
[i
]->p_vnode
== vp
);
4214 ASSERT(ppa
[i
]->p_offset
==
4215 off
+ (i
<< PAGESHIFT
));
4219 * All pages are of szc we need and they are
4220 * all locked so they can't change szc. load
4223 * if page got promoted since last check
4224 * we don't need pplist.
4226 if (pplist
!= NULL
) {
4227 page_free_replacement_page(pplist
);
4228 page_create_putback(pages
);
4230 if (PP_ISMIGRATE(ppa
[0])) {
4231 page_migrate(seg
, a
, ppa
, pages
);
4233 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4235 hat_memload_array_region(hat
, a
, pgsz
,
4236 ppa
, prot
& vpprot
, hat_flag
,
4239 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4240 for (i
= 0; i
< pages
; i
++) {
4241 page_unlock(ppa
[i
]);
4245 anon_array_exit(&an_cookie
);
4246 ANON_LOCK_EXIT(&
->a_rwlock
);
4252 * See if upsize is possible.
4254 if (pszc
> szc
&& szc
< seg
->s_szc
&&
4255 (segvn_anypgsz_vnode
|| pszc
>= seg
->s_szc
)) {
4257 uint_t pszc1
= MIN(pszc
, seg
->s_szc
);
4258 ppgsz
= page_get_pagesize(pszc1
);
4259 ppages
= btop(ppgsz
);
4260 aphase
= btop(P2PHASE((uintptr_t)a
, ppgsz
));
4262 ASSERT(type
!= F_SOFTLOCK
);
4264 SEGVN_VMSTAT_FLTVNPAGES(31);
4265 if (aphase
!= P2PHASE(pfn
, ppages
)) {
4266 segvn_faultvnmpss_align_err4
++;
4268 SEGVN_VMSTAT_FLTVNPAGES(32);
4269 if (pplist
!= NULL
) {
4270 page_t
*pl
= pplist
;
4271 page_free_replacement_page(pl
);
4272 page_create_putback(pages
);
4274 for (i
= 0; i
< pages
; i
++) {
4275 page_unlock(ppa
[i
]);
4278 anon_array_exit(&an_cookie
);
4279 ANON_LOCK_EXIT(&
->a_rwlock
);
4288 * check if we should use smallest mapping size.
4293 !IS_P2ALIGNED(pfn
, pages
)) ||
4295 !segvn_full_szcpages(ppa
, szc
, &upgrdfail
,
4298 if (upgrdfail
&& type
!= F_SOFTLOCK
) {
4300 * segvn_full_szcpages failed to lock
4301 * all pages EXCL. Size down.
4305 SEGVN_VMSTAT_FLTVNPAGES(33);
4307 if (pplist
!= NULL
) {
4308 page_t
*pl
= pplist
;
4309 page_free_replacement_page(pl
);
4310 page_create_putback(pages
);
4313 for (i
= 0; i
< pages
; i
++) {
4314 page_unlock(ppa
[i
]);
4317 anon_array_exit(&an_cookie
);
4318 ANON_LOCK_EXIT(&
->a_rwlock
);
4323 if (szc
!= 0 && !upgrdfail
) {
4324 segvn_faultvnmpss_align_err5
++;
4326 SEGVN_VMSTAT_FLTVNPAGES(34);
4327 if (pplist
!= NULL
) {
4328 page_free_replacement_page(pplist
);
4329 page_create_putback(pages
);
4331 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4333 if (upgrdfail
&& segvn_anypgsz_vnode
) {
4335 hat_memload_array_region(hat
, a
, pgsz
,
4336 ppa
, prot
& vpprot
, hat_flag
,
4339 for (i
= 0; i
< pages
; i
++) {
4340 hat_memload_region(hat
,
4341 a
+ (i
<< PAGESHIFT
),
4342 ppa
[i
], prot
& vpprot
,
4343 hat_flag
, svd
->rcookie
);
4346 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4347 for (i
= 0; i
< pages
; i
++) {
4348 page_unlock(ppa
[i
]);
4352 anon_array_exit(&an_cookie
);
4353 ANON_LOCK_EXIT(&
->a_rwlock
);
4360 * segvn_full_szcpages() upgraded pages szc.
4362 ASSERT(pszc
== ppa
[0]->p_szc
);
4363 ASSERT(IS_P2ALIGNED(pfn
, pages
));
4369 SEGVN_VMSTAT_FLTVNPAGES(35);
4371 * p_szc of ppa[0] can change since we haven't
4372 * locked all constituent pages. Call
4373 * page_lock_szc() to prevent szc changes.
4374 * This should be a rare case that happens when
4375 * multiple segments use a different page size
4376 * to map the same file offsets.
4378 szcmtx
= page_szc_lock(ppa
[0]);
4379 pszc
= ppa
[0]->p_szc
;
4380 ASSERT(szcmtx
!= NULL
|| pszc
== 0);
4381 ASSERT(ppa
[0]->p_szc
<= pszc
);
4383 SEGVN_VMSTAT_FLTVNPAGES(36);
4384 if (szcmtx
!= NULL
) {
4389 if (pplist
!= NULL
) {
4391 * page got promoted since last check.
	 * we don't need preallocated large
4395 SEGVN_VMSTAT_FLTVNPAGES(37);
4396 page_free_replacement_page(pplist
);
4397 page_create_putback(pages
);
4399 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
,
4401 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4402 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4404 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4405 for (i
= 0; i
< pages
; i
++) {
4406 page_unlock(ppa
[i
]);
4410 anon_array_exit(&an_cookie
);
4411 ANON_LOCK_EXIT(&
->a_rwlock
);
4417 * if page got demoted since last check
4418 * we could have not allocated larger page.
4421 if (pplist
== NULL
&&
4422 page_alloc_pages(&vp
->v_object
, seg
, a
, &pplist
,
4423 NULL
, szc
, 0, 0) && type
!= F_SOFTLOCK
) {
4424 SEGVN_VMSTAT_FLTVNPAGES(38);
4425 for (i
= 0; i
< pages
; i
++) {
4426 page_unlock(ppa
[i
]);
4429 anon_array_exit(&an_cookie
);
4430 ANON_LOCK_EXIT(&
->a_rwlock
);
4433 alloc_failed
|= (1 << szc
);
4437 SEGVN_VMSTAT_FLTVNPAGES(39);
4439 if (pplist
!= NULL
) {
4440 segvn_relocate_pages(ppa
, pplist
);
4443 ASSERT(type
== F_SOFTLOCK
);
4444 SEGVN_VMSTAT_FLTVNPAGES(40);
4448 SEGVN_UPDATE_MODBITS(ppa
, pages
, rw
, prot
, vpprot
);
4450 if (pplist
== NULL
&& segvn_anypgsz_vnode
== 0) {
4451 ASSERT(type
== F_SOFTLOCK
);
4452 for (i
= 0; i
< pages
; i
++) {
4453 ASSERT(ppa
[i
]->p_szc
< szc
);
4454 hat_memload_region(hat
,
4455 a
+ (i
<< PAGESHIFT
),
4456 ppa
[i
], prot
& vpprot
, hat_flag
,
4460 ASSERT(pplist
!= NULL
|| type
== F_SOFTLOCK
);
4461 hat_memload_array_region(hat
, a
, pgsz
, ppa
,
4462 prot
& vpprot
, hat_flag
, svd
->rcookie
);
4464 if (!(hat_flag
& HAT_LOAD_LOCK
)) {
4465 for (i
= 0; i
< pages
; i
++) {
4466 ASSERT(PAGE_SHARED(ppa
[i
]));
4467 page_unlock(ppa
[i
]);
4471 anon_array_exit(&an_cookie
);
4472 ANON_LOCK_EXIT(&
->a_rwlock
);
4476 if (vpage
!= NULL
) {
4483 ASSERT(a
< lpgeaddr
);
4485 ASSERT(!brkcow
&& !tron
&& type
!= F_SOFTLOCK
);
4488 * ierr == -1 means we failed to map with a large page.
4489 * (either due to allocation/relocation failures or
4490 * misalignment with other mappings to this file.
4492 * ierr == -2 means some other thread allocated a large page
	 * after we gave up to map with a large page. retry with
4496 ASSERT(ierr
== -1 || ierr
== -2);
4497 ASSERT(ierr
== -2 || szc
!= 0);
4498 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4500 SEGVN_VMSTAT_FLTVNPAGES(41);
4501 ASSERT(pszc
> szc
&& pszc
<= seg
->s_szc
);
4503 } else if (segvn_anypgsz_vnode
) {
4504 SEGVN_VMSTAT_FLTVNPAGES(42);
4507 SEGVN_VMSTAT_FLTVNPAGES(43);
4510 * other process created pszc large page.
4511 * but we still have to drop to 0 szc.
4516 pgsz
= page_get_pagesize(szc
);
4520 * Size up case. Note lpgaddr may only be needed for
4521 * softlock case so we don't adjust it here.
4523 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4524 ASSERT(a
>= lpgaddr
);
4525 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4526 off
= svd
->offset
+ (uintptr_t)(a
- seg
->s_base
);
4527 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4528 vpage
= (svd
->vpage
!= NULL
) ?
4529 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4532 * Size down case. Note lpgaddr may only be needed for
4533 * softlock case so we don't adjust it here.
4535 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4536 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4537 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4538 ASSERT(a
< lpgeaddr
);
4540 SEGVN_VMSTAT_FLTVNPAGES(44);
4542 * The beginning of the large page region can
4543 * be pulled to the right to make a smaller
4544 * region. We haven't yet faulted a single
4547 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4548 ASSERT(a
>= lpgaddr
);
4550 (uintptr_t)(a
- seg
->s_base
);
4551 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4552 vpage
= (svd
->vpage
!= NULL
) ?
4553 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4558 kmem_free(ppa
, ppasize
);
4559 if (!err
&& !vop_size_err
) {
4560 SEGVN_VMSTAT_FLTVNPAGES(45);
4563 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4564 SEGVN_VMSTAT_FLTVNPAGES(46);
4565 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
4567 if (!vop_size_err
) {
4568 SEGVN_VMSTAT_FLTVNPAGES(47);
4571 ASSERT(brkcow
|| tron
|| type
== F_SOFTLOCK
);
	/*
	 * Large page end is mapped beyond the end of file and it's a cow
	 * fault (can be a text replication induced cow) or softlock so we can't
	 * reduce the map area. For now just demote the segment. This should
	 * really only happen if the end of the file changed after the mapping
	 * was established since when large page segments are created we make
	 * sure they don't extend beyond the end of the file.
	 */
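	/*
	 * Condensed view of the demotion path that follows (a restatement of
	 * the code below, not additional logic): drop the reader lock, retake
	 * it as writer, clear the segment's large page size, downgrade, and
	 * return IE_RETRY so the fault is redone with szc == 0:
	 *
	 *	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	 *	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
	 *	err = segvn_clrszc(seg);
	 *	SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
	 *	return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
	 */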
4580 SEGVN_VMSTAT_FLTVNPAGES(48);
4582 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4583 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4585 if (seg
->s_szc
!= 0) {
4586 segvn_fltvnpages_clrszc_cnt
++;
4587 ASSERT(svd
->softlockcnt
== 0);
4588 err
= segvn_clrszc(seg
);
4590 segvn_fltvnpages_clrszc_err
++;
4593 ASSERT(err
|| seg
->s_szc
== 0);
4594 SEGVN_LOCK_DOWNGRADE(seg
->s_as
, &svd
->lock
);
4595 /* segvn_fault will do its job as if szc had been zero to begin with */
4596 return (err
== 0 ? IE_RETRY
: FC_MAKE_ERR(err
));
4600 * This routine will attempt to fault in one large page.
4601 * it will use smaller pages if that fails.
4602 * It should only be called for pure anonymous segments.
4605 segvn_fault_anonpages(struct hat
*hat
, struct seg
*seg
, caddr_t lpgaddr
,
4606 caddr_t lpgeaddr
, enum fault_type type
, enum seg_rw rw
, caddr_t addr
,
4607 caddr_t eaddr
, int brkcow
)
4609 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4610 struct anon_map
*amp
= svd
->amp
;
4611 uchar_t segtype
= svd
->type
;
4612 uint_t szc
= seg
->s_szc
;
4613 size_t pgsz
= page_get_pagesize(szc
);
4614 size_t maxpgsz
= pgsz
;
4615 pgcnt_t pages
= btop(pgsz
);
4616 uint_t ppaszc
= szc
;
4617 caddr_t a
= lpgaddr
;
4618 ulong_t aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4619 struct vpage
*vpage
= (svd
->vpage
!= NULL
) ?
4620 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4625 uint_t protchk
, prot
, vpprot
;
4627 int hat_flag
= (type
== F_SOFTLOCK
) ? HAT_LOAD_LOCK
: HAT_LOAD
;
4628 anon_sync_obj_t cookie
;
4630 int pgflags
= (svd
->tr_state
== SEGVN_TR_ON
) ? PG_LOCAL
: 0;
4633 ASSERT(amp
!= NULL
);
4634 ASSERT(enable_mbit_wa
== 0); /* no mbit simulations with large pages */
4635 ASSERT(!(svd
->flags
& MAP_NORESERVE
));
4636 ASSERT(type
!= F_SOFTUNLOCK
);
4637 ASSERT(IS_P2ALIGNED(a
, maxpgsz
));
4638 ASSERT(!brkcow
|| svd
->tr_state
== SEGVN_TR_OFF
);
4639 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4641 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
4643 VM_STAT_COND_ADD(type
== F_SOFTLOCK
, segvnvmstats
.fltanpages
[0]);
4644 VM_STAT_COND_ADD(type
!= F_SOFTLOCK
, segvnvmstats
.fltanpages
[1]);
4646 if (svd
->flags
& MAP_TEXT
) {
4647 hat_flag
|= HAT_LOAD_TEXT
;
4650 if (svd
->pageprot
) {
4653 protchk
= PROT_READ
;
4656 protchk
= PROT_WRITE
;
4659 protchk
= PROT_EXEC
;
4663 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
4666 VM_STAT_ADD(segvnvmstats
.fltanpages
[2]);
4669 /* caller has already done segment level protection check. */
4672 ppa
= kmem_cache_alloc(segvn_szc_cache
[ppaszc
], KM_SLEEP
);
4673 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
4676 for (; a
< lpgeaddr
; a
+= pgsz
, aindx
+= pages
) {
4677 if (svd
->pageprot
!= 0 && IS_P2ALIGNED(a
, maxpgsz
)) {
4678 VM_STAT_ADD(segvnvmstats
.fltanpages
[3]);
4679 ASSERT(vpage
!= NULL
);
4680 prot
= VPP_PROT(vpage
);
4681 ASSERT(sameprot(seg
, a
, maxpgsz
));
4682 if ((prot
& protchk
) == 0) {
4687 if (adjszc_chk
&& IS_P2ALIGNED(a
, maxpgsz
) &&
4689 ASSERT(a
> lpgaddr
);
4693 ASSERT(IS_P2ALIGNED(aindx
, pages
));
4694 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
,
4697 if (type
== F_SOFTLOCK
) {
4698 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
4701 anon_array_enter(amp
, aindx
, &cookie
);
4702 ppa_szc
= (uint_t
)-1;
4703 ierr
= anon_map_getpages(amp
, aindx
, szc
, seg
, a
,
4704 prot
, &vpprot
, ppa
, &ppa_szc
, vpage
, rw
, brkcow
,
4705 segvn_anypgsz
, pgflags
, svd
->cred
);
4707 anon_array_exit(&cookie
);
4708 VM_STAT_ADD(segvnvmstats
.fltanpages
[4]);
4709 if (type
== F_SOFTLOCK
) {
4711 (ulong_t
*)&svd
->softlockcnt
,
4715 VM_STAT_ADD(segvnvmstats
.fltanpages
[6]);
4716 err
= FC_MAKE_ERR(ierr
);
4722 ASSERT(!IS_VMODSORT(ppa
[0]->p_vnode
));
4724 ASSERT(segtype
== MAP_SHARED
||
4725 ppa
[0]->p_szc
<= szc
);
4726 ASSERT(segtype
== MAP_PRIVATE
||
4727 ppa
[0]->p_szc
>= szc
);
4730 * Handle pages that have been marked for migration
4732 if (lgrp_optimizations())
4733 page_migrate(seg
, a
, ppa
, pages
);
4735 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4737 if (segtype
== MAP_SHARED
) {
4738 vpprot
|= PROT_WRITE
;
4741 hat_memload_array(hat
, a
, pgsz
, ppa
,
4742 prot
& vpprot
, hat_flag
);
4744 if (hat_flag
& HAT_LOAD_LOCK
) {
4745 VM_STAT_ADD(segvnvmstats
.fltanpages
[7]);
4747 VM_STAT_ADD(segvnvmstats
.fltanpages
[8]);
4748 for (i
= 0; i
< pages
; i
++)
4749 page_unlock(ppa
[i
]);
4754 anon_array_exit(&cookie
);
4759 ASSERT(a
< lpgeaddr
);
		/*
		 * ierr == -1 means we failed to allocate a large page.
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (a) is at the beginning of the
		 * next page size boundary because the other process couldn't
		 * have relocated locked pages.
		 */
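		/*
		 * Illustrative example (an assumption for exposition, not
		 * taken from this file): with hypothetical page sizes of 8K
		 * (szc 0), 64K (szc 1) and 4M (szc 2), a size down retry of a
		 * failed 4M allocation proceeds with 64K chunks, while a size
		 * up retry of a 64K region proceeds as one 4M chunk.  When
		 * segvn_anypgsz is set this is simply the adjustment done
		 * below:
		 *
		 *	szc = (ierr == -1) ? szc - 1 : szc + 1;
		 *	pgsz = page_get_pagesize(szc);
		 */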
4775 ASSERT(ierr
== -1 || ierr
== -2);
4777 if (segvn_anypgsz
) {
4778 ASSERT(ierr
== -2 || szc
!= 0);
4779 ASSERT(ierr
== -1 || szc
< seg
->s_szc
);
4780 szc
= (ierr
== -1) ? szc
- 1 : szc
+ 1;
4783 * For non COW faults and segvn_anypgsz == 0
4784 * we need to be careful not to loop forever
4785 * if existing page is found with szc other
4786 * than 0 or seg->s_szc. This could be due
4787 * to page relocations on behalf of DR or
4788 * more likely large page creation. For this
4789 * case simply re-size to existing page's szc
4790 * if returned by anon_map_getpages().
4792 if (ppa_szc
== (uint_t
)-1) {
4793 szc
= (ierr
== -1) ? 0 : seg
->s_szc
;
4795 ASSERT(ppa_szc
<= seg
->s_szc
);
4796 ASSERT(ierr
== -2 || ppa_szc
< szc
);
4797 ASSERT(ierr
== -1 || ppa_szc
> szc
);
4802 pgsz
= page_get_pagesize(szc
);
4804 ASSERT(type
!= F_SOFTLOCK
|| ierr
== -1 ||
4805 (IS_P2ALIGNED(a
, pgsz
) && IS_P2ALIGNED(lpgeaddr
, pgsz
)));
4806 if (type
== F_SOFTLOCK
) {
			/*
			 * For softlocks we cannot reduce the fault area
			 * (calculated based on the largest page size for this
			 * segment) for size down and a is already next
			 * page size aligned as asserted above for size
			 * ups. Therefore just continue in case of softlock.
			 */
4814 VM_STAT_ADD(segvnvmstats
.fltanpages
[9]);
4815 continue; /* keep lint happy */
4816 } else if (ierr
== -2) {
4819 * Size up case. Note lpgaddr may only be needed for
4820 * softlock case so we don't adjust it here.
4822 VM_STAT_ADD(segvnvmstats
.fltanpages
[10]);
4823 a
= (caddr_t
)P2ALIGN((uintptr_t)a
, pgsz
);
4824 ASSERT(a
>= lpgaddr
);
4825 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4826 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4827 vpage
= (svd
->vpage
!= NULL
) ?
4828 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4831 * Size down case. Note lpgaddr may only be needed for
4832 * softlock case so we don't adjust it here.
4834 VM_STAT_ADD(segvnvmstats
.fltanpages
[11]);
4835 ASSERT(IS_P2ALIGNED(a
, pgsz
));
4836 ASSERT(IS_P2ALIGNED(lpgeaddr
, pgsz
));
4837 lpgeaddr
= (caddr_t
)P2ROUNDUP((uintptr_t)eaddr
, pgsz
);
4838 ASSERT(a
< lpgeaddr
);
4841 * The beginning of the large page region can
4842 * be pulled to the right to make a smaller
4843 * region. We haven't yet faulted a single
4846 VM_STAT_ADD(segvnvmstats
.fltanpages
[12]);
4847 a
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
4848 ASSERT(a
>= lpgaddr
);
4849 aindx
= svd
->anon_index
+ seg_page(seg
, a
);
4850 vpage
= (svd
->vpage
!= NULL
) ?
4851 &svd
->vpage
[seg_page(seg
, a
)] : NULL
;
4855 VM_STAT_ADD(segvnvmstats
.fltanpages
[13]);
4856 ANON_LOCK_EXIT(&
->a_rwlock
);
4857 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4860 VM_STAT_ADD(segvnvmstats
.fltanpages
[14]);
4861 ANON_LOCK_EXIT(&
->a_rwlock
);
4862 kmem_cache_free(segvn_szc_cache
[ppaszc
], ppa
);
4863 if (type
== F_SOFTLOCK
&& a
> lpgaddr
) {
4864 VM_STAT_ADD(segvnvmstats
.fltanpages
[15]);
4865 segvn_softunlock(seg
, lpgaddr
, a
- lpgaddr
, S_OTHER
);
int fltadvice = 1;	/* set to free behind pages for sequential access */

/*
 * This routine is called via a machine specific fault handling routine.
 * It is also called by software routines wishing to lock or unlock
 * a range of addresses.
 *
 * Here is the basic algorithm:
 *	If unlocking
 *		Call segvn_softunlock
 *		Return
 *	endif
 *	Checking and set up work
 *	If we will need some non-anonymous pages
 *		Call fop_getpage over the range of non-anonymous pages
 *	endif
 *	Loop over all addresses requested
 *		Call segvn_faultpage passing in page list
 *		    to load up translations and handle anonymous pages
 *	endloop
 *	Load up translation to any additional pages in page list not
 *	already handled that fit into this segment
 */
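/*
 * Illustrative sketch only: example_resolve_fault() below is a hypothetical
 * caller, not part of this file, and it assumes the segop_fault() dispatch
 * wrapper analogous to the segop_setpagesize() mentioned later.  Faults
 * reach segvn_fault() through the segment ops vector rather than by a
 * direct call, roughly as follows:
 *
 *	static faultcode_t
 *	example_resolve_fault(struct seg *seg, caddr_t addr, size_t len)
 *	{
 *		return (segop_fault(seg->s_as->a_hat, seg, addr, len,
 *		    F_INVAL, S_READ));
 *	}
 */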
4894 segvn_fault(struct hat
*hat
, struct seg
*seg
, caddr_t addr
, size_t len
,
4895 enum fault_type type
, enum seg_rw rw
)
4897 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
4898 page_t
**plp
, **ppp
, *pp
;
4901 struct vpage
*vpage
;
4902 uint_t vpprot
, prot
;
4904 page_t
*pl
[FAULT_TMP_PAGES_NUM
+ 1];
4905 size_t plsz
, pl_alloc_sz
;
4908 struct anon_map
*amp
;
4910 caddr_t lpgaddr
, lpgeaddr
;
4912 anon_sync_obj_t cookie
;
4913 int brkcow
= BREAK_COW_SHARE(rw
, type
, svd
->type
);
4915 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
4916 ASSERT(svd
->amp
== NULL
|| svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
4919 * First handle the easy stuff
4921 if (type
== F_SOFTUNLOCK
) {
4922 if (rw
== S_READ_NOCOW
) {
4924 ASSERT(AS_WRITE_HELD(seg
->s_as
));
4926 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
4927 pgsz
= (seg
->s_szc
== 0) ? PAGESIZE
:
4928 page_get_pagesize(seg
->s_szc
);
4929 VM_STAT_COND_ADD(pgsz
> PAGESIZE
, segvnvmstats
.fltanpages
[16]);
4930 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
4931 segvn_softunlock(seg
, lpgaddr
, lpgeaddr
- lpgaddr
, rw
);
4932 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4936 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
||
4937 !HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
));
4939 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4940 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4941 if (svd
->tr_state
== SEGVN_TR_INIT
) {
4942 ASSERT(svd
->vp
!= NULL
&& svd
->amp
== NULL
);
4943 ASSERT(svd
->flags
& MAP_TEXT
);
4944 ASSERT(svd
->type
== MAP_PRIVATE
);
4945 segvn_textrepl(seg
);
4946 ASSERT(svd
->tr_state
!= SEGVN_TR_INIT
);
4947 ASSERT(svd
->tr_state
!= SEGVN_TR_ON
||
4950 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4952 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
4953 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
4955 if (rw
== S_WRITE
&& svd
->tr_state
!= SEGVN_TR_OFF
) {
4956 ASSERT(!svd
->pageprot
&& !(svd
->prot
& PROT_WRITE
));
4957 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4961 if (svd
->tr_state
== SEGVN_TR_ON
) {
4962 ASSERT(svd
->vp
!= NULL
&& svd
->amp
!= NULL
);
4963 segvn_textunrepl(seg
, 0);
4964 ASSERT(svd
->amp
== NULL
&&
4965 svd
->tr_state
== SEGVN_TR_OFF
);
4966 } else if (svd
->tr_state
!= SEGVN_TR_OFF
) {
4967 svd
->tr_state
= SEGVN_TR_OFF
;
4969 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
4970 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
4974 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
4977 * If we have the same protections for the entire segment,
4978 * insure that the access being attempted is legitimate.
4981 if (svd
->pageprot
== 0) {
4987 protchk
= PROT_READ
;
4990 protchk
= PROT_WRITE
;
4993 protchk
= PROT_EXEC
;
4997 protchk
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
5001 if ((svd
->prot
& protchk
) == 0) {
5002 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5003 return (FC_PROT
); /* illegal access type */
5007 if (brkcow
&& HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5008 /* this must be SOFTLOCK S_READ fault */
5009 ASSERT(svd
->amp
== NULL
);
5010 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5011 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5012 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5013 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5015 * this must be the first ever non S_READ_NOCOW
5016 * softlock for this segment.
5018 ASSERT(svd
->softlockcnt
== 0);
5019 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5021 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5023 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5028 * We can't allow the long term use of softlocks for vmpss segments,
5029 * because in some file truncation cases we should be able to demote
5030 * the segment, which requires that there are no softlocks. The
5031 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5032 * segment is S_READ_NOCOW, where the caller holds the address space
5033 * locked as writer and calls softunlock before dropping the as lock.
5034 * S_READ_NOCOW is used by /proc to read memory from another user.
5036 * Another deadlock between SOFTLOCK and file truncation can happen
5037 * because segvn_fault_vnodepages() calls the FS one pagesize at
5038 * a time. A second fop_getpage() call by segvn_fault_vnodepages()
5039 * can cause a deadlock because the first set of page_t's remain
5040 * locked SE_SHARED. To avoid this, we demote segments on a first
5041 * SOFTLOCK if they have a length greater than the segment's
5044 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5045 * the access type is S_READ_NOCOW and the fault length is less than
5046 * or equal to the segment's page size. While this is quite restrictive,
5047 * it should be the most common case of SOFTLOCK against a vmpss
5050 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5051 * caller makes sure no COW will be caused by another thread for a
5054 if (type
== F_SOFTLOCK
&& svd
->vp
!= NULL
&& seg
->s_szc
!= 0) {
5057 if (rw
!= S_READ_NOCOW
) {
5060 if (!demote
&& len
> PAGESIZE
) {
5061 pgsz
= page_get_pagesize(seg
->s_szc
);
5062 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
,
5064 if (lpgeaddr
- lpgaddr
> pgsz
) {
5069 ASSERT(demote
|| AS_WRITE_HELD(seg
->s_as
));
5072 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5073 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5074 if (seg
->s_szc
!= 0) {
5075 segvn_vmpss_clrszc_cnt
++;
5076 ASSERT(svd
->softlockcnt
== 0);
5077 err
= segvn_clrszc(seg
);
5079 segvn_vmpss_clrszc_err
++;
5080 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5081 return (FC_MAKE_ERR(err
));
5084 ASSERT(seg
->s_szc
== 0);
5085 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5091 * Check to see if we need to allocate an anon_map structure.
5093 if (svd
->amp
== NULL
&& (svd
->vp
== NULL
|| brkcow
)) {
5094 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5096 * Drop the "read" lock on the segment and acquire
5097 * the "write" version since we have to allocate the
5100 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5101 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5103 if (svd
->amp
== NULL
) {
5104 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
5105 svd
->amp
->a_szc
= seg
->s_szc
;
5107 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5110 * Start all over again since segment protections
5111 * may have changed after we dropped the "read" lock.
5117 * S_READ_NOCOW vs S_READ distinction was
5118 * only needed for the code above. After
5119 * that we treat it as S_READ.
5121 if (rw
== S_READ_NOCOW
) {
5122 ASSERT(type
== F_SOFTLOCK
);
5123 ASSERT(AS_WRITE_HELD(seg
->s_as
));
5130 * MADV_SEQUENTIAL work is ignored for large page segments.
5132 if (seg
->s_szc
!= 0) {
5133 pgsz
= page_get_pagesize(seg
->s_szc
);
5134 ASSERT(SEGVN_LOCK_HELD(seg
->s_as
, &svd
->lock
));
5135 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
5136 if (svd
->vp
== NULL
) {
5137 err
= segvn_fault_anonpages(hat
, seg
, lpgaddr
,
5138 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5140 err
= segvn_fault_vnodepages(hat
, seg
, lpgaddr
,
5141 lpgeaddr
, type
, rw
, addr
, addr
+ len
, brkcow
);
5142 if (err
== IE_RETRY
) {
5143 ASSERT(seg
->s_szc
== 0);
5144 ASSERT(SEGVN_READ_HELD(seg
->s_as
, &svd
->lock
));
5145 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5149 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5153 page
= seg_page(seg
, addr
);
5155 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5156 anon_index
= svd
->anon_index
+ page
;
5158 if (type
== F_PROT
&& rw
== S_READ
&&
5159 svd
->tr_state
== SEGVN_TR_OFF
&&
5160 svd
->type
== MAP_PRIVATE
&& svd
->pageprot
== 0) {
5161 size_t index
= anon_index
;
5164 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5166 * The fast path could apply to S_WRITE also, except
5167 * that the protection fault could be caused by lazy
5168 * tlb flush when ro->rw. In this case, the pte is
5169 * RW already. But RO in the other cpu's tlb causes
5170 * the fault. Since hat_chgprot won't do anything if
5171 * pte doesn't change, we may end up faulting
5172 * indefinitely until the RO tlb entry gets replaced.
5174 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, index
++) {
5175 anon_array_enter(amp
, index
, &cookie
);
5176 ap
= anon_get_ptr(amp
->ahp
, index
);
5177 anon_array_exit(&cookie
);
5178 if ((ap
== NULL
) || (ap
->an_refcnt
!= 1)) {
5179 ANON_LOCK_EXIT(&
->a_rwlock
);
5183 hat_chgprot(seg
->s_as
->a_hat
, addr
, len
, svd
->prot
);
5184 ANON_LOCK_EXIT(&
->a_rwlock
);
5185 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5191 if (svd
->vpage
== NULL
)
5194 vpage
= &svd
->vpage
[page
];
5196 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5199 * If MADV_SEQUENTIAL has been set for the particular page we
5200 * are faulting on, free behind all pages in the segment and put
5201 * them on the free list.
5204 if ((page
!= 0) && fltadvice
&& svd
->tr_state
!= SEGVN_TR_ON
) {
5206 ulong_t fanon_index
;
5208 uoff_t pgoff
, fpgoff
;
5210 struct anon
*fap
= NULL
;
5212 if (svd
->advice
== MADV_SEQUENTIAL
||
5214 VPP_ADVICE(vpage
) == MADV_SEQUENTIAL
)) {
5215 pgoff
= off
- PAGESIZE
;
5218 vpp
= &svd
->vpage
[fpage
];
5220 fanon_index
= svd
->anon_index
+ fpage
;
5222 while (pgoff
> svd
->offset
) {
5223 if (svd
->advice
!= MADV_SEQUENTIAL
&&
5224 (!svd
->pageadvice
|| (vpage
&&
5225 VPP_ADVICE(vpp
) != MADV_SEQUENTIAL
)))
5229 * If this is an anon page, we must find the
5230 * correct <vp, offset> for it
5234 ANON_LOCK_ENTER(&
->a_rwlock
,
5236 anon_array_enter(amp
, fanon_index
,
5238 fap
= anon_get_ptr(amp
->ahp
,
5241 swap_xlate(fap
, &fvp
, &fpgoff
);
5246 anon_array_exit(&cookie
);
5247 ANON_LOCK_EXIT(&
->a_rwlock
);
5255 * Skip pages that are free or have an
5258 pp
= page_lookup_nowait(&fvp
->v_object
,
5263 * We don't need the page_struct_lock to test
5264 * as this is only advisory; even if we
5265 * acquire it someone might race in and lock
5266 * the page after we unlock and before the
5267 * PUTPAGE, then fop_putpage will do nothing.
5269 if (pp
->p_lckcnt
== 0 && pp
->p_cowcnt
== 0) {
5271 * Hold the vnode before releasing
5272 * the page lock to prevent it from
5273 * being freed and re-used by some
5279 * We should build a page list
5280 * to kluster putpages XXX
5282 (void) fop_putpage(fvp
,
5283 (offset_t
)fpgoff
, PAGESIZE
,
5284 (B_DONTNEED
|B_FREE
|B_ASYNC
),
5289 * XXX - Should the loop terminate if
5290 * the page is `locked'?
5306 * See if we need to call fop_getpage for
5307 * *any* of the range being faulted on.
5308 * We can skip all of this work if there
5309 * was no original vnode.
5311 if (svd
->vp
!= NULL
) {
5324 * Only acquire reader lock to prevent amp->ahp
5325 * from being changed. It's ok to miss pages,
5326 * hence we don't do anon_array_enter
5328 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5329 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5331 if (len
<= PAGESIZE
)
5332 /* inline non_anon() */
5333 dogetpage
= (ap
== NULL
);
5335 dogetpage
= non_anon(amp
->ahp
, anon_index
,
5337 ANON_LOCK_EXIT(&
->a_rwlock
);
5342 struct as
*as
= seg
->s_as
;
5344 if (len
> FAULT_TMP_PAGES_SZ
) {
5346 * Page list won't fit in local array,
5347 * allocate one of the needed size.
5350 (btop(len
) + 1) * sizeof (page_t
*);
5351 plp
= kmem_alloc(pl_alloc_sz
, KM_SLEEP
);
5354 } else if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
||
5355 svd
->tr_state
== SEGVN_TR_ON
|| rw
== S_OTHER
||
5356 (((size_t)(addr
+ PAGESIZE
) <
5357 (size_t)(seg
->s_base
+ seg
->s_size
)) &&
5358 hat_probe(as
->a_hat
, addr
+ PAGESIZE
))) {
5360 * Ask fop_getpage to return the exact number
5362 * (a) this is a COW fault, or
5363 * (b) this is a software fault, or
5364 * (c) next page is already mapped.
5369 * Ask fop_getpage to return adjacent pages
5370 * within the segment.
5372 plsz
= MIN((size_t)FAULT_TMP_PAGES_SZ
, (size_t)
5373 ((seg
->s_base
+ seg
->s_size
) - addr
));
5374 ASSERT((addr
+ plsz
) <=
5375 (seg
->s_base
+ seg
->s_size
));
			/*
			 * Need to get some non-anonymous pages.
			 * We need to make only one call to GETPAGE to do
			 * this to prevent certain deadlocking conditions
			 * when we are doing locking.  In this case
			 * non_anon() should have picked up the smallest
			 * range which includes all the non-anonymous
			 * pages in the requested range.  We have to
			 * be careful regarding which rw flag to pass in
			 * because on a private mapping, the underlying
			 * object is never allowed to be written.
			 */
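			/*
			 * Condensed restatement of the rw selection done just
			 * below with the arw variable (no new logic): a write
			 * fault on a MAP_PRIVATE mapping is passed to the
			 * filesystem as a read, since the write is later
			 * satisfied from a private anon copy:
			 *
			 *	arw = (rw == S_WRITE &&
			 *	    svd->type == MAP_PRIVATE) ? S_READ : rw;
			 */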
5390 if (rw
== S_WRITE
&& svd
->type
== MAP_PRIVATE
) {
5396 err
= fop_getpage(vp
, (offset_t
)vp_off
, vp_len
,
5397 &vpprot
, plp
, plsz
, seg
, addr
+ (vp_off
- off
), arw
,
5400 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5401 segvn_pagelist_rele(plp
);
5403 kmem_free(plp
, pl_alloc_sz
);
5404 return (FC_MAKE_ERR(err
));
5406 if (svd
->type
== MAP_PRIVATE
)
5407 vpprot
&= ~PROT_WRITE
;
5412 * N.B. at this time the plp array has all the needed non-anon
5413 * pages in addition to (possibly) having some adjacent pages.
	/*
	 * Always acquire the anon_array_lock to prevent
	 * 2 threads from allocating separate anon slots for
	 * the same "addr".
	 *
	 * If this is a copy-on-write fault and we don't already
	 * have the anon_array_lock, acquire it to prevent the
	 * fault routine from handling multiple copy-on-write faults
	 * on the same "addr" in the same address space.
	 *
	 * Only one thread should deal with the fault since after
	 * it is handled, the other threads can acquire a translation
	 * to the newly created private page.  This prevents two or
	 * more threads from creating different private pages for the
	 * same fault.
	 *
	 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
	 * to prevent deadlock between this thread and another thread
	 * which has soft-locked this page and wants to acquire serial_lock.
	 *
	 * The fix for bug 4026339 becomes unnecessary when using the
	 * locking scheme with per amp rwlock and a global set of hash
	 * lock, anon_array_lock.  If we steal a vnode page when low
	 * on memory and upgrade the page lock through page_rename,
	 * then the page is PAGE_HANDLED, nothing needs to be done
	 * for this page after returning from segvn_faultpage.
	 *
	 * But really, the page lock should be downgraded after
	 * the stolen page is page_rename'd.
	 */

	if (amp != NULL)
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5452 * Ok, now loop over the address range and handle faults
5454 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
, off
+= PAGESIZE
) {
5455 err
= segvn_faultpage(hat
, seg
, a
, off
, vpage
, plp
, vpprot
,
5459 ANON_LOCK_EXIT(&
->a_rwlock
);
5460 if (type
== F_SOFTLOCK
&& a
> addr
) {
5461 segvn_softunlock(seg
, addr
, (a
- addr
),
5464 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5465 segvn_pagelist_rele(plp
);
5467 kmem_free(plp
, pl_alloc_sz
);
5472 } else if (svd
->vpage
) {
5473 page
= seg_page(seg
, addr
);
5474 vpage
= &svd
->vpage
[++page
];
5478 /* Didn't get pages from the underlying fs so we're done */
5483 * Now handle any other pages in the list returned.
5484 * If the page can be used, load up the translations now.
5485 * Note that the for loop will only be entered if "plp"
5486 * is pointing to a non-NULL page pointer which means that
5487 * fop_getpage() was called and vpprot has been initialized.
5489 if (svd
->pageprot
== 0)
5490 prot
= svd
->prot
& vpprot
;
5494 * Large Files: diff should be unsigned value because we started
5495 * supporting > 2GB segment sizes from 2.5.1 and when a
5496 * large file of size > 2GB gets mapped to address space
5497 * the diff value can be > 2GB.
5500 for (ppp
= plp
; (pp
= *ppp
) != NULL
; ppp
++) {
5504 anon_sync_obj_t cookie
;
5505 int hat_flag
= HAT_LOAD_ADV
;
5507 if (svd
->flags
& MAP_TEXT
) {
5508 hat_flag
|= HAT_LOAD_TEXT
;
5511 if (pp
== PAGE_HANDLED
)
5514 if (svd
->tr_state
!= SEGVN_TR_ON
&&
5515 pp
->p_offset
>= svd
->offset
&&
5516 pp
->p_offset
< svd
->offset
+ seg
->s_size
) {
5518 diff
= pp
->p_offset
- svd
->offset
;
5521 * Large Files: Following is the assertion
5522 * validating the above cast.
5524 VERIFY(&svd
->vp
->v_object
== pp
->p_object
);
5525 ASSERT(svd
->vp
== pp
->p_vnode
);
5529 prot
= VPP_PROT(&svd
->vpage
[page
]) & vpprot
;
5532 * Prevent other threads in the address space from
5533 * creating private pages (i.e., allocating anon slots)
5534 * while we are in the process of loading translations
5535 * to additional pages returned by the underlying
5539 anon_index
= svd
->anon_index
+ page
;
5540 anon_array_enter(amp
, anon_index
, &cookie
);
5541 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
5543 if ((amp
== NULL
) || (ap
== NULL
)) {
5544 if (IS_VMODSORT(pp
->p_vnode
) ||
5548 else if (rw
!= S_OTHER
&&
5550 prot
&= ~PROT_WRITE
;
5553 * Skip mapping read ahead pages marked
5554 * for migration, so they will get migrated
5557 ASSERT(amp
== NULL
||
5558 svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
5559 if ((prot
& PROT_READ
) && !PP_ISMIGRATE(pp
)) {
5560 hat_memload_region(hat
,
5567 anon_array_exit(&cookie
);
5573 ANON_LOCK_EXIT(&
->a_rwlock
);
5574 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5576 kmem_free(plp
, pl_alloc_sz
);
/*
 * This routine is used to start I/O on pages asynchronously.  XXX it will
 * only create PAGESIZE pages. At fault time they will be relocated into
 * larger pages.
 */
static faultcode_t
segvn_faulta(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	int err = 0;
	struct anon_map *amp;
	vnode_t *vp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	if ((amp = svd->amp) != NULL) {
		struct anon *ap;

		/*
		 * Reader lock to prevent amp->ahp from being changed.
		 * This is advisory, it's ok to miss a page, so
		 * we don't do anon_array_enter lock.
		 */
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		if ((ap = anon_get_ptr(amp->ahp,
		    svd->anon_index + seg_page(seg, addr))) != NULL) {

			err = anon_getpage(&ap, NULL, NULL,
			    0, seg, addr, S_READ, svd->cred);

			ANON_LOCK_EXIT(&amp->a_rwlock);
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			if (err)
				return (FC_MAKE_ERR(err));
			return (0);
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
	}

	if (svd->vp == NULL) {
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (0);			/* zfod page - do nothing now */
	}

	vp = svd->vp;
	err = fop_getpage(vp,
	    (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
	    PAGESIZE, NULL, NULL, 0, seg, addr,
	    S_OTHER, svd->cred, NULL);

	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	if (err)
		return (FC_MAKE_ERR(err));
	return (0);
}
5638 segvn_setprot(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t prot
)
5640 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
5641 struct vpage
*cvp
, *svp
, *evp
;
5645 anon_sync_obj_t cookie
;
5646 int unload_done
= 0;
5648 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
5650 if ((svd
->maxprot
& prot
) != prot
)
5651 return (EACCES
); /* violated maxprot */
5653 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
5655 /* return if prot is the same */
5656 if (!svd
->pageprot
&& svd
->prot
== prot
) {
5657 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5662 * Since we change protections we first have to flush the cache.
5663 * This makes sure all the pagelock calls have to recheck
5666 if (svd
->softlockcnt
> 0) {
5667 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5670 * If this is shared segment non 0 softlockcnt
5671 * means locked pages are still in use.
5673 if (svd
->type
== MAP_SHARED
) {
5674 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5679 * Since we do have the segvn writers lock nobody can fill
5680 * the cache with entries belonging to this seg during
5681 * the purge. The flush either succeeds or we still have
5685 if (svd
->softlockcnt
> 0) {
5686 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5691 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
5692 ASSERT(svd
->amp
== NULL
);
5693 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
5694 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
5696 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
5698 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
5699 svd
->tr_state
= SEGVN_TR_OFF
;
5700 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
5701 ASSERT(svd
->amp
!= NULL
);
5702 segvn_textunrepl(seg
, 0);
5703 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
5707 if ((prot
& PROT_WRITE
) && svd
->type
== MAP_SHARED
&&
5708 svd
->vp
!= NULL
&& (svd
->vp
->v_flag
& VVMEXEC
)) {
5709 ASSERT(vn_is_mapped(svd
->vp
, V_WRITE
));
5710 segvn_inval_trcache(svd
->vp
);
5712 if (seg
->s_szc
!= 0) {
5714 pgsz
= page_get_pagesize(seg
->s_szc
);
5715 pgcnt
= pgsz
>> PAGESHIFT
;
5716 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
5717 if (!IS_P2ALIGNED(addr
, pgsz
) || !IS_P2ALIGNED(len
, pgsz
)) {
5718 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5719 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
5721 * If we are holding the as lock as a reader then
5722 * we need to return IE_RETRY and let the as
5723 * layer drop and re-acquire the lock as a writer.
5725 if (AS_READ_HELD(seg
->s_as
))
5727 VM_STAT_ADD(segvnvmstats
.demoterange
[1]);
5728 if (svd
->type
== MAP_PRIVATE
|| svd
->vp
!= NULL
) {
5729 err
= segvn_demote_range(seg
, addr
, len
,
5732 uint_t szcvec
= map_pgszcvec(seg
->s_base
,
5733 pgsz
, (uintptr_t)seg
->s_base
,
5734 (svd
->flags
& MAP_TEXT
), MAPPGSZC_SHM
, 0);
5735 err
= segvn_demote_range(seg
, addr
, len
,
5748 * If it's a private mapping and we're making it writable then we
5749 * may have to reserve the additional swap space now. If we are
5750 * making writable only a part of the segment then we use its vpage
5751 * array to keep a record of the pages for which we have reserved
5752 * swap. In this case we set the pageswap field in the segment's
5753 * segvn structure to record this.
5755 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5756 * removing write permission on the entire segment and we haven't
5757 * modified any pages, we can release the swap space.
5759 if (svd
->type
== MAP_PRIVATE
) {
5760 if (prot
& PROT_WRITE
) {
5761 if (!(svd
->flags
& MAP_NORESERVE
) &&
5762 !(svd
->swresv
&& svd
->pageswap
== 0)) {
5766 * Start by determining how much swap
5767 * space is required.
5769 if (addr
== seg
->s_base
&&
5770 len
== seg
->s_size
&&
5771 svd
->pageswap
== 0) {
5772 /* The whole segment */
5776 * Make sure that the vpage array
5777 * exists, and make a note of the
5778 * range of elements corresponding
5782 if (svd
->vpage
== NULL
) {
5783 SEGVN_LOCK_EXIT(seg
->s_as
,
5787 svp
= &svd
->vpage
[seg_page(seg
, addr
)];
5788 evp
= &svd
->vpage
[seg_page(seg
,
5791 if (svd
->pageswap
== 0) {
5793 * This is the first time we've
5794 * asked for a part of this
5795 * segment, so we need to
5796 * reserve everything we've
5802 * We have to count the number
5803 * of pages required.
5805 for (cvp
= svp
; cvp
< evp
;
5807 if (!VPP_ISSWAPRES(cvp
))
5814 /* Try to reserve the necessary swap. */
5815 if (anon_resv_zone(sz
,
5816 seg
->s_as
->a_proc
->p_zone
) == 0) {
5817 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5822 * Make a note of how much swap space
5825 if (svd
->pageswap
== 0 && sz
== seg
->s_size
) {
5828 ASSERT(svd
->vpage
!= NULL
);
5831 for (cvp
= svp
; cvp
< evp
; cvp
++) {
5832 if (!VPP_ISSWAPRES(cvp
))
5833 VPP_SETSWAPRES(cvp
);
5839 * Swap space is released only if this segment
5840 * does not map anonymous memory, since read faults
5841 * on such segments still need an anon slot to read
5844 if (svd
->swresv
!= 0 && svd
->vp
!= NULL
&&
5845 svd
->amp
== NULL
&& addr
== seg
->s_base
&&
5846 len
== seg
->s_size
&& svd
->pageprot
== 0) {
5847 ASSERT(svd
->pageswap
== 0);
5848 anon_unresv_zone(svd
->swresv
,
5849 seg
->s_as
->a_proc
->p_zone
);
5855 if (addr
== seg
->s_base
&& len
== seg
->s_size
&& svd
->vpage
== NULL
) {
5856 if (svd
->prot
== prot
) {
5857 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5858 return (0); /* all done */
5860 svd
->prot
= (uchar_t
)prot
;
5861 } else if (svd
->type
== MAP_PRIVATE
) {
5862 struct anon
*ap
= NULL
;
5865 struct anon_map
*amp
;
5866 ulong_t anon_idx
= 0;
5869 * A vpage structure exists or else the change does not
5870 * involve the entire segment. Establish a vpage structure
5871 * if none is there. Then, for each page in the range,
5872 * adjust its individual permissions. Note that write-
5873 * enabling a MAP_PRIVATE page can affect the claims for
5874 * locked down memory. Overcommitting memory terminates
5878 if (svd
->vpage
== NULL
) {
5879 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5883 if ((amp
= svd
->amp
) != NULL
) {
5884 anon_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
5885 ASSERT(seg
->s_szc
== 0 ||
5886 IS_P2ALIGNED(anon_idx
, pgcnt
));
5887 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
5890 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
5891 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5894 * See Statement at the beginning of segvn_lockop regarding
5895 * the way cowcnts and lckcnts are handled.
5897 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5899 if (seg
->s_szc
!= 0) {
5901 anon_array_enter(amp
, anon_idx
,
5904 if (IS_P2ALIGNED(anon_idx
, pgcnt
) &&
5905 !segvn_claim_pages(seg
, svp
, offset
,
5908 anon_array_exit(&cookie
);
5913 anon_array_exit(&cookie
);
5918 anon_array_enter(amp
, anon_idx
,
5920 ap
= anon_get_ptr(amp
->ahp
, anon_idx
++);
5923 if (VPP_ISPPLOCK(svp
) &&
5924 VPP_PROT(svp
) != prot
) {
5926 if (amp
== NULL
|| ap
== NULL
) {
5930 swap_xlate(ap
, &vp
, &off
);
5932 anon_array_exit(&cookie
);
5934 if ((pp
= page_lookup(&vp
->v_object
, off
, SE_SHARED
)) == NULL
) {
5935 panic("segvn_setprot: no page");
5938 ASSERT(seg
->s_szc
== 0);
5939 if ((VPP_PROT(svp
) ^ prot
) &
5941 if (prot
& PROT_WRITE
) {
5956 } else if (amp
!= NULL
)
5957 anon_array_exit(&cookie
);
5959 VPP_SETPROT(svp
, prot
);
5963 ANON_LOCK_EXIT(&
->a_rwlock
);
5966 * Did we terminate prematurely? If so, simply unload
5967 * the translations to the things we've updated so far.
5971 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5974 len
= (svp
- &svd
->vpage
[seg_page(seg
, addr
)]) *
5976 ASSERT(seg
->s_szc
== 0 || IS_P2ALIGNED(len
, pgsz
));
5978 hat_unload(seg
->s_as
->a_hat
, addr
,
5980 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5985 if (svd
->vpage
== NULL
) {
5986 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
5990 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
5991 for (svp
= &svd
->vpage
[seg_page(seg
, addr
)]; svp
< evp
; svp
++) {
5992 VPP_SETPROT(svp
, prot
);
5997 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
6001 if (((prot
& PROT_WRITE
) != 0 &&
6002 (svd
->vp
!= NULL
|| svd
->type
== MAP_PRIVATE
)) ||
6003 (prot
& ~PROT_USER
) == PROT_NONE
) {
6005 * Either private or shared data with write access (in
6006 * which case we need to throw out all former translations
6007 * so that we get the right translations set up on fault
6008 * and we don't allow write access to any copy-on-write pages
6009 * that might be around or to prevent write access to pages
6010 * representing holes in a file), or we don't have permission
6011 * to access the memory at all (in which case we have to
6012 * unload any current translations that might exist).
6014 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
6017 * A shared mapping or a private mapping in which write
6018 * protection is going to be denied - just change all the
6019 * protections over the range of addresses in question.
6020 * segvn does not support any other attributes other
6021 * than prot so we can use hat_chgattr.
6023 hat_chgattr(seg
->s_as
->a_hat
, addr
, len
, prot
);
6026 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
/*
 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
 * to determine if the seg is capable of mapping the requested szc.
 */
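/*
 * Illustrative user-level sketch (an assumption for context, not part of
 * this file): the usual route into as_setpagesize() is memcntl(2) with
 * MC_HAT_ADVISE, requesting a 4M mapping size for a VA range:
 *
 *	struct memcntl_mha mha;
 *
 *	mha.mha_cmd = MHA_MAPSIZE_VA;
 *	mha.mha_flags = 0;
 *	mha.mha_pagesize = 4 * 1024 * 1024;
 *	(void) memcntl(addr, len, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);
 */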
6036 segvn_setpagesize(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t szc
)
6038 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6039 struct segvn_data
*nsvd
;
6040 struct anon_map
*amp
= svd
->amp
;
6042 caddr_t eaddr
= addr
+ len
, a
;
6043 size_t pgsz
= page_get_pagesize(szc
);
6044 pgcnt_t pgcnt
= page_get_pagecnt(szc
);
6046 uoff_t off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
6048 ASSERT(seg
->s_as
&& AS_WRITE_HELD(seg
->s_as
));
6049 ASSERT(addr
>= seg
->s_base
&& eaddr
<= seg
->s_base
+ seg
->s_size
);
6051 if (seg
->s_szc
== szc
|| segvn_lpg_disable
!= 0) {
6056 * addr should always be pgsz aligned but eaddr may be misaligned if
6057 * it's at the end of the segment.
6059 * XXX we should assert this condition since as_setpagesize() logic
6062 if (!IS_P2ALIGNED(addr
, pgsz
) ||
6063 (!IS_P2ALIGNED(eaddr
, pgsz
) &&
6064 eaddr
!= seg
->s_base
+ seg
->s_size
)) {
6066 segvn_setpgsz_align_err
++;
6070 if (amp
!= NULL
&& svd
->type
== MAP_SHARED
) {
6071 ulong_t an_idx
= svd
->anon_index
+ seg_page(seg
, addr
);
6072 if (!IS_P2ALIGNED(an_idx
, pgcnt
)) {
6074 segvn_setpgsz_anon_align_err
++;
6079 if ((svd
->flags
& MAP_NORESERVE
) || seg
->s_as
== &kas
||
6080 szc
> segvn_maxpgszc
) {
6084 /* paranoid check */
6085 if (svd
->vp
!= NULL
&&
6086 (IS_SWAPFSVP(svd
->vp
) || VN_ISKAS(svd
->vp
))) {
6090 if (seg
->s_szc
== 0 && svd
->vp
!= NULL
&&
6091 map_addr_vacalign_check(addr
, off
)) {
6096 * Check that protections are the same within new page
6099 if (svd
->pageprot
) {
6100 for (a
= addr
; a
< eaddr
; a
+= pgsz
) {
6101 if ((a
+ pgsz
) > eaddr
) {
6102 if (!sameprot(seg
, a
, eaddr
- a
)) {
6106 if (!sameprot(seg
, a
, pgsz
)) {
6114 * Since we are changing page size we first have to flush
6115 * the cache. This makes sure all the pagelock calls have
6116 * to recheck protections.
6118 if (svd
->softlockcnt
> 0) {
6119 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6122 * If this is shared segment non 0 softlockcnt
6123 * means locked pages are still in use.
6125 if (svd
->type
== MAP_SHARED
) {
6130 * Since we do have the segvn writers lock nobody can fill
6131 * the cache with entries belonging to this seg during
6132 * the purge. The flush either succeeds or we still have
6136 if (svd
->softlockcnt
> 0) {
6141 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6142 ASSERT(svd
->amp
== NULL
);
6143 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6144 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6146 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6147 } else if (svd
->tr_state
== SEGVN_TR_INIT
) {
6148 svd
->tr_state
= SEGVN_TR_OFF
;
6149 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6150 ASSERT(svd
->amp
!= NULL
);
6151 segvn_textunrepl(seg
, 1);
6152 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6157 * Operation for sub range of existing segment.
6159 if (addr
!= seg
->s_base
|| eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6160 if (szc
< seg
->s_szc
) {
6161 VM_STAT_ADD(segvnvmstats
.demoterange
[2]);
6162 err
= segvn_demote_range(seg
, addr
, len
, SDR_RANGE
, 0);
6166 if (err
== ENOMEM
) {
6171 if (addr
!= seg
->s_base
) {
6172 nseg
= segvn_split_seg(seg
, addr
);
6173 if (eaddr
!= (nseg
->s_base
+ nseg
->s_size
)) {
6174 /* eaddr is szc aligned */
6175 (void) segvn_split_seg(nseg
, eaddr
);
6179 if (eaddr
!= (seg
->s_base
+ seg
->s_size
)) {
6180 /* eaddr is szc aligned */
6181 (void) segvn_split_seg(seg
, eaddr
);
6187 * Break any low level sharing and reset seg->s_szc to 0.
6189 if ((err
= segvn_clrszc(seg
)) != 0) {
6190 if (err
== ENOMEM
) {
6195 ASSERT(seg
->s_szc
== 0);
6198 * If the end of the current segment is not pgsz aligned
6199 * then attempt to concatenate with the next segment.
6201 if (!IS_P2ALIGNED(eaddr
, pgsz
)) {
6202 nseg
= AS_SEGNEXT(seg
->s_as
, seg
);
6203 if (nseg
== NULL
|| nseg
== seg
|| eaddr
!= nseg
->s_base
) {
6206 if (nseg
->s_ops
!= &segvn_ops
) {
6209 nsvd
= (struct segvn_data
*)nseg
->s_data
;
6210 if (nsvd
->softlockcnt
> 0) {
6212 * If this is shared segment non 0 softlockcnt
6213 * means locked pages are still in use.
6215 if (nsvd
->type
== MAP_SHARED
) {
6219 if (nsvd
->softlockcnt
> 0) {
6223 err
= segvn_clrszc(nseg
);
6224 if (err
== ENOMEM
) {
6230 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6231 err
= segvn_concat(seg
, nseg
, 1);
6242 * May need to re-align anon array to
6246 if (!IS_P2ALIGNED(svd
->anon_index
, pgcnt
)) {
6247 struct anon_hdr
*nahp
;
6249 ASSERT(svd
->type
== MAP_PRIVATE
);
6251 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6252 ASSERT(amp
->refcnt
== 1);
6253 nahp
= anon_create(btop(amp
->size
), ANON_NOSLEEP
);
6255 ANON_LOCK_EXIT(&
->a_rwlock
);
6258 if (anon_copy_ptr(amp
->ahp
, svd
->anon_index
,
6259 nahp
, 0, btop(seg
->s_size
), ANON_NOSLEEP
)) {
6260 anon_release(nahp
, btop(amp
->size
));
6261 ANON_LOCK_EXIT(&
->a_rwlock
);
6264 anon_release(amp
->ahp
, btop(amp
->size
));
6266 svd
->anon_index
= 0;
6267 ANON_LOCK_EXIT(&
->a_rwlock
);
6270 if (svd
->vp
!= NULL
&& szc
!= 0) {
6272 uoff_t eoffpage
= svd
->offset
;
6273 va
.va_mask
= AT_SIZE
;
6274 eoffpage
+= seg
->s_size
;
6275 eoffpage
= btopr(eoffpage
);
6276 if (fop_getattr(svd
->vp
, &va
, 0, svd
->cred
, NULL
) != 0) {
6277 segvn_setpgsz_getattr_err
++;
6280 if (btopr(va
.va_size
) < eoffpage
) {
6281 segvn_setpgsz_eof_err
++;
			/*
			 * anon_fill_cow_holes() may call fop_getpage().
			 * don't take anon map lock here to avoid holding it
			 * across fop_getpage() calls that may call back into
			 * segvn for klustering checks. We don't really need
			 * anon map lock here since it's a private segment and
			 * we hold as level lock as writers.
			 */
6293 if ((err
= anon_fill_cow_holes(seg
, seg
->s_base
,
6294 amp
->ahp
, svd
->anon_index
, svd
->vp
, svd
->offset
,
6295 seg
->s_size
, szc
, svd
->prot
, svd
->vpage
,
6300 segvn_setvnode_mpss(svd
->vp
);
6304 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6305 if (svd
->type
== MAP_PRIVATE
) {
6307 } else if (szc
> amp
->a_szc
) {
6310 ANON_LOCK_EXIT(&
->a_rwlock
);
6319 segvn_clrszc(struct seg
*seg
)
6321 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6322 struct anon_map
*amp
= svd
->amp
;
6326 caddr_t a
= seg
->s_base
;
6327 caddr_t ea
= a
+ seg
->s_size
;
6328 ulong_t an_idx
= svd
->anon_index
;
6329 vnode_t
*vp
= svd
->vp
;
6330 struct vpage
*vpage
= svd
->vpage
;
6331 page_t
*anon_pl
[1 + 1], *pp
;
6332 struct anon
*ap
, *oldap
;
6333 uint_t prot
= svd
->prot
, vpprot
;
6336 ASSERT(AS_WRITE_HELD(seg
->s_as
) ||
6337 SEGVN_WRITE_HELD(seg
->s_as
, &svd
->lock
));
6338 ASSERT(svd
->softlockcnt
== 0);
6340 if (vp
== NULL
&& amp
== NULL
) {
6341 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6346 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
6347 ASSERT(svd
->amp
== NULL
);
6348 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6349 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
6351 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
6352 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
6353 ASSERT(svd
->amp
!= NULL
);
6354 segvn_textunrepl(seg
, 1);
6355 ASSERT(svd
->amp
== NULL
&& svd
->tr_state
== SEGVN_TR_OFF
);
6358 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
6359 ASSERT(svd
->tr_state
== SEGVN_TR_INIT
);
6360 svd
->tr_state
= SEGVN_TR_OFF
;
6364 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6365 * unload argument is 0 when we are freeing the segment
6366 * and unload was already done.
6368 hat_unload(seg
->s_as
->a_hat
, seg
->s_base
, seg
->s_size
,
6372 if (amp
== NULL
|| svd
->type
== MAP_SHARED
) {
6377 pgsz
= page_get_pagesize(seg
->s_szc
);
6381 * XXX anon rwlock is not really needed because this is a
6382 * private segment and we are writers.
6384 ANON_LOCK_ENTER(&
->a_rwlock
, RW_WRITER
);
6386 for (; a
< ea
; a
+= pgsz
, an_idx
+= pages
) {
6387 if ((oldap
= anon_get_ptr(amp
->ahp
, an_idx
)) != NULL
) {
6388 ASSERT(vpage
!= NULL
|| svd
->pageprot
== 0);
6389 if (vpage
!= NULL
) {
6390 ASSERT(sameprot(seg
, a
, pgsz
));
6391 prot
= VPP_PROT(vpage
);
6392 pageflag
= VPP_ISPPLOCK(vpage
) ? LOCK_PAGE
: 0;
6394 if (seg
->s_szc
!= 0) {
6395 ASSERT(vp
== NULL
|| anon_pages(amp
->ahp
,
6396 an_idx
, pages
) == pages
);
6397 if ((err
= anon_map_demotepages(amp
, an_idx
,
6398 seg
, a
, prot
, vpage
, svd
->cred
)) != 0) {
6402 if (oldap
->an_refcnt
== 1) {
6405 if ((err
= anon_getpage(&oldap
, &vpprot
,
6406 anon_pl
, PAGESIZE
, seg
, a
, S_READ
,
6410 if ((pp
= anon_private(&ap
, seg
, a
, prot
,
6411 anon_pl
[0], pageflag
, svd
->cred
)) == NULL
) {
6416 (void) anon_set_ptr(amp
->ahp
, an_idx
, ap
,
6421 vpage
= (vpage
== NULL
) ? NULL
: vpage
+ pages
;
6427 ANON_LOCK_EXIT(&
->a_rwlock
);
6439 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6440 size_t ppasize
= (pgcnt
+ 1) * sizeof (page_t
*);
6442 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6443 struct anon_map
*amp
= svd
->amp
;
6444 struct vpage
*evp
= svp
+ pgcnt
;
6445 caddr_t addr
= ((uintptr_t)(svp
- svd
->vpage
) << PAGESHIFT
)
6448 struct vnode
*vp
= svd
->vp
;
6453 int anon
= (amp
!= NULL
) ? 1 : 0;
6455 ASSERT(svd
->type
== MAP_PRIVATE
);
6456 ASSERT(svd
->vpage
!= NULL
);
6457 ASSERT(seg
->s_szc
!= 0);
6458 ASSERT(IS_P2ALIGNED(pgcnt
, pgcnt
));
6459 ASSERT(amp
== NULL
|| IS_P2ALIGNED(anon_idx
, pgcnt
));
6460 ASSERT(sameprot(seg
, addr
, pgcnt
<< PAGESHIFT
));
6462 if (VPP_PROT(svp
) == prot
)
6464 if (!((VPP_PROT(svp
) ^ prot
) & PROT_WRITE
))
6467 ppa
= kmem_alloc(ppasize
, KM_SLEEP
);
6468 if (anon
&& vp
!= NULL
) {
6469 if (anon_get_ptr(amp
->ahp
, anon_idx
) == NULL
) {
6471 ASSERT(!anon_pages(amp
->ahp
, anon_idx
, pgcnt
));
6474 anon_pages(amp
->ahp
, anon_idx
, pgcnt
) == pgcnt
);
6477 for (*ppa
= NULL
, pg_idx
= 0; svp
< evp
; svp
++, anon_idx
++) {
6478 if (!VPP_ISPPLOCK(svp
))
6481 ap
= anon_get_ptr(amp
->ahp
, anon_idx
);
6483 panic("segvn_claim_pages: no anon slot");
6485 swap_xlate(ap
, &vp
, &aoff
);
6489 if ((pp
= page_lookup(&vp
->v_object
, (uoff_t
)off
, SE_SHARED
)) == NULL
) {
6490 panic("segvn_claim_pages: no page");
6496 if (ppa
[0] == NULL
) {
6497 kmem_free(ppa
, ppasize
);
6501 ASSERT(pg_idx
<= pgcnt
);
6505 /* Find each large page within ppa, and adjust its claim */
6507 /* Does ppa cover a single large page? */
6508 if (ppa
[0]->p_szc
== seg
->s_szc
) {
6509 if (prot
& PROT_WRITE
)
6510 err
= page_addclaim_pages(ppa
);
6512 err
= page_subclaim_pages(ppa
);
6514 for (i
= 0; ppa
[i
]; i
+= pgcnt
) {
6515 ASSERT(IS_P2ALIGNED(page_pptonum(ppa
[i
]), pgcnt
));
6516 if (prot
& PROT_WRITE
)
6517 err
= page_addclaim_pages(&ppa
[i
]);
6519 err
= page_subclaim_pages(&ppa
[i
]);
6525 for (i
= 0; i
< pg_idx
; i
++) {
6526 ASSERT(ppa
[i
] != NULL
);
6527 page_unlock(ppa
[i
]);
6530 kmem_free(ppa
, ppasize
);
/*
 * Returns right (upper address) segment if split occurred.
 * If the address is equal to the beginning or end of its segment it returns
 * the current segment.
 */
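/*
 * Worked example (illustrative numbers, assuming a 4M large page size): for
 * a large page segment covering [0x400000, 0xc00000) and a demotion request
 * for [0x500000, 0x600000), segvn_demote_range() first computes the
 * enclosing large page region with CALC_LPG_REGION, here
 * [0x400000, 0x800000), and then (for SDR_RANGE) uses segvn_split_seg() to
 * carve that region out before clearing its page size:
 *
 *	nseg = segvn_split_seg(seg, lpgaddr);
 *	(void) segvn_split_seg(nseg, lpgeaddr);
 */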
6540 segvn_split_seg(struct seg
*seg
, caddr_t addr
)
6542 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6545 struct segvn_data
*nsvd
;
6547 ASSERT(AS_WRITE_HELD(seg
->s_as
));
6548 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6550 ASSERT(addr
>= seg
->s_base
);
6551 ASSERT(addr
<= seg
->s_base
+ seg
->s_size
);
6552 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6554 if (addr
== seg
->s_base
|| addr
== seg
->s_base
+ seg
->s_size
)
6557 nsize
= seg
->s_base
+ seg
->s_size
- addr
;
6558 seg
->s_size
= addr
- seg
->s_base
;
6559 nseg
= seg_alloc(seg
->s_as
, addr
, nsize
);
6560 ASSERT(nseg
!= NULL
);
6561 nseg
->s_ops
= seg
->s_ops
;
6562 nsvd
= kmem_cache_alloc(segvn_cache
, KM_SLEEP
);
6563 nseg
->s_data
= (void *)nsvd
;
6564 nseg
->s_szc
= seg
->s_szc
;
6566 ASSERT(nsvd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6568 rw_init(&nsvd
->lock
, NULL
, RW_DEFAULT
, NULL
);
6570 if (nsvd
->vp
!= NULL
) {
6572 nsvd
->offset
= svd
->offset
+
6573 (uintptr_t)(nseg
->s_base
- seg
->s_base
);
6574 if (nsvd
->type
== MAP_SHARED
)
6575 lgrp_shm_policy_init(NULL
, nsvd
->vp
);
		/*
		 * The offset for an anonymous segment has no significance in
		 * terms of an offset into a file. If we were to use the above
		 * calculation instead, the structures read out of
		 * /proc/<pid>/xmap would be more difficult to decipher since
		 * it would be unclear whether two seemingly contiguous
		 * prxmap_t structures represented different segments or a
		 * single segment that had been split up into multiple prxmap_t
		 * structures (e.g. if some part of the segment had not yet
		 * been faulted in).
		 */
->softlockcnt
== 0);
6592 ASSERT(svd
->softlockcnt_sbase
== 0);
6593 ASSERT(svd
->softlockcnt_send
== 0);
6596 if (svd
->vpage
!= NULL
) {
6597 size_t bytes
= vpgtob(seg_pages(seg
));
6598 size_t nbytes
= vpgtob(seg_pages(nseg
));
6599 struct vpage
*ovpage
= svd
->vpage
;
6601 svd
->vpage
= kmem_alloc(bytes
, KM_SLEEP
);
6602 bcopy(ovpage
, svd
->vpage
, bytes
);
6603 nsvd
->vpage
= kmem_alloc(nbytes
, KM_SLEEP
);
6604 bcopy(ovpage
+ seg_pages(seg
), nsvd
->vpage
, nbytes
);
6605 kmem_free(ovpage
, bytes
+ nbytes
);
6607 if (svd
->amp
!= NULL
&& svd
->type
== MAP_PRIVATE
) {
6608 struct anon_map
*oamp
= svd
->amp
, *namp
;
6609 struct anon_hdr
*nahp
;
6611 ANON_LOCK_ENTER(&oamp
->a_rwlock
, RW_WRITER
);
6612 ASSERT(oamp
->refcnt
== 1);
6613 nahp
= anon_create(btop(seg
->s_size
), ANON_SLEEP
);
6614 (void) anon_copy_ptr(oamp
->ahp
, svd
->anon_index
,
6615 nahp
, 0, btop(seg
->s_size
), ANON_SLEEP
);
6617 namp
= anonmap_alloc(nseg
->s_size
, 0, ANON_SLEEP
);
6618 namp
->a_szc
= nseg
->s_szc
;
6619 (void) anon_copy_ptr(oamp
->ahp
,
6620 svd
->anon_index
+ btop(seg
->s_size
),
6621 namp
->ahp
, 0, btop(nseg
->s_size
), ANON_SLEEP
);
6622 anon_release(oamp
->ahp
, btop(oamp
->size
));
6624 oamp
->size
= seg
->s_size
;
6625 svd
->anon_index
= 0;
6627 nsvd
->anon_index
= 0;
6628 ANON_LOCK_EXIT(&oamp
->a_rwlock
);
6629 } else if (svd
->amp
!= NULL
) {
6630 pgcnt_t pgcnt
= page_get_pagecnt(seg
->s_szc
);
6631 ASSERT(svd
->amp
== nsvd
->amp
);
6632 ASSERT(seg
->s_szc
<= svd
->amp
->a_szc
);
6633 nsvd
->anon_index
= svd
->anon_index
+ seg_pages(seg
);
6634 ASSERT(IS_P2ALIGNED(nsvd
->anon_index
, pgcnt
));
6635 ANON_LOCK_ENTER(&svd
->amp
->a_rwlock
, RW_WRITER
);
6637 ANON_LOCK_EXIT(&svd
->amp
->a_rwlock
);
6641 * Split the amount of swap reserved.
6645 * For MAP_NORESERVE, only allocate swap reserve for pages
6646 * being used. Other segments get enough to cover whole
6649 if (svd
->flags
& MAP_NORESERVE
) {
6653 oswresv
= svd
->swresv
;
6654 svd
->swresv
= ptob(anon_pages(svd
->amp
->ahp
,
6655 svd
->anon_index
, btop(seg
->s_size
)));
6656 nsvd
->swresv
= ptob(anon_pages(nsvd
->amp
->ahp
,
6657 nsvd
->anon_index
, btop(nseg
->s_size
)));
6658 ASSERT(oswresv
>= (svd
->swresv
+ nsvd
->swresv
));
6660 if (svd
->pageswap
) {
6661 svd
->swresv
= segvn_count_swap_by_vpages(seg
);
6662 ASSERT(nsvd
->swresv
>= svd
->swresv
);
6663 nsvd
->swresv
-= svd
->swresv
;
6665 ASSERT(svd
->swresv
== seg
->s_size
+
6667 svd
->swresv
= seg
->s_size
;
6668 nsvd
->swresv
= nseg
->s_size
;
6677 * called on memory operations (unmap, setprot, setpagesize) for a subset
6678 * of a large page segment to either demote the memory range (SDR_RANGE)
6679 * or the ends (SDR_END) by addr/len.
6681 * returns 0 on success. returns errno, including ENOMEM, on failure.
6691 caddr_t eaddr
= addr
+ len
;
6692 caddr_t lpgaddr
, lpgeaddr
;
6694 struct seg
*badseg1
= NULL
;
6695 struct seg
*badseg2
= NULL
;
6697 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
6699 uint_t szc
= seg
->s_szc
;
6702 ASSERT(AS_WRITE_HELD(seg
->s_as
));
6703 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
6705 pgsz
= page_get_pagesize(szc
);
6706 ASSERT(seg
->s_base
!= addr
|| seg
->s_size
!= len
);
6707 ASSERT(addr
>= seg
->s_base
&& eaddr
<= seg
->s_base
+ seg
->s_size
);
6708 ASSERT(svd
->softlockcnt
== 0);
6709 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
6710 ASSERT(szcvec
== 0 || (flag
== SDR_END
&& svd
->type
== MAP_SHARED
));
6712 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
6713 ASSERT(flag
== SDR_RANGE
|| eaddr
< lpgeaddr
|| addr
> lpgaddr
);
6714 if (flag
== SDR_RANGE
) {
6715 /* demote entire range */
6716 badseg1
= nseg
= segvn_split_seg(seg
, lpgaddr
);
6717 (void) segvn_split_seg(nseg
, lpgeaddr
);
6718 ASSERT(badseg1
->s_base
== lpgaddr
);
6719 ASSERT(badseg1
->s_size
== lpgeaddr
- lpgaddr
);
6720 } else if (addr
!= lpgaddr
) {
6721 ASSERT(flag
== SDR_END
);
6722 badseg1
= nseg
= segvn_split_seg(seg
, lpgaddr
);
6723 if (eaddr
!= lpgeaddr
&& eaddr
> lpgaddr
+ pgsz
&&
6724 eaddr
< lpgaddr
+ 2 * pgsz
) {
6725 (void) segvn_split_seg(nseg
, lpgeaddr
);
6726 ASSERT(badseg1
->s_base
== lpgaddr
);
6727 ASSERT(badseg1
->s_size
== 2 * pgsz
);
6729 nseg
= segvn_split_seg(nseg
, lpgaddr
+ pgsz
);
6730 ASSERT(badseg1
->s_base
== lpgaddr
);
6731 ASSERT(badseg1
->s_size
== pgsz
);
6732 if (eaddr
!= lpgeaddr
&& eaddr
> lpgaddr
+ pgsz
) {
6733 ASSERT(lpgeaddr
- lpgaddr
> 2 * pgsz
);
6734 nseg
= segvn_split_seg(nseg
, lpgeaddr
- pgsz
);
6736 (void) segvn_split_seg(nseg
, lpgeaddr
);
6737 ASSERT(badseg2
->s_base
== lpgeaddr
- pgsz
);
6738 ASSERT(badseg2
->s_size
== pgsz
);
6742 ASSERT(flag
== SDR_END
);
6743 ASSERT(eaddr
< lpgeaddr
);
6744 badseg1
= nseg
= segvn_split_seg(seg
, lpgeaddr
- pgsz
);
6745 (void) segvn_split_seg(nseg
, lpgeaddr
);
6746 ASSERT(badseg1
->s_base
== lpgeaddr
- pgsz
);
6747 ASSERT(badseg1
->s_size
== pgsz
);
6750 ASSERT(badseg1
!= NULL
);
6751 ASSERT(badseg1
->s_szc
== szc
);
6752 ASSERT(flag
== SDR_RANGE
|| badseg1
->s_size
== pgsz
||
6753 badseg1
->s_size
== 2 * pgsz
);
6754 ASSERT(sameprot(badseg1
, badseg1
->s_base
, pgsz
));
6755 ASSERT(badseg1
->s_size
== pgsz
||
6756 sameprot(badseg1
, badseg1
->s_base
+ pgsz
, pgsz
));
6757 if (err
= segvn_clrszc(badseg1
)) {
6760 ASSERT(badseg1
->s_szc
== 0);
6762 if (szc
> 1 && (tszcvec
= P2PHASE(szcvec
, 1 << szc
)) > 1) {
6763 uint_t tszc
= highbit(tszcvec
) - 1;
6764 caddr_t ta
= MAX(addr
, badseg1
->s_base
);
6766 size_t tpgsz
= page_get_pagesize(tszc
);
6768 ASSERT(svd
->type
== MAP_SHARED
);
6769 ASSERT(flag
== SDR_END
);
6770 ASSERT(tszc
< szc
&& tszc
> 0);
6772 if (eaddr
> badseg1
->s_base
+ badseg1
->s_size
) {
6773 te
= badseg1
->s_base
+ badseg1
->s_size
;
6779 badseg1
->s_szc
= tszc
;
6780 if (!IS_P2ALIGNED(ta
, tpgsz
) || !IS_P2ALIGNED(te
, tpgsz
)) {
6781 if (badseg2
!= NULL
) {
6782 err
= segvn_demote_range(badseg1
, ta
, te
- ta
,
6788 return (segvn_demote_range(badseg1
, ta
,
6789 te
- ta
, SDR_END
, tszcvec
));
6794 if (badseg2
== NULL
)
6796 ASSERT(badseg2
->s_szc
== szc
);
6797 ASSERT(badseg2
->s_size
== pgsz
);
6798 ASSERT(sameprot(badseg2
, badseg2
->s_base
, badseg2
->s_size
));
6799 if (err
= segvn_clrszc(badseg2
)) {
6802 ASSERT(badseg2
->s_szc
== 0);
6804 if (szc
> 1 && (tszcvec
= P2PHASE(szcvec
, 1 << szc
)) > 1) {
6805 uint_t tszc
= highbit(tszcvec
) - 1;
6806 size_t tpgsz
= page_get_pagesize(tszc
);
6808 ASSERT(svd
->type
== MAP_SHARED
);
6809 ASSERT(flag
== SDR_END
);
6810 ASSERT(tszc
< szc
&& tszc
> 0);
6811 ASSERT(badseg2
->s_base
> addr
);
6812 ASSERT(eaddr
> badseg2
->s_base
);
6813 ASSERT(eaddr
< badseg2
->s_base
+ badseg2
->s_size
);
6815 badseg2
->s_szc
= tszc
;
6816 if (!IS_P2ALIGNED(eaddr
, tpgsz
)) {
6817 return (segvn_demote_range(badseg2
, badseg2
->s_base
,
6818 eaddr
- badseg2
->s_base
, SDR_END
, tszcvec
));
static int
segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
	/*
	 * If segment protection can be used, simply check against them.
	 */
	if (svd->pageprot == 0) {
		int err;

		err = ((svd->prot & prot) != prot) ? EACCES : 0;
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (err);
	}

	/*
	 * Have to check down to the vpage level.
	 */
	evp = &svd->vpage[seg_page(seg, addr + len)];
	for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
		if ((VPP_PROT(vp) & prot) != prot) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			return (EACCES);
		}
	}
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	return (0);
}
static int
segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (pgno != 0) {
		SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
		if (svd->pageprot == 0) {
			do {
				protv[--pgno] = svd->prot;
			} while (pgno != 0);
		} else {
			size_t pgoff = seg_page(seg, addr);

			do {
				pgno--;
				protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
			} while (pgno != 0);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	}
	return (0);
}
static uoff_t
segvn_getoffset(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->offset + (uintptr_t)(addr - seg->s_base));
}
static int
segvn_gettype(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
	    MAP_INITDATA)));
}
static int
segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = svd->vp;
	return (0);
}
/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segvn, we currently "approve" of the action if we are
 * still in the segment and it maps from the same vp/off,
 * or if the advice stored in segvn_data or vpages allows it.
 * Currently, klustering is not allowed only if MADV_RANDOM is set.
 */
static int
segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *oap, *ap;
	ssize_t pd;
	size_t page;
	struct vnode *vp1, *vp2;
	uoff_t off1, off2;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	if (addr + delta < seg->s_base ||
	    addr + delta >= (seg->s_base + seg->s_size))
		return (-1);		/* exceeded segment bounds */

	pd = delta / (ssize_t)PAGESIZE;	/* divide to preserve sign bit */
	page = seg_page(seg, addr);

	/*
	 * Check to see if either of the pages addr or addr + delta
	 * have advice set that prevents klustering (if MADV_RANDOM advice
	 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
	 * is negative).
	 */
	if (svd->advice == MADV_RANDOM ||
	    svd->advice == MADV_SEQUENTIAL && delta < 0)
		return (-1);
	else if (svd->pageadvice && svd->vpage) {
		struct vpage *bvpp, *evpp;

		bvpp = &svd->vpage[page];
		evpp = &svd->vpage[page + pd];
		if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
		    VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
			return (-1);
		if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
		    VPP_ADVICE(evpp) == MADV_RANDOM)
			return (-1);
	}

	if (svd->type == MAP_SHARED)
		return (0);		/* shared mapping - all ok */

	if ((amp = svd->amp) == NULL)
		return (0);		/* off original vnode */

	page += svd->anon_index;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	oap = anon_get_ptr(amp->ahp, page);
	ap = anon_get_ptr(amp->ahp, page + pd);

	ANON_LOCK_EXIT(&amp->a_rwlock);

	if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
		return (-1);		/* one with and one without an anon */
	}

	if (oap == NULL) {		/* implies that ap == NULL */
		return (0);		/* off original vnode */
	}

	/*
	 * Now we know we have two anon pointers - check to
	 * see if they happen to be properly allocated.
	 */

	/*
	 * XXX We cheat here and don't lock the anon slots. We can't because
	 * we may have been called from the anon layer which might already
	 * have locked them. We are holding a refcnt on the slots so they
	 * can't disappear. The worst that will happen is we'll get the wrong
	 * names (vp, off) for the slots and make a poor klustering decision.
	 */
	swap_xlate(ap, &vp1, &off1);
	swap_xlate(oap, &vp2, &off2);

	if (!fop_cmp(vp1, vp2, NULL) || off1 - off2 != delta)
		return (-1);
	else
		return (0);
}
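/*
 * Illustrative sketch (not part of this file's build) of why segvn_kluster()
 * divides delta by (ssize_t)PAGESIZE rather than by an unsigned page size:
 * a negative byte delta divided by an unsigned divisor is converted to
 * unsigned first and yields a nonsense page delta.  The 4096-byte page size
 * below is only an assumption for the example.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
	ssize_t delta = -8192;		/* two pages backwards */
	size_t upgsz = 4096;		/* unsigned page size */
	ssize_t spgsz = 4096;		/* signed page size */

	/* Unsigned division: delta is first converted to a huge value. */
	(void) printf("unsigned: %zd\n", (ssize_t)(delta / upgsz));
	/* Signed division preserves the sign bit: prints -2. */
	(void) printf("signed:   %zd\n", delta / spgsz);
	return (0);
}
#endif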
7019 * Synchronize primary storage cache with real object in virtual memory.
7021 * XXX - Anonymous pages should not be sync'ed out at all.
7024 segvn_sync(struct seg
*seg
, caddr_t addr
, size_t len
, int attr
, uint_t flags
)
7026 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
7039 struct anon_map
*amp
;
7041 anon_sync_obj_t cookie
;
7043 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7045 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7047 if (svd
->softlockcnt
> 0) {
7049 * If this is shared segment non 0 softlockcnt
7050 * means locked pages are still in use.
7052 if (svd
->type
== MAP_SHARED
) {
7053 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7058 * flush all pages from seg cache
7059 * otherwise we may deadlock in swap_putpage
7060 * for B_INVAL page (4175402).
7062 * Even if we grab segvn WRITER's lock
7063 * here, there might be another thread which could've
7064 * successfully performed lookup/insert just before
7065 * we acquired the lock here. So, grabbing either
7066 * lock here is of not much use. Until we devise
7067 * a strategy at upper layers to solve the
7068 * synchronization issues completely, we expect
7069 * applications to handle this appropriately.
7072 if (svd
->softlockcnt
> 0) {
7073 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7076 } else if (svd
->type
== MAP_SHARED
&& svd
->amp
!= NULL
&&
7077 svd
->amp
->a_softlockcnt
> 0) {
7079 * Try to purge this amp's entries from pcache. It will
7080 * succeed only if other segments that share the amp have no
7081 * outstanding softlock's.
7084 if (svd
->amp
->a_softlockcnt
> 0 || svd
->softlockcnt
> 0) {
7085 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7091 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7092 bflags
= ((flags
& MS_ASYNC
) ? B_ASYNC
: 0) |
7093 ((flags
& MS_INVALIDATE
) ? B_INVAL
: 0);
7096 pageprot
= attr
& ~(SHARED
|PRIVATE
);
7097 segtype
= (attr
& SHARED
) ? MAP_SHARED
: MAP_PRIVATE
;
7100 * We are done if the segment types don't match
7101 * or if we have segment level protections and
7104 if (svd
->type
!= segtype
) {
7105 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7109 if (svd
->prot
!= pageprot
) {
7110 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7115 vpp
= &svd
->vpage
[seg_page(seg
, addr
)];
7117 } else if (svd
->vp
&& svd
->amp
== NULL
&&
7118 (flags
& MS_INVALIDATE
) == 0) {
7121 * No attributes, no anonymous pages and MS_INVALIDATE flag
7122 * is not on, just use one big request.
7124 err
= fop_putpage(svd
->vp
, (offset_t
)offset
, len
,
7125 bflags
, svd
->cred
, NULL
);
7126 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7130 if ((amp
= svd
->amp
) != NULL
)
7131 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
7133 for (eaddr
= addr
+ len
; addr
< eaddr
; addr
+= PAGESIZE
) {
7136 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7137 anon_array_enter(amp
, anon_index
, &cookie
);
7138 ap
= anon_get_ptr(amp
->ahp
, anon_index
++);
7140 swap_xlate(ap
, &vp
, &off
);
7145 anon_array_exit(&cookie
);
7146 ANON_LOCK_EXIT(&
->a_rwlock
);
7153 if (vp
== NULL
) /* untouched zfod page */
7158 prot
= VPP_PROT(vpp
);
7161 if (prot
!= pageprot
) {
7167 * See if any of these pages are locked -- if so, then we
7168 * will have to truncate an invalidate request at the first
7169 * locked one. We don't need the page_struct_lock to test
7170 * as this is only advisory; even if we acquire it someone
7171 * might race in and lock the page after we unlock and before
7172 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7174 if (flags
& MS_INVALIDATE
) {
7175 if ((pp
= page_lookup(&vp
->v_object
, off
, SE_SHARED
)) != NULL
) {
7176 if (pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0) {
7178 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7181 if (ap
!= NULL
&& pp
->p_szc
!= 0 &&
7182 page_tryupgrade(pp
)) {
7183 if (pp
->p_lckcnt
== 0 &&
7184 pp
->p_cowcnt
== 0) {
7186 * swapfs VN_DISPOSE() won't
7187 * invalidate large pages.
7188 * Attempt to demote.
7189 * XXX can't help it if it
7190 * fails. But for swapfs
7191 * pages it is no big deal.
7193 (void) page_try_demote_pages(
7199 } else if (svd
->type
== MAP_SHARED
&& amp
!= NULL
) {
7201 * Avoid writing out to disk ISM's large pages
7202 * because segspt_free_pages() relies on NULL an_pvp
7203 * of anon slots of such pages.
7206 ASSERT(svd
->vp
== NULL
);
7208 * swapfs uses page_lookup_nowait if not freeing or
7209 * invalidating and skips a page if
7210 * page_lookup_nowait returns NULL.
7212 pp
= page_lookup_nowait(&vp
->v_object
, off
, SE_SHARED
);
7216 if (pp
->p_szc
!= 0) {
7222 * Note ISM pages are created large so (vp, off)'s
7223 * page cannot suddenly become large after we unlock
7229 * XXX - Should ultimately try to kluster
7230 * calls to fop_putpage() for performance.
7233 err
= fop_putpage(vp
, (offset_t
)off
, PAGESIZE
,
7234 (bflags
| (IS_SWAPFSVP(vp
) ? B_PAGE_NOWAIT
: 0)),
7241 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
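/*
 * Illustrative userland sketch (not part of this file's build): msync(3C)
 * on a file mapping is the usual path into segvn_sync().  MS_ASYNC and
 * MS_INVALIDATE correspond to the B_ASYNC/B_INVAL bflags computed above.
 * The file path and mapping size are hypothetical.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
sync_mapping_example(void)
{
	int fd = open("/tmp/example.dat", O_RDWR);
	size_t len = 64 * 1024;
	void *base;

	if (fd < 0)
		return (-1);
	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		(void) close(fd);
		return (-1);
	}

	/* ... dirty some pages ... */

	/* Queue dirty pages for write-back without waiting. */
	if (msync(base, len, MS_ASYNC) != 0)
		return (-1);
	/* Write back synchronously and invalidate cached copies. */
	if (msync(base, len, MS_SYNC | MS_INVALIDATE) != 0)
		return (-1);

	(void) munmap(base, len);
	(void) close(fd);
	return (0);
}
#endif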
7246 * Determine if we have data corresponding to pages in the
7247 * primary storage virtual memory cache (i.e., "in core").
7250 segvn_incore(struct seg
*seg
, caddr_t addr
, size_t len
, char *vec
)
7252 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
7253 struct vnode
*vp
, *avp
;
7254 uoff_t offset
, aoffset
;
7260 struct anon_map
*amp
; /* XXX - for locknest */
7263 anon_sync_obj_t cookie
;
7265 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7267 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7268 if (svd
->amp
== NULL
&& svd
->vp
== NULL
) {
7269 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7270 bzero(vec
, btopr(len
));
7271 return (len
); /* no anonymous pages created yet */
7274 p
= seg_page(seg
, addr
);
7275 ep
= seg_page(seg
, addr
+ len
);
7276 start
= svd
->vp
? SEG_PAGE_VNODEBACKED
: 0;
7279 for (; p
< ep
; p
++, addr
+= PAGESIZE
) {
7280 vpp
= (svd
->vpage
) ? &svd
->vpage
[p
]: NULL
;
7284 /* Grab the vnode/offset for the anon slot */
7286 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7287 anon_array_enter(amp
, svd
->anon_index
+ p
, &cookie
);
7288 ap
= anon_get_ptr(amp
->ahp
, svd
->anon_index
+ p
);
7290 swap_xlate(ap
, &avp
, &aoffset
);
7292 anon_array_exit(&cookie
);
7293 ANON_LOCK_EXIT(&
->a_rwlock
);
7295 if ((avp
!= NULL
) && page_exists(&avp
->v_object
, aoffset
)) {
7296 /* A page exists for the anon slot */
7297 ret
|= SEG_PAGE_INCORE
;
7300 * If page is mapped and writable
7303 if ((hat_getattr(seg
->s_as
->a_hat
, addr
,
7304 &attr
) != -1) && (attr
& PROT_WRITE
)) {
7305 ret
|= SEG_PAGE_ANON
;
7308 * Don't get page_struct lock for lckcnt and cowcnt,
7309 * since this is purely advisory.
7311 if ((pp
= page_lookup_nowait(&avp
->v_object
,
7313 SE_SHARED
)) != NULL
) {
7315 ret
|= SEG_PAGE_SOFTLOCK
;
7317 ret
|= SEG_PAGE_HASCOW
;
7322 /* Gather vnode statistics */
7324 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7328 * Try to obtain a "shared" lock on the page
7329 * without blocking. If this fails, determine
7330 * if the page is in memory.
7332 pp
= page_lookup_nowait(&vp
->v_object
, offset
,
7334 if ((pp
== NULL
) && (page_exists(&vp
->v_object
, offset
))) {
7335 /* Page is incore, and is named */
7336 ret
|= (SEG_PAGE_INCORE
| SEG_PAGE_VNODE
);
7339 * Don't get page_struct lock for lckcnt and cowcnt,
7340 * since this is purely advisory.
7343 ret
|= (SEG_PAGE_INCORE
| SEG_PAGE_VNODE
);
7345 ret
|= SEG_PAGE_SOFTLOCK
;
7347 ret
|= SEG_PAGE_HASCOW
;
7352 /* Gather virtual page information */
7354 if (VPP_ISPPLOCK(vpp
))
7355 ret
|= SEG_PAGE_LOCKED
;
7361 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
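/*
 * Illustrative userland sketch (not part of this file's build): mincore(3C)
 * is the usual way to exercise the residency reporting done by
 * segvn_incore().  The fixed-size vector is a hypothetical simplification.
 */
#if 0
#include <sys/mman.h>
#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

int
incore_example(void *base, size_t len)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	size_t npages = (len + pgsz - 1) / pgsz;
	char vec[1024];
	size_t i, resident = 0;

	if (npages > sizeof (vec))
		return (-1);
	if (mincore((caddr_t)base, len, vec) != 0)
		return (-1);
	for (i = 0; i < npages; i++) {
		if (vec[i] & 1)		/* low bit set => page resident */
			resident++;
	}
	(void) printf("%zu of %zu pages resident\n", resident, npages);
	return (0);
}
#endif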
/*
 * Statement for p_cowcnts/p_lckcnts.
 *
 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
 * irrespective of the following factors or anything else:
 *
 *	(1) anon slots are populated or not
 *	(2) cow is broken or not
 *	(3) refcnt on ap is 1 or greater than 1
 *
 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
 * and munlock.
 *
 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
 *
 *	if vpage has PROT_WRITE
 *		transfer cowcnt on the oldpage -> cowcnt on the newpage
 *	else
 *		transfer lckcnt on the oldpage -> lckcnt on the newpage
 *
 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
 *
 * We may also break COW if softlocking on read access in the physio case.
 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
 * vpage doesn't have PROT_WRITE.
 *
 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
 *
 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
 * increment p_lckcnt by calling page_subclaim() which takes care of
 * availrmem accounting and p_lckcnt overflow.
 *
 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
 * increment p_cowcnt by calling page_addclaim() which takes care of
 * availrmem availability and p_cowcnt overflow.
 */

/*
 * Lock down (or unlock) pages mapped by this segment.
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
7413 segvn_lockop(struct seg
*seg
, caddr_t addr
, size_t len
,
7414 int attr
, int op
, ulong_t
*lockmap
, size_t pos
)
7416 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
7427 struct anon_map
*amp
;
7430 anon_sync_obj_t cookie
;
7431 struct kshmid
*sp
= NULL
;
7432 struct proc
*p
= curproc
;
7433 kproject_t
*proj
= NULL
;
7435 size_t locked_bytes
= 0;
7436 size_t unlocked_bytes
= 0;
7440 * Hold write lock on address space because may split or concatenate
7443 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7446 * If this is a shm, use shm's project and zone, else use
7447 * project and zone of calling process
7450 /* Determine if this segment backs a sysV shm */
7451 if (svd
->amp
!= NULL
&& svd
->amp
->a_sp
!= NULL
) {
7452 ASSERT(svd
->type
== MAP_SHARED
);
7453 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
7454 sp
= svd
->amp
->a_sp
;
7455 proj
= sp
->shm_perm
.ipc_proj
;
7459 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
7461 pageprot
= attr
& ~(SHARED
|PRIVATE
);
7462 segtype
= attr
& SHARED
? MAP_SHARED
: MAP_PRIVATE
;
7465 * We are done if the segment types don't match
7466 * or if we have segment level protections and
7469 if (svd
->type
!= segtype
) {
7470 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7473 if (svd
->pageprot
== 0 && svd
->prot
!= pageprot
) {
7474 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7479 if (op
== MC_LOCK
) {
7480 if (svd
->tr_state
== SEGVN_TR_INIT
) {
7481 svd
->tr_state
= SEGVN_TR_OFF
;
7482 } else if (svd
->tr_state
== SEGVN_TR_ON
) {
7483 ASSERT(svd
->amp
!= NULL
);
7484 segvn_textunrepl(seg
, 0);
7485 ASSERT(svd
->amp
== NULL
&&
7486 svd
->tr_state
== SEGVN_TR_OFF
);
7491 * If we're locking, then we must create a vpage structure if
7492 * none exists. If we're unlocking, then check to see if there
7493 * is a vpage -- if not, then we could not have locked anything.
7496 if ((vpp
= svd
->vpage
) == NULL
) {
7497 if (op
== MC_LOCK
) {
7499 if (svd
->vpage
== NULL
) {
7500 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7504 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7510 * The anonymous data vector (i.e., previously
7511 * unreferenced mapping to swap space) can be allocated
7512 * by lazily testing for its existence.
7514 if (op
== MC_LOCK
&& svd
->amp
== NULL
&& svd
->vp
== NULL
) {
7515 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
7516 svd
->amp
= anonmap_alloc(seg
->s_size
, 0, ANON_SLEEP
);
7517 svd
->amp
->a_szc
= seg
->s_szc
;
7520 if ((amp
= svd
->amp
) != NULL
) {
7521 anon_index
= svd
->anon_index
+ seg_page(seg
, addr
);
7524 offset
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
7525 evp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
7528 mutex_enter(&sp
->shm_mlock
);
7530 /* determine number of unlocked bytes in range for lock operation */
7531 if (op
== MC_LOCK
) {
7534 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7536 if (!VPP_ISPPLOCK(vpp
))
7537 unlocked_bytes
+= PAGESIZE
;
7540 ulong_t i_idx
, i_edx
;
7541 anon_sync_obj_t i_cookie
;
7546 /* Only count sysV pages once for locked memory */
7547 i_edx
= svd
->anon_index
+ seg_page(seg
, addr
+ len
);
7548 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7549 for (i_idx
= anon_index
; i_idx
< i_edx
; i_idx
++) {
7550 anon_array_enter(amp
, i_idx
, &i_cookie
);
7551 i_ap
= anon_get_ptr(amp
->ahp
, i_idx
);
7553 unlocked_bytes
+= PAGESIZE
;
7554 anon_array_exit(&i_cookie
);
7557 swap_xlate(i_ap
, &i_vp
, &i_off
);
7558 anon_array_exit(&i_cookie
);
7559 pp
= page_lookup(&i_vp
->v_object
, i_off
,
7562 unlocked_bytes
+= PAGESIZE
;
7564 } else if (pp
->p_lckcnt
== 0)
7565 unlocked_bytes
+= PAGESIZE
;
7568 ANON_LOCK_EXIT(&
->a_rwlock
);
7571 mutex_enter(&p
->p_lock
);
7572 err
= rctl_incr_locked_mem(p
, proj
, unlocked_bytes
,
7574 mutex_exit(&p
->p_lock
);
7578 mutex_exit(&sp
->shm_mlock
);
7579 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7584 * Loop over all pages in the range. Process if we're locking and
7585 * page has not already been locked in this mapping; or if we're
7586 * unlocking and the page has been locked.
7588 for (vpp
= &svd
->vpage
[seg_page(seg
, addr
)]; vpp
< evp
;
7589 vpp
++, pos
++, addr
+= PAGESIZE
, offset
+= PAGESIZE
, anon_index
++) {
7590 if ((attr
== 0 || VPP_PROT(vpp
) == pageprot
) &&
7591 ((op
== MC_LOCK
&& !VPP_ISPPLOCK(vpp
)) ||
7592 (op
== MC_UNLOCK
&& VPP_ISPPLOCK(vpp
)))) {
7595 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7597 * If this isn't a MAP_NORESERVE segment and
7598 * we're locking, allocate anon slots if they
7599 * don't exist. The page is brought in later on.
7601 if (op
== MC_LOCK
&& svd
->vp
== NULL
&&
7602 ((svd
->flags
& MAP_NORESERVE
) == 0) &&
7604 ((ap
= anon_get_ptr(amp
->ahp
, anon_index
))
7606 anon_array_enter(amp
, anon_index
, &cookie
);
7608 if ((ap
= anon_get_ptr(amp
->ahp
,
7609 anon_index
)) == NULL
) {
7610 pp
= anon_zero(seg
, addr
, &ap
,
7613 anon_array_exit(&cookie
);
7614 ANON_LOCK_EXIT(&
->a_rwlock
);
7618 ASSERT(anon_get_ptr(amp
->ahp
,
7619 anon_index
) == NULL
);
7620 (void) anon_set_ptr(amp
->ahp
,
7621 anon_index
, ap
, ANON_SLEEP
);
7624 anon_array_exit(&cookie
);
7628 * Get name for page, accounting for
7629 * existence of private copy.
7633 anon_array_enter(amp
, anon_index
, &cookie
);
7634 ap
= anon_get_ptr(amp
->ahp
, anon_index
);
7636 swap_xlate(ap
, &vp
, &off
);
7638 if (svd
->vp
== NULL
&&
7639 (svd
->flags
& MAP_NORESERVE
)) {
7640 anon_array_exit(&cookie
);
7641 ANON_LOCK_EXIT(&
->a_rwlock
);
7647 if (op
!= MC_LOCK
|| ap
== NULL
) {
7648 anon_array_exit(&cookie
);
7649 ANON_LOCK_EXIT(&
->a_rwlock
);
7657 * Get page frame. It's ok if the page is
7658 * not available when we're unlocking, as this
7659 * may simply mean that a page we locked got
7660 * truncated out of existence after we locked it.
7662 * Invoke fop_getpage() to obtain the page struct
7663 * since we may need to read it from disk if its
7667 pp
= page_lookup(&vp
->v_object
, off
,
7675 error
= fop_getpage(vp
, (offset_t
)off
, PAGESIZE
,
7676 (uint_t
*)NULL
, pl
, PAGESIZE
, seg
, addr
,
7677 S_OTHER
, svd
->cred
, NULL
);
7679 if (error
&& ap
!= NULL
) {
7680 anon_array_exit(&cookie
);
7681 ANON_LOCK_EXIT(&
->a_rwlock
);
7685 * If the error is EDEADLK then we must bounce
7686 * up and drop all vm subsystem locks and then
7687 * retry the operation later
7688 * This behavior is a temporary measure because
7689 * ufs/sds logging is badly designed and will
7690 * deadlock if we don't allow this bounce to
7691 * happen. The real solution is to re-design
7692 * the logging code to work properly. See bug
7693 * 4125102 for details of the problem.
7695 if (error
== EDEADLK
) {
7700 * Quit if we fail to fault in the page. Treat
7701 * the failure as an error, unless the addr
7702 * is mapped beyond the end of a file.
7704 if (error
&& svd
->vp
) {
7705 va
.va_mask
= AT_SIZE
;
7706 if (fop_getattr(svd
->vp
, &va
, 0,
7707 svd
->cred
, NULL
) != 0) {
7711 if (btopr(va
.va_size
) >=
7727 * See Statement at the beginning of this routine.
7729 * claim is always set if MAP_PRIVATE and PROT_WRITE
7730 * irrespective of following factors:
7732 * (1) anon slots are populated or not
7733 * (2) cow is broken or not
7734 * (3) refcnt on ap is 1 or greater than 1
7736 * See 4140683 for details
7738 claim
= ((VPP_PROT(vpp
) & PROT_WRITE
) &&
7739 (svd
->type
== MAP_PRIVATE
));
7742 * Perform page-level operation appropriate to
7743 * operation. If locking, undo the SOFTLOCK
7744 * performed to bring the page into memory
7745 * after setting the lock. If unlocking,
7746 * and no page was found, account for the claim
7749 if (op
== MC_LOCK
) {
7750 int ret
= 1; /* Assume success */
7752 ASSERT(!VPP_ISPPLOCK(vpp
));
7754 ret
= page_pp_lock(pp
, claim
, 0);
7756 if (ap
->an_pvp
!= NULL
) {
7757 anon_swap_free(ap
, pp
);
7759 anon_array_exit(&cookie
);
7760 ANON_LOCK_EXIT(&
->a_rwlock
);
7763 /* locking page failed */
7770 if (pp
->p_lckcnt
== 1)
7771 locked_bytes
+= PAGESIZE
;
7773 locked_bytes
+= PAGESIZE
;
7775 if (lockmap
!= NULL
)
7776 BT_SET(lockmap
, pos
);
7780 ASSERT(VPP_ISPPLOCK(vpp
));
7782 /* sysV pages should be locked */
7783 ASSERT(sp
== NULL
|| pp
->p_lckcnt
> 0);
7784 page_pp_unlock(pp
, claim
, 0);
7786 if (pp
->p_lckcnt
== 0)
7790 unlocked_bytes
+= PAGESIZE
;
7794 unlocked_bytes
+= PAGESIZE
;
7801 if (op
== MC_LOCK
) {
7802 /* Credit back bytes that did not get locked */
7803 if ((unlocked_bytes
- locked_bytes
) > 0) {
7805 mutex_enter(&p
->p_lock
);
7806 rctl_decr_locked_mem(p
, proj
,
7807 (unlocked_bytes
- locked_bytes
), chargeproc
);
7809 mutex_exit(&p
->p_lock
);
7813 /* Account bytes that were unlocked */
7814 if (unlocked_bytes
> 0) {
7816 mutex_enter(&p
->p_lock
);
7817 rctl_decr_locked_mem(p
, proj
, unlocked_bytes
,
7820 mutex_exit(&p
->p_lock
);
7824 mutex_exit(&sp
->shm_mlock
);
7825 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
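/*
 * Illustrative userland sketch (not part of this file's build):
 * mlock(3C)/munlock(3C) on a private writable mapping drive the
 * MC_LOCK/MC_UNLOCK paths of segvn_lockop() above, where p_cowcnt/p_lckcnt
 * and the locked-memory resource control are charged.  The mapping size is
 * hypothetical.
 */
#if 0
#include <sys/mman.h>

int
lock_example(void)
{
	size_t len = 128 * 1024;
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (base == MAP_FAILED)
		return (-1);
	if (mlock(base, len) != 0) {	/* may fail on locked-memory rctl */
		(void) munmap(base, len);
		return (-1);
	}
	/* ... use the locked memory ... */
	(void) munlock(base, len);
	(void) munmap(base, len);
	return (0);
}
#endif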
/*
 * Set advice from user for specified pages
 * There are 10 types of advice:
 *	MADV_NORMAL	- Normal (default) behavior (whatever that is)
 *	MADV_RANDOM	- Random page references
 *				do not allow readahead or 'klustering'
 *	MADV_SEQUENTIAL	- Sequential page references
 *				Pages previous to the one currently being
 *				accessed (determined by fault) are 'not needed'
 *				and are freed immediately
 *	MADV_WILLNEED	- Pages are likely to be used (fault ahead in mctl)
 *	MADV_DONTNEED	- Pages are not needed (synced out in mctl)
 *	MADV_FREE	- Contents can be discarded
 *	MADV_ACCESS_DEFAULT- Default access
 *	MADV_ACCESS_LWP	- Next LWP will access heavily
 *	MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 *	MADV_PURGE	- Contents will be immediately discarded
 */
7849 segvn_advise(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t behav
)
7851 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
7855 struct anon_map
*amp
;
7858 lgrp_mem_policy_t policy
;
7862 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
7865 * In case of MADV_FREE/MADV_PURGE, we won't be modifying any segment
7866 * private data structures; so, we only need to grab READER's lock
7868 if (behav
!= MADV_FREE
&& behav
!= MADV_PURGE
) {
7869 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
7870 if (svd
->tr_state
!= SEGVN_TR_OFF
) {
7871 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7875 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
7879 * Large pages are assumed to be only turned on when accesses to the
7880 * segment's address range have spatial and temporal locality. That
7881 * justifies ignoring MADV_SEQUENTIAL for large page segments.
7882 * Also, ignore advice affecting lgroup memory allocation
7883 * if don't need to do lgroup optimizations on this system
7886 if ((behav
== MADV_SEQUENTIAL
&&
7887 (seg
->s_szc
!= 0 || HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
))) ||
7888 (!lgrp_optimizations() && (behav
== MADV_ACCESS_DEFAULT
||
7889 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
))) {
7890 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7894 if (behav
== MADV_SEQUENTIAL
|| behav
== MADV_ACCESS_DEFAULT
||
7895 behav
== MADV_ACCESS_LWP
|| behav
== MADV_ACCESS_MANY
) {
7897 * Since we are going to unload hat mappings
7898 * we first have to flush the cache. Otherwise
7899 * this might lead to system panic if another
7900 * thread is doing physio on the range whose
7901 * mappings are unloaded by madvise(3C).
7903 if (svd
->softlockcnt
> 0) {
7905 * If this is shared segment non 0 softlockcnt
7906 * means locked pages are still in use.
7908 if (svd
->type
== MAP_SHARED
) {
7909 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7913 * Since we do have the segvn writers lock
7914 * nobody can fill the cache with entries
7915 * belonging to this seg during the purge.
7916 * The flush either succeeds or we still
7917 * have pending I/Os. In the later case,
7918 * madvise(3C) fails.
7921 if (svd
->softlockcnt
> 0) {
7923 * Since madvise(3C) is advisory and
7924 * it's not part of UNIX98, madvise(3C)
7925 * failure here doesn't cause any hardship.
7926 * Note that we don't block in "as" layer.
7928 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7931 } else if (svd
->type
== MAP_SHARED
&& svd
->amp
!= NULL
&&
7932 svd
->amp
->a_softlockcnt
> 0) {
7934 * Try to purge this amp's entries from pcache. It
7935 * will succeed only if other segments that share the
7936 * amp have no outstanding softlock's.
7944 if (behav
== MADV_FREE
|| behav
== MADV_PURGE
) {
7947 if (behav
== MADV_FREE
&& (vp
!= NULL
|| amp
== NULL
)) {
7949 * MADV_FREE is not supported for segments with an
7950 * underlying object; if anonmap is NULL, anon slots
7951 * are not yet populated and there is nothing for us
7952 * to do. As MADV_FREE is advisory, we don't return an
7953 * error in either case.
7955 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7961 * If we're here with a NULL anonmap, it's because we
7962 * are doing a MADV_PURGE. We have nothing to do, but
7963 * because MADV_PURGE isn't merely advisory, we return
7964 * an error in this case.
7966 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7972 page
= seg_page(seg
, addr
);
7973 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
7974 err
= anon_disclaim(amp
,
7975 svd
->anon_index
+ page
, len
, behav
, &purged
);
7977 if (purged
!= 0 && (svd
->flags
& MAP_NORESERVE
)) {
7979 * If we purged pages on a MAP_NORESERVE mapping, we
7980 * need to be sure to now unreserve our reserved swap.
7981 * (We use the atomic operations to manipulate our
7982 * segment and address space counters because we only
7983 * have the corresponding locks held as reader, not
7986 ssize_t bytes
= ptob(purged
);
7988 anon_unresv_zone(bytes
, seg
->s_as
->a_proc
->p_zone
);
7989 atomic_add_long(&svd
->swresv
, -bytes
);
7990 atomic_add_long(&seg
->s_as
->a_resvsize
, -bytes
);
7993 ANON_LOCK_EXIT(&
->a_rwlock
);
7994 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
7997 * MADV_PURGE and MADV_FREE differ in their return semantics:
7998 * because MADV_PURGE is designed to be bug-for-bug compatible
7999 * with its clumsy Linux forebear, it will fail where MADV_FREE
8002 return (behav
== MADV_PURGE
? err
: 0);
8006 * If advice is to be applied to entire segment,
8007 * use advice field in seg_data structure
8008 * otherwise use appropriate vpage entry.
8010 if ((addr
== seg
->s_base
) && (len
== seg
->s_size
)) {
8012 case MADV_ACCESS_LWP
:
8013 case MADV_ACCESS_MANY
:
8014 case MADV_ACCESS_DEFAULT
:
8016 * Set memory allocation policy for this segment
8018 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8019 if (svd
->type
== MAP_SHARED
)
8020 already_set
= lgrp_shm_policy_set(policy
, amp
,
8021 svd
->anon_index
, vp
, svd
->offset
, len
);
8024 * For private memory, need writers lock on
8025 * address space because the segment may be
8026 * split or concatenated when changing policy
8028 if (AS_READ_HELD(seg
->s_as
)) {
8029 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8033 already_set
= lgrp_privm_policy_set(policy
,
8034 &svd
->policy_info
, len
);
8038 * If policy set already and it shouldn't be reapplied,
8039 * don't do anything.
8042 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8046 * Mark any existing pages in given range for
8049 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8050 &vp
->v_object
, svd
->offset
, 1);
8053 * If same policy set already or this is a shared
8054 * memory segment, don't need to try to concatenate
8055 * segment with adjacent ones.
8057 if (already_set
|| svd
->type
== MAP_SHARED
)
8061 * Try to concatenate this segment with previous
8062 * one and next one, since we changed policy for
8063 * this one and it may be compatible with adjacent
8066 prev
= AS_SEGPREV(seg
->s_as
, seg
);
8067 next
= AS_SEGNEXT(seg
->s_as
, seg
);
8069 if (next
&& next
->s_ops
== &segvn_ops
&&
8070 addr
+ len
== next
->s_base
)
8071 (void) segvn_concat(seg
, next
, 1);
8073 if (prev
&& prev
->s_ops
== &segvn_ops
&&
8074 addr
== prev
->s_base
+ prev
->s_size
) {
8076 * Drop lock for private data of current
8077 * segment before concatenating (deleting) it
8078 * and return IE_REATTACH to tell as_ctl() that
8079 * current segment has changed
8081 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8082 if (!segvn_concat(prev
, seg
, 1))
8089 case MADV_SEQUENTIAL
:
8091 * unloading mapping guarantees
8092 * detection in segvn_fault
8094 ASSERT(seg
->s_szc
== 0);
8095 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8096 hat_unload(seg
->s_as
->a_hat
, addr
, len
,
8101 svd
->advice
= (uchar_t
)behav
;
8102 svd
->pageadvice
= 0;
8104 case MADV_WILLNEED
: /* handled in memcntl */
8105 case MADV_DONTNEED
: /* handled in memcntl */
8106 case MADV_FREE
: /* handled above */
8107 case MADV_PURGE
: /* handled above */
8114 struct seg
*new_seg
;
8115 struct segvn_data
*new_svd
;
8119 page
= seg_page(seg
, addr
);
8122 if (svd
->vpage
== NULL
) {
8123 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8128 struct vpage
*bvpp
, *evpp
;
8130 case MADV_ACCESS_LWP
:
8131 case MADV_ACCESS_MANY
:
8132 case MADV_ACCESS_DEFAULT
:
8134 * Set memory allocation policy for portion of this
8139 * Align address and length of advice to page
8140 * boundaries for large pages
8142 if (seg
->s_szc
!= 0) {
8145 pgsz
= page_get_pagesize(seg
->s_szc
);
8146 addr
= (caddr_t
)P2ALIGN((uintptr_t)addr
, pgsz
);
8147 len
= P2ROUNDUP(len
, pgsz
);
8151 * Check to see whether policy is set already
8153 policy
= lgrp_madv_to_policy(behav
, len
, svd
->type
);
8155 anon_index
= svd
->anon_index
+ page
;
8156 off
= svd
->offset
+ (uintptr_t)(addr
- seg
->s_base
);
8158 if (svd
->type
== MAP_SHARED
)
8159 already_set
= lgrp_shm_policy_set(policy
, amp
,
8160 anon_index
, vp
, off
, len
);
8163 (policy
== svd
->policy_info
.mem_policy
);
8166 * If policy set already and it shouldn't be reapplied,
8167 * don't do anything.
8170 !LGRP_MEM_POLICY_REAPPLICABLE(policy
))
8174 * For private memory, need writers lock on
8175 * address space because the segment may be
8176 * split or concatenated when changing policy
8178 if (svd
->type
== MAP_PRIVATE
&&
8179 AS_READ_HELD(seg
->s_as
)) {
8180 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8185 * Mark any existing pages in given range for
8188 page_mark_migrate(seg
, addr
, len
, amp
, svd
->anon_index
,
8189 &vp
->v_object
, svd
->offset
, 1);
8192 * Don't need to try to split or concatenate
8193 * segments, since policy is same or this is a shared
8196 if (already_set
|| svd
->type
== MAP_SHARED
)
8199 if (HAT_IS_REGION_COOKIE_VALID(svd
->rcookie
)) {
8200 ASSERT(svd
->amp
== NULL
);
8201 ASSERT(svd
->tr_state
== SEGVN_TR_OFF
);
8202 ASSERT(svd
->softlockcnt
== 0);
8203 hat_leave_region(seg
->s_as
->a_hat
, svd
->rcookie
,
8205 svd
->rcookie
= HAT_INVALID_REGION_COOKIE
;
8209 * Split off new segment if advice only applies to a
8210 * portion of existing segment starting in middle
8214 oldeaddr
= seg
->s_base
+ seg
->s_size
;
8215 if (addr
> seg
->s_base
) {
8217 * Must flush I/O page cache
8218 * before splitting segment
8220 if (svd
->softlockcnt
> 0)
8224 * Split segment and return IE_REATTACH to tell
8225 * as_ctl() that current segment changed
8227 new_seg
= segvn_split_seg(seg
, addr
);
8228 new_svd
= (struct segvn_data
*)new_seg
->s_data
;
8232 * If new segment ends where old one
8233 * did, try to concatenate the new
8234 * segment with next one.
8236 if (eaddr
== oldeaddr
) {
8238 * Set policy for new segment
8240 (void) lgrp_privm_policy_set(policy
,
8241 &new_svd
->policy_info
,
8244 next
= AS_SEGNEXT(new_seg
->s_as
,
8248 next
->s_ops
== &segvn_ops
&&
8249 eaddr
== next
->s_base
)
8250 (void) segvn_concat(new_seg
,
8256 * Split off end of existing segment if advice only
8257 * applies to a portion of segment ending before
8258 * end of the existing segment
8260 if (eaddr
< oldeaddr
) {
8262 * Must flush I/O page cache
8263 * before splitting segment
8265 if (svd
->softlockcnt
> 0)
8269 * If beginning of old segment was already
8270 * split off, use new segment to split end off
8273 if (new_seg
!= NULL
&& new_seg
!= seg
) {
8277 (void) segvn_split_seg(new_seg
, eaddr
);
8280 * Set policy for new segment
8282 (void) lgrp_privm_policy_set(policy
,
8283 &new_svd
->policy_info
,
8287 * Split segment and return IE_REATTACH
8288 * to tell as_ctl() that current
8291 (void) segvn_split_seg(seg
, eaddr
);
8294 (void) lgrp_privm_policy_set(policy
,
8295 &svd
->policy_info
, seg
->s_size
);
8298 * If new segment starts where old one
8299 * did, try to concatenate it with
8302 if (addr
== seg
->s_base
) {
8303 prev
= AS_SEGPREV(seg
->s_as
,
8307 * Drop lock for private data
8308 * of current segment before
8309 * concatenating (deleting) it
8314 addr
== prev
->s_base
+
8319 (void) segvn_concat(
8327 case MADV_SEQUENTIAL
:
8328 ASSERT(seg
->s_szc
== 0);
8329 ASSERT(svd
->rcookie
== HAT_INVALID_REGION_COOKIE
);
8330 hat_unload(seg
->s_as
->a_hat
, addr
, len
, HAT_UNLOAD
);
8334 bvpp
= &svd
->vpage
[page
];
8335 evpp
= &svd
->vpage
[page
+ (len
>> PAGESHIFT
)];
8336 for (; bvpp
< evpp
; bvpp
++)
8337 VPP_SETADVICE(bvpp
, behav
);
8338 svd
->advice
= MADV_NORMAL
;
8340 case MADV_WILLNEED
: /* handled in memcntl */
8341 case MADV_DONTNEED
: /* handled in memcntl */
8342 case MADV_FREE
: /* handled above */
8343 case MADV_PURGE
: /* handled above */
8349 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
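/*
 * Illustrative userland sketch (not part of this file's build): madvise(3C)
 * is the front end for segvn_advise().  MADV_SEQUENTIAL/MADV_RANDOM end up
 * in svd->advice or in the per-page vpage advice later consulted by
 * segvn_kluster().  The mapping below is a hypothetical example.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
advise_example(const char *path)
{
	int fd = open(path, O_RDONLY);
	size_t len = 1024 * 1024;
	caddr_t base;

	if (fd < 0)
		return (-1);
	base = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (base == MAP_FAILED) {
		(void) close(fd);
		return (-1);
	}

	/* Expect a streaming scan: allow read-ahead, free pages behind us. */
	(void) madvise(base, len, MADV_SEQUENTIAL);
	/* ... sequential pass over base[0..len) ... */

	/* Switch to random point lookups: disable klustering/read-ahead. */
	(void) madvise(base, len, MADV_RANDOM);

	(void) munmap(base, len);
	(void) close(fd);
	return (0);
}
#endif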
8354 * There is one kind of inheritance that can be specified for pages:
8356 * SEGP_INH_ZERO - Pages should be zeroed in the child
8359 segvn_inherit(struct seg
*seg
, caddr_t addr
, size_t len
, uint_t behav
)
8361 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
8362 struct vpage
*bvpp
, *evpp
;
8366 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8368 /* Can't support something we don't know about */
8369 if (behav
!= SEGP_INH_ZERO
)
8372 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_WRITER
);
8375 * This must be a straightforward anonymous segment that is mapped
8376 * privately and is not backed by a vnode.
8378 if (svd
->tr_state
!= SEGVN_TR_OFF
||
8379 svd
->type
!= MAP_PRIVATE
||
8386 * If the entire segment has been marked as inherit zero, then no reason
8387 * to do anything else.
8389 if (svd
->svn_inz
== SEGVN_INZ_ALL
) {
8395 * If this applies to the entire segment, simply mark it and we're done.
8397 if ((addr
== seg
->s_base
) && (len
== seg
->s_size
)) {
8398 svd
->svn_inz
= SEGVN_INZ_ALL
;
 * We've been asked to mark a subset of this segment as inherit zero,
 * therefore we need to manipulate its vpages.
8407 if (svd
->vpage
== NULL
) {
8409 if (svd
->vpage
== NULL
) {
8415 svd
->svn_inz
= SEGVN_INZ_VPP
;
8416 page
= seg_page(seg
, addr
);
8417 bvpp
= &svd
->vpage
[page
];
8418 evpp
= &svd
->vpage
[page
+ (len
>> PAGESHIFT
)];
8419 for (; bvpp
< evpp
; bvpp
++)
8420 VPP_SETINHZERO(bvpp
);
8424 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
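/*
 * Illustrative userland sketch (not part of this file's build): the
 * SEGP_INH_ZERO inheritance handled above is requested from userland via
 * memcntl(2) with MC_INHERIT_ZERO (assuming the illumos definitions in
 * <sys/mman.h>); after fork(2) the child sees the range zero-filled.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

int
inherit_zero_example(caddr_t base, size_t len)
{
	/*
	 * Mark a private anonymous range so the child does not inherit its
	 * contents.  Only MAP_PRIVATE, non-vnode mappings qualify, matching
	 * the checks in segvn_inherit().
	 */
	return (memcntl(base, len, MC_INHERIT_ZERO, 0, 0, 0));
}
#endif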
/*
 * Create a vpage structure for this seg.
 */
static void
segvn_vpage(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	static pgcnt_t page_limit = 0;

	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));

	/*
	 * If no vpage structure exists, allocate one.  Copy the protections
	 * and the advice from the segment itself to the individual pages.
	 */
	if (svd->vpage == NULL) {
		/*
		 * Start by calculating the number of pages we must allocate to
		 * track the per-page vpage structs needs for this entire
		 * segment. If we know now that it will require more than our
		 * heuristic for the maximum amount of kmem we can consume then
		 * fail. We do this here, instead of trying to detect this deep
		 * in page_resv and propagating the error up, since the entire
		 * memory allocation stack is not amenable to passing this
		 * back. Instead, it wants to keep trying.
		 *
		 * As a heuristic we set a page limit of 5/8s of total_pages
		 * for this allocation. We use shifts so that no floating
		 * point conversion takes place and only need to do the
		 * calculation once.
		 */
		ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
		pgcnt_t npages = mem_needed >> PAGESHIFT;

		if (page_limit == 0)
			page_limit = (total_pages >> 1) + (total_pages >> 3);

		if (npages > page_limit)
			return;

		svd->pageadvice = 1;
		svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
		evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
		for (vp = svd->vpage; vp < evp; vp++) {
			VPP_SETPROT(vp, svd->prot);
			VPP_SETADVICE(vp, svd->advice);
		}
	}
}
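/*
 * Illustrative sketch (not part of this file's build): the shift-based
 * limit above, (total_pages >> 1) + (total_pages >> 3), is x/2 + x/8 = 5x/8
 * (rounded down per term), which is how the "5/8s of total_pages" heuristic
 * is computed without floating point.  The sample page count is
 * hypothetical.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long total_pages = 1048576;	/* e.g. 4GB of 4K pages */
	unsigned long limit = (total_pages >> 1) + (total_pages >> 3);

	/* Prints 655360, i.e. 5/8 of 1048576. */
	(void) printf("%lu\n", limit);
	return (0);
}
#endif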
8480 * Dump the pages belonging to this segvn segment.
8483 segvn_dump(struct seg
*seg
)
8485 struct segvn_data
*svd
;
8487 struct anon_map
*amp
;
8492 pgcnt_t page
, npages
;
8495 npages
= seg_pages(seg
);
8496 svd
= (struct segvn_data
*)seg
->s_data
;
8498 off
= offset
= svd
->offset
;
8501 if ((amp
= svd
->amp
) != NULL
) {
8502 anon_index
= svd
->anon_index
;
8503 ANON_LOCK_ENTER(&
->a_rwlock
, RW_READER
);
8506 for (page
= 0; page
< npages
; page
++, offset
+= PAGESIZE
) {
8510 if (amp
&& (ap
= anon_get_ptr(svd
->amp
->ahp
, anon_index
++))) {
8511 swap_xlate_nopanic(ap
, &vp
, &off
);
8518 * If pp == NULL, the page either does not exist
8519 * or is exclusively locked. So determine if it
8520 * exists before searching for it.
8523 if ((pp
= page_lookup_nowait(&vp
->v_object
, off
, SE_SHARED
)))
8526 pp
= page_exists(&vp
->v_object
, off
);
8529 pfn
= page_pptonum(pp
);
8530 dump_addpage(seg
->s_as
, addr
, pfn
);
8535 dump_timeleft
= dump_timeout
;
8539 ANON_LOCK_EXIT(&
->a_rwlock
);
static uint32_t segvn_pglock_mtbf = 0;

#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
 * the same parts of the segment. Currently shadow list creation is only
 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
 * tagged with segment pointer, starting virtual address and length. This
 * approach for MAP_SHARED segments may add many pcache entries for the same
 * set of pages and lead to long hash chains that decrease pcache lookup
 * performance. To avoid this issue for shared segments shared anon map and
 * starting anon index are used for pcache entry tagging. This allows all
 * segments to share pcache entries for the same anon range and reduces pcache
 * chain's length as well as memory overhead from duplicate shadow lists and
 * pcache entries.
 *
 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
 * part of softlockcnt accounting is done differently for private and shared
 * segments. In private segment case softlock is only incremented when a new
 * shadow list is created but not when an existing one is found via
 * seg_plookup(). pcache entries have reference count incremented/decremented
 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
 * reference count can be purged (and purging is needed before segment can be
 * freed). When a private segment pcache entry is purged segvn_reclaim() will
 * decrement softlockcnt. Since in private segment case each of its pcache
 * entries only belongs to this segment we can expect that when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment purge will succeed and softlockcnt will drop to 0. In shared
 * segment case reference count in pcache entry counts active locks from many
 * different segments so we can't expect segment purging to succeed even when
 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
 * segment. To be able to determine when there're no pending pagelocks in
 * shared segment case we don't rely on purging to make softlockcnt drop to 0
 * but instead softlockcnt is incremented and decremented for every
 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
 * list was created or an existing one was found. When softlockcnt drops to 0
 * this segment no longer has any claims for pcached shadow lists and the
 * segment can be freed even if there're still active pcache entries
 * shared by this segment anon map. Shared segment pcache entries belong to
 * anon map and are typically removed when anon map is freed after all
 * processes destroy the segments that use this anon map.
 */
8592 segvn_pagelock(struct seg
*seg
, caddr_t addr
, size_t len
, struct page
***ppp
,
8593 enum lock_type type
, enum seg_rw rw
)
8595 struct segvn_data
*svd
= (struct segvn_data
*)seg
->s_data
;
8597 pgcnt_t adjustpages
;
8600 uint_t protchk
= (rw
== S_READ
) ? PROT_READ
: PROT_WRITE
;
8602 struct anon_map
*amp
;
8604 struct page
**pplist
, **pl
, *pp
;
8607 caddr_t lpgaddr
, lpgeaddr
;
8608 anon_sync_obj_t cookie
;
8610 struct anon_map
*pamp
;
8612 seg_preclaim_cbfunc_t preclaim_callback
;
8617 int sftlck_sbase
= 0;
8618 int sftlck_send
= 0;
8621 if (type
== L_PAGELOCK
&& segvn_pglock_mtbf
) {
8622 hrtime_t ts
= gethrtime();
8623 if ((ts
% segvn_pglock_mtbf
) == 0) {
8626 if ((ts
% segvn_pglock_mtbf
) == 1) {
8632 ASSERT(seg
->s_as
&& AS_LOCK_HELD(seg
->s_as
));
8633 ASSERT(type
== L_PAGELOCK
|| type
== L_PAGEUNLOCK
);
8635 SEGVN_LOCK_ENTER(seg
->s_as
, &svd
->lock
, RW_READER
);
8638 * for now we only support pagelock to anon memory. We would have to
8639 * check protections for vnode objects and call into the vnode driver.
8640 * That's too much for a fast path. Let the fault entry point handle
8643 if (svd
->vp
!= NULL
) {
8644 if (type
== L_PAGELOCK
) {
8648 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8650 if ((amp
= svd
->amp
) == NULL
) {
8651 if (type
== L_PAGELOCK
) {
8655 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8657 if (rw
!= S_READ
&& rw
!= S_WRITE
) {
8658 if (type
== L_PAGELOCK
) {
8662 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8665 if (seg
->s_szc
!= 0) {
8667 * We are adjusting the pagelock region to the large page size
8668 * boundary because the unlocked part of a large page cannot
8669 * be freed anyway unless all constituent pages of a large
8670 * page are locked. Bigger regions reduce pcache chain length
8671 * and improve lookup performance. The tradeoff is that the
8672 * very first segvn_pagelock() call for a given page is more
8673 * expensive if only 1 page_t is needed for IO. This is only
8674 * an issue if pcache entry doesn't get reused by several
8675 * subsequent calls. We optimize here for the case when pcache
8676 * is heavily used by repeated IOs to the same address range.
8678 * Note segment's page size cannot change while we are holding
8679 * as lock. And then it cannot change while softlockcnt is
8680 * not 0. This will allow us to correctly recalculate large
8681 * page size region for the matching pageunlock/reclaim call
8682 * since as_pageunlock() caller must always match
8683 * as_pagelock() call's addr and len.
8685 * For pageunlock *ppp points to the pointer of page_t that
8686 * corresponds to the real unadjusted start address. Similar
8687 * for pagelock *ppp must point to the pointer of page_t that
8688 * corresponds to the real unadjusted start address.
8690 pgsz
= page_get_pagesize(seg
->s_szc
);
8691 CALC_LPG_REGION(pgsz
, seg
, addr
, len
, lpgaddr
, lpgeaddr
);
8692 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
8693 } else if (len
< segvn_pglock_comb_thrshld
) {
8695 lpgeaddr
= addr
+ len
;
8700 * Align the address range of large enough requests to allow
8701 * combining of different shadow lists into 1 to reduce memory
8702 * overhead from potentially overlapping large shadow lists
8703 * (worst case is we have a 1MB IO into buffers with start
8704 * addresses separated by 4K). Alignment is only possible if
8705 * padded chunks have sufficient access permissions. Note
8706 * permissions won't change between L_PAGELOCK and
8707 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8708 * segvn_setprot() to wait until softlockcnt drops to 0. This
8709 * allows us to determine in L_PAGEUNLOCK the same range we
8710 * computed in L_PAGELOCK.
8712 * If alignment is limited by segment ends set
8713 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8714 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8715 * per segment counters. In L_PAGEUNLOCK case decrease
8716 * softlockcnt_sbase/softlockcnt_send counters if
8717 * sftlck_sbase/sftlck_send flags are set. When
8718 * softlockcnt_sbase/softlockcnt_send are non 0
8719 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8720 * won't merge the segments. This restriction combined with
8721 * restriction on segment unmapping and splitting for segments
8722 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8723 * correctly determine the same range that was previously
8724 * locked by matching L_PAGELOCK.
8726 pflags
= SEGP_PSHIFT
| (segvn_pglock_comb_bshift
<< 16);
8728 if (svd
->type
== MAP_PRIVATE
) {
8729 lpgaddr
= (caddr_t
)P2ALIGN((uintptr_t)addr
,
8730 segvn_pglock_comb_balign
);
8731 if (lpgaddr
< seg
->s_base
) {
8732 lpgaddr
= seg
->s_base
;
8736 ulong_t aix
= svd
->anon_index
+ seg_page(seg
, addr
);
8737 ulong_t aaix
= P2ALIGN(aix
, segvn_pglock_comb_palign
);
8738 if (aaix
< svd
->anon_index
) {
8739 lpgaddr
= seg
->s_base
;
8742 lpgaddr
= addr
- ptob(aix
- aaix
);
8743 ASSERT(lpgaddr
>= seg
->s_base
);
8746 if (svd
->pageprot
&& lpgaddr
!= addr
) {
8747 struct vpage
*vp
= &svd
->vpage
[seg_page(seg
, lpgaddr
)];
8748 struct vpage
*evp
= &svd
->vpage
[seg_page(seg
, addr
)];
8750 if ((VPP_PROT(vp
) & protchk
) == 0) {
8760 lpgeaddr
= addr
+ len
;
8762 if (svd
->type
== MAP_PRIVATE
) {
8763 lpgeaddr
= (caddr_t
)P2ROUNDUP(
8764 (uintptr_t)lpgeaddr
,
8765 segvn_pglock_comb_balign
);
8767 ulong_t aix
= svd
->anon_index
+
8768 seg_page(seg
, lpgeaddr
);
8769 ulong_t aaix
= P2ROUNDUP(aix
,
8770 segvn_pglock_comb_palign
);
8774 lpgeaddr
+= ptob(aaix
- aix
);
8777 if (lpgeaddr
== 0 ||
8778 lpgeaddr
> seg
->s_base
+ seg
->s_size
) {
8779 lpgeaddr
= seg
->s_base
+ seg
->s_size
;
8783 if (svd
->pageprot
&& lpgeaddr
!= addr
+ len
) {
8787 vp
= &svd
->vpage
[seg_page(seg
, addr
+ len
)];
8788 evp
= &svd
->vpage
[seg_page(seg
, lpgeaddr
)];
8791 if ((VPP_PROT(vp
) & protchk
) == 0) {
8797 lpgeaddr
= addr
+ len
;
8800 adjustpages
= btop((uintptr_t)(addr
- lpgaddr
));
8804 * For MAP_SHARED segments we create pcache entries tagged by amp and
8805 * anon index so that we can share pcache entries with other segments
8806 * that map this amp. For private segments pcache entries are tagged
8807 * with segment and virtual address.
8809 if (svd
->type
== MAP_SHARED
) {
8811 paddr
= (caddr_t
)((lpgaddr
- seg
->s_base
) +
8812 ptob(svd
->anon_index
));
8813 preclaim_callback
= shamp_reclaim
;
8817 preclaim_callback
= segvn_reclaim
;
8820 if (type
== L_PAGEUNLOCK
) {
8821 VM_STAT_ADD(segvnvmstats
.pagelock
[0]);
8824 * update hat ref bits for /proc. We need to make sure
8825 * that threads tracing the ref and mod bits of the
8826 * address space get the right data.
8827 * Note: page ref and mod bits are updated at reclaim time
8829 if (seg
->s_as
->a_vbits
) {
8830 for (a
= addr
; a
< addr
+ len
; a
+= PAGESIZE
) {
8831 if (rw
== S_WRITE
) {
8832 hat_setstat(seg
->s_as
, a
,
8833 PAGESIZE
, P_REF
| P_MOD
);
8835 hat_setstat(seg
->s_as
, a
,
8842 * Check the shadow list entry after the last page used in
8843 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8844 * was not inserted into pcache and is not large page
8845 * adjusted. In this case call reclaim callback directly and
8846 * don't adjust the shadow list start and size for large
8850 if ((*ppp
)[npages
] == NOPCACHE_SHWLIST
) {
8853 ASSERT(svd
->type
== MAP_SHARED
);
8854 ptag
= (void *)pamp
;
8855 paddr
= (caddr_t
)((addr
- seg
->s_base
) +
8856 ptob(svd
->anon_index
));
8861 (*preclaim_callback
)(ptag
, paddr
, len
, *ppp
, rw
, 0);
8863 ASSERT((*ppp
)[npages
] == PCACHE_SHWLIST
||
8864 IS_SWAPFSVP((*ppp
)[npages
]->p_vnode
));
8865 len
= lpgeaddr
- lpgaddr
;
8867 seg_pinactive(seg
, pamp
, paddr
, len
,
8868 *ppp
- adjustpages
, rw
, pflags
, preclaim_callback
);
8872 ASSERT(svd
->type
== MAP_SHARED
);
8873 ASSERT(svd
->softlockcnt
>= npages
);
8874 atomic_add_long((ulong_t
*)&svd
->softlockcnt
, -npages
);
8878 ASSERT(svd
->softlockcnt_sbase
> 0);
8879 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_sbase
);
8882 ASSERT(svd
->softlockcnt_send
> 0);
8883 atomic_dec_ulong((ulong_t
*)&svd
->softlockcnt_send
);
8887 * If someone is blocked while unmapping, we purge
8888 * segment page cache and thus reclaim pplist synchronously
8889 * without waiting for seg_pasync_thread. This speeds up
8890 * unmapping in cases where munmap(2) is called, while
8891 * raw async i/o is still in progress or where a thread
8892 * exits on data fault in a multithreaded application.
8894 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
8895 if (svd
->softlockcnt
== 0) {
8896 mutex_enter(&seg
->s_as
->a_contents
);
8897 if (AS_ISUNMAPWAIT(seg
->s_as
)) {
8898 AS_CLRUNMAPWAIT(seg
->s_as
);
8899 cv_broadcast(&seg
->s_as
->a_cv
);
8901 mutex_exit(&seg
->s_as
->a_contents
);
8902 } else if (pamp
== NULL
) {
8904 * softlockcnt is not 0 and this is a
8905 * MAP_PRIVATE segment. Try to purge its
8906 * pcache entries to reduce softlockcnt.
8907 * If it drops to 0 segvn_reclaim()
8908 * will wake up a thread waiting on
8911 * We don't purge MAP_SHARED segments with non
8912 * 0 softlockcnt since IO is still in progress
8913 * for such segments.
8915 ASSERT(svd
->type
== MAP_PRIVATE
);
8919 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8923 /* The L_PAGELOCK case ... */
8925 VM_STAT_ADD(segvnvmstats
.pagelock
[1]);
8928 * For MAP_SHARED segments we have to check protections before
8929 * seg_plookup() since pcache entries may be shared by many segments
8930 * with potentially different page protections.
8933 ASSERT(svd
->type
== MAP_SHARED
);
8934 if (svd
->pageprot
== 0) {
8935 if ((svd
->prot
& protchk
) == 0) {
8941 * check page protections
8952 for (; a
< ea
; a
+= pgsz
) {
8955 ASSERT(seg
->s_szc
== 0 ||
8956 sameprot(seg
, a
, pgsz
));
8957 vp
= &svd
->vpage
[seg_page(seg
, a
)];
8958 if ((VPP_PROT(vp
) & protchk
) == 0) {
8967 * try to find pages in segment page cache
8969 pplist
= seg_plookup(seg
, pamp
, paddr
, lpgeaddr
- lpgaddr
, rw
, pflags
);
8970 if (pplist
!= NULL
) {
8972 npages
= btop((uintptr_t)(lpgeaddr
- lpgaddr
));
8973 ASSERT(svd
->type
== MAP_SHARED
);
8974 atomic_add_long((ulong_t
*)&svd
->softlockcnt
,
8978 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt_sbase
);
8981 atomic_inc_ulong((ulong_t
*)&svd
->softlockcnt_send
);
8983 SEGVN_LOCK_EXIT(seg
->s_as
, &svd
->lock
);
8984 *ppp
= pplist
+ adjustpages
;
8989 * For MAP_SHARED segments we already verified above that segment
8990 * protections allow this pagelock operation.
8993 ASSERT(svd
->type
== MAP_PRIVATE
);
8994 if (svd
->pageprot
== 0) {
8995 if ((svd
->prot
& protchk
) == 0) {
8999 if (svd
->prot
& PROT_WRITE
) {
9000 wlen
= lpgeaddr
- lpgaddr
;
9003 ASSERT(rw
== S_READ
);
9008 * check page protections
9010 for (a
= lpgaddr
, wlen
= 0; a
< lpgeaddr
; a
+= pgsz
) {
9013 ASSERT(seg
->s_szc
== 0 ||
9014 sameprot(seg
, a
, pgsz
));
9015 vp
= &svd
->vpage
[seg_page(seg
, a
)];
9016 if ((VPP_PROT(vp
) & protchk
) == 0) {
9020 if (wcont
&& (VPP_PROT(vp
) & PROT_WRITE
)) {
9024 ASSERT(rw
== S_READ
);
9028 ASSERT(rw
== S_READ
|| wlen
== lpgeaddr
- lpgaddr
);
9029 ASSERT(rw
== S_WRITE
|| wlen
<= lpgeaddr
- lpgaddr
);
	/*
	 * Only build large page adjusted shadow list if we expect to insert
	 * it into pcache. For large enough pages it's a big overhead to
	 * create a shadow list of the entire large page. But this overhead
	 * should be amortized over repeated pcache hits on subsequent reuse
	 * of this shadow list (IO into any range within this shadow list will
	 * find it in pcache since we large page align the request for pcache
	 * lookups). pcache performance is improved with bigger shadow lists
	 * as it reduces the time to pcache the entire big segment and reduces
	 * pcache chain length.
	 */
	if (seg_pinsert_check(seg, pamp, paddr,
	    lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
		addr = lpgaddr;
		len = lpgeaddr - lpgaddr;
		use_pcache = 1;
	} else {
		use_pcache = 0;
		/*
		 * Since this entry will not be inserted into the pcache, we
		 * will not do any adjustments to the starting address or
		 * size of the memory to be locked.
		 */
		adjustpages = 0;
	}
	npages = btop(len);

	pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
	pl = pplist;
	*ppp = pplist + adjustpages;
	/*
	 * If use_pcache is 0 this shadow list is not large page adjusted.
	 * Record this info in the last entry of shadow array so that
	 * L_PAGEUNLOCK can determine if it should large page adjust the
	 * address range to find the real range that was locked.
	 */
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;

	page = seg_page(seg, addr);
	anon_index = svd->anon_index + page;

	anlock = 0;
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	ASSERT(amp->a_szc >= seg->s_szc);
	anpgcnt = page_get_pagecnt(amp->a_szc);
	for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
		struct anon *ap;
		struct vnode *vp;
		uoff_t off;

		/*
		 * Lock and unlock anon array only once per large page.
		 * anon_array_enter() locks the root anon slot according to
		 * a_szc which can't change while anon map is locked. We lock
		 * anon the first time through this loop and each time we
		 * reach anon index that corresponds to a root of a large
		 * page.
		 */
		if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
			ASSERT(anlock == 0);
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
		}
		ap = anon_get_ptr(amp->ahp, anon_index);

		/*
		 * We must never use seg_pcache for COW pages
		 * because we might end up with original page still
		 * lying in seg_pcache even after private page is
		 * created. This leads to data corruption as
		 * aio_write refers to the page still in cache
		 * while all other accesses refer to the private
		 * page.
		 */
		if (ap == NULL || ap->an_refcnt != 1) {
			struct vpage *vpage;

			if (pamp != NULL) {
				error = EFAULT;
				break;
			}
			if (svd->vpage != NULL) {
				vpage = &svd->vpage[seg_page(seg, a)];
			} else {
				vpage = NULL;
			}
			ASSERT(anlock);
			anon_array_exit(&cookie);
			anlock = 0;

			error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
			    vpage, &pp, 0, F_INVAL, rw, 1);
			if (error) {
				error = fc_decode(error);
				break;
			}
			anon_array_enter(amp, anon_index, &cookie);
			anlock = 1;
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL || ap->an_refcnt != 1) {
				error = EFAULT;
				break;
			}
		}
		swap_xlate(ap, &vp, &off);
		pp = page_lookup_nowait(&vp->v_object, off, SE_SHARED);
		if (pp == NULL) {
			error = EFAULT;
			break;
		}
		if (ap->an_pvp != NULL) {
			anon_swap_free(ap, pp);
		}
		/*
		 * Unlock anon if this is the last slot in a large page.
		 */
		if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
			ASSERT(anlock);
			anon_array_exit(&cookie);
			anlock = 0;
		}
		*pplist++ = pp;
	}
	if (anlock) {		/* Ensure the lock is dropped */
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (a >= addr + len) {
		atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
		if (pamp != NULL) {
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
			    npages);
			wlen = len;
		}
		if (sftlck_sbase) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		}
		if (sftlck_send) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		}
		if (use_pcache) {
			(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
			    rw, pflags, preclaim_callback);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		return (0);
	}

	pplist = pl;
	np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
	while (np > (uint_t)0) {
		ASSERT(PAGE_LOCKED(*pplist));
		page_unlock(*pplist);
		np--;
		pplist++;
	}
	kmem_free(pl, sizeof (page_t *) * (npages + 1));
out:
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	*ppp = NULL;
	return (error);
}
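
/*
 * A sketch of the shadow list convention built above (drawn from the code
 * itself, not from any external interface contract): for a request of npages
 * pages the list holds npages locked page_t pointers plus one extra sentinel
 * slot, e.g. for npages == 3 with no large page adjustment
 *
 *	pl[0] pl[1] pl[2]	locked page_t pointers
 *	pl[3]			NOPCACHE_SHWLIST
 *
 * while a pcache-inserted (large page aligned) list ends in PCACHE_SHWLIST.
 * segvn_reclaim() and shamp_reclaim() below assert this sentinel before
 * unlocking the pages and freeing the list.
 */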
/*
 * purge any cached pages in the I/O page cache
 */
static void
segvn_purge(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;

	/*
	 * pcache is only used by pure anon segments.
	 */
	if (svd->amp == NULL || svd->vp != NULL) {
		return;
	}

	/*
	 * For MAP_SHARED segments non 0 segment's softlockcnt means
	 * active IO is still in progress via this segment. So we only
	 * purge MAP_SHARED segments when their softlockcnt is 0.
	 */
	if (svd->type == MAP_PRIVATE) {
		if (svd->softlockcnt) {
			seg_ppurge(seg, NULL, 0);
		}
	} else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
		seg_ppurge(seg, svd->amp, 0);
	}
}
/*
 * If async argument is not 0 we are called from pcache async thread and don't
 * hold AS lock.
 */
/*ARGSUSED*/
static int
segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	struct seg *seg = (struct seg *)ptag;
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	pgcnt_t np, npages;
	struct page **pl;

	npages = np = btop(len);
	ASSERT(npages);

	ASSERT(svd->vp == NULL && svd->amp != NULL);
	ASSERT(svd->softlockcnt >= npages);
	ASSERT(async || AS_LOCK_HELD(seg->s_as));

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If we are pcache async thread we don't hold AS lock. This means if
	 * softlockcnt drops to 0 after the decrement below address space may
	 * get freed. We can't allow it since after softlock decrement to 0 we
	 * still need to access as structure for possible wakeup of unmap
	 * waiters. To prevent the disappearance of as we take this segment
	 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
	 * make sure this routine completes before segment is freed.
	 *
	 * The second complication we have to deal with in async case is a
	 * possibility of missed wake up of unmap wait thread. When we don't
	 * hold as lock here we may take a_contents lock before unmap wait
	 * thread that was first to see softlockcnt was still not 0. As a
	 * result we'll fail to wake up an unmap wait thread. To avoid this
	 * race we set nounmapwait flag in as structure if we drop softlockcnt
	 * to 0 when we were called by pcache async thread. unmapwait thread
	 * will not block if this flag is set.
	 */
	if (async) {
		mutex_enter(&svd->segfree_syncmtx);
	}

	if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (async) {
				AS_SETNOUNMAPWAIT(seg->s_as);
			}
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}

	if (async) {
		mutex_exit(&svd->segfree_syncmtx);
	}
	return (0);
}
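
/*
 * Rough timeline of the missed wakeup that the nounmapwait flag guards
 * against (async case only, restating the comment above):
 *
 *	unmap waiter			pcache async thread
 *	------------			-------------------
 *	sees softlockcnt != 0
 *					drops softlockcnt to 0
 *					takes a_contents; AS_ISUNMAPWAIT()
 *					  is still clear, no broadcast sent
 *	takes a_contents, would set
 *	  AS_UNMAPWAIT and sleep on a
 *	  wakeup that already passed
 *
 * Setting AS_SETNOUNMAPWAIT() in the async path keeps the waiter from
 * blocking while the flag is set.
 */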
/*ARGSUSED*/
static int
shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
    enum seg_rw rw, int async)
{
	amp_t *amp = (amp_t *)ptag;
	pgcnt_t np, npages;
	struct page **pl;

	npages = np = btop(len);
	ASSERT(npages);
	ASSERT(amp->a_softlockcnt >= npages);

	pl = pplist;

	ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
	ASSERT(!async || pl[np] == PCACHE_SHWLIST);

	while (np > (uint_t)0) {
		if (rw == S_WRITE) {
			hat_setrefmod(*pplist);
		} else {
			hat_setref(*pplist);
		}
		page_unlock(*pplist);
		np--;
		pplist++;
	}

	kmem_free(pl, sizeof (page_t *) * (npages + 1));

	/*
	 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
	 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
	 * and anonmap_purge() acquires a_purgemtx.
	 */
	mutex_enter(&amp->a_purgemtx);
	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
	    amp->a_purgewait) {
		amp->a_purgewait = 0;
		cv_broadcast(&amp->a_purgecv);
	}
	mutex_exit(&amp->a_purgemtx);
	return (0);
}
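
/*
 * Note on the two reclaim callbacks: segvn_reclaim() is tagged with the
 * segment and drops svd->softlockcnt, while shamp_reclaim() is tagged with a
 * shared anon map and drops amp->a_softlockcnt; which one pcache invokes
 * appears to follow from whether the shadow list was inserted with a
 * non-NULL pamp in segvn_pagelock() above.
 */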
/*
 * get a memory ID for an addr in a given segment
 *
 * XXX only creates PAGESIZE pages if anon slots are not initialized.
 * At fault time they will be relocated into larger pages.
 */
static int
segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon *ap = NULL;
	ulong_t anon_index;
	struct anon_map *amp;
	anon_sync_obj_t cookie;

	if (svd->type == MAP_PRIVATE) {
		memidp->val[0] = (uintptr_t)seg->s_as;
		memidp->val[1] = (uintptr_t)addr;
		return (0);
	}

	if (svd->type == MAP_SHARED) {
		if (svd->vp) {
			memidp->val[0] = (uintptr_t)svd->vp;
			memidp->val[1] = (u_longlong_t)svd->offset +
			    (uintptr_t)(addr - seg->s_base);
			return (0);
		} else {

			SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
			if ((amp = svd->amp) != NULL) {
				anon_index = svd->anon_index +
				    seg_page(seg, addr);
			}
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);

			ASSERT(amp != NULL);

			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap == NULL) {
				page_t *pp;

				pp = anon_zero(seg, addr, &ap, svd->cred);
				if (pp == NULL) {
					anon_array_exit(&cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					return (ENOMEM);
				}
				ASSERT(anon_get_ptr(amp->ahp, anon_index)
				    == NULL);
				(void) anon_set_ptr(amp->ahp, anon_index,
				    ap, ANON_SLEEP);
				page_unlock(pp);
			}

			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);

			memidp->val[0] = (uintptr_t)ap;
			memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
			return (0);
		}
	}
	return (EINVAL);
}
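
/*
 * Summary of the memid encodings produced above: MAP_PRIVATE mappings encode
 * (address space pointer, virtual address); MAP_SHARED vnode mappings encode
 * (vnode pointer, file offset); MAP_SHARED anonymous mappings encode
 * (anon slot pointer, offset within the page).
 */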
static int
sameprot(struct seg *seg, caddr_t a, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vpage;
	spgcnt_t pages = btop(len);
	uint_t prot;

	if (svd->pageprot == 0)
		return (1);

	ASSERT(svd->vpage != NULL);

	vpage = &svd->vpage[seg_page(seg, a)];
	prot = VPP_PROT(vpage);
	vpage++;
	pages--;
	while (pages-- > 0) {
		if (prot != VPP_PROT(vpage))
			return (0);
		vpage++;
	}
	return (1);
}
/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segvn_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct segvn_data *svn_data;
	uoff_t vn_off;
	vnode_t *vp;

	ASSERT(seg != NULL);

	svn_data = (struct segvn_data *)seg->s_data;
	if (svn_data == NULL)
		return (NULL);

	/*
	 * Get policy info for private or shared memory
	 */
	if (svn_data->type != MAP_SHARED) {
		if (svn_data->tr_state != SEGVN_TR_ON) {
			policy_info = &svn_data->policy_info;
		} else {
			policy_info = &svn_data->tr_policy_info;
			ASSERT(policy_info->mem_policy ==
			    LGRP_MEM_POLICY_NEXT_SEG);
		}
	} else {
		amp = svn_data->amp;
		anon_index = svn_data->anon_index + seg_page(seg, addr);
		vp = svn_data->vp;
		vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
		policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
	}

	return (policy_info);
}
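
/*
 * In short: private segments return their per-segment policy (or the text
 * replication policy while tr_state is on), whereas shared segments defer to
 * lgrp_shm_policy_get(), keyed by the anon map and anon index or by the
 * vnode and file offset.
 */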
/*
 * Bind text vnode segment to an amp. If we bind successfully mappings will be
 * established to per vnode mapping per lgroup amp pages instead of to vnode
 * pages. There's one amp per vnode text mapping per lgroup. Many processes
 * may share the same text replication amp. If a suitable amp doesn't already
 * exist in svntr hash table create a new one. We may fail to bind to amp if
 * segment is not eligible for text replication. Code below first checks for
 * these conditions. If binding is successful segment tr_state is set to on
 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
 * svd->amp remains as NULL.
 */
static void
segvn_textrepl(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	uoff_t off = svd->offset;
	size_t size = seg->s_size;
	uoff_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	struct vattr va;
	proc_t *p = seg->s_as->a_proc;
	lgrp_id_t lgrp_id;
	lgrp_id_t olid;
	int first;
	struct anon_map *amp;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(p != NULL);
	ASSERT(svd->tr_state == SEGVN_TR_INIT);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->flags & MAP_TEXT);
	ASSERT(svd->type == MAP_PRIVATE);
	ASSERT(vp != NULL && svd->amp == NULL);
	ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
	ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
	ASSERT(seg->s_as != &kas);
	ASSERT(off < eoff);
	ASSERT(svntr_hashtab != NULL);

	/*
	 * If numa optimizations are no longer desired bail out.
	 */
	if (!lgrp_optimizations()) {
		svd->tr_state = SEGVN_TR_OFF;
		return;
	}

	/*
	 * Avoid creating anon maps with size bigger than the file size.
	 * If fop_getattr() call fails bail out.
	 */
	va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
	if (fop_getattr(vp, &va, 0, svd->cred, NULL) != 0) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(gaerr);
		return;
	}
	if (btopr(va.va_size) < btopr(eoff)) {
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(overmap);
		return;
	}

	/*
	 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
	 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
	 * mapping that checks if trcache for this vnode needs to be
	 * invalidated can't miss us.
	 */
	if (!(vp->v_flag & VVMEXEC)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag |= VVMEXEC;
		mutex_exit(&vp->v_lock);
	}
	mutex_enter(&svntr_hashtab[hash].tr_lock);
	/*
	 * Bail out if potentially MAP_SHARED writable mappings exist to this
	 * vnode. We don't want to use old file contents from existing
	 * replicas if this mapping was established after the original file
	 * was changed.
	 */
	if (vn_is_mapped(vp, V_WRITE)) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		svd->tr_state = SEGVN_TR_OFF;
		SEGVN_TR_ADDSTAT(wrcnt);
		return;
	}
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp != vp) {
			continue;
		}

		/*
		 * Bail out if the file or its attributes were changed after
		 * this replication entry was created since we need to use the
		 * latest file contents. Note that mtime test alone is not
		 * sufficient because a user can explicitly change mtime via
		 * utimes(2) interfaces back to the old value after modifying
		 * the file contents. To detect this case we also have to test
		 * ctime which among other things records the time of the last
		 * mtime change by utimes(2). ctime is not changed when the file
		 * is only read or executed so we expect that typically existing
		 * replication amps can be used most of the time.
		 */
		if (!svntrp->tr_valid ||
		    svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
		    svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
		    svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
		    svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(stale);
			return;
		}
		/*
		 * if off, eoff and szc match current segment we found the
		 * existing entry we can use.
		 */
		if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
		    svntrp->tr_szc == szc) {
			break;
		}
		/*
		 * Don't create different but overlapping in file offsets
		 * entries to avoid replication of the same file pages more
		 * than once per lgroup.
		 */
		if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
		    (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(overlap);
			return;
		}
	}
	/*
	 * If we didn't find existing entry create a new one.
	 */
	if (svntrp == NULL) {
		svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
		if (svntrp == NULL) {
			mutex_exit(&svntr_hashtab[hash].tr_lock);
			svd->tr_state = SEGVN_TR_OFF;
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
#ifdef DEBUG
		{
			lgrp_id_t i;
			for (i = 0; i < NLGRPS_MAX; i++) {
				ASSERT(svntrp->tr_amp[i] == NULL);
			}
		}
#endif /* DEBUG */
		svntrp->tr_vp = vp;
		svntrp->tr_off = off;
		svntrp->tr_eoff = eoff;
		svntrp->tr_szc = szc;
		svntrp->tr_valid = 1;
		svntrp->tr_mtime = va.va_mtime;
		svntrp->tr_ctime = va.va_ctime;
		svntrp->tr_refcnt = 0;
		svntrp->tr_next = svntr_hashtab[hash].tr_head;
		svntr_hashtab[hash].tr_head = svntrp;
	}

	first = 1;
again:
	/*
	 * We want to pick a replica with pages on main thread's (t_tid = 1,
	 * aka T1) lgrp. Currently text replication is only optimized for
	 * workloads that either have all threads of a process on the same
	 * lgrp or execute their large text primarily on main thread.
	 */
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		/*
		 * In case exec() prefaults text on non main thread use
		 * current thread lgrpid. It will become main thread anyway
		 * soon.
		 */
		lgrp_id = lgrp_home_id(curthread);
	}
	/*
	 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
	 * just set it to NLGRPS_MAX if it's different from current process T1
	 * home lgrp. p_tr_lgrpid is used to detect if process uses text
	 * replication and T1 new home is different from lgrp used for text
	 * replication. When this happens asynchronous segvn thread rechecks if
	 * segments should change lgrps used for text replication. If we fail
	 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
	 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
	 * we want to use. We don't need to use cas in this case because
	 * another thread that races in between our non atomic check and set
	 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
	 */
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	olid = p->p_tr_lgrpid;
	if (lgrp_id != olid && olid != NLGRPS_MAX) {
		lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
		if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
		    olid) {
			olid = p->p_tr_lgrpid;
			ASSERT(olid != LGRP_NONE);
			if (olid != lgrp_id && olid != NLGRPS_MAX) {
				p->p_tr_lgrpid = NLGRPS_MAX;
			}
		}
		ASSERT(p->p_tr_lgrpid != LGRP_NONE);
		membar_producer();
		/*
		 * lgrp_move_thread() won't schedule async recheck after
		 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
		 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
		 * is not LGRP_NONE.
		 */
		if (first && p->p_t1_lgrpid != LGRP_NONE &&
		    p->p_t1_lgrpid != lgrp_id) {
			first = 0;
			goto again;
		}
	}
	/*
	 * If no amp was created yet for lgrp_id create a new one as long as
	 * we have enough memory to afford it.
	 */
	if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_TR_ADDSTAT(normem);
			goto fail;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_TR_ADDSTAT(noanon);
			goto fail;
		}
		amp = anonmap_alloc(size, size, ANON_NOSLEEP);
		if (amp == NULL) {
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			goto fail;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = szc;
		svntrp->tr_amp[lgrp_id] = amp;
		SEGVN_TR_ADDSTAT(newamp);
	}
	svntrp->tr_refcnt++;
	ASSERT(svd->svn_trnext == NULL);
	ASSERT(svd->svn_trprev == NULL);
	svd->svn_trnext = svntrp->tr_svnhead;
	svd->svn_trprev = NULL;
	if (svntrp->tr_svnhead != NULL) {
		svntrp->tr_svnhead->svn_trprev = svd;
	}
	svntrp->tr_svnhead = svd;
	ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
	ASSERT(amp->refcnt >= 1);
	svd->amp = amp;
	svd->anon_index = 0;
	svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->tr_state = SEGVN_TR_ON;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	SEGVN_TR_ADDSTAT(repl);
	return;
fail:
	ASSERT(segvn_textrepl_bytes >= size);
	atomic_add_long(&segvn_textrepl_bytes, -size);
	ASSERT(svntrp != NULL);
	ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
	if (svntrp->tr_refcnt == 0) {
		ASSERT(svntrp == svntr_hashtab[hash].tr_head);
		svntr_hashtab[hash].tr_head = svntrp->tr_next;
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		kmem_cache_free(svntr_cache, svntrp);
	} else {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
	svd->tr_state = SEGVN_TR_OFF;
}
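
/*
 * Informal summary of the p_tr_lgrpid values used above (an interpretation
 * of the code, not a separate specification): LGRP_NONE means the process
 * has no replicated text yet; a real lgrp id records the home lgrp chosen
 * for replication; NLGRPS_MAX marks that T1's home no longer matches the
 * replication lgrp, which is what prompts the async thread to recheck and
 * possibly rebind segments in segvn_trupdate_seg() below.
 */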
/*
 * Convert seg back to regular vnode mapping seg by unbinding it from its text
 * replication amp. This routine is most typically called when segment is
 * unmapped but can also be called when segment no longer qualifies for text
 * replication (e.g. due to protection changes). If unload_unmap is set use
 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
 * svntr free all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	uoff_t off = svd->offset;
	size_t size = seg->s_size;
	uoff_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t i;

	ASSERT(AS_LOCK_HELD(seg->s_as));
	ASSERT(AS_WRITE_HELD(seg->s_as) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (VVMEXEC flag is set). In this case we
 * need to prevent further use of existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}
static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}
static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}
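
/*
 * Wakeup chain for the async recheck (as implemented above): the timeout()
 * armed in segvn_trasync_thread() and rearmed here fires
 * segvn_trupdate_wakeup(), which posts segvn_trasync_sem only when
 * lgrp_get_trthr_migrations() reports new T1 migrations since the last
 * snapshot; the async thread then calls segvn_trupdate() below to walk the
 * svntr hash and rebind segments.
 */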
static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	struct segvn_data *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}
static void
segvn_trupdate_seg(struct seg *seg, struct segvn_data *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are locking as/seg and svntr hash
	 * lock in reverse from synchronous thread order.
	 */
	if (!AS_LOCK_TRYENTER(as, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as
	 * segment lock is held as a writer and AS held as well. After we
	 * retake bucket lock we'll continue from where we left. We'll be able
	 * to reach the end of either list since new entries are always added
	 * to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
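
/*
 * Lock ordering note for the function above: the async thread already holds
 * the svntr hash bucket lock and only then tryenters the AS and segment
 * locks, the reverse of the order used by the synchronous text replication
 * paths, hence the use of tryenter and the early bailout recorded by the
 * nolock statistic.
 */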