# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

PATH=/usr/bin:/usr/sbin
export PATH

. /usr/lib/brand/shared/common.ksh
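
# The helper functions used throughout this script (log, vlog, error, fatal,
# fail_fatal, get_zonepath_ds) are expected to be provided by the shared
# common.ksh sourced above.
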
# Values for service tags.
STCLIENT=/usr/bin/stclient
ST_PRODUCT_NAME="Solaris 10 Containers"
ST_PRODUCT_REV="1.0"
ST_PRODUCT_UUID="urn:uuid:2f459121-dec7-11de-9af7-080020a9ed93"

w_sanity_detail=$(gettext "       WARNING: Skipping image sanity checks.")
f_sanity_detail=$(gettext "Missing %s at %s")
f_sanity_sparse=$(gettext "Is this a sparse zone image? The image must be whole-root.")
f_sanity_vers=$(gettext "The image release version must be 10 (got %s), the zone is not usable on this system.")
f_not_s10_image=$(gettext "%s doesn't look like a Solaris 10 image.")
f_sanity_nopatch=$(gettext "Unable to determine the image's patch level.")
f_sanity_downrev=$(gettext "The image patch level is downrev for running in a solaris10 branded zone.\n(patchlist %s)")
f_need_newer_emul=$(gettext "The image requires a newer version of the solaris10 brand emulation.")
f_zfs_create=$(gettext "Unable to create the zone's ZFS dataset.")
f_no_ds=$(gettext "No zonepath dataset; the zonepath must be a ZFS dataset.")
f_multiple_ds=$(gettext "Multiple active datasets.")
f_no_active_ds=$(gettext "No active dataset; the zone's ZFS root dataset must be configured as\n\ta zone boot environment.")
f_zfs_unmount=$(gettext "Unable to unmount the zone's root ZFS dataset (%s).\nIs there a global zone process inside the zone root?\nThe current zone boot environment will remain mounted.\n")
f_zfs_mount=$(gettext "Unable to mount the zone's ZFS dataset.")
incompat_options=$(gettext "mutually exclusive options.\n%s")

sanity_ok=$(gettext "  Sanity Check: Passed.  Looks like a Solaris 10 image.")
sanity_fail=$(gettext "  Sanity Check: FAILED (see log for details).")

e_badboot=$(gettext "Zone boot failed")
e_nosingleuser=$(gettext "ERROR: zone did not finish booting to single-user.")
e_unconfig=$(gettext "sys-unconfig failed")
v_unconfig=$(gettext "Performing zone sys-unconfig")

v_no_tags=$(gettext "Service tags facility not present.")
e_bad_uuid=$(gettext "Failed to get zone UUID")
v_addtag=$(gettext "Adding service tag: %s")
v_deltag=$(gettext "Removing service tag: %s")
e_addtag_fail=$(gettext "Adding service tag failed (error: %s)")

sanity_check()
{
	typeset dir="$1"
	res=0

	#
	# Check for some required directories and make sure this isn't a
	# sparse zone image.
	#
	checks="etc etc/svc var var/svc"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			res=1
		fi
	done

	# Files from SUNWcsr and SUNWcsu that are in sparse inherit-pkg-dirs.
	checks="lib/svc sbin/zonename usr/bin/chmod"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			log "$f_sanity_sparse"
			res=1
		fi
	done

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	if [[ "$SANITY_SKIP" == 1 ]]; then
		log "$w_sanity_detail"
		return
	fi

	#
	# Check image release to be sure it's S10.
	#
	image_vers="unknown"
	if [[ -f $dir/var/sadm/system/admin/INST_RELEASE ]]; then
		image_vers=$(nawk -F= '{if ($1 == "VERSION") print $2}' \
		    $dir/var/sadm/system/admin/INST_RELEASE)
	fi

	if [[ "$image_vers" != "10" ]]; then
		log "$f_sanity_vers" "$image_vers"
		res=1
	fi
	#
	# Make sure we have the minimal KU patch we support.  These are the
	# KUs for S10u8.
	#
	if [[ $(uname -p) == "i386" ]]; then
		req_patch="141445-09"
	else
		req_patch="141444-09"
	fi

	for i in $dir/var/sadm/pkg/SUNWcakr*
	do
		if [[ ! -d $i || ! -f $i/pkginfo ]]; then
			log "$f_sanity_nopatch"
			res=1
		fi
	done

	#
	# Check the core kernel pkg for the required KU patch.
	#
	found=0
	for i in $dir/var/sadm/pkg/SUNWcakr*/pkginfo
	do
		patches=$(nawk -F= '{if ($1 == "PATCHLIST") print $2}' $i)
		for patch in $patches
		do
			if [[ $patch == $req_patch ]]; then
				found=1
				break
			fi
		done

		if (( $found == 1 )); then
			break
		fi
	done

	if (( $found != 1 )); then
		log "$f_sanity_downrev" "$patches"
		res=1
	fi
	#
	# Check the S10 image for a required version of the emulation.
	#
	VERS_FILE=/usr/lib/brand/solaris10/version
	s10vers_needs=0
	if [[ -f $dir/$VERS_FILE ]]; then
		s10vers_needs=$(/usr/bin/egrep -v "^#" $dir/$VERS_FILE)
	fi

	# Now get the current emulation version.
	emul_vers=$(/usr/bin/egrep -v "^#" $VERS_FILE)

	# Verify that the emulation can run this version of S10.
	if (( $s10vers_needs > $emul_vers )); then
		log "$f_need_newer_emul"
		res=1
	fi

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	vlog "$sanity_ok"
}

# Find the active dataset under the zonepath dataset to mount on zonepath/root.
# $1 ZONEPATH_DS
get_active_ds() {
	ACTIVE_DS=$1/ROOT/zbe-0
}
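
# With a zonepath dataset of, say, rpool/zones/myzone (name illustrative),
# the active dataset is therefore rpool/zones/myzone/ROOT/zbe-0.
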
#
# Make sure the active dataset is mounted for the zone.
#
mount_active_ds() {
	get_zonepath_ds $zonepath
	get_active_ds $ZONEPATH_DS

	# If already mounted then we're done.
	mnted=`zfs get -H mounted $ACTIVE_DS | cut -f3`
	[[ $mnted = "yes" ]] && return

	mount -F zfs $ACTIVE_DS $zonepath/root || fail_fatal "$f_zfs_mount"
}
#
# Set up ZFS dataset hierarchy for the zone root dataset.
#
create_active_ds() {
	# Find the zone's current dataset.  This should have been created by
	# zoneadm (or the attach hook).
	get_zonepath_ds $zonepath

	#
	# We need to tolerate errors while creating the datasets and making the
	# mountpoint, since these could already exist from an attach scenario.
	#

	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set mountpoint=legacy $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
	fi

	get_active_ds $ZONEPATH_DS
	zfs list -H -o name $ACTIVE_DS >/dev/null 2>&1
	if (( $? != 0 )); then
		zfs create -o canmount=noauto $ACTIVE_DS
		(( $? != 0 )) && fail_fatal "$f_zfs_create"
	else
		zfs set canmount=noauto $ACTIVE_DS >/dev/null 2>&1
		zfs inherit mountpoint $ACTIVE_DS >/dev/null 2>&1
		zfs inherit zoned $ACTIVE_DS >/dev/null 2>&1
	fi

	if [ ! -d $ZONEROOT ]; then
		/usr/bin/mkdir -m 0755 -p $ZONEROOT || \
		    fail_fatal "$f_mkdir" "$ZONEROOT"
	fi
	/usr/bin/chmod 700 $ZONEPATH || fail_fatal "$f_chmod" "$ZONEPATH"

	mount -F zfs $ACTIVE_DS $ZONEROOT || fail_fatal "$f_zfs_mount"
}
#
# Before booting the zone we may need to create a few mnt points, just in
# case they don't exist for some reason.
#
# Whenever we reach into the zone while running in the global zone we
# need to validate that none of the interim directories are symlinks
# that could cause us to inadvertently modify the global zone.
#
mk_zone_dirs() {
	if [[ ! -f $ZONEROOT/tmp && ! -d $ZONEROOT/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/tmp || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/run && ! -d $ZONEROOT/var/run ]]; then
		mkdir -m 1755 -p $ZONEROOT/var/run || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/tmp && ! -d $ZONEROOT/var/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/var/tmp || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -f $ZONEROOT/etc/mnttab ]]; then
		/usr/bin/touch $ZONEROOT/etc/mnttab || exit $EXIT_CODE
		/usr/bin/chmod 444 $ZONEROOT/etc/mnttab || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/proc && ! -d $ZONEROOT/proc ]]; then
		mkdir -m 755 -p $ZONEROOT/proc || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/dev && ! -d $ZONEROOT/dev ]]; then
		mkdir -m 755 -p $ZONEROOT/dev || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -h $ZONEROOT/etc/svc && \
	    ! -d $ZONEROOT/etc/svc ]]; then
		mkdir -m 755 -p $ZONEROOT/etc/svc/volatile || exit $EXIT_CODE
	fi
}

#
# We're sys-unconfig-ing the zone.  This will normally halt the zone, however
# there are problems with sys-unconfig and it can hang when the zone is booted
# to milestone=none.  Sys-unconfig also sometimes hangs halting the zone.
# Thus, we take some care to work around these sys-unconfig limitations.
#
# On entry we expect the zone to be booted.  We use sys-unconfig -R to make it
# think it's working on an alternate root and let the caller halt the zone.
#
sysunconfig_zone() {
	vlog "$v_unconfig"

	/usr/sbin/zlogin -S $ZONENAME /usr/sbin/sys-unconfig -R /./ \
	    >/dev/null 2>&1
	if (( $? != 0 )); then
		error "$e_unconfig"
		return 1
	fi

	return 0
}

#
# Get zone's uuid for service tag.
#
get_inst_uuid()
{
	typeset ZONENAME="$1"

	ZONEUUID=`zoneadm -z $ZONENAME list -p | nawk -F: '{print $5}'`
	[[ $? -ne 0 || -z $ZONEUUID ]] && return 1

	INSTANCE_UUID="urn:st:${ZONEUUID}"
	return 0
}
#
# Add a service tag for a given zone.  We use two UUIDs-- the first,
# the Product UUID, comes from the Sun swoRDFish ontology.  The second
# is the UUID of the zone itself, which forms the instance UUID.
#
add_svc_tag()
{
	typeset ZONENAME="$1"
	typeset SOURCE="$2"

	if [ ! -x $STCLIENT ]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || (error "$e_bad_uuid"; return 1)

	vlog "$v_addtag" "$INSTANCE_UUID"
	$STCLIENT -a \
	    -p "$ST_PRODUCT_NAME" \
	    -e "$ST_PRODUCT_REV" \
	    -t "$ST_PRODUCT_UUID" \
	    -i "$INSTANCE_UUID" \
	    -P "none" \
	    -m "Sun" \
	    -A `uname -p` \
	    -z "$ZONENAME" \
	    -S "$SOURCE" >/dev/null 2>&1

	err=$?

	# 226 means "duplicate record," which we can ignore.
	if [[ $err -ne 0 && $err -ne 226 ]]; then
		error "$e_addtag_fail" "$err"
		return 1
	fi
	return 0
}

#
# Remove a service tag for a given zone.
#
del_svc_tag()
{
	typeset ZONENAME="$1"

	if [ ! -x $STCLIENT ]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || (error "$e_bad_uuid"; return 1)

	vlog "$v_deltag" "$INSTANCE_UUID"
	$STCLIENT -d -i "$INSTANCE_UUID" >/dev/null 2>&1
	return 0
}
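
#
# Typical callers (hypothetical invocations; the actual install/attach and
# uninstall hooks pass their own source string):
#
#	add_svc_tag "$ZONENAME" "install"
#	del_svc_tag "$ZONENAME"
#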