/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the ZFS-specific devfsadm link generator.
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
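/*
 * Example usage (illustrative, not part of this file): a volume created
 * with
 *
 *	# zfs create -V 1g tank/vol
 *
 * appears as /dev/zvol/dsk/tank/vol (block) and /dev/zvol/rdsk/tank/vol
 * (raw), and can then be used like any other disk, e.g. as a newfs
 * target.
 */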
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/disklabel.h>
#include <prop/proplib.h>

#include "zfs_namecheck.h"
static void *zvol_state;

#define	ZVOL_DUMPSIZE	"dumpsize"

int zvol_strategy(struct buf *);
void zvol_minphys(struct buf *);

static struct dkdriver zvol_dkdriver = { zvol_strategy, zvol_minphys };
/*
 * This lock protects the zvol_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static kmutex_t zvol_state_lock;
static uint32_t zvol_minors;
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly; dumpified */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_mode;	/* DS_MODE_* flags at open time */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	uint64_t	zv_txg_assign;	/* txg to assign during ZIL replay */
	znode_t		zv_znode;	/* for range locking */
	struct disk	zv_dk;		/* disk statistics */
	kmutex_t	zv_dklock;	/* disk statistics */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;
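/*
 * Note (added for clarity): DMU_MAX_ACCESS bounds how much a single DMU
 * transaction may have in flight, so capping one transfer at half of it
 * leaves headroom for the metadata that the same tx dirties.  This is
 * the limit zvol_minphys() below clips each buf to.
 */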
extern int zfs_set_prop_nvlist(const char *, nvlist_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
static void
zvol_size_changed(zvol_state_t *zv, major_t maj)
{
	struct disk *disk = &zv->zv_dk;
	prop_dictionary_t disk_info, odisk_info, geom;

	disk_info = prop_dictionary_create();

	prop_dictionary_set_cstring_nocopy(disk_info, "type", "ESDI");

	geom = prop_dictionary_create();

	prop_dictionary_set_uint64(geom, "sectors-per-unit", zv->zv_volsize);
	prop_dictionary_set_uint32(geom, "sector-size",
	    DEV_BSIZE /* XXX 512? */);
	prop_dictionary_set_uint32(geom, "sectors-per-track", 32);
	prop_dictionary_set_uint32(geom, "tracks-per-cylinder", 64);
	prop_dictionary_set_uint32(geom, "cylinders-per-unit", zv->zv_volsize / 2048);
	prop_dictionary_set(disk_info, "geometry", geom);
	prop_object_release(geom);

	odisk_info = disk->dk_info;
	disk->dk_info = disk_info;

	if (odisk_info != NULL)
		prop_object_release(odisk_info);

	dev_t dev = makedevice(maj, zv->zv_minor);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", zv->zv_volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}
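/*
 * Worked example (illustrative): volsize=1G with volblocksize=8K passes
 * (exactly 131072 blocks), while volsize=1000000 fails the modulus test
 * with EINVAL (1000000 % 8192 != 0).  The block size itself must be a
 * power of two between SPA_MINBLOCKSIZE and SPA_MAXBLOCKSIZE, or EDOM
 * is returned.
 */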
static void
zvol_readonly_changed_cb(void *arg, uint64_t newval)
{
	zvol_state_t *zv = arg;

	if (newval)
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
}
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}
/*
 * Find a free minor number.
 */
static minor_t
zvol_minor_alloc(void)
{
	minor_t minor;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++)
		if (ddi_get_soft_state(zvol_state, minor) == NULL)
			return (minor);

	return (0);
}
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {
		zv = ddi_get_soft_state(zvol_state, minor);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}

	return (zv);
}
/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
    const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;
	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (EFRAGS);

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
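/*
 * In other words (illustrative): for a fully preallocated volume whose
 * blocks were laid out back to back on one vdev, the whole volume
 * collapses into a single zvol_extent_t; ze_nblks simply grows by one
 * for every blkptr whose DVA immediately follows its predecessor's.
 */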
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}
static int
zvol_get_lbas(zvol_state_t *zv)
{
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	err = traverse_dataset(dmu_objset_ds(zv->zv_objset), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}
/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, zv->zv_txg_assign);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
};
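/*
 * Note (added for clarity): zil_replay(), called from
 * zvol_create_minor() below, dispatches each intent-log record through
 * this table by its lrc_txtype, so a TX_WRITE record reaches
 * zvol_replay_write() and everything else fails with ENOTSUP.
 */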
/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name, major_t maj)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	uint64_t volsize;
	minor_t minor = 0;
	int ds_mode = DS_MODE_OWNER;
	vnode_t *vp = NULL;
	char *devpath;
	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(name) + 1;
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) != NULL) {
		mutex_exit(&zvol_state_lock);
		return (EEXIST);
	}

	if (strchr(name, '@') != 0)
		ds_mode |= DS_MODE_READONLY;

	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);
	if (error) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	/*
	 * If there's an existing /dev/zvol symlink, try to use the
	 * same minor number we used last time.
	 */
	devpath = kmem_alloc(devpathlen, KM_SLEEP);

	/* Get full path to ZFS volume disk device */
	(void) sprintf(devpath, "%s/%s", ZVOL_FULL_DEV_DIR, name);

	error = lookupname(devpath, UIO_SYSSPACE, NULL, &vp);

	if (error == 0 && vp->v_type != VBLK) {
		error = EINVAL;
	} else if (error == 0) {
		struct stat sb;

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = vn_stat(vp, &sb);
		VOP_UNLOCK(vp, 0);
		if (error == 0)
			minor = getminor(sb.st_rdev);
	}

	if (vp != NULL)
		VN_RELE(vp);

	/*
	 * If we found a minor but it's already in use, we must pick a new one.
	 */
	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
		minor = 0;

	if (minor == 0)
		minor = zvol_minor_alloc();

	if (minor == 0) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		kmem_free(devpath, devpathlen);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		kmem_free(devpath, devpathlen);
		return (EAGAIN);
	}

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	if (ddi_create_minor_node(zfs_dip, (char *)name, S_IFCHR,
	    minor, DDI_PSEUDO, maj) == DDI_FAILURE) {
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		kmem_free(devpath, devpathlen);
		return (EAGAIN);
	}

	if (ddi_create_minor_node(zfs_dip, (char *)name, S_IFBLK,
	    minor, DDI_PSEUDO, maj) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, (char *)name);
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		kmem_free(devpath, devpathlen);
		return (EAGAIN);
	}

	zv = ddi_get_soft_state(zvol_state, minor);

	(void) strcpy(zv->zv_name, name);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;
	zv->zv_mode = ds_mode;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	disk_init(&zv->zv_dk, name, &zvol_dkdriver);
	disk_attach(&zv->zv_dk);
	mutex_init(&zv->zv_dklock, NULL, MUTEX_DEFAULT, NULL);

	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector, NULL);
	zvol_size_changed(zv, maj);

	/* XXX this should handle the possible i/o error */
	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zvol_minors++;

	mutex_exit(&zvol_state_lock);

//	kmem_free(devpath, devpathlen);

	return (0);
}
/*
 * Remove minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	char namebuf[30], blkbuf[30];

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens != 0) {
		mutex_exit(&zvol_state_lock);
		return (EBUSY);
	}

	(void) snprintf(namebuf, sizeof (namebuf), "%s", name);
	ddi_remove_minor_node(zfs_dip, namebuf);

	(void) snprintf(blkbuf, sizeof (blkbuf), "%s", name);
	ddi_remove_minor_node(zfs_dip, blkbuf);

	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_objset_close(zv->zv_objset);
	zv->zv_objset = NULL;
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	ddi_soft_state_free(zvol_state, zv->zv_minor);

	zvol_minors--;

	disk_detach(&zv->zv_dk);
	disk_destroy(&zv->zv_dk);
	mutex_destroy(&zv->zv_dklock);

	mutex_exit(&zvol_state_lock);

	return (0);
}
static int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
static int
zvol_update_volsize(zvol_state_t *zv, major_t maj, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	/*
	 * If we are using a faked-up state (zv_minor == 0) then don't
	 * try to update the in-core zvol state.
	 */
	if (error == 0 && zv->zv_minor) {
		zv->zv_volsize = volsize;
		zvol_size_changed(zv, maj);
	}
	return (error);
}
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	zvol_state_t state = { 0 };

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		/*
		 * If we are doing a "zfs clone -o volsize=", then the
		 * minor node won't exist yet.
		 */
		error = dmu_objset_open(name, DMU_OST_ZVOL, DS_MODE_OWNER,
		    &state.zv_objset);
		if (error != 0)
			goto out;
		zv = &state;
	}
	old_volsize = zv->zv_volsize;

	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(zv, maj, volsize);

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it to
	 * its original size.
	 */
	if (error == 0 && zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			(void) zvol_update_volsize(zv, maj, old_volsize);
			error = zvol_dumpify(zv);
		}
	}

out:
	if (state.zv_objset)
		dmu_objset_close(state.zv_objset);

	mutex_exit(&zvol_state_lock);

	return (error);
}
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;
	boolean_t needlock;

	/*
	 * The lock may already be held if we are being called from
	 * zvol_dump_init().
	 */
	needlock = !MUTEX_HELD(&zvol_state_lock);
	if (needlock)
		mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		if (needlock)
			mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}
	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
		if (needlock)
			mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = EBUSY;
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}

	if (needlock)
		mutex_exit(&zvol_state_lock);

	return (error);
}
/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(*devp);
	zvol_state_t *zv;

	if (minor == 0)			/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	ASSERT(zv->zv_objset != NULL);

	if ((flag & FWRITE) &&
	    (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY))) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		mutex_exit(&zvol_state_lock);
		return (EBUSY);
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			mutex_exit(&zvol_state_lock);
			return (EBUSY);
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}

	mutex_exit(&zvol_state_lock);

	return (0);
}
, int flag
, int otyp
, cred_t
*cr
)
906 minor_t minor
= getminor(dev
);
909 if (minor
== 0) /* This is the control device */
912 mutex_enter(&zvol_state_lock
);
914 zv
= ddi_get_soft_state(zvol_state
, minor
);
916 mutex_exit(&zvol_state_lock
);
920 if (zv
->zv_flags
& ZVOL_EXCL
) {
921 ASSERT(zv
->zv_total_opens
== 1);
922 zv
->zv_flags
&= ~ZVOL_EXCL
;
926 * If the open count is zero, this is a spurious close.
927 * That indicates a bug in the kernel / DDI framework.
929 ASSERT(zv
->zv_open_count
[otyp
] != 0);
930 ASSERT(zv
->zv_total_opens
!= 0);
933 * You may get multiple opens, but only one close.
935 zv
->zv_open_count
[otyp
]--;
936 zv
->zv_total_opens
--;
938 mutex_exit(&zvol_state_lock
);
static void
zvol_get_done(dmu_buf_t *db, void *vzgd)
{
	zgd_t *zgd = (zgd_t *)vzgd;
	rl_t *rl = zgd->zgd_rl;

	dmu_buf_rele(db, vzgd);
	zfs_range_unlock(rl);
	zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	dmu_buf_t *db;
	rl_t *rl;
	zgd_t *zgd;
	uint64_t boff;			/* block starting offset */
	int dlen = lr->lr_length;	/* length of user data */
	int error;

	ASSERT(zio);
	ASSERT(dlen != 0);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) /* immediate write */
		return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf));

	zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_bp = &lr->lr_blkptr;

	/*
	 * Lock the range of the block to ensure that when the data is
	 * written out and its checksum is being calculated that no other
	 * thread can change the block.
	 */
	boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t);
	rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize,
	    RL_READER);
	zgd->zgd_rl = rl;

	VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db));
	error = dmu_sync(zio, db, &lr->lr_blkptr,
	    lr->lr_common.lrc_txg, zvol_get_done, zgd);
	if (error == 0)
		zil_add_block(zv->zv_zilog, &lr->lr_blkptr);
	/*
	 * If we get EINPROGRESS, then we need to wait for a
	 * write IO initiated by dmu_sync() to complete before
	 * we can release this dbuf.  We will finish everything
	 * up in the zvol_get_done() callback.
	 */
	if (error == EINPROGRESS)
		return (0);
	dmu_buf_rele(db, zgd);
	zfs_range_unlock(rl);
	kmem_free(zgd, sizeof (zgd_t));

	return (error);
}
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
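/*
 * Example (illustrative): with the default 32K cutoff, a 4K synchronous
 * write is queued as WR_NEED_COPY and its data is copied into the log
 * record when the itx is committed, while a 128K write is queued as
 * WR_INDIRECT and only a block pointer is logged once dmu_sync() has
 * written the data in place.
 */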
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len)
{
	uint32_t blocksize = zv->zv_volblocksize;
	lr_write_t *lr;

	while (len) {
		ssize_t nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (*lr));

		itx->itx_wr_state =
		    len > zvol_immediate_write_sz ? WR_INDIRECT : WR_NEED_COPY;
		itx->itx_private = zv;
		lr = (lr_write_t *)&itx->itx_lr;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = nbytes;
		lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t);
		BP_ZERO(&lr->lr_blkptr);

		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
		len -= nbytes;
		off += nbytes;
	}
}
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (EIO);
	else if (!doread && !vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (EIO);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}
static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);
	spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
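/*
 * Worked example (illustrative): with an 8K volblocksize and extents of
 * 1000 and 2000 blocks, a dump at logical offset 1200*8K skips the
 * first extent (offset -= 1000*8K) and lands 200*8K into the second;
 * the DVA offset of that extent then yields the physical vdev offset.
 */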
#endif	/* __NetBSD__ */
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dump = zv->zv_flags & ZVOL_DUMPIFIED;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if (!(bp->b_flags & B_READ) &&
	    (zv->zv_flags & ZVOL_RDONLY ||
	    zv->zv_mode & DS_MODE_READONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = (uint64_t)bp->b_blkno * DEV_BSIZE;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	mutex_enter(&zv->zv_dklock);
	disk_busy(&zv->zv_dk);
	mutex_exit(&zv->zv_dklock);

	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			printf("XXXNETBSD zvol_strategy: how?");
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}

	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (!(bp->b_flags & B_ASYNC) && !doread && !zil_disable && !is_dump)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
	mutex_enter(&zv->zv_dklock);
	disk_unbusy(&zv->zv_dk, bp->b_bcount - bp->b_resid, doread);
	mutex_exit(&zv->zv_dklock);
	biodone(bp);

	return (0);
}
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}
#endif	/* !__NetBSD__ */
/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	return (error);
}
#ifdef __NetBSD__
/*
 * Dirtbag ioctls to support newfs(1) for UFS filesystems.
 */
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	switch (cmd) {
	case DIOCGWEDGEINFO:
	{
		struct dkwedge_info *dkw = (void *) arg;

		strlcpy(dkw->dkw_devname, zv->zv_name, 16);
		strlcpy(dkw->dkw_wname, zv->zv_name, MAXPATHLEN);
		strlcpy(dkw->dkw_parent, zv->zv_name, 16);

		dkw->dkw_offset = 0;
		/*
		 * XXX NetBSD supports only DEV_BSIZE device block
		 * size: zv_volblocksize >> DEV_BSIZE
		 */
		dkw->dkw_size = (zv->zv_volsize / DEV_BSIZE);
		dprintf("dkw %"PRIu64" volsize %"PRIu64" volblock %"PRIu64" \n",
		    dkw->dkw_size, zv->zv_volsize, zv->zv_volblocksize);
		strcpy(dkw->dkw_ptype, DKW_PTYPE_FFS);

		break;
	}

	case DIOCGDISKINFO:
	{
		struct plistref *pref = (struct plistref *) arg;

		if (zv->zv_dk.dk_info == NULL) {
			mutex_exit(&zvol_state_lock);
			return (ENOTSUP);
		}
		prop_dictionary_copyout_ioctl(pref, cmd,
		    zv->zv_dk.dk_info);

		break;
	}

	default:
		aprint_debug("unknown disk_ioctl called\n");
		error = ENOTTY;
		break;
	}

	mutex_exit(&zvol_state_lock);
	return (error);
}
#else /* __NetBSD__ */
static int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);

	return (0);
}
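/*
 * The emulated label therefore looks like this (illustrative): LBA 1
 * holds the GPT header, LBA 2 holds the single EFI_RESERVED partition
 * entry, and that partition spans LBA 34 through (vs >> bs) - 1, i.e.
 * the whole volume minus the label area.
 */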
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	rl_t *rl;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));

	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zvol_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}
#endif /* __NetBSD__ */
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zvol_state_lock);
	ddi_soft_state_fini(&zvol_state);
}
static boolean_t
zvol_is_swap(zvol_state_t *zv)
{
	vnode_t *vp;
	boolean_t ret = B_FALSE;
	char *devpath;
	size_t devpathlen;
	int error;

	devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(zv->zv_name) + 1;
	devpath = kmem_alloc(devpathlen, KM_SLEEP);
	(void) sprintf(devpath, "%s%s", ZVOL_FULL_DEV_DIR, zv->zv_name);
	error = lookupname(devpath, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	kmem_free(devpath, devpathlen);

	ret = !error && IS_SWAPVP(common_specvp(vp));

	if (vp != NULL)
		VN_RELE(vp);

	return (ret);
}
static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvol size.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
	}
	dmu_tx_commit(tx);

	/* Truncate the file */
	if (!error)
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, 0, DMU_OBJECT_END);

	if (error)
		return (error);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
		    SPA_MAXBLOCKSIZE) == 0);

		error = zfs_set_prop_nvlist(zv->zv_name, nv);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}
*zv
)
1753 uint64_t dumpsize
= 0;
1755 objset_t
*os
= zv
->zv_objset
;
1757 if (zv
->zv_flags
& ZVOL_RDONLY
|| (zv
->zv_mode
& DS_MODE_READONLY
))
1761 * We do not support swap devices acting as dump devices.
1763 if (zvol_is_swap(zv
))
1766 if (zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
,
1767 8, 1, &dumpsize
) != 0 || dumpsize
!= zv
->zv_volsize
) {
1768 boolean_t resize
= (dumpsize
> 0) ? B_TRUE
: B_FALSE
;
1770 if ((error
= zvol_dump_init(zv
, resize
)) != 0) {
1771 (void) zvol_dump_fini(zv
);
1777 * Build up our lba mapping.
1779 error
= zvol_get_lbas(zv
);
1781 (void) zvol_dump_fini(zv
);
1785 tx
= dmu_tx_create(os
);
1786 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
1787 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1790 (void) zvol_dump_fini(zv
);
1794 zv
->zv_flags
|= ZVOL_DUMPIFIED
;
1795 error
= zap_update(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, 8, 1,
1796 &zv
->zv_volsize
, tx
);
1800 (void) zvol_dump_fini(zv
);
1804 txg_wait_synced(dmu_objset_pool(os
), 0);
1809 zvol_dump_fini(zvol_state_t
*zv
)
1812 objset_t
*os
= zv
->zv_objset
;
1815 uint64_t checksum
, compress
, refresrv
, vbs
;
1818 * Attempt to restore the zvol back to its pre-dumpified state.
1819 * This is a best-effort attempt as it's possible that not all
1820 * of these properties were initialized during the dumpify process
1821 * (i.e. error during zvol_dump_init).
1824 tx
= dmu_tx_create(os
);
1825 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
1826 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1831 (void) zap_remove(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, tx
);
1834 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
1835 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), 8, 1, &checksum
);
1836 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
1837 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), 8, 1, &compress
);
1838 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
1839 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1, &refresrv
);
1840 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
1841 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), 8, 1, &vbs
);
1843 VERIFY(nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
1844 (void) nvlist_add_uint64(nv
,
1845 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), checksum
);
1846 (void) nvlist_add_uint64(nv
,
1847 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), compress
);
1848 (void) nvlist_add_uint64(nv
,
1849 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), refresrv
);
1850 (void) nvlist_add_uint64(nv
,
1851 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), vbs
);
1852 (void) zfs_set_prop_nvlist(zv
->zv_name
, nv
);
1855 zvol_free_extents(zv
);
1856 zv
->zv_flags
&= ~ZVOL_DUMPIFIED
;
1857 (void) dmu_free_long_range(os
, ZVOL_OBJ
, 0, DMU_OBJECT_END
);
1861 #endif /* !__NetBSD__ */