/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include "zfs_comutil.h"
#include "zfs_gitrev.h"
/*
 * Routines to manage the on-disk history log.
 *
 * The history log is stored as a dmu object containing
 * <packed record length, record nvlist> tuples.
 *
 * Where "record nvlist" is an nvlist containing uint64_ts and strings, and
 * "packed record length" is the packed length of the "record nvlist" stored
 * as a little endian uint64_t.
 *
 * The log is implemented as a ring buffer, though the original creation
 * of the pool ('zpool create') is never overwritten.
 *
 * The history log is tracked as object 'spa_t::spa_history'.  The bonus buffer
 * of 'spa_history' stores the offsets for logging/retrieving history as
 * 'spa_history_phys_t'.  'sh_pool_create_len' is the ending offset in bytes of
 * where the 'zpool create' record is stored.  This allows us to never
 * overwrite the original creation of the pool.  'sh_phys_max_off' is the
 * physical ending offset in bytes of the log.  This tells you the length of
 * the buffer.  'sh_eof' is the logical EOF (in bytes).  Whenever a record
 * is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented.  'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
 *
 * 'sh_records_lost' keeps track of how many records have been overwritten
 * and permanently lost.
 */
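/*
 * A rough sketch of the physical layout described above (byte offsets are
 * illustrative only):
 *
 *   0          sh_pool_create_len                          sh_phys_max_off
 *   +----------+-----------------------------------------------------+
 *   | 'zpool   |  ring buffer of <LE uint64_t length, packed nvlist> |
 *   |  create' |  records; writes wrap back to sh_pool_create_len    |
 *   | record   |  when they reach the physical end of the object     |
 *   +----------+-----------------------------------------------------+
 *
 * 'sh_bof' and 'sh_eof' are logical offsets that only ever grow; they are
 * mapped onto this physical window by spa_history_log_to_phys() below.
 */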
/* convert a logical offset to physical */
static uint64_t
spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
{
	uint64_t phys_len;

	phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
	return ((log_off - shpp->sh_pool_create_len) % phys_len
	    + shpp->sh_pool_create_len);
}
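/*
 * Worked example for the mapping above (numbers are illustrative only):
 * with sh_pool_create_len = 512 and sh_phys_max_off = 1536, phys_len is
 * 1024, so a logical offset of 2000 maps to ((2000 - 512) % 1024) + 512 =
 * 976.  Logical offsets grow without bound while physical offsets stay in
 * [sh_pool_create_len, sh_phys_max_off).
 */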
void
spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
{
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT0(spa->spa_history);
	spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
	    sizeof (spa_history_phys_t), tx);

	VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_HISTORY, sizeof (uint64_t), 1,
	    &spa->spa_history, tx));

	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	ASSERT3U(dbp->db_size, >=, sizeof (spa_history_phys_t));

	shpp = dbp->db_data;
	dmu_buf_will_dirty(dbp, tx);

	/*
	 * Figure out maximum size of history log.  We set it at
	 * 0.1% of pool size, with a max of 1G and min of 128KB.
	 */
	shpp->sh_phys_max_off =
	    metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
	shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
	shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);

	dmu_buf_rele(dbp, FTAG);
}
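/*
 * Sizing examples for the 0.1% rule above (illustrative only): a 100GB pool
 * gets roughly a 100MB history object, a 32MB pool is clamped up to the
 * 128KB floor, and pools beyond roughly 1TB are capped at the 1GB ceiling.
 */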
/*
 * Change 'sh_bof' to the beginning of the next record.
 */
static int
spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
{
	objset_t *mos = spa->spa_meta_objset;
	uint64_t firstread, reclen, phys_bof;
	char buf[sizeof (reclen)];
	int err;

	phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
	firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);

	if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
	    buf, DMU_READ_PREFETCH)) != 0)
		return (err);
	if (firstread != sizeof (reclen)) {
		if ((err = dmu_read(mos, spa->spa_history,
		    shpp->sh_pool_create_len, sizeof (reclen) - firstread,
		    buf + firstread, DMU_READ_PREFETCH)) != 0)
			return (err);
	}

	reclen = LE_64(*((uint64_t *)buf));
	shpp->sh_bof += reclen + sizeof (reclen);
	shpp->sh_records_lost++;
	return (0);
}
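/*
 * Note on the two dmu_read() calls above: the 8-byte record length itself
 * may straddle the physical end of the log.  For example (illustrative
 * only), if just 3 of its bytes fit before sh_phys_max_off, the remaining
 * 5 are read from the start of the ring-buffer region at
 * sh_pool_create_len before the length is decoded.
 */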
static int
spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
    dmu_tx_t *tx)
{
	uint64_t firstwrite, phys_eof;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	ASSERT(MUTEX_HELD(&spa->spa_history_lock));

	/* see if we need to reset logical BOF */
	while (shpp->sh_phys_max_off - shpp->sh_pool_create_len -
	    (shpp->sh_eof - shpp->sh_bof) <= len) {
		if ((err = spa_history_advance_bof(spa, shpp)) != 0) {
			return (err);
		}
	}

	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
	firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);

	shpp->sh_eof += len;
	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);

	len -= firstwrite;
	if (len > 0) {
		/* write out the rest at the beginning of physical file */
		dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
		    len, (char *)buf + firstwrite, tx);
	}

	return (0);
}
/*
 * Post a history sysevent.
 *
 * The nvlist_t* passed into this function will be transformed into a new
 * nvlist_t* that is posted via spa_event_notify().  The transformation makes
 * the following changes to the nvlist:
 *
 * 1. Nested nvlists will be flattened to a single level
 * 2. Keys will have their names normalized (to remove any problematic
 *    characters, such as whitespace)
 *
 * The nvlist_t passed into this function will be duplicated and should be
 * freed by the caller.
 */
static void
spa_history_log_notify(spa_t *spa, nvlist_t *nvl)
{
	nvlist_t *hist_nvl = fnvlist_alloc();
	uint64_t uint64;
	char *string;

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_CMD, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_CMD, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_ZONE, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_ZONE, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_HOST, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_HOST, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_DSNAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_STR, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_IOCTL, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_DSID, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TXG, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TIME, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TIME, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_WHO, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_WHO, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_INT_EVENT, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_INT_EVENT, uint64);

	spa_event_notify(spa, NULL, hist_nvl, ESC_ZFS_HISTORY_EVENT);

	nvlist_free(hist_nvl);
}
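/*
 * For example (illustrative only), an internal "destroy" record for
 * tank/foo would be posted as a single ESC_ZFS_HISTORY_EVENT sysevent whose
 * flattened payload carries the operation name, dataset name and id, txg,
 * time, and uid copied from the keys handled above.
 */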
/*
 * Write out a history event.
 */
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t	*nvl = arg;
	spa_t		*spa = dmu_tx_pool(tx)->dp_spa;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname()->nodename);

	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    (u_longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}
		/*
		 * The history sysevent is posted only for internal history
		 * messages to show what has happened, not how it happened. For
		 * example, the following command:
		 *
		 * # zfs destroy -r tank/foo
		 *
		 * will result in one sysevent posted per dataset that is
		 * destroyed as a result of the command - which could be more
		 * than one event in total.  By contrast, if the sysevent was
		 * posted as a result of the ZPOOL_HIST_CMD key being present
		 * it would result in only one sysevent being posted with the
		 * full command line arguments, requiring the consumer to know
		 * how to parse and understand zfs(8) command invocations.
		 */
		spa_history_log_notify(spa, nvl);
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	VERIFY3U(nvlist_pack(nvl, &record_packed, &reclen, NV_ENCODE_NATIVE,
	    KM_SLEEP), ==, 0);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (ret == 0)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}
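/*
 * The record written above has the framing described at the top of this
 * file.  A sketch (sizes illustrative only):
 *
 *   [ 8-byte little-endian length = 120 ][ 120 bytes of NV_ENCODE_NATIVE
 *     packed nvlist ]
 *
 * The very first command record (the 'zpool create') additionally pins
 * sh_pool_create_len so later records can never overwrite it.
 */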
/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
	int err;
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
	err = spa_history_log_nvl(spa, nvl);
	fnvlist_free(nvl);
	return (err);
}
int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
	int err = 0;
	dmu_tx_t *tx;
	nvlist_t *nvarg, *in_nvl = NULL;

	if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
		return (SET_ERROR(EINVAL));

	err = nvlist_lookup_nvlist(nvl, ZPOOL_HIST_INPUT_NVL, &in_nvl);
	if (err == 0) {
		(void) nvlist_remove_all(in_nvl, ZPOOL_HIDDEN_ARGS);
	}

	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
	if (spa_history_zone() != NULL) {
		fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
		    spa_history_zone());
	}
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

	/*
	 * Since the history is recorded asynchronously, the effective time is
	 * now, which may be considerably before the change is made on disk.
	 */
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_TIME, gethrestime_sec());

	/* Kick this off asynchronously; errors are ignored. */
	dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync, nvarg, tx);
	dmu_tx_commit(tx);

	/* spa_history_log_sync will free nvarg */
	return (err);
}
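/*
 * The ZPOOL_HIDDEN_ARGS scrubbing above keeps sensitive caller-supplied
 * values nested under ZPOOL_HIST_INPUT_NVL (key material would be one
 * likely example) out of the duplicated nvlist, and therefore out of the
 * on-disk history.
 */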
/*
 * Read out the command history.
 */
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	uint64_t read_len, phys_read_off, phys_eof;
	uint64_t leftover = 0;
	spa_history_phys_t *shpp;
	int err;

	/*
	 * If the command history doesn't exist (older pool),
	 * that's ok, just return ENOENT.
	 */
	if (!spa->spa_history)
		return (SET_ERROR(ENOENT));

	/*
	 * The history is logged asynchronously, so when they request
	 * the first chunk of history, make sure everything has been
	 * synced to disk so that we get it.
	 */
	if (*offp == 0 && spa_writeable(spa))
		txg_wait_synced(spa_get_dsl(spa), 0);

	if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
		return (err);
	shpp = dbp->db_data;

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	mutex_enter(&spa->spa_history_lock);
	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

	if (*offp < shpp->sh_pool_create_len) {
		/* read in just the zpool create history */
		phys_read_off = *offp;
		read_len = MIN(*len, shpp->sh_pool_create_len -
		    phys_read_off);
	} else {
		/*
		 * Need to reset passed in offset to BOF if the passed in
		 * offset has since been overwritten.
		 */
		*offp = MAX(*offp, shpp->sh_bof);
		phys_read_off = spa_history_log_to_phys(*offp, shpp);

		/*
		 * Read up to the minimum of what the user passed down or
		 * the EOF (physical or logical).  If we hit physical EOF,
		 * use 'leftover' to read from the physical BOF.
		 */
		if (phys_read_off <= phys_eof) {
			read_len = MIN(*len, phys_eof - phys_read_off);
		} else {
			read_len = MIN(*len,
			    shpp->sh_phys_max_off - phys_read_off);
			if (phys_read_off + *len > shpp->sh_phys_max_off) {
				leftover = MIN(*len - read_len,
				    phys_eof - shpp->sh_pool_create_len);
			}
		}
	}

	/* offset for consumer to use next */
	*offp += read_len + leftover;

	/* tell the consumer how much you actually read */
	*len = read_len + leftover;

	if (read_len == 0) {
		mutex_exit(&spa->spa_history_lock);
		dmu_buf_rele(dbp, FTAG);
		return (0);
	}

	err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
	    DMU_READ_PREFETCH);
	if (leftover && err == 0) {
		err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
		    leftover, buf + read_len, DMU_READ_PREFETCH);
	}
	mutex_exit(&spa->spa_history_lock);

	dmu_buf_rele(dbp, FTAG);
	return (err);
}
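/*
 * A minimal consumer loop might look like the sketch below ('zpool history'
 * follows this shape through the ioctl layer; the buffer size is
 * hypothetical):
 *
 *	uint64_t off = 0, len;
 *	char buf[4096];
 *
 *	for (;;) {
 *		len = sizeof (buf);
 *		if (spa_history_get(spa, &off, &len, buf) != 0 || len == 0)
 *			break;
 *	}
 *
 * Each chunk returned in 'buf' holds <LE length, packed nvlist> tuples; a
 * record may straddle two chunks, so the consumer must carry partial bytes
 * over to the next iteration.
 */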
/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
	char *msg;

	/*
	 * If this is part of creating a pool, not everything is
	 * initialized yet, so don't bother logging the internal events.
	 * Likewise if the pool is not writeable.
	 */
	if (spa_is_initializing(spa) || !spa_writeable(spa)) {
		fnvlist_free(nvl);
		return;
	}

	msg = kmem_vasprintf(fmt, adx);
	fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
	kmem_strfree(msg);

	fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());

	if (dmu_tx_is_syncing(tx)) {
		spa_history_log_sync(nvl, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    spa_history_log_sync, nvl, tx);
	}
	/* spa_history_log_sync() will free nvl */
}
void
spa_history_log_internal(spa_t *spa, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	dmu_tx_t *htx = tx;
	va_list adx;

	/* create a tx if we didn't get one */
	if (tx == NULL) {
		htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
		if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
			dmu_tx_abort(htx);
			return;
		}
	}

	va_start(adx, fmt);
	log_internal(fnvlist_alloc(), operation, spa, htx, fmt, adx);
	va_end(adx);

	/* if we didn't get a tx from the caller, commit the one we made */
	if (tx == NULL)
		dmu_tx_commit(htx);
}
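/*
 * Typical internal-history usage pairs a short operation name with a
 * printf-style detail string, e.g. (hypothetical values):
 *
 *	spa_history_log_internal(spa, "set", tx, "%s=%s", "compression", "on");
 *
 * which is recorded as an internal record rather than a full command line.
 */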
void
spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	dsl_dataset_name(ds, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);

	va_start(adx, fmt);
	log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
	va_end(adx);
}
void
spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	dsl_dir_name(dd, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID,
	    dsl_dir_phys(dd)->dd_head_dataset_obj);

	va_start(adx, fmt);
	log_internal(nvl, operation, dd->dd_pool->dp_spa, tx, fmt, adx);
	va_end(adx);
}
void
spa_history_log_version(spa_t *spa, const char *operation, dmu_tx_t *tx)
{
	utsname_t *u = utsname();

	spa_history_log_internal(spa, operation, tx,
	    "pool version %llu; software version %s; uts %s %s %s %s",
	    (u_longlong_t)spa_version(spa), ZFS_META_GITREV,
	    u->nodename, u->release, u->version, u->machine);
}
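/*
 * The resulting internal record reads roughly like (values illustrative
 * only): "pool version 5000; software version <gitrev>; uts myhost 5.10.0
 * #1 SMP x86_64".
 */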
const char *
spa_history_zone(void)
{
	/*
	 * Platform-specific: zoned platforms return the calling zone's
	 * name; assume no zone support here and report none.
	 */
	return (NULL);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(spa_history_create_obj);
EXPORT_SYMBOL(spa_history_get);
EXPORT_SYMBOL(spa_history_log);
EXPORT_SYMBOL(spa_history_log_internal);
EXPORT_SYMBOL(spa_history_log_version);
#endif
);