/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/cred.h>
#include "zfs_comutil.h"
#include "zfs_gitrev.h"
#ifdef _KERNEL
#include <sys/zone.h>
#endif

/*
 * Routines to manage the on-disk history log.
 *
 * The history log is stored as a dmu object containing
 * <packed record length, record nvlist> tuples.
 *
 * Where "record nvlist" is an nvlist containing uint64_ts and strings, and
 * "packed record length" is the packed length of the "record nvlist" stored
 * as a little endian uint64_t.
 *
 * The log is implemented as a ring buffer, though the original creation
 * of the pool ('zpool create') is never overwritten.
 *
 * The history log is tracked as object 'spa_t::spa_history'.  The bonus buffer
 * of 'spa_history' stores the offsets for logging/retrieving history as
 * 'spa_history_phys_t'.  'sh_pool_create_len' is the ending offset in bytes of
 * where the 'zpool create' record is stored.  This allows us to never
 * overwrite the original creation of the pool.  'sh_phys_max_off' is the
 * physical ending offset in bytes of the log.  This tells you the length of
 * the buffer.  'sh_eof' is the logical EOF (in bytes).  Whenever a record
 * is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented.  'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
 *
 * 'sh_records_lost' keeps track of how many records have been overwritten
 * and permanently lost.
 */
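
/*
 * Illustrative sketch of the layout above (hypothetical byte offsets, not
 * taken from any real pool): with sh_pool_create_len = 512 and
 * sh_phys_max_off = 4096, the 'zpool create' record occupies [0, 512) and
 * every later record cycles through the physical range [512, 4096), i.e.
 * phys_len = 3584 bytes.  A logical offset such as sh_eof = 8000 then maps
 * to physical offset ((8000 - 512) % 3584) + 512 = 832, meaning the log has
 * wrapped and sh_bof has been advanced past each record that was
 * overwritten, with sh_records_lost counting them.
 */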

/* convert a logical offset to physical */
static uint64_t
spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
{
	uint64_t phys_len;

	phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
	return ((log_off - shpp->sh_pool_create_len) % phys_len
	    + shpp->sh_pool_create_len);
}

void
spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
{
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT0(spa->spa_history);
	spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
	    sizeof (spa_history_phys_t), tx);

	VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_HISTORY, sizeof (uint64_t), 1,
	    &spa->spa_history, tx));

	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	ASSERT3U(dbp->db_size, >=, sizeof (spa_history_phys_t));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

	/*
	 * Figure out maximum size of history log.  We set it at
	 * 0.1% of pool size, with a max of 1G and min of 128KB.
	 */
	shpp->sh_phys_max_off =
	    metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
	shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
	shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);
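
	/*
	 * Worked example (hypothetical pool sizes): a pool with 2 TB of
	 * usable space gets 2 TB / 1000 = ~2 GB, which is then clamped to
	 * the 1 GB cap; a 64 GB pool gets ~64 MB; and any pool whose 0.1%
	 * falls below 128 KB is raised to the 128 KB floor.
	 */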

	dmu_buf_rele(dbp, FTAG);
}

/*
 * Change 'sh_bof' to the beginning of the next record.
 */
static int
spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
{
	objset_t *mos = spa->spa_meta_objset;
	uint64_t firstread, reclen, phys_bof;
	char buf[sizeof (reclen)];
	int err;

	phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
	firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);

	if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
	    buf, DMU_READ_PREFETCH)) != 0)
		return (err);
	if (firstread != sizeof (reclen)) {
		if ((err = dmu_read(mos, spa->spa_history,
		    shpp->sh_pool_create_len, sizeof (reclen) - firstread,
		    buf + firstread, DMU_READ_PREFETCH)) != 0)
			return (err);
	}

	reclen = LE_64(*((uint64_t *)buf));
	shpp->sh_bof += reclen + sizeof (reclen);
	shpp->sh_records_lost++;
	return (0);
}

static int
spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
    dmu_tx_t *tx)
{
	uint64_t firstwrite, phys_eof;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	ASSERT(MUTEX_HELD(&spa->spa_history_lock));

	/* see if we need to reset logical BOF */
	while (shpp->sh_phys_max_off - shpp->sh_pool_create_len -
	    (shpp->sh_eof - shpp->sh_bof) <= len) {
		if ((err = spa_history_advance_bof(spa, shpp)) != 0) {
			return (err);
		}
	}

	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
	firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);
	shpp->sh_eof += len;
	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);

	len -= firstwrite;
	if (len > 0) {
		/* write out the rest at the beginning of physical file */
		dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
		    len, (char *)buf + firstwrite, tx);
	}

	return (0);
}

/*
 * Post a history sysevent.
 *
 * The nvlist_t* passed into this function will be transformed into a new
 * nvlist where:
 *
 * 1. Nested nvlists will be flattened to a single level
 * 2. Keys will have their names normalized (to remove any problematic
 * characters, such as whitespace)
 *
 * The nvlist_t passed into this function will be duplicated and should be
 * freed by the caller.
 */
static void
spa_history_log_notify(spa_t *spa, nvlist_t *nvl)
{
	nvlist_t *hist_nvl = fnvlist_alloc();
	uint64_t uint64;
	const char *string;

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_CMD, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_CMD, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_ZONE, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_ZONE, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_HOST, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_HOST, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_DSNAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_STR, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_IOCTL, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_DSID, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TXG, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TIME, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TIME, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_WHO, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_WHO, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_INT_EVENT, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_INT_EVENT, uint64);

	spa_event_notify(spa, NULL, hist_nvl, ESC_ZFS_HISTORY_EVENT);

	nvlist_free(hist_nvl);
}
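
/*
 * Illustrative example (hypothetical record contents): an internal history
 * record carrying ZPOOL_HIST_INT_NAME = "create", ZPOOL_HIST_DSNAME =
 * "tank/fs" and ZPOOL_HIST_TXG = 123 would be re-posted above as a flat
 * sysevent whose payload uses only the scalar ZFS_EV_HIST_* keys, so
 * consumers never see nested nvlists such as the ZPOOL_HIST_INPUT_NVL
 * handled elsewhere in this file.
 */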

/*
 * Write out a history event.
 */
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t	*nvl = arg;
	spa_t		*spa = dmu_tx_pool(tx)->dp_spa;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname()->nodename);

	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    (u_longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}

		/*
		 * The history sysevent is posted only for internal history
		 * messages to show what has happened, not how it happened. For
		 * example, the following command:
		 *
		 * # zfs destroy -r tank/foo
		 *
		 * will result in one sysevent posted per dataset that is
		 * destroyed as a result of the command - which could be more
		 * than one event in total.  By contrast, if the sysevent was
		 * posted as a result of the ZPOOL_HIST_CMD key being present
		 * it would result in only one sysevent being posted with the
		 * full command line arguments, requiring the consumer to know
		 * how to parse and understand zfs(8) command invocations.
		 */
		spa_history_log_notify(spa, nvl);
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	VERIFY3U(nvlist_pack(nvl, &record_packed, &reclen, NV_ENCODE_NATIVE,
	    KM_SLEEP), ==, 0);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}

/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
	int err;
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
	err = spa_history_log_nvl(spa, nvl);
	fnvlist_free(nvl);
	return (err);
}

int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
	int err = 0;
	dmu_tx_t *tx;
	nvlist_t *nvarg, *in_nvl = NULL;

	if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
		return (SET_ERROR(EINVAL));

	err = nvlist_lookup_nvlist(nvl, ZPOOL_HIST_INPUT_NVL, &in_nvl);
	if (err == 0) {
		(void) nvlist_remove_all(in_nvl, ZPOOL_HIDDEN_ARGS);
	}

	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	ASSERT3UF(tx->tx_txg, <=, spa_final_dirty_txg(spa),
	    "Logged %s after final txg was set!", "nvlist");

	VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
	if (spa_history_zone() != NULL) {
		fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
		    spa_history_zone());
	}
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

	/*
	 * Since the history is recorded asynchronously, the effective time is
	 * now, which may be considerably before the change is made on disk.
	 */
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_TIME, gethrestime_sec());

	/* Kick this off asynchronously; errors are ignored. */
	dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync, nvarg, tx);
	dmu_tx_commit(tx);

	/* spa_history_log_sync will free nvl */
	return (err);
}

/*
 * Read out the command history.
 */
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	uint64_t read_len, phys_read_off, phys_eof;
	uint64_t leftover = 0;
	spa_history_phys_t *shpp;
	int err;

	/*
	 * If the command history doesn't exist (older pool),
	 * that's ok, just return ENOENT.
	 */
	if (!spa->spa_history)
		return (SET_ERROR(ENOENT));

	/*
	 * The history is logged asynchronously, so when they request
	 * the first chunk of history, make sure everything has been
	 * synced to disk so that we get it.
	 */
	if (*offp == 0 && spa_writeable(spa))
		txg_wait_synced(spa_get_dsl(spa), 0);

	if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
		return (err);
	shpp = dbp->db_data;

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	mutex_enter(&spa->spa_history_lock);
	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

	if (*offp < shpp->sh_pool_create_len) {
		/* read in just the zpool create history */
		phys_read_off = *offp;
		read_len = MIN(*len, shpp->sh_pool_create_len -
		    phys_read_off);
	} else {
		/*
		 * Need to reset passed in offset to BOF if the passed in
		 * offset has since been overwritten.
		 */
		*offp = MAX(*offp, shpp->sh_bof);
		phys_read_off = spa_history_log_to_phys(*offp, shpp);

		/*
		 * Read up to the minimum of what the user passed down or
		 * the EOF (physical or logical).  If we hit physical EOF,
		 * use 'leftover' to read from the physical BOF.
		 */
		if (phys_read_off <= phys_eof) {
			read_len = MIN(*len, phys_eof - phys_read_off);
		} else {
			read_len = MIN(*len,
			    shpp->sh_phys_max_off - phys_read_off);
			if (phys_read_off + *len > shpp->sh_phys_max_off) {
				leftover = MIN(*len - read_len,
				    phys_eof - shpp->sh_pool_create_len);
			}
		}
	}
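
	/*
	 * Worked example of the wrap-around case (hypothetical offsets):
	 * with sh_pool_create_len = 512, sh_phys_max_off = 4096,
	 * phys_eof = 1024, phys_read_off = 3840 and *len = 512, read_len
	 * becomes MIN(512, 4096 - 3840) = 256 and leftover becomes
	 * MIN(512 - 256, 1024 - 512) = 256, so the request is satisfied by
	 * one read at the physical end of the buffer plus a second read
	 * starting at the physical BOF.
	 */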

	/* offset for consumer to use next */
	*offp += read_len + leftover;

	/* tell the consumer how much you actually read */
	*len = read_len + leftover;

	if (read_len == 0) {
		mutex_exit(&spa->spa_history_lock);
		dmu_buf_rele(dbp, FTAG);
		return (0);
	}

	err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
	    DMU_READ_PREFETCH);
	if (leftover && err == 0) {
		err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
		    leftover, buf + read_len, DMU_READ_PREFETCH);
	}
	mutex_exit(&spa->spa_history_lock);

	dmu_buf_rele(dbp, FTAG);
	return (err);
}

/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
	char *msg;

	/*
	 * If this is part of creating a pool, not everything is
	 * initialized yet, so don't bother logging the internal events.
	 * Likewise if the pool is not writeable.
	 */
	if (spa_is_initializing(spa) || !spa_writeable(spa)) {
		fnvlist_free(nvl);
		return;
	}

	ASSERT3UF(tx->tx_txg, <=, spa_final_dirty_txg(spa),
	    "Logged after final txg was set: %s %s", operation, fmt);

	msg = kmem_vasprintf(fmt, adx);
	fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
	kmem_strfree(msg);

	fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());

	if (dmu_tx_is_syncing(tx)) {
		spa_history_log_sync(nvl, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    spa_history_log_sync, nvl, tx);
	}
	/* spa_history_log_sync() will free nvl */
}

void
spa_history_log_internal(spa_t *spa, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	dmu_tx_t *htx = tx;
	va_list adx;

	/* create a tx if we didn't get one */
	if (tx == NULL) {
		htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
		if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
			dmu_tx_abort(htx);
			return;
		}
	}

	va_start(adx, fmt);
	log_internal(fnvlist_alloc(), operation, spa, htx, fmt, adx);
	va_end(adx);

	/* if we didn't get a tx from the caller, commit the one we made */
	if (tx == NULL)
		dmu_tx_commit(htx);
}
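
/*
 * Usage sketch (hypothetical caller and message, not taken from this file):
 * a sync task that already holds an assigned tx might record
 *
 *	spa_history_log_internal(spa, "set", tx, "%s=%llu", "quota",
 *	    (u_longlong_t)newval);
 *
 * while a caller with no open transaction can pass tx == NULL and this
 * function will create, assign and commit one itself.
 */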

void
spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	ASSERT(tx != NULL);

	dsl_dataset_name(ds, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);

	va_start(adx, fmt);
	log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
	va_end(adx);
}

void
spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	ASSERT(tx != NULL);

	dsl_dir_name(dd, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID,
	    dsl_dir_phys(dd)->dd_head_dataset_obj);

	va_start(adx, fmt);
	log_internal(nvl, operation, dd->dd_pool->dp_spa, tx, fmt, adx);
	va_end(adx);
}

void
spa_history_log_version(spa_t *spa, const char *operation, dmu_tx_t *tx)
{
	utsname_t *u = utsname();

	spa_history_log_internal(spa, operation, tx,
	    "pool version %llu; software version %s; uts %s %s %s %s",
	    (u_longlong_t)spa_version(spa), ZFS_META_GITREV,
	    u->nodename, u->release, u->version, u->machine);
}

#ifndef _KERNEL
const char *
spa_history_zone(void)
{
	return (NULL);
}
#endif

#if defined(_KERNEL)
EXPORT_SYMBOL(spa_history_create_obj);
EXPORT_SYMBOL(spa_history_get);
EXPORT_SYMBOL(spa_history_log);
EXPORT_SYMBOL(spa_history_log_internal);
EXPORT_SYMBOL(spa_history_log_version);
#endif