/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/cred.h>
#include "zfs_comutil.h"
#include "zfs_gitrev.h"

/*
 * Routines to manage the on-disk history log.
 *
 * The history log is stored as a dmu object containing
 * <packed record length, record nvlist> tuples.
 *
 * Where "record nvlist" is an nvlist containing uint64_ts and strings, and
 * "packed record length" is the packed length of the "record nvlist" stored
 * as a little endian uint64_t.
 *
 * The log is implemented as a ring buffer, though the original creation
 * of the pool ('zpool create') is never overwritten.
 *
 * The history log is tracked as object 'spa_t::spa_history'.  The bonus buffer
 * of 'spa_history' stores the offsets for logging/retrieving history as
 * 'spa_history_phys_t'.  'sh_pool_create_len' is the ending offset in bytes of
 * where the 'zpool create' record is stored.  This allows us to never
 * overwrite the original creation of the pool.  'sh_phys_max_off' is the
 * physical ending offset in bytes of the log.  This tells you the length of
 * the buffer.  'sh_eof' is the logical EOF (in bytes).  Whenever a record
 * is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented.  'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
 *
 * 'sh_records_lost' keeps track of how many records have been overwritten
 * and permanently lost.
 */

/* convert a logical offset to physical */
static uint64_t
spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
{
	uint64_t phys_len;

	phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
	return ((log_off - shpp->sh_pool_create_len) % phys_len
	    + shpp->sh_pool_create_len);
}

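/*
 * Create the DMU object that backs the history log, register it in the MOS
 * directory under DMU_POOL_HISTORY, and size the ring buffer by initializing
 * the offsets kept in the object's bonus buffer.
 */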
static void
spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
{
	spa_history_phys_t *shpp;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;

	ASSERT0(spa->spa_history);
	spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
	    sizeof (spa_history_phys_t), tx);

	VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_HISTORY, sizeof (uint64_t), 1,
	    &spa->spa_history, tx));

	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	ASSERT3U(dbp->db_size, >=, sizeof (spa_history_phys_t));

	shpp = dbp->db_data;
	dmu_buf_will_dirty(dbp, tx);

	/*
	 * Figure out maximum size of history log.  We set it at
	 * 0.1% of pool size, with a max of 1G and min of 128KB.
	 */
	shpp->sh_phys_max_off =
	    metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
	shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
	shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);

	dmu_buf_rele(dbp, FTAG);
}

/*
 * Change 'sh_bof' to the beginning of the next record.
 */
static int
spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
{
	objset_t *mos = spa->spa_meta_objset;
	uint64_t firstread, reclen, phys_bof;
	char buf[sizeof (reclen)];
	int err;

	phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
	firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);

	if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
	    buf, DMU_READ_PREFETCH)) != 0)
		return (err);
	if (firstread != sizeof (reclen)) {
		if ((err = dmu_read(mos, spa->spa_history,
		    shpp->sh_pool_create_len, sizeof (reclen) - firstread,
		    buf + firstread, DMU_READ_PREFETCH)) != 0)
			return (err);
	}

	reclen = LE_64(*((uint64_t *)buf));
	shpp->sh_bof += reclen + sizeof (reclen);
	shpp->sh_records_lost++;
	return (0);
}

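/*
 * Append 'len' bytes at the logical EOF.  If the ring buffer is too full,
 * advance 'sh_bof' (discarding the oldest records) until the new record
 * fits, and wrap the write around to the start of the physical buffer when
 * it would run past 'sh_phys_max_off'.
 */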
static int
spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
    dmu_tx_t *tx)
{
	uint64_t firstwrite, phys_eof;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	ASSERT(MUTEX_HELD(&spa->spa_history_lock));

	/* see if we need to reset logical BOF */
	while (shpp->sh_phys_max_off - shpp->sh_pool_create_len -
	    (shpp->sh_eof - shpp->sh_bof) <= len) {
		if ((err = spa_history_advance_bof(spa, shpp)) != 0) {
			return (err);
		}
	}

	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
	firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);

	shpp->sh_eof += len;
	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);

	len -= firstwrite;
	if (len > 0) {
		/* write out the rest at the beginning of physical file */
		dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
		    len, (char *)buf + firstwrite, tx);
	}

	return (0);
}

/*
 * Post a history sysevent.
 *
 * The nvlist_t* passed into this function will be transformed into a new
 * nvlist where:
 *
 * 1. Nested nvlists will be flattened to a single level
 * 2. Keys will have their names normalized (to remove any problematic
 * characters, such as whitespace)
 *
 * The nvlist_t passed into this function will be duplicated and should be
 * freed by the caller.
 */
static void
spa_history_log_notify(spa_t *spa, nvlist_t *nvl)
{
	nvlist_t *hist_nvl = fnvlist_alloc();
	uint64_t uint64;
	char *string;

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_CMD, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_CMD, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_ZONE, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_ZONE, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_HOST, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_HOST, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_DSNAME, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_STR, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_IOCTL, string);

	if (nvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME, &string) == 0)
		fnvlist_add_string(hist_nvl, ZFS_EV_HIST_INT_NAME, string);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_DSID, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TXG, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_TIME, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_TIME, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_WHO, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_WHO, uint64);

	if (nvlist_lookup_uint64(nvl, ZPOOL_HIST_INT_EVENT, &uint64) == 0)
		fnvlist_add_uint64(hist_nvl, ZFS_EV_HIST_INT_EVENT, uint64);

	spa_event_notify(spa, NULL, hist_nvl, ESC_ZFS_HISTORY_EVENT);

	nvlist_free(hist_nvl);
}

/*
 * Write out a history event.
 */
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvl = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	size_t reclen;
	uint64_t le_len;
	char *record_packed = NULL;
	int ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}

	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname()->nodename);

	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    (u_longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    (longlong_t)fnvlist_lookup_uint64(nvl,
			    ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}
		/*
		 * The history sysevent is posted only for internal history
		 * messages to show what has happened, not how it happened. For
		 * example, the following command:
		 *
		 * # zfs destroy -r tank/foo
		 *
		 * will result in one sysevent posted per dataset that is
		 * destroyed as a result of the command - which could be more
		 * than one event in total.  By contrast, if the sysevent was
		 * posted as a result of the ZPOOL_HIST_CMD key being present
		 * it would result in only one sysevent being posted with the
		 * full command line arguments, requiring the consumer to know
		 * how to parse and understand zfs(8) command invocations.
		 */
		spa_history_log_notify(spa, nvl);
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	VERIFY3U(nvlist_pack(nvl, &record_packed, &reclen, NV_ENCODE_NATIVE,
	    KM_SLEEP), ==, 0);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (ret == 0)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}

/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
	int err;
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
	err = spa_history_log_nvl(spa, nvl);
	fnvlist_free(nvl);
	return (err);
}

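/*
 * Log a caller-supplied nvlist of history data.  The record is annotated
 * with the originating zone, user, and time, then handed to
 * spa_history_log_sync() as an asynchronous sync task.
 */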
int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
	int err = 0;
	dmu_tx_t *tx;
	nvlist_t *nvarg, *in_nvl = NULL;

	if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
		return (SET_ERROR(EINVAL));

	err = nvlist_lookup_nvlist(nvl, ZPOOL_HIST_INPUT_NVL, &in_nvl);
	if (err == 0) {
		(void) nvlist_remove_all(in_nvl, ZPOOL_HIDDEN_ARGS);
	}

	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
	if (spa_history_zone() != NULL) {
		fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
		    spa_history_zone());
	}
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

	/*
	 * Since the history is recorded asynchronously, the effective time is
	 * now, which may be considerably before the change is made on disk.
	 */
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_TIME, gethrestime_sec());

	/* Kick this off asynchronously; errors are ignored. */
	dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync, nvarg, tx);
	dmu_tx_commit(tx);

	/* spa_history_log_sync will free nvl */
	return (err);
}

/*
 * Read out the command history.
 */
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	uint64_t read_len, phys_read_off, phys_eof;
	uint64_t leftover = 0;
	spa_history_phys_t *shpp;
	int err;

	/*
	 * If the command history doesn't exist (older pool),
	 * that's ok, just return ENOENT.
	 */
	if (!spa->spa_history)
		return (SET_ERROR(ENOENT));

	/*
	 * The history is logged asynchronously, so when they request
	 * the first chunk of history, make sure everything has been
	 * synced to disk so that we get it.
	 */
	if (*offp == 0 && spa_writeable(spa))
		txg_wait_synced(spa_get_dsl(spa), 0);

	if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
		return (err);
	shpp = dbp->db_data;

	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}

	mutex_enter(&spa->spa_history_lock);
	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

	if (*offp < shpp->sh_pool_create_len) {
		/* read in just the zpool create history */
		phys_read_off = *offp;
		read_len = MIN(*len, shpp->sh_pool_create_len -
		    phys_read_off);
	} else {
		/*
		 * Need to reset passed in offset to BOF if the passed in
		 * offset has since been overwritten.
		 */
		*offp = MAX(*offp, shpp->sh_bof);
		phys_read_off = spa_history_log_to_phys(*offp, shpp);

		/*
		 * Read up to the minimum of what the user passed down or
		 * the EOF (physical or logical).  If we hit physical EOF,
		 * use 'leftover' to read from the physical BOF.
		 */
		if (phys_read_off <= phys_eof) {
			read_len = MIN(*len, phys_eof - phys_read_off);
		} else {
			read_len = MIN(*len,
			    shpp->sh_phys_max_off - phys_read_off);
			if (phys_read_off + *len > shpp->sh_phys_max_off) {
				leftover = MIN(*len - read_len,
				    phys_eof - shpp->sh_pool_create_len);
			}
		}
	}

	/* offset for consumer to use next */
	*offp += read_len + leftover;

	/* tell the consumer how much you actually read */
	*len = read_len + leftover;

	if (read_len == 0) {
		mutex_exit(&spa->spa_history_lock);
		dmu_buf_rele(dbp, FTAG);
		return (0);
	}

	err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
	    DMU_READ_PREFETCH);
	if (leftover && err == 0) {
		err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
		    leftover, buf + read_len, DMU_READ_PREFETCH);
	}
	mutex_exit(&spa->spa_history_lock);

	dmu_buf_rele(dbp, FTAG);
	return (err);
}

/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
	char *msg;

	/*
	 * If this is part of creating a pool, not everything is
	 * initialized yet, so don't bother logging the internal events.
	 * Likewise if the pool is not writeable.
	 */
	if (spa_is_initializing(spa) || !spa_writeable(spa)) {
		fnvlist_free(nvl);
		return;
	}

	msg = kmem_vasprintf(fmt, adx);
	fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
	strfree(msg);

	fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());

	if (dmu_tx_is_syncing(tx)) {
		spa_history_log_sync(nvl, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    spa_history_log_sync, nvl, tx);
	}
	/* spa_history_log_sync() will free nvl */
}

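/*
 * Log an internal history event, creating and committing a transaction of
 * our own if the caller did not supply one.
 */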
void
spa_history_log_internal(spa_t *spa, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	dmu_tx_t *htx = tx;
	va_list adx;

	/* create a tx if we didn't get one */
	if (tx == NULL) {
		htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
		if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
			dmu_tx_abort(htx);
			return;
		}
	}

	va_start(adx, fmt);
	log_internal(fnvlist_alloc(), operation, spa, htx, fmt, adx);
	va_end(adx);

	/* if we didn't get a tx from the caller, commit the one we made */
	if (tx == NULL)
		dmu_tx_commit(htx);
}

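/*
 * Log an internal history event on behalf of a dataset, tagging the record
 * with the dataset's name and object ID.
 */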
void
spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	ASSERT(tx != NULL);

	dsl_dataset_name(ds, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);

	va_start(adx, fmt);
	log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
	va_end(adx);
}

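/*
 * As above, but for a dsl_dir; the DSID recorded is the directory's head
 * dataset object.
 */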
void
spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *nvl = fnvlist_alloc();

	ASSERT(tx != NULL);

	dsl_dir_name(dd, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID,
	    dsl_dir_phys(dd)->dd_head_dataset_obj);

	va_start(adx, fmt);
	log_internal(nvl, operation, dd->dd_pool->dp_spa, tx, fmt, adx);
	va_end(adx);
}

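/*
 * Record the pool version, software version, and uname of the running
 * system in the history log.
 */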
void
spa_history_log_version(spa_t *spa, const char *operation, dmu_tx_t *tx)
{
	utsname_t *u = utsname();

	spa_history_log_internal(spa, operation, tx,
	    "pool version %llu; software version %s; uts %s %s %s %s",
	    (u_longlong_t)spa_version(spa), ZFS_META_GITREV,
	    u->nodename, u->release, u->version, u->machine);
}

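/*
 * Name of the zone (if any) from which the history request originates;
 * recorded under ZPOOL_HIST_ZONE by spa_history_log_nvl().
 */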
const char *
spa_history_zone(void)
{
#ifdef _KERNEL
	return ("linux");
#else
	return (NULL);
#endif
}

#if defined(_KERNEL)
EXPORT_SYMBOL(spa_history_create_obj);
EXPORT_SYMBOL(spa_history_get);
EXPORT_SYMBOL(spa_history_log);
EXPORT_SYMBOL(spa_history_log_internal);
EXPORT_SYMBOL(spa_history_log_version);
#endif