/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field.  If this is set,
 * it means that the error is destined for a particular device, not a piece of
 * logical data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
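
/*
 * Illustrative sketch (added for orientation; field values are
 * hypothetical): a device-level record sets zi_guid, while a record
 * targeting logical data sets the objset/object/range fields instead:
 *
 *	zinject_record_t zr = { 0 };
 *	zr.zi_cmd = ZINJECT_DEVICE_FAULT;
 *	zr.zi_guid = 0x1234abcd;	(vdev guid, hypothetical)
 *	zr.zi_error = EIO;		(errno to inject)
 */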

#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled;

/*
 * Data describing each zinject handler registered on the system, and
 * contains the list node linking the handler in the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int		zi_id;
	spa_t		*zi_spa;
	zinject_record_t zi_record;
	uint64_t	*zi_lanes;
	int		zi_next_lane;
	list_node_t	zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus modifications to this count must be a RW_WRITER of the
 * inject_lock, and reads of this count must be (at least) a RW_READER
 * of the inject_lock.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(), refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(zbookmark_phys_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (record->zi_freq == 0 ||
			    spa_get_random(100) < record->zi_freq);
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    error == record->zi_error)
		return (record->zi_freq == 0 ||
		    spa_get_random(100) < record->zi_freq);

	return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}

/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}
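
/*
 * Illustrative call site (a sketch, not verbatim from the I/O pipeline):
 * a read completion path can substitute an injected error for success,
 * along the lines of:
 *
 *	if (zio->io_error == 0 && zio_injection_enabled)
 *		zio->io_error = zio_handle_fault_injection(zio, EIO);
 */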

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == error) {
				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (error == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				ret = error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Simulate hardware that ignores cache flushes.  For requested number
 * of seconds nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}

void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz >
			    ddi_get_lbolt64());
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}
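
/*
 * Worked example (added; values are hypothetical): with a negative
 * duration of -3, "zi_timer - zi_duration" above evaluates to
 * zi_timer + 3, i.e. the constraint holds until three txgs have synced
 * past the one recorded in zi_timer. With a positive duration of 10,
 * the constraint holds for 10 * hz clock ticks after the lbolt
 * timestamp stored in zi_timer.
 */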

hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
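
	/*
	 * Worked example (added; numbers are hypothetical): with one lane
	 * and zi_timer = 10ms, three I/Os arriving together complete at
	 * roughly now+10ms, now+20ms, and now+30ms, since each request
	 * queues behind the lane's previous completion time. With two
	 * lanes, the same three I/Os complete at now+10ms, now+10ms, and
	 * now+20ms.
	 */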
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen, the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);
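
		/*
		 * Illustrative trace (added; values are hypothetical): if
		 * zi_timer is 10ms, now is T, and the selected lane becomes
		 * idle at T+4ms, then idle = T+10ms and busy = T+14ms, so
		 * this request is targeted to complete at
		 * MAX(idle, busy) = T+14ms.
		 */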

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * variable below.
		 */
		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}

/*
 * Create a new handler for the given record.  We add it to the list, adding
 * a reference to the spa_t in the process.  We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_ref() will add an injection reference, which will
		 * prevent the pool from being removed from the namespace while
		 * still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer.  Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}
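
/*
 * Usage sketch (added; the pool name and guid are hypothetical): this
 * function is normally reached via the zinject ioctl path. A caller
 * registering a device fault might do:
 *
 *	zinject_record_t zr = { 0 };
 *	int id;
 *
 *	zr.zi_cmd = ZINJECT_DEVICE_FAULT;
 *	zr.zi_guid = guid;
 *	zr.zi_error = EIO;
 *	error = zio_inject_fault("tank", 0, &id, &zr);
 *
 * On success, 'id' can later be passed to zio_clear_fault() to remove
 * the handler.
 */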

/*
 * Returns the next record with an ID greater than that supplied to the
 * function.  Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}
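
/*
 * Iteration sketch (added; illustrative only): callers walk every
 * registered handler by starting at id 0 and feeding each returned id
 * back in:
 *
 *	int id = 0;
 *	char name[MAXNAMELEN];
 *	zinject_record_t zr;
 *
 *	while (zio_inject_list_next(&id, name, sizeof (name), &zr) == 0) {
 *		(examine name and zr)
 *	}
 */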

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}