/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
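/*
 * Illustrative sketch (not part of the original source): evicting a set of
 * variable-sized, evictable blocks to cover a miss, as described in point 3
 * above.  evict_one() and new_block_size are hypothetical names; the real
 * logic lives in arc_evict() further down in this file.
 *
 *	uint64_t freed = 0;
 *	for (ab = list_tail(list); ab != NULL && freed < new_block_size;
 *	    ab = list_prev(list, ab)) {
 *		if (refcount_count(&ab->b_refcnt) == 0)	 // evictable only
 *			freed += evict_one(ab);		 // frees ab->b_size bytes
 *	}
 */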
/*
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
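/*
 * Illustrative sketch (not part of the original source): the lock ordering
 * described above.  When an arc list lock is already held, the hash lock is
 * taken with mutex_tryenter() so the two lock classes cannot deadlock; on
 * failure the buffer is simply skipped.  This mirrors the pattern used by
 * arc_evict() below.
 *
 *	mutex_enter(&state->arcs_mtx);		// arc list lock
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {
 *		... // safe to touch most arc_buf_hdr_t fields here
 *		mutex_exit(hash_lock);
 *	} else {
 *		missed++;			// retry on a later pass
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */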
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#define	btop(x)		((x) / PAGE_SIZE)
#define	needfree	(uvmexp.free < uvmexp.freetarg ? uvmexp.freetarg : 0)
#define	buf_init	arc_buf_init
#define	freemem		uvmexp.free
#define	minfree		uvmexp.freemin
#define	desfree		uvmexp.freetarg
#define	lotsfree	(desfree * 2)
#define	availrmem	desfree
#define	swapfs_minfree	0
#define	swapfs_reserve	0
#define	curproc		curlwp
#define	proc_pageout	uvm.pagedaemon_lwp
#define	heap_arena	kernel_map
static size_t
vmem_size(struct vm_map *map, int flag)
{
	switch (flag) {
	case VMEM_FREE:
		return vm_map_max(map) - vm_map_min(map) - map->size;
	case VMEM_FREE|VMEM_ALLOC:
		return vm_map_max(map) - vm_map_min(map);
	}
}
static void *zio_arena;

#include <sys/callback.h>
/* Structures used for memory and kva space reclaim. */
static struct callback_entry arc_kva_reclaim_entry;
static struct uvm_reclaim_hook arc_hook;

#endif	/* __NetBSD__ */
static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;
/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t	arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
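/*
 * Example (illustrative, not part of the original source): the amount of
 * evictable metadata currently sitting on the MRU list is simply
 *
 *	arc_mru->arcs_lsize[ARC_BUFC_METADATA]
 *
 * and the eviction code below uses exactly such expressions when deciding
 * how much to evict from each state/type pair.
 */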
typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;
static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
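/*
 * Usage sketch (illustrative, not part of the original source): this is how
 * arc_buf_add_ref() below classifies a hit into one of the four
 * demand/prefetch x data/metadata counters:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * With both conditions true this bumps arcstat_demand_data_hits.
 */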
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;
/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
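/*
 * Example (illustrative, not part of the original source): with the defines
 * above, an expression such as
 *
 *	atomic_add_64(&arc_c, (int64_t)bytes);
 *
 * expands to atomic_add_64(&arc_stats.arcstat_c.value.ui64, (int64_t)bytes),
 * so the kstat itself remains the single authoritative copy of the value.
 */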
static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
};
struct arc_buf_hdr {
	/* protected by hash lock */
	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;

	arc_callback_t		*b_acb;

	arc_buf_contents_t	b_type;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
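/*
 * Illustrative note (not part of the original source): ghost states track
 * only headers whose data has already been evicted, so code that walks them
 * can assert the absence of data, e.g. as add_reference() does below:
 *
 *	if (GHOST_STATE(ab->b_state)) {
 *		ASSERT3U(ab->b_datacnt, ==, 0);
 *		ASSERT3P(ab->b_buf, ==, NULL);
 *	}
 */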
/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];
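/*
 * Usage sketch (illustrative, not part of the original source): mapping a
 * block identity to its hash bucket and per-bucket lock with the macros
 * above.  The same pattern appears in buf_hash_find() and buf_hash_insert().
 *
 *	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 *	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 *
 *	mutex_enter(hash_lock);
 *	... // walk the buf_hash_table.ht_table[idx] chain via b_hash_next
 *	mutex_exit(hash_lock);
 */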
#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);
static uint64_t
buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav >> 8) ^ birth;

	return (crc);
}
#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
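/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * lookup.  BP_IDENTITY() is assumed to yield the DVA used as the hash key;
 * on a hit the per-bucket hash lock is returned held and must be dropped by
 * the caller.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... // hdr fields are protected by hash_lock here
 *		mutex_exit(hash_lock);
 *	}
 */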
702 * Insert an entry into the hash table. If there is already an element
703 * equal to elem in the hash table, then the already existing element
704 * will be returned and the new element will not be inserted.
705 * Otherwise returns NULL.
707 static arc_buf_hdr_t
*
708 buf_hash_insert(arc_buf_hdr_t
*buf
, kmutex_t
**lockp
)
710 uint64_t idx
= BUF_HASH_INDEX(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
);
711 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
715 ASSERT(!HDR_IN_HASH_TABLE(buf
));
717 mutex_enter(hash_lock
);
718 for (fbuf
= buf_hash_table
.ht_table
[idx
], i
= 0; fbuf
!= NULL
;
719 fbuf
= fbuf
->b_hash_next
, i
++) {
720 if (BUF_EQUAL(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
, fbuf
))
724 buf
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
725 buf_hash_table
.ht_table
[idx
] = buf
;
726 buf
->b_flags
|= ARC_IN_HASH_TABLE
;
728 /* collect some hash table performance data */
730 ARCSTAT_BUMP(arcstat_hash_collisions
);
732 ARCSTAT_BUMP(arcstat_hash_chains
);
734 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
737 ARCSTAT_BUMP(arcstat_hash_elements
);
738 ARCSTAT_MAXSTAT(arcstat_hash_elements
);
744 buf_hash_remove(arc_buf_hdr_t
*buf
)
746 arc_buf_hdr_t
*fbuf
, **bufp
;
747 uint64_t idx
= BUF_HASH_INDEX(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
);
749 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
750 ASSERT(HDR_IN_HASH_TABLE(buf
));
752 bufp
= &buf_hash_table
.ht_table
[idx
];
753 while ((fbuf
= *bufp
) != buf
) {
754 ASSERT(fbuf
!= NULL
);
755 bufp
= &fbuf
->b_hash_next
;
757 *bufp
= buf
->b_hash_next
;
758 buf
->b_hash_next
= NULL
;
759 buf
->b_flags
&= ~ARC_IN_HASH_TABLE
;
761 /* collect some hash table performance data */
762 ARCSTAT_BUMPDOWN(arcstat_hash_elements
);
764 if (buf_hash_table
.ht_table
[idx
] &&
765 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
766 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
770 * Global data structures and functions for the buf kmem cache.
772 static kmem_cache_t
*hdr_cache
;
773 static kmem_cache_t
*buf_cache
;
780 kmem_free(buf_hash_table
.ht_table
,
781 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
782 for (i
= 0; i
< BUF_LOCKS
; i
++)
783 mutex_destroy(&buf_hash_table
.ht_locks
[i
].ht_lock
);
784 kmem_cache_destroy(hdr_cache
);
785 kmem_cache_destroy(buf_cache
);
789 * Constructor callback - called when the cache is empty
790 * and a new buf is requested.
794 hdr_cons(void *vbuf
, void *unused
, int kmflag
)
796 arc_buf_hdr_t
*buf
= unused
;
798 bzero(buf
, sizeof (arc_buf_hdr_t
));
799 refcount_create(&buf
->b_refcnt
);
800 cv_init(&buf
->b_cv
, NULL
, CV_DEFAULT
, NULL
);
801 mutex_init(&buf
->b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
803 ARCSTAT_INCR(arcstat_hdr_size
, HDR_SIZE
);
809 buf_cons(void *vbuf
, void *unused
, int kmflag
)
811 arc_buf_t
*buf
= unused
;
813 bzero(buf
, sizeof (arc_buf_t
));
814 rw_init(&buf
->b_lock
, NULL
, RW_DEFAULT
, NULL
);
819 * Destructor callback - called when a cached buf is
820 * no longer required.
824 hdr_dest(void *vbuf
, void *unused
)
826 arc_buf_hdr_t
*buf
= unused
;
828 refcount_destroy(&buf
->b_refcnt
);
829 cv_destroy(&buf
->b_cv
);
830 mutex_destroy(&buf
->b_freeze_lock
);
832 ARCSTAT_INCR(arcstat_hdr_size
, -HDR_SIZE
);
837 buf_dest(void *vbuf
, void *unused
)
839 arc_buf_t
*buf
= unused
;
841 rw_destroy(&buf
->b_lock
);
845 * Reclaim callback -- invoked when memory is low.
849 hdr_recl(void *unused
)
851 dprintf("hdr_recl called\n");
853 * umem calls the reclaim func when we destroy the buf cache,
854 * which is after we do arc_fini().
857 cv_signal(&arc_reclaim_thr_cv
);
864 uint64_t hsize
= 1ULL << 12;
868 * The hash table is big enough to fill all of physical memory
869 * with an average 64K block size. The table will take up
870 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
872 while (hsize
* 65536 < (uint64_t)physmem
* PAGESIZE
)
875 buf_hash_table
.ht_mask
= hsize
- 1;
876 buf_hash_table
.ht_table
=
877 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
878 if (buf_hash_table
.ht_table
== NULL
) {
879 ASSERT(hsize
> (1ULL << 8));
884 hdr_cache
= kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t
),
885 0, hdr_cons
, hdr_dest
, hdr_recl
, NULL
, NULL
, 0);
886 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
887 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
889 for (i
= 0; i
< 256; i
++)
890 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
891 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
893 for (i
= 0; i
< BUF_LOCKS
; i
++) {
894 mutex_init(&buf_hash_table
.ht_locks
[i
].ht_lock
,
895 NULL
, MUTEX_DEFAULT
, NULL
);
899 #define ARC_MINTIME (hz>>4) /* 62 ms */
902 arc_cksum_verify(arc_buf_t
*buf
)
906 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
909 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
910 if (buf
->b_hdr
->b_freeze_cksum
== NULL
||
911 (buf
->b_hdr
->b_flags
& ARC_IO_ERROR
)) {
912 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
915 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
916 if (!ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
))
917 panic("buffer modified while frozen!");
918 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
922 arc_cksum_equal(arc_buf_t
*buf
)
927 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
928 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
929 equal
= ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
);
930 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
936 arc_cksum_compute(arc_buf_t
*buf
, boolean_t force
)
938 if (!force
&& !(zfs_flags
& ZFS_DEBUG_MODIFY
))
941 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
942 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
943 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
946 buf
->b_hdr
->b_freeze_cksum
= kmem_alloc(sizeof (zio_cksum_t
), KM_SLEEP
);
947 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
,
948 buf
->b_hdr
->b_freeze_cksum
);
949 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
953 arc_buf_thaw(arc_buf_t
*buf
)
955 if (zfs_flags
& ZFS_DEBUG_MODIFY
) {
956 if (buf
->b_hdr
->b_state
!= arc_anon
)
957 panic("modifying non-anon buffer!");
958 if (buf
->b_hdr
->b_flags
& ARC_IO_IN_PROGRESS
)
959 panic("modifying buffer while i/o in progress!");
960 arc_cksum_verify(buf
);
963 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
964 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
965 kmem_free(buf
->b_hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
966 buf
->b_hdr
->b_freeze_cksum
= NULL
;
968 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
972 arc_buf_freeze(arc_buf_t
*buf
)
974 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
977 ASSERT(buf
->b_hdr
->b_freeze_cksum
!= NULL
||
978 buf
->b_hdr
->b_state
== arc_anon
);
979 arc_cksum_compute(buf
, B_FALSE
);
983 add_reference(arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
, void *tag
)
985 ASSERT(MUTEX_HELD(hash_lock
));
987 if ((refcount_add(&ab
->b_refcnt
, tag
) == 1) &&
988 (ab
->b_state
!= arc_anon
)) {
989 uint64_t delta
= ab
->b_size
* ab
->b_datacnt
;
990 list_t
*list
= &ab
->b_state
->arcs_list
[ab
->b_type
];
991 uint64_t *size
= &ab
->b_state
->arcs_lsize
[ab
->b_type
];
993 ASSERT(!MUTEX_HELD(&ab
->b_state
->arcs_mtx
));
994 mutex_enter(&ab
->b_state
->arcs_mtx
);
995 ASSERT(list_link_active(&ab
->b_arc_node
));
996 list_remove(list
, ab
);
997 if (GHOST_STATE(ab
->b_state
)) {
998 ASSERT3U(ab
->b_datacnt
, ==, 0);
999 ASSERT3P(ab
->b_buf
, ==, NULL
);
1003 ASSERT3U(*size
, >=, delta
);
1004 atomic_add_64(size
, -delta
);
1005 mutex_exit(&ab
->b_state
->arcs_mtx
);
1006 /* remove the prefetch flag if we get a reference */
1007 if (ab
->b_flags
& ARC_PREFETCH
)
1008 ab
->b_flags
&= ~ARC_PREFETCH
;
1013 remove_reference(arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
, void *tag
)
1016 arc_state_t
*state
= ab
->b_state
;
1018 ASSERT(state
== arc_anon
|| MUTEX_HELD(hash_lock
));
1019 ASSERT(!GHOST_STATE(state
));
1021 if (((cnt
= refcount_remove(&ab
->b_refcnt
, tag
)) == 0) &&
1022 (state
!= arc_anon
)) {
1023 uint64_t *size
= &state
->arcs_lsize
[ab
->b_type
];
1025 ASSERT(!MUTEX_HELD(&state
->arcs_mtx
));
1026 mutex_enter(&state
->arcs_mtx
);
1027 ASSERT(!list_link_active(&ab
->b_arc_node
));
1028 list_insert_head(&state
->arcs_list
[ab
->b_type
], ab
);
1029 ASSERT(ab
->b_datacnt
> 0);
1030 atomic_add_64(size
, ab
->b_size
* ab
->b_datacnt
);
1031 mutex_exit(&state
->arcs_mtx
);
1037 * Move the supplied buffer to the indicated state. The mutex
1038 * for the buffer must be held by the caller.
1041 arc_change_state(arc_state_t
*new_state
, arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
)
1043 arc_state_t
*old_state
= ab
->b_state
;
1044 int64_t refcnt
= refcount_count(&ab
->b_refcnt
);
1045 uint64_t from_delta
, to_delta
;
1047 ASSERT(MUTEX_HELD(hash_lock
));
1048 ASSERT(new_state
!= old_state
);
1049 ASSERT(refcnt
== 0 || ab
->b_datacnt
> 0);
1050 ASSERT(ab
->b_datacnt
== 0 || !GHOST_STATE(new_state
));
1052 from_delta
= to_delta
= ab
->b_datacnt
* ab
->b_size
;
1055 * If this buffer is evictable, transfer it from the
1056 * old state list to the new state list.
1059 if (old_state
!= arc_anon
) {
1060 int use_mutex
= !MUTEX_HELD(&old_state
->arcs_mtx
);
1061 uint64_t *size
= &old_state
->arcs_lsize
[ab
->b_type
];
1064 mutex_enter(&old_state
->arcs_mtx
);
1066 ASSERT(list_link_active(&ab
->b_arc_node
));
1067 list_remove(&old_state
->arcs_list
[ab
->b_type
], ab
);
1070 * If prefetching out of the ghost cache,
1071 * we will have a non-zero datacnt.
1073 if (GHOST_STATE(old_state
) && ab
->b_datacnt
== 0) {
1074 /* ghost elements have a ghost size */
1075 ASSERT(ab
->b_buf
== NULL
);
1076 from_delta
= ab
->b_size
;
1078 ASSERT3U(*size
, >=, from_delta
);
1079 atomic_add_64(size
, -from_delta
);
1082 mutex_exit(&old_state
->arcs_mtx
);
1084 if (new_state
!= arc_anon
) {
1085 int use_mutex
= !MUTEX_HELD(&new_state
->arcs_mtx
);
1086 uint64_t *size
= &new_state
->arcs_lsize
[ab
->b_type
];
1089 mutex_enter(&new_state
->arcs_mtx
);
1091 list_insert_head(&new_state
->arcs_list
[ab
->b_type
], ab
);
1093 /* ghost elements have a ghost size */
1094 if (GHOST_STATE(new_state
)) {
1095 ASSERT(ab
->b_datacnt
== 0);
1096 ASSERT(ab
->b_buf
== NULL
);
1097 to_delta
= ab
->b_size
;
1099 atomic_add_64(size
, to_delta
);
1102 mutex_exit(&new_state
->arcs_mtx
);
1106 ASSERT(!BUF_EMPTY(ab
));
1107 if (new_state
== arc_anon
) {
1108 buf_hash_remove(ab
);
1111 /* adjust state sizes */
1113 atomic_add_64(&new_state
->arcs_size
, to_delta
);
1115 ASSERT3U(old_state
->arcs_size
, >=, from_delta
);
1116 atomic_add_64(&old_state
->arcs_size
, -from_delta
);
1118 ab
->b_state
= new_state
;
1120 /* adjust l2arc hdr stats */
1121 if (new_state
== arc_l2c_only
)
1122 l2arc_hdr_stat_add();
1123 else if (old_state
== arc_l2c_only
)
1124 l2arc_hdr_stat_remove();
1128 arc_space_consume(uint64_t space
)
1130 atomic_add_64(&arc_meta_used
, space
);
1131 atomic_add_64(&arc_size
, space
);
1135 arc_space_return(uint64_t space
)
1137 ASSERT(arc_meta_used
>= space
);
1138 if (arc_meta_max
< arc_meta_used
)
1139 arc_meta_max
= arc_meta_used
;
1140 atomic_add_64(&arc_meta_used
, -space
);
1141 ASSERT(arc_size
>= space
);
1142 atomic_add_64(&arc_size
, -space
);
1146 arc_data_buf_alloc(uint64_t size
)
1148 if (arc_evict_needed(ARC_BUFC_DATA
))
1149 cv_signal(&arc_reclaim_thr_cv
);
1150 atomic_add_64(&arc_size
, size
);
1151 return (zio_data_buf_alloc(size
));
1155 arc_data_buf_free(void *buf
, uint64_t size
)
1157 zio_data_buf_free(buf
, size
);
1158 ASSERT(arc_size
>= size
);
1159 atomic_add_64(&arc_size
, -size
);
1163 arc_buf_alloc(spa_t
*spa
, int size
, void *tag
, arc_buf_contents_t type
)
1168 ASSERT3U(size
, >, 0);
1169 hdr
= kmem_cache_alloc(hdr_cache
, KM_PUSHPAGE
);
1170 ASSERT(BUF_EMPTY(hdr
));
1174 hdr
->b_state
= arc_anon
;
1175 hdr
->b_arc_access
= 0;
1176 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1179 buf
->b_efunc
= NULL
;
1180 buf
->b_private
= NULL
;
1183 arc_get_data_buf(buf
);
1186 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1187 (void) refcount_add(&hdr
->b_refcnt
, tag
);
1193 arc_buf_clone(arc_buf_t
*from
)
1196 arc_buf_hdr_t
*hdr
= from
->b_hdr
;
1197 uint64_t size
= hdr
->b_size
;
1199 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1202 buf
->b_efunc
= NULL
;
1203 buf
->b_private
= NULL
;
1204 buf
->b_next
= hdr
->b_buf
;
1206 arc_get_data_buf(buf
);
1207 bcopy(from
->b_data
, buf
->b_data
, size
);
1208 hdr
->b_datacnt
+= 1;
1213 arc_buf_add_ref(arc_buf_t
*buf
, void* tag
)
1216 kmutex_t
*hash_lock
;
1219 * Check to see if this buffer is evicted. Callers
1220 * must verify b_data != NULL to know if the add_ref was successful.
1223 rw_enter(&buf
->b_lock
, RW_READER
);
1224 if (buf
->b_data
== NULL
) {
1225 rw_exit(&buf
->b_lock
);
1229 ASSERT(hdr
!= NULL
);
1230 hash_lock
= HDR_LOCK(hdr
);
1231 mutex_enter(hash_lock
);
1232 rw_exit(&buf
->b_lock
);
1234 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
1235 add_reference(hdr
, hash_lock
, tag
);
1236 arc_access(hdr
, hash_lock
);
1237 mutex_exit(hash_lock
);
1238 ARCSTAT_BUMP(arcstat_hits
);
1239 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
1240 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
1241 data
, metadata
, hits
);
1245 * Free the arc data buffer. If it is an l2arc write in progress,
1246 * the buffer is placed on l2arc_free_on_write to be freed later.
1249 arc_buf_data_free(arc_buf_hdr_t
*hdr
, void (*free_func
)(void *, size_t),
1250 void *data
, size_t size
)
1252 if (HDR_L2_WRITING(hdr
)) {
1253 l2arc_data_free_t
*df
;
1254 df
= kmem_alloc(sizeof (l2arc_data_free_t
), KM_SLEEP
);
1255 df
->l2df_data
= data
;
1256 df
->l2df_size
= size
;
1257 df
->l2df_func
= free_func
;
1258 mutex_enter(&l2arc_free_on_write_mtx
);
1259 list_insert_head(l2arc_free_on_write
, df
);
1260 mutex_exit(&l2arc_free_on_write_mtx
);
1261 ARCSTAT_BUMP(arcstat_l2_free_on_write
);
1263 free_func(data
, size
);
1268 arc_buf_destroy(arc_buf_t
*buf
, boolean_t recycle
, boolean_t all
)
1272 /* free up data associated with the buf */
1274 arc_state_t
*state
= buf
->b_hdr
->b_state
;
1275 uint64_t size
= buf
->b_hdr
->b_size
;
1276 arc_buf_contents_t type
= buf
->b_hdr
->b_type
;
1278 arc_cksum_verify(buf
);
1280 if (type
== ARC_BUFC_METADATA
) {
1281 arc_buf_data_free(buf
->b_hdr
, zio_buf_free
,
1283 arc_space_return(size
);
1285 ASSERT(type
== ARC_BUFC_DATA
);
1286 arc_buf_data_free(buf
->b_hdr
,
1287 zio_data_buf_free
, buf
->b_data
, size
);
1288 atomic_add_64(&arc_size
, -size
);
1291 if (list_link_active(&buf
->b_hdr
->b_arc_node
)) {
1292 uint64_t *cnt
= &state
->arcs_lsize
[type
];
1294 ASSERT(refcount_is_zero(&buf
->b_hdr
->b_refcnt
));
1295 ASSERT(state
!= arc_anon
);
1297 ASSERT3U(*cnt
, >=, size
);
1298 atomic_add_64(cnt
, -size
);
1300 ASSERT3U(state
->arcs_size
, >=, size
);
1301 atomic_add_64(&state
->arcs_size
, -size
);
1303 ASSERT(buf
->b_hdr
->b_datacnt
> 0);
1304 buf
->b_hdr
->b_datacnt
-= 1;
1307 /* only remove the buf if requested */
1311 /* remove the buf from the hdr list */
1312 for (bufp
= &buf
->b_hdr
->b_buf
; *bufp
!= buf
; bufp
= &(*bufp
)->b_next
)
1314 *bufp
= buf
->b_next
;
1316 ASSERT(buf
->b_efunc
== NULL
);
1318 /* clean up the buf */
1320 kmem_cache_free(buf_cache
, buf
);
1324 arc_hdr_destroy(arc_buf_hdr_t
*hdr
)
1326 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1327 ASSERT3P(hdr
->b_state
, ==, arc_anon
);
1328 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
1329 ASSERT(!(hdr
->b_flags
& ARC_STORED
));
1331 if (hdr
->b_l2hdr
!= NULL
) {
1332 if (!MUTEX_HELD(&l2arc_buflist_mtx
)) {
1334 * To prevent arc_free() and l2arc_evict() from
1335 * attempting to free the same buffer at the same time,
1336 * a FREE_IN_PROGRESS flag is given to arc_free() to
1337 * give it priority. l2arc_evict() can't destroy this
1338 * header while we are waiting on l2arc_buflist_mtx.
1340 * The hdr may be removed from l2ad_buflist before we
1341 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1343 mutex_enter(&l2arc_buflist_mtx
);
1344 if (hdr
->b_l2hdr
!= NULL
) {
1345 list_remove(hdr
->b_l2hdr
->b_dev
->l2ad_buflist
,
1348 mutex_exit(&l2arc_buflist_mtx
);
1350 list_remove(hdr
->b_l2hdr
->b_dev
->l2ad_buflist
, hdr
);
1352 ARCSTAT_INCR(arcstat_l2_size
, -hdr
->b_size
);
1353 kmem_free(hdr
->b_l2hdr
, sizeof (l2arc_buf_hdr_t
));
1354 if (hdr
->b_state
== arc_l2c_only
)
1355 l2arc_hdr_stat_remove();
1356 hdr
->b_l2hdr
= NULL
;
1359 if (!BUF_EMPTY(hdr
)) {
1360 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
1361 bzero(&hdr
->b_dva
, sizeof (dva_t
));
1365 while (hdr
->b_buf
) {
1366 arc_buf_t
*buf
= hdr
->b_buf
;
1369 mutex_enter(&arc_eviction_mtx
);
1370 rw_enter(&buf
->b_lock
, RW_WRITER
);
1371 ASSERT(buf
->b_hdr
!= NULL
);
1372 arc_buf_destroy(hdr
->b_buf
, FALSE
, FALSE
);
1373 hdr
->b_buf
= buf
->b_next
;
1374 buf
->b_hdr
= &arc_eviction_hdr
;
1375 buf
->b_next
= arc_eviction_list
;
1376 arc_eviction_list
= buf
;
1377 rw_exit(&buf
->b_lock
);
1378 mutex_exit(&arc_eviction_mtx
);
1380 arc_buf_destroy(hdr
->b_buf
, FALSE
, TRUE
);
1383 if (hdr
->b_freeze_cksum
!= NULL
) {
1384 kmem_free(hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
1385 hdr
->b_freeze_cksum
= NULL
;
1388 ASSERT(!list_link_active(&hdr
->b_arc_node
));
1389 ASSERT3P(hdr
->b_hash_next
, ==, NULL
);
1390 ASSERT3P(hdr
->b_acb
, ==, NULL
);
1391 kmem_cache_free(hdr_cache
, hdr
);
1395 arc_buf_free(arc_buf_t
*buf
, void *tag
)
1397 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1398 int hashed
= hdr
->b_state
!= arc_anon
;
1400 ASSERT(buf
->b_efunc
== NULL
);
1401 ASSERT(buf
->b_data
!= NULL
);
1404 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
1406 mutex_enter(hash_lock
);
1407 (void) remove_reference(hdr
, hash_lock
, tag
);
1408 if (hdr
->b_datacnt
> 1)
1409 arc_buf_destroy(buf
, FALSE
, TRUE
);
1411 hdr
->b_flags
|= ARC_BUF_AVAILABLE
;
1412 mutex_exit(hash_lock
);
1413 } else if (HDR_IO_IN_PROGRESS(hdr
)) {
1416 * We are in the middle of an async write. Don't destroy
1417 * this buffer unless the write completes before we finish
1418 * decrementing the reference count.
1420 mutex_enter(&arc_eviction_mtx
);
1421 (void) remove_reference(hdr
, NULL
, tag
);
1422 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1423 destroy_hdr
= !HDR_IO_IN_PROGRESS(hdr
);
1424 mutex_exit(&arc_eviction_mtx
);
1426 arc_hdr_destroy(hdr
);
1428 if (remove_reference(hdr
, NULL
, tag
) > 0) {
1429 ASSERT(HDR_IO_ERROR(hdr
));
1430 arc_buf_destroy(buf
, FALSE
, TRUE
);
1432 arc_hdr_destroy(hdr
);
1438 arc_buf_remove_ref(arc_buf_t
*buf
, void* tag
)
1440 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1441 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
1442 int no_callback
= (buf
->b_efunc
== NULL
);
1444 if (hdr
->b_state
== arc_anon
) {
1445 arc_buf_free(buf
, tag
);
1446 return (no_callback
);
1449 mutex_enter(hash_lock
);
1450 ASSERT(hdr
->b_state
!= arc_anon
);
1451 ASSERT(buf
->b_data
!= NULL
);
1453 (void) remove_reference(hdr
, hash_lock
, tag
);
1454 if (hdr
->b_datacnt
> 1) {
1456 arc_buf_destroy(buf
, FALSE
, TRUE
);
1457 } else if (no_callback
) {
1458 ASSERT(hdr
->b_buf
== buf
&& buf
->b_next
== NULL
);
1459 hdr
->b_flags
|= ARC_BUF_AVAILABLE
;
1461 ASSERT(no_callback
|| hdr
->b_datacnt
> 1 ||
1462 refcount_is_zero(&hdr
->b_refcnt
));
1463 mutex_exit(hash_lock
);
1464 return (no_callback
);
1468 arc_buf_size(arc_buf_t
*buf
)
1470 return (buf
->b_hdr
->b_size
);
1474 * Evict buffers from list until we've removed the specified number of
1475 * bytes. Move the removed buffers to the appropriate evict state.
1476 * If the recycle flag is set, then attempt to "recycle" a buffer:
1477 * - look for a buffer to evict that is `bytes' long.
1478 * - return the data block from this buffer rather than freeing it.
1479 * This flag is used by callers that are trying to make space for a
1480 * new buffer in a full arc cache.
1482 * This function makes a "best effort". It skips over any buffers
1483 * it can't get a hash_lock on, and so may not catch all candidates.
1484 * It may also return without evicting as much space as requested.
1487 arc_evict(arc_state_t
*state
, spa_t
*spa
, int64_t bytes
, boolean_t recycle
,
1488 arc_buf_contents_t type
)
1490 arc_state_t
*evicted_state
;
1491 uint64_t bytes_evicted
= 0, skipped
= 0, missed
= 0;
1492 arc_buf_hdr_t
*ab
, *ab_prev
= NULL
;
1493 list_t
*list
= &state
->arcs_list
[type
];
1494 kmutex_t
*hash_lock
;
1495 boolean_t have_lock
;
1496 void *stolen
= NULL
;
1498 ASSERT(state
== arc_mru
|| state
== arc_mfu
);
1500 evicted_state
= (state
== arc_mru
) ? arc_mru_ghost
: arc_mfu_ghost
;
1502 mutex_enter(&state
->arcs_mtx
);
1503 mutex_enter(&evicted_state
->arcs_mtx
);
1505 for (ab
= list_tail(list
); ab
; ab
= ab_prev
) {
1506 ab_prev
= list_prev(list
, ab
);
1507 /* prefetch buffers have a minimum lifespan */
1508 if (HDR_IO_IN_PROGRESS(ab
) ||
1509 (spa
&& ab
->b_spa
!= spa
) ||
1510 (ab
->b_flags
& (ARC_PREFETCH
|ARC_INDIRECT
) &&
1511 lbolt
- ab
->b_arc_access
< arc_min_prefetch_lifespan
)) {
1515 /* "lookahead" for better eviction candidate */
1516 if (recycle
&& ab
->b_size
!= bytes
&&
1517 ab_prev
&& ab_prev
->b_size
== bytes
)
1519 hash_lock
= HDR_LOCK(ab
);
1520 have_lock
= MUTEX_HELD(hash_lock
);
1521 if (have_lock
|| mutex_tryenter(hash_lock
)) {
1522 ASSERT3U(refcount_count(&ab
->b_refcnt
), ==, 0);
1523 ASSERT(ab
->b_datacnt
> 0);
1525 arc_buf_t
*buf
= ab
->b_buf
;
1526 if (!rw_tryenter(&buf
->b_lock
, RW_WRITER
)) {
1531 bytes_evicted
+= ab
->b_size
;
1532 if (recycle
&& ab
->b_type
== type
&&
1533 ab
->b_size
== bytes
&&
1534 !HDR_L2_WRITING(ab
)) {
1535 stolen
= buf
->b_data
;
1540 mutex_enter(&arc_eviction_mtx
);
1541 arc_buf_destroy(buf
,
1542 buf
->b_data
== stolen
, FALSE
);
1543 ab
->b_buf
= buf
->b_next
;
1544 buf
->b_hdr
= &arc_eviction_hdr
;
1545 buf
->b_next
= arc_eviction_list
;
1546 arc_eviction_list
= buf
;
1547 mutex_exit(&arc_eviction_mtx
);
1548 rw_exit(&buf
->b_lock
);
1550 rw_exit(&buf
->b_lock
);
1551 arc_buf_destroy(buf
,
1552 buf
->b_data
== stolen
, TRUE
);
1555 if (ab
->b_datacnt
== 0) {
1556 arc_change_state(evicted_state
, ab
, hash_lock
);
1557 ASSERT(HDR_IN_HASH_TABLE(ab
));
1558 ab
->b_flags
|= ARC_IN_HASH_TABLE
;
1559 ab
->b_flags
&= ~ARC_BUF_AVAILABLE
;
1560 DTRACE_PROBE1(arc__evict
, arc_buf_hdr_t
*, ab
);
1563 mutex_exit(hash_lock
);
1564 if (bytes
>= 0 && bytes_evicted
>= bytes
)
1571 mutex_exit(&evicted_state
->arcs_mtx
);
1572 mutex_exit(&state
->arcs_mtx
);
1574 if (bytes_evicted
< bytes
)
1575 dprintf("only evicted %lld bytes from %x",
1576 (longlong_t
)bytes_evicted
, state
);
1579 ARCSTAT_INCR(arcstat_evict_skip
, skipped
);
1582 ARCSTAT_INCR(arcstat_mutex_miss
, missed
);
1585 * We have just evicted some data into the ghost state, make
1586 * sure we also adjust the ghost state size if necessary.
1589 arc_mru_ghost
->arcs_size
+ arc_mfu_ghost
->arcs_size
> arc_c
) {
1590 int64_t mru_over
= arc_anon
->arcs_size
+ arc_mru
->arcs_size
+
1591 arc_mru_ghost
->arcs_size
- arc_c
;
1593 if (mru_over
> 0 && arc_mru_ghost
->arcs_lsize
[type
] > 0) {
1595 MIN(arc_mru_ghost
->arcs_lsize
[type
], mru_over
);
1596 arc_evict_ghost(arc_mru_ghost
, NULL
, todelete
);
1597 } else if (arc_mfu_ghost
->arcs_lsize
[type
] > 0) {
1598 int64_t todelete
= MIN(arc_mfu_ghost
->arcs_lsize
[type
],
1599 arc_mru_ghost
->arcs_size
+
1600 arc_mfu_ghost
->arcs_size
- arc_c
);
1601 arc_evict_ghost(arc_mfu_ghost
, NULL
, todelete
);
1609 * Remove buffers from list until we've removed the specified number of
1610 * bytes. Destroy the buffers that are removed.
1613 arc_evict_ghost(arc_state_t
*state
, spa_t
*spa
, int64_t bytes
)
1615 arc_buf_hdr_t
*ab
, *ab_prev
;
1616 list_t
*list
= &state
->arcs_list
[ARC_BUFC_DATA
];
1617 kmutex_t
*hash_lock
;
1618 uint64_t bytes_deleted
= 0;
1619 uint64_t bufs_skipped
= 0;
1621 ASSERT(GHOST_STATE(state
));
1623 mutex_enter(&state
->arcs_mtx
);
1624 for (ab
= list_tail(list
); ab
; ab
= ab_prev
) {
1625 ab_prev
= list_prev(list
, ab
);
1626 if (spa
&& ab
->b_spa
!= spa
)
1628 hash_lock
= HDR_LOCK(ab
);
1629 if (mutex_tryenter(hash_lock
)) {
1630 ASSERT(!HDR_IO_IN_PROGRESS(ab
));
1631 ASSERT(ab
->b_buf
== NULL
);
1632 ARCSTAT_BUMP(arcstat_deleted
);
1633 bytes_deleted
+= ab
->b_size
;
1635 if (ab
->b_l2hdr
!= NULL
) {
1637 * This buffer is cached on the 2nd Level ARC;
1638 * don't destroy the header.
1640 arc_change_state(arc_l2c_only
, ab
, hash_lock
);
1641 mutex_exit(hash_lock
);
1643 arc_change_state(arc_anon
, ab
, hash_lock
);
1644 mutex_exit(hash_lock
);
1645 arc_hdr_destroy(ab
);
1648 DTRACE_PROBE1(arc__delete
, arc_buf_hdr_t
*, ab
);
1649 if (bytes
>= 0 && bytes_deleted
>= bytes
)
1653 mutex_exit(&state
->arcs_mtx
);
1654 mutex_enter(hash_lock
);
1655 mutex_exit(hash_lock
);
1661 mutex_exit(&state
->arcs_mtx
);
1663 if (list
== &state
->arcs_list
[ARC_BUFC_DATA
] &&
1664 (bytes
< 0 || bytes_deleted
< bytes
)) {
1665 list
= &state
->arcs_list
[ARC_BUFC_METADATA
];
1670 ARCSTAT_INCR(arcstat_mutex_miss
, bufs_skipped
);
1674 if (bytes_deleted
< bytes
)
1675 dprintf("only deleted %lld bytes from %p",
1676 (longlong_t
)bytes_deleted
, state
);
1682 int64_t top_sz
, mru_over
, arc_over
, todelete
;
1684 top_sz
= arc_anon
->arcs_size
+ arc_mru
->arcs_size
+ arc_meta_used
;
1686 if (top_sz
> arc_p
&& arc_mru
->arcs_lsize
[ARC_BUFC_DATA
] > 0) {
1688 MIN(arc_mru
->arcs_lsize
[ARC_BUFC_DATA
], top_sz
- arc_p
);
1689 (void) arc_evict(arc_mru
, NULL
, toevict
, FALSE
, ARC_BUFC_DATA
);
1690 top_sz
= arc_anon
->arcs_size
+ arc_mru
->arcs_size
;
1693 if (top_sz
> arc_p
&& arc_mru
->arcs_lsize
[ARC_BUFC_METADATA
] > 0) {
1695 MIN(arc_mru
->arcs_lsize
[ARC_BUFC_METADATA
], top_sz
- arc_p
);
1696 (void) arc_evict(arc_mru
, NULL
, toevict
, FALSE
,
1698 top_sz
= arc_anon
->arcs_size
+ arc_mru
->arcs_size
;
1701 mru_over
= top_sz
+ arc_mru_ghost
->arcs_size
- arc_c
;
1704 if (arc_mru_ghost
->arcs_size
> 0) {
1705 todelete
= MIN(arc_mru_ghost
->arcs_size
, mru_over
);
1706 arc_evict_ghost(arc_mru_ghost
, NULL
, todelete
);
1710 if ((arc_over
= arc_size
- arc_c
) > 0) {
1713 if (arc_mfu
->arcs_lsize
[ARC_BUFC_DATA
] > 0) {
1715 MIN(arc_mfu
->arcs_lsize
[ARC_BUFC_DATA
], arc_over
);
1716 (void) arc_evict(arc_mfu
, NULL
, toevict
, FALSE
,
1718 arc_over
= arc_size
- arc_c
;
1722 arc_mfu
->arcs_lsize
[ARC_BUFC_METADATA
] > 0) {
1724 MIN(arc_mfu
->arcs_lsize
[ARC_BUFC_METADATA
],
1726 (void) arc_evict(arc_mfu
, NULL
, toevict
, FALSE
,
1730 tbl_over
= arc_size
+ arc_mru_ghost
->arcs_size
+
1731 arc_mfu_ghost
->arcs_size
- arc_c
* 2;
1733 if (tbl_over
> 0 && arc_mfu_ghost
->arcs_size
> 0) {
1734 todelete
= MIN(arc_mfu_ghost
->arcs_size
, tbl_over
);
1735 arc_evict_ghost(arc_mfu_ghost
, NULL
, todelete
);
1741 arc_do_user_evicts(void)
1743 mutex_enter(&arc_eviction_mtx
);
1744 while (arc_eviction_list
!= NULL
) {
1745 arc_buf_t
*buf
= arc_eviction_list
;
1746 arc_eviction_list
= buf
->b_next
;
1747 rw_enter(&buf
->b_lock
, RW_WRITER
);
1749 rw_exit(&buf
->b_lock
);
1750 mutex_exit(&arc_eviction_mtx
);
1752 if (buf
->b_efunc
!= NULL
)
1753 VERIFY(buf
->b_efunc(buf
) == 0);
1755 buf
->b_efunc
= NULL
;
1756 buf
->b_private
= NULL
;
1757 kmem_cache_free(buf_cache
, buf
);
1758 mutex_enter(&arc_eviction_mtx
);
1760 mutex_exit(&arc_eviction_mtx
);
1764 * Flush all *evictable* data from the cache for the given spa.
1765 * NOTE: this will not touch "active" (i.e. referenced) data.
1768 arc_flush(spa_t
*spa
)
1770 while (list_head(&arc_mru
->arcs_list
[ARC_BUFC_DATA
])) {
1771 (void) arc_evict(arc_mru
, spa
, -1, FALSE
, ARC_BUFC_DATA
);
1775 while (list_head(&arc_mru
->arcs_list
[ARC_BUFC_METADATA
])) {
1776 (void) arc_evict(arc_mru
, spa
, -1, FALSE
, ARC_BUFC_METADATA
);
1780 while (list_head(&arc_mfu
->arcs_list
[ARC_BUFC_DATA
])) {
1781 (void) arc_evict(arc_mfu
, spa
, -1, FALSE
, ARC_BUFC_DATA
);
1785 while (list_head(&arc_mfu
->arcs_list
[ARC_BUFC_METADATA
])) {
1786 (void) arc_evict(arc_mfu
, spa
, -1, FALSE
, ARC_BUFC_METADATA
);
1791 arc_evict_ghost(arc_mru_ghost
, spa
, -1);
1792 arc_evict_ghost(arc_mfu_ghost
, spa
, -1);
1794 mutex_enter(&arc_reclaim_thr_lock
);
1795 arc_do_user_evicts();
1796 mutex_exit(&arc_reclaim_thr_lock
);
1797 ASSERT(spa
|| arc_eviction_list
== NULL
);
1800 int arc_shrink_shift
= 5; /* log2(fraction of arc to reclaim) */
1805 if (arc_c
> arc_c_min
) {
1809 to_free
= MAX(arc_c
>> arc_shrink_shift
, ptob(needfree
));
1811 to_free
= arc_c
>> arc_shrink_shift
;
1813 if (arc_c
> arc_c_min
+ to_free
)
1814 atomic_add_64(&arc_c
, -to_free
);
1818 atomic_add_64(&arc_p
, -(arc_p
>> arc_shrink_shift
));
1819 if (arc_c
> arc_size
)
1820 arc_c
= MAX(arc_size
, arc_c_min
);
1822 arc_p
= (arc_c
>> 1);
1823 ASSERT(arc_c
>= arc_c_min
);
1824 ASSERT((int64_t)arc_p
>= 0);
1827 if (arc_size
> arc_c
)
1832 arc_reclaim_needed(void)
1842 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1847 * check that we're out of range of the pageout scanner. It starts to
1848 * schedule paging if freemem is less than lotsfree and needfree.
1849 * lotsfree is the high-water mark for pageout, and needfree is the
1850 * number of needed free pages. We add extra pages here to make sure
1851 * the scanner doesn't start up while we're freeing memory.
1853 if (freemem
< lotsfree
+ needfree
+ extra
)
1857 * check to make sure that swapfs has enough space so that anon
1858 * reservations can still succeed. anon_resvmem() checks that the
1859 * availrmem is greater than swapfs_minfree, and the number of reserved
1860 * swap pages. We also add a bit of extra here just to prevent
1861 * circumstances from getting really dire.
1863 if (availrmem
< swapfs_minfree
+ swapfs_reserve
+ extra
)
1868 * If we're on an i386 platform, it's possible that we'll exhaust the
1869 * kernel heap space before we ever run out of available physical
1870 * memory. Most checks of the size of the heap_area compare against
1871 * tune.t_minarmem, which is the minimum available real memory that we
1872 * can have in the system. However, this is generally fixed at 25 pages
1873 * which is so low that it's useless. In this comparison, we seek to
1874 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1875 * heap is allocated. (Or, in the calculation, if less than 1/4th is
1878 if (btop(vmem_size(heap_arena
, VMEM_FREE
)) <
1879 (btop(vmem_size(heap_arena
, VMEM_FREE
| VMEM_ALLOC
)) >> 2))
1884 if (spa_get_random(100) == 0)
1891 arc_kmem_reap_now(arc_reclaim_strategy_t strat
)
1894 kmem_cache_t
*prev_cache
= NULL
;
1895 kmem_cache_t
*prev_data_cache
= NULL
;
1896 extern kmem_cache_t
*zio_buf_cache
[];
1897 extern kmem_cache_t
*zio_data_buf_cache
[];
1900 if (arc_meta_used
>= arc_meta_limit
) {
1902 * We are exceeding our meta-data cache limit.
1903 * Purge some DNLC entries to release holds on meta-data.
1905 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent
);
1909 * Reclaim unused memory from all kmem caches.
1916 * An aggressive reclamation will shrink the cache size as well as
1917 * reap free buffers from the arc kmem caches.
1919 if (strat
== ARC_RECLAIM_AGGR
)
1922 for (i
= 0; i
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
; i
++) {
1923 if (zio_buf_cache
[i
] != prev_cache
) {
1924 prev_cache
= zio_buf_cache
[i
];
1925 kmem_cache_reap_now(zio_buf_cache
[i
]);
1927 if (zio_data_buf_cache
[i
] != prev_data_cache
) {
1928 prev_data_cache
= zio_data_buf_cache
[i
];
1929 kmem_cache_reap_now(zio_data_buf_cache
[i
]);
1932 kmem_cache_reap_now(buf_cache
);
1933 kmem_cache_reap_now(hdr_cache
);
1937 arc_reclaim_thread(void)
1939 clock_t growtime
= 0;
1940 arc_reclaim_strategy_t last_reclaim
= ARC_RECLAIM_CONS
;
1943 CALLB_CPR_INIT(&cpr
, &arc_reclaim_thr_lock
, callb_generic_cpr
, FTAG
);
1945 mutex_enter(&arc_reclaim_thr_lock
);
1946 while (arc_thread_exit
== 0) {
1947 if (arc_reclaim_needed()) {
1950 if (last_reclaim
== ARC_RECLAIM_CONS
) {
1951 last_reclaim
= ARC_RECLAIM_AGGR
;
1953 last_reclaim
= ARC_RECLAIM_CONS
;
1957 last_reclaim
= ARC_RECLAIM_AGGR
;
1961 /* reset the growth delay for every reclaim */
1962 growtime
= lbolt
+ (arc_grow_retry
* hz
);
1964 arc_kmem_reap_now(last_reclaim
);
1967 } else if (arc_no_grow
&& lbolt
>= growtime
) {
1968 arc_no_grow
= FALSE
;
1971 if (2 * arc_c
< arc_size
+
1972 arc_mru_ghost
->arcs_size
+ arc_mfu_ghost
->arcs_size
)
1975 if (arc_eviction_list
!= NULL
)
1976 arc_do_user_evicts();
1978 /* block until needed, or one second, whichever is shorter */
1979 CALLB_CPR_SAFE_BEGIN(&cpr
);
1980 (void) cv_timedwait(&arc_reclaim_thr_cv
,
1981 &arc_reclaim_thr_lock
, (hz
));
1982 CALLB_CPR_SAFE_END(&cpr
, &arc_reclaim_thr_lock
);
1985 arc_thread_exit
= 0;
1986 cv_broadcast(&arc_reclaim_thr_cv
);
1987 CALLB_CPR_EXIT(&cpr
); /* drops arc_reclaim_thr_lock */
1992 * Adapt arc info given the number of bytes we are trying to add and
1993 * the state that we are coming from. This function is only called
1994 * when we are adding new content to the cache.
1997 arc_adapt(int bytes
, arc_state_t
*state
)
2001 if (state
== arc_l2c_only
)
2006 * Adapt the target size of the MRU list:
2007 * - if we just hit in the MRU ghost list, then increase
2008 * the target size of the MRU list.
2009 * - if we just hit in the MFU ghost list, then increase
2010 * the target size of the MFU list by decreasing the
2011 * target size of the MRU list.
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));

		arc_p = MIN(arc_c, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));

		arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);

	if (arc_c >= arc_c_max)

	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);

	ASSERT((int64_t)arc_p >= 0);
 * Check if the cache has reached its limits and eviction is required

arc_evict_needed(arc_buf_contents_t type)
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)

	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this area remains
	 * above about 1/32nd free.
	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
	    vmem_size(zio_arena, VMEM_FREE) <
	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))

	if (arc_reclaim_needed())

	return (arc_size > arc_c);
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
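 *
 * Restating the four cases compactly (informal summary of the above only):
 *
 *	insert for	condition		list victimized
 *	MRU		p  > anon + mru		MFU
 *	MRU		p <= anon + mru		MRU
 *	MFU		(c - p)  > mfu		MRU
 *	MFU		(c - p) <= mfu		MFU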
2105 arc_get_data_buf(arc_buf_t
*buf
)
2107 arc_state_t
*state
= buf
->b_hdr
->b_state
;
2108 uint64_t size
= buf
->b_hdr
->b_size
;
2109 arc_buf_contents_t type
= buf
->b_hdr
->b_type
;
2111 arc_adapt(size
, state
);
2114 * We have not yet reached cache maximum size,
2115 * just allocate a new buffer.
2117 if (!arc_evict_needed(type
)) {
2118 if (type
== ARC_BUFC_METADATA
) {
2119 buf
->b_data
= zio_buf_alloc(size
);
2120 arc_space_consume(size
);
2122 ASSERT(type
== ARC_BUFC_DATA
);
2123 buf
->b_data
= zio_data_buf_alloc(size
);
2124 atomic_add_64(&arc_size
, size
);
2130 * If we are prefetching from the mfu ghost list, this buffer
2131 * will end up on the mru list; so steal space from there.
2133 if (state
== arc_mfu_ghost
)
2134 state
= buf
->b_hdr
->b_flags
& ARC_PREFETCH
? arc_mru
: arc_mfu
;
2135 else if (state
== arc_mru_ghost
)
2138 if (state
== arc_mru
|| state
== arc_anon
) {
2139 uint64_t mru_used
= arc_anon
->arcs_size
+ arc_mru
->arcs_size
;
2140 state
= (arc_mfu
->arcs_lsize
[type
] > 0 &&
2141 arc_p
> mru_used
) ? arc_mfu
: arc_mru
;
2144 uint64_t mfu_space
= arc_c
- arc_p
;
2145 state
= (arc_mru
->arcs_lsize
[type
] > 0 &&
2146 mfu_space
> arc_mfu
->arcs_size
) ? arc_mru
: arc_mfu
;
2148 if ((buf
->b_data
= arc_evict(state
, NULL
, size
, TRUE
, type
)) == NULL
) {
2149 if (type
== ARC_BUFC_METADATA
) {
2150 buf
->b_data
= zio_buf_alloc(size
);
2151 arc_space_consume(size
);
2153 ASSERT(type
== ARC_BUFC_DATA
);
2154 buf
->b_data
= zio_data_buf_alloc(size
);
2155 atomic_add_64(&arc_size
, size
);
2157 ARCSTAT_BUMP(arcstat_recycle_miss
);
2159 ASSERT(buf
->b_data
!= NULL
);
2162 * Update the state size. Note that ghost states have a
2163 * "ghost size" and so don't need to be updated.
2165 if (!GHOST_STATE(buf
->b_hdr
->b_state
)) {
2166 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
2168 atomic_add_64(&hdr
->b_state
->arcs_size
, size
);
2169 if (list_link_active(&hdr
->b_arc_node
)) {
2170 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
2171 atomic_add_64(&hdr
->b_state
->arcs_lsize
[type
], size
);
2174 * If we are growing the cache, and we are adding anonymous
2175 * data, and we have outgrown arc_p, update arc_p
2177 if (arc_size
< arc_c
&& hdr
->b_state
== arc_anon
&&
2178 arc_anon
->arcs_size
+ arc_mru
->arcs_size
> arc_p
)
2179 arc_p
= MIN(arc_c
, arc_p
+ size
);
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
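 *
 * Rough summary of the state transitions implemented below (derived
 * from the code that follows; listed here only for orientation):
 *	anon      -> mru		first insertion into the cache
 *	mru       -> mfu		second hit after ARC_MINTIME has passed
 *	mru_ghost -> mru or mfu		recently evicted block seen again
 *	mfu       -> mfu		stays put; prefetch hits are re-queued
 *	mfu_ghost -> mfu or mru		frequently used block seen again
 *	l2c_only  -> mfu		block was present only on the L2ARC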
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
	ASSERT(MUTEX_HELD(hash_lock));
2192 if (buf
->b_state
== arc_anon
) {
2194 * This buffer is not in the cache, and does not
2195 * appear in our "ghost" list. Add the new buffer
2199 ASSERT(buf
->b_arc_access
== 0);
2200 buf
->b_arc_access
= lbolt
;
2201 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, buf
);
2202 arc_change_state(arc_mru
, buf
, hash_lock
);
2204 } else if (buf
->b_state
== arc_mru
) {
2206 * If this buffer is here because of a prefetch, then either:
2207 * - clear the flag if this is a "referencing" read
2208 * (any subsequent access will bump this into the MFU state).
2210 * - move the buffer to the head of the list if this is
2211 * another prefetch (to make it less likely to be evicted).
2213 if ((buf
->b_flags
& ARC_PREFETCH
) != 0) {
2214 if (refcount_count(&buf
->b_refcnt
) == 0) {
2215 ASSERT(list_link_active(&buf
->b_arc_node
));
2217 buf
->b_flags
&= ~ARC_PREFETCH
;
2218 ARCSTAT_BUMP(arcstat_mru_hits
);
2220 buf
->b_arc_access
= lbolt
;
2225 * This buffer has been "accessed" only once so far,
2226 * but it is still in the cache. Move it to the MFU
2229 if (lbolt
> buf
->b_arc_access
+ ARC_MINTIME
) {
2231 * More than 125ms have passed since we
2232 * instantiated this buffer. Move it to the
2233 * most frequently used state.
2235 buf
->b_arc_access
= lbolt
;
2236 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, buf
);
2237 arc_change_state(arc_mfu
, buf
, hash_lock
);
2239 ARCSTAT_BUMP(arcstat_mru_hits
);
2240 } else if (buf
->b_state
== arc_mru_ghost
) {
2241 arc_state_t
*new_state
;
2243 * This buffer has been "accessed" recently, but
2244 * was evicted from the cache. Move it to the
2248 if (buf
->b_flags
& ARC_PREFETCH
) {
2249 new_state
= arc_mru
;
2250 if (refcount_count(&buf
->b_refcnt
) > 0)
2251 buf
->b_flags
&= ~ARC_PREFETCH
;
2252 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, buf
);
2254 new_state
= arc_mfu
;
2255 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, buf
);
2258 buf
->b_arc_access
= lbolt
;
2259 arc_change_state(new_state
, buf
, hash_lock
);
2261 ARCSTAT_BUMP(arcstat_mru_ghost_hits
);
2262 } else if (buf
->b_state
== arc_mfu
) {
2264 * This buffer has been accessed more than once and is
2265 * still in the cache. Keep it in the MFU state.
2267 * NOTE: an add_reference() that occurred when we did
2268 * the arc_read() will have kicked this off the list.
2269 * If it was a prefetch, we will explicitly move it to
2270 * the head of the list now.
2272 if ((buf
->b_flags
& ARC_PREFETCH
) != 0) {
2273 ASSERT(refcount_count(&buf
->b_refcnt
) == 0);
2274 ASSERT(list_link_active(&buf
->b_arc_node
));
2276 ARCSTAT_BUMP(arcstat_mfu_hits
);
2277 buf
->b_arc_access
= lbolt
;
2278 } else if (buf
->b_state
== arc_mfu_ghost
) {
2279 arc_state_t
*new_state
= arc_mfu
;
2281 * This buffer has been accessed more than once but has
2282 * been evicted from the cache. Move it back to the
2286 if (buf
->b_flags
& ARC_PREFETCH
) {
2288 * This is a prefetch access...
2289 * move this block back to the MRU state.
2291 ASSERT3U(refcount_count(&buf
->b_refcnt
), ==, 0);
2292 new_state
= arc_mru
;
2295 buf
->b_arc_access
= lbolt
;
2296 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, buf
);
2297 arc_change_state(new_state
, buf
, hash_lock
);
2299 ARCSTAT_BUMP(arcstat_mfu_ghost_hits
);
2300 } else if (buf
->b_state
== arc_l2c_only
) {
2302 * This buffer is on the 2nd Level ARC.
2305 buf
->b_arc_access
= lbolt
;
2306 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, buf
);
2307 arc_change_state(arc_mfu
, buf
, hash_lock
);
2309 ASSERT(!"invalid arc state");
2313 /* a generic arc_done_func_t which you can use */
2316 arc_bcopy_func(zio_t
*zio
, arc_buf_t
*buf
, void *arg
)
2318 bcopy(buf
->b_data
, arg
, buf
->b_hdr
->b_size
);
2319 VERIFY(arc_buf_remove_ref(buf
, arg
) == 1);
2322 /* a generic arc_done_func_t */
2324 arc_getbuf_func(zio_t
*zio
, arc_buf_t
*buf
, void *arg
)
2326 arc_buf_t
**bufp
= arg
;
2327 if (zio
&& zio
->io_error
) {
2328 VERIFY(arc_buf_remove_ref(buf
, arg
) == 1);
2336 arc_read_done(zio_t
*zio
)
2338 arc_buf_hdr_t
*hdr
, *found
;
2340 arc_buf_t
*abuf
; /* buffer we're assigning to callback */
2341 kmutex_t
*hash_lock
;
2342 arc_callback_t
*callback_list
, *acb
;
2343 int freeable
= FALSE
;
2345 buf
= zio
->io_private
;
2349 * The hdr was inserted into hash-table and removed from lists
2350 * prior to starting I/O. We should find this header, since
2351 * it's in the hash table, and it should be legit since it's
2352 * not possible to evict it during the I/O. The only possible
2353 * reason for it not to be found is if we were freed during the
2356 found
= buf_hash_find(zio
->io_spa
, &hdr
->b_dva
, hdr
->b_birth
,
2359 ASSERT((found
== NULL
&& HDR_FREED_IN_READ(hdr
) && hash_lock
== NULL
) ||
2360 (found
== hdr
&& DVA_EQUAL(&hdr
->b_dva
, BP_IDENTITY(zio
->io_bp
))) ||
2361 (found
== hdr
&& HDR_L2_READING(hdr
)));
2363 hdr
->b_flags
&= ~ARC_L2_EVICTED
;
2364 if (l2arc_noprefetch
&& (hdr
->b_flags
& ARC_PREFETCH
))
2365 hdr
->b_flags
&= ~ARC_L2CACHE
;
2367 /* byteswap if necessary */
2368 callback_list
= hdr
->b_acb
;
2369 ASSERT(callback_list
!= NULL
);
2370 if (BP_SHOULD_BYTESWAP(zio
->io_bp
)) {
2371 arc_byteswap_func_t
*func
= BP_GET_LEVEL(zio
->io_bp
) > 0 ?
2372 byteswap_uint64_array
:
2373 dmu_ot
[BP_GET_TYPE(zio
->io_bp
)].ot_byteswap
;
2374 func(buf
->b_data
, hdr
->b_size
);
2377 arc_cksum_compute(buf
, B_FALSE
);
2379 /* create copies of the data buffer for the callers */
2381 for (acb
= callback_list
; acb
; acb
= acb
->acb_next
) {
2382 if (acb
->acb_done
) {
2384 abuf
= arc_buf_clone(buf
);
2385 acb
->acb_buf
= abuf
;
2390 hdr
->b_flags
&= ~ARC_IO_IN_PROGRESS
;
2391 ASSERT(!HDR_BUF_AVAILABLE(hdr
));
2393 hdr
->b_flags
|= ARC_BUF_AVAILABLE
;
2395 ASSERT(refcount_is_zero(&hdr
->b_refcnt
) || callback_list
!= NULL
);
2397 if (zio
->io_error
!= 0) {
2398 hdr
->b_flags
|= ARC_IO_ERROR
;
2399 if (hdr
->b_state
!= arc_anon
)
2400 arc_change_state(arc_anon
, hdr
, hash_lock
);
2401 if (HDR_IN_HASH_TABLE(hdr
))
2402 buf_hash_remove(hdr
);
2403 freeable
= refcount_is_zero(&hdr
->b_refcnt
);
2407 * Broadcast before we drop the hash_lock to avoid the possibility
2408 * that the hdr (and hence the cv) might be freed before we get to
2409 * the cv_broadcast().
2411 cv_broadcast(&hdr
->b_cv
);
2415 * Only call arc_access on anonymous buffers. This is because
2416 * if we've issued an I/O for an evicted buffer, we've already
2417 * called arc_access (to prevent any simultaneous readers from
2418 * getting confused).
2420 if (zio
->io_error
== 0 && hdr
->b_state
== arc_anon
)
2421 arc_access(hdr
, hash_lock
);
2422 mutex_exit(hash_lock
);
2425 * This block was freed while we waited for the read to
2426 * complete. It has been removed from the hash table and
2427 * moved to the anonymous state (so that it won't show up
2430 ASSERT3P(hdr
->b_state
, ==, arc_anon
);
2431 freeable
= refcount_is_zero(&hdr
->b_refcnt
);
2434 /* execute each callback and free its structure */
2435 while ((acb
= callback_list
) != NULL
) {
2437 acb
->acb_done(zio
, acb
->acb_buf
, acb
->acb_private
);
2439 if (acb
->acb_zio_dummy
!= NULL
) {
2440 acb
->acb_zio_dummy
->io_error
= zio
->io_error
;
2441 zio_nowait(acb
->acb_zio_dummy
);
2444 callback_list
= acb
->acb_next
;
2445 kmem_free(acb
, sizeof (arc_callback_t
));
2449 arc_hdr_destroy(hdr
);
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 *
 * Normal callers should use arc_read and pass the arc buffer and offset
 * for the bp.  But if you know you don't need locking, you can use
 * arc_read_nolock.  Callers cannot use a "done" function in a prefetch
 * call (i.e., with ARC_NOWAIT set).
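 *
 * A minimal illustrative call (hypothetical names, shown only to make
 * the parameter order concrete; not taken from a real caller):
 *
 *	(void) arc_read(NULL, spa, bp, pbuf, my_done_cb, my_arg,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, &zb);
 *
 * where flags holds ARC_WAIT or ARC_NOWAIT as described above.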
2476 arc_read(zio_t
*pio
, spa_t
*spa
, blkptr_t
*bp
, arc_buf_t
*pbuf
,
2477 arc_done_func_t
*done
, void *private, int priority
, int zio_flags
,
2478 uint32_t *arc_flags
, const zbookmark_t
*zb
)
2481 arc_buf_hdr_t
*hdr
= pbuf
->b_hdr
;
2483 ASSERT(!refcount_is_zero(&pbuf
->b_hdr
->b_refcnt
));
2484 ASSERT3U((char *)bp
- (char *)pbuf
->b_data
, <, pbuf
->b_hdr
->b_size
);
2485 rw_enter(&pbuf
->b_lock
, RW_READER
);
2487 err
= arc_read_nolock(pio
, spa
, bp
, done
, private, priority
,
2488 zio_flags
, arc_flags
, zb
);
2490 ASSERT3P(hdr
, ==, pbuf
->b_hdr
);
2491 rw_exit(&pbuf
->b_lock
);
2496 arc_read_nolock(zio_t
*pio
, spa_t
*spa
, blkptr_t
*bp
,
2497 arc_done_func_t
*done
, void *private, int priority
, int zio_flags
,
2498 uint32_t *arc_flags
, const zbookmark_t
*zb
)
2502 kmutex_t
*hash_lock
;
2506 hdr
= buf_hash_find(spa
, BP_IDENTITY(bp
), bp
->blk_birth
, &hash_lock
);
2507 if (hdr
&& hdr
->b_datacnt
> 0) {
2509 *arc_flags
|= ARC_CACHED
;
2511 if (HDR_IO_IN_PROGRESS(hdr
)) {
2513 if (*arc_flags
& ARC_WAIT
) {
2514 cv_wait(&hdr
->b_cv
, hash_lock
);
2515 mutex_exit(hash_lock
);
2518 ASSERT(*arc_flags
& ARC_NOWAIT
);
2521 arc_callback_t
*acb
= NULL
;
2523 acb
= kmem_zalloc(sizeof (arc_callback_t
),
2525 acb
->acb_done
= done
;
2526 acb
->acb_private
= private;
2528 acb
->acb_zio_dummy
= zio_null(pio
,
2529 spa
, NULL
, NULL
, zio_flags
);
2531 ASSERT(acb
->acb_done
!= NULL
);
2532 acb
->acb_next
= hdr
->b_acb
;
2534 add_reference(hdr
, hash_lock
, private);
2535 mutex_exit(hash_lock
);
2538 mutex_exit(hash_lock
);
2542 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
2545 add_reference(hdr
, hash_lock
, private);
2547 * If this block is already in use, create a new
2548 * copy of the data so that we will be guaranteed
2549 * that arc_release() will always succeed.
2553 ASSERT(buf
->b_data
);
2554 if (HDR_BUF_AVAILABLE(hdr
)) {
2555 ASSERT(buf
->b_efunc
== NULL
);
2556 hdr
->b_flags
&= ~ARC_BUF_AVAILABLE
;
2558 buf
= arc_buf_clone(buf
);
2560 } else if (*arc_flags
& ARC_PREFETCH
&&
2561 refcount_count(&hdr
->b_refcnt
) == 0) {
2562 hdr
->b_flags
|= ARC_PREFETCH
;
2564 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
2565 arc_access(hdr
, hash_lock
);
2566 if (*arc_flags
& ARC_L2CACHE
)
2567 hdr
->b_flags
|= ARC_L2CACHE
;
2568 mutex_exit(hash_lock
);
2569 ARCSTAT_BUMP(arcstat_hits
);
2570 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
2571 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
2572 data
, metadata
, hits
);
2575 done(NULL
, buf
, private);
2577 uint64_t size
= BP_GET_LSIZE(bp
);
2578 arc_callback_t
*acb
;
2583 /* this block is not in the cache */
2584 arc_buf_hdr_t
*exists
;
2585 arc_buf_contents_t type
= BP_GET_BUFC_TYPE(bp
);
2586 buf
= arc_buf_alloc(spa
, size
, private, type
);
2588 hdr
->b_dva
= *BP_IDENTITY(bp
);
2589 hdr
->b_birth
= bp
->blk_birth
;
2590 hdr
->b_cksum0
= bp
->blk_cksum
.zc_word
[0];
2591 exists
= buf_hash_insert(hdr
, &hash_lock
);
2593 /* somebody beat us to the hash insert */
2594 mutex_exit(hash_lock
);
2595 bzero(&hdr
->b_dva
, sizeof (dva_t
));
2598 (void) arc_buf_remove_ref(buf
, private);
2599 goto top
; /* restart the IO request */
2601 /* if this is a prefetch, we don't have a reference */
2602 if (*arc_flags
& ARC_PREFETCH
) {
2603 (void) remove_reference(hdr
, hash_lock
,
2605 hdr
->b_flags
|= ARC_PREFETCH
;
2607 if (*arc_flags
& ARC_L2CACHE
)
2608 hdr
->b_flags
|= ARC_L2CACHE
;
2609 if (BP_GET_LEVEL(bp
) > 0)
2610 hdr
->b_flags
|= ARC_INDIRECT
;
2612 /* this block is in the ghost cache */
2613 ASSERT(GHOST_STATE(hdr
->b_state
));
2614 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
2615 ASSERT3U(refcount_count(&hdr
->b_refcnt
), ==, 0);
2616 ASSERT(hdr
->b_buf
== NULL
);
2618 /* if this is a prefetch, we don't have a reference */
2619 if (*arc_flags
& ARC_PREFETCH
)
2620 hdr
->b_flags
|= ARC_PREFETCH
;
2622 add_reference(hdr
, hash_lock
, private);
2623 if (*arc_flags
& ARC_L2CACHE
)
2624 hdr
->b_flags
|= ARC_L2CACHE
;
2625 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
2628 buf
->b_efunc
= NULL
;
2629 buf
->b_private
= NULL
;
2632 arc_get_data_buf(buf
);
2633 ASSERT(hdr
->b_datacnt
== 0);
2638 acb
= kmem_zalloc(sizeof (arc_callback_t
), KM_SLEEP
);
2639 acb
->acb_done
= done
;
2640 acb
->acb_private
= private;
2642 ASSERT(hdr
->b_acb
== NULL
);
2644 hdr
->b_flags
|= ARC_IO_IN_PROGRESS
;
2647 * If the buffer has been evicted, migrate it to a present state
2648 * before issuing the I/O. Once we drop the hash-table lock,
2649 * the header will be marked as I/O in progress and have an
2650 * attached buffer. At this point, anybody who finds this
2651 * buffer ought to notice that it's legit but has a pending I/O.
2654 if (GHOST_STATE(hdr
->b_state
))
2655 arc_access(hdr
, hash_lock
);
2657 if (HDR_L2CACHE(hdr
) && hdr
->b_l2hdr
!= NULL
&&
2658 (vd
= hdr
->b_l2hdr
->b_dev
->l2ad_vdev
) != NULL
) {
2659 addr
= hdr
->b_l2hdr
->b_daddr
;
2661 * Lock out device removal.
2663 if (vdev_is_dead(vd
) ||
2664 !spa_config_tryenter(spa
, SCL_L2ARC
, vd
, RW_READER
))
2668 mutex_exit(hash_lock
);
2670 ASSERT3U(hdr
->b_size
, ==, size
);
2671 DTRACE_PROBE3(arc__miss
, blkptr_t
*, bp
, uint64_t, size
,
2673 ARCSTAT_BUMP(arcstat_misses
);
2674 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
2675 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
2676 data
, metadata
, misses
);
2680 * Read from the L2ARC if the following are true:
2681 * 1. The L2ARC vdev was previously cached.
2682 * 2. This buffer still has L2ARC metadata.
2683 * 3. This buffer isn't currently writing to the L2ARC.
2684 * 4. The L2ARC entry wasn't evicted, which may
2685 * also have invalidated the vdev.
2687 if (hdr
->b_l2hdr
!= NULL
&&
2688 !HDR_L2_WRITING(hdr
) && !HDR_L2_EVICTED(hdr
)) {
2689 l2arc_read_callback_t
*cb
;
2691 DTRACE_PROBE1(l2arc__hit
, arc_buf_hdr_t
*, hdr
);
2692 ARCSTAT_BUMP(arcstat_l2_hits
);
2694 cb
= kmem_zalloc(sizeof (l2arc_read_callback_t
),
2696 cb
->l2rcb_buf
= buf
;
2697 cb
->l2rcb_spa
= spa
;
2700 cb
->l2rcb_flags
= zio_flags
;
2703 * l2arc read. The SCL_L2ARC lock will be
2704 * released by l2arc_read_done().
2706 rzio
= zio_read_phys(pio
, vd
, addr
, size
,
2707 buf
->b_data
, ZIO_CHECKSUM_OFF
,
2708 l2arc_read_done
, cb
, priority
, zio_flags
|
2709 ZIO_FLAG_DONT_CACHE
| ZIO_FLAG_CANFAIL
|
2710 ZIO_FLAG_DONT_PROPAGATE
|
2711 ZIO_FLAG_DONT_RETRY
, B_FALSE
);
2712 DTRACE_PROBE2(l2arc__read
, vdev_t
*, vd
,
2715 if (*arc_flags
& ARC_NOWAIT
) {
2720 ASSERT(*arc_flags
& ARC_WAIT
);
2721 if (zio_wait(rzio
) == 0)
2724 /* l2arc read error; goto zio_read() */
2726 DTRACE_PROBE1(l2arc__miss
,
2727 arc_buf_hdr_t
*, hdr
);
2728 ARCSTAT_BUMP(arcstat_l2_misses
);
2729 if (HDR_L2_WRITING(hdr
))
2730 ARCSTAT_BUMP(arcstat_l2_rw_clash
);
2731 spa_config_exit(spa
, SCL_L2ARC
, vd
);
2735 rzio
= zio_read(pio
, spa
, bp
, buf
->b_data
, size
,
2736 arc_read_done
, buf
, priority
, zio_flags
, zb
);
2738 if (*arc_flags
& ARC_WAIT
)
2739 return (zio_wait(rzio
));
2741 ASSERT(*arc_flags
& ARC_NOWAIT
);
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
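 *
 * (In other words this is purely opportunistic: a hit costs one bcopy()
 * of the cached data into the caller-supplied buffer, while a miss simply
 * leaves the caller to issue the read itself.)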
2754 arc_tryread(spa_t
*spa
, blkptr_t
*bp
, void *data
)
2760 hdr
= buf_hash_find(spa
, BP_IDENTITY(bp
), bp
->blk_birth
, &hash_mtx
);
2762 if (hdr
&& hdr
->b_datacnt
> 0 && !HDR_IO_IN_PROGRESS(hdr
)) {
2763 arc_buf_t
*buf
= hdr
->b_buf
;
2766 while (buf
->b_data
== NULL
) {
2770 bcopy(buf
->b_data
, data
, hdr
->b_size
);
2776 mutex_exit(hash_mtx
);
2782 arc_set_callback(arc_buf_t
*buf
, arc_evict_func_t
*func
, void *private)
2784 ASSERT(buf
->b_hdr
!= NULL
);
2785 ASSERT(buf
->b_hdr
->b_state
!= arc_anon
);
2786 ASSERT(!refcount_is_zero(&buf
->b_hdr
->b_refcnt
) || func
== NULL
);
2787 buf
->b_efunc
= func
;
2788 buf
->b_private
= private;
2792 * This is used by the DMU to let the ARC know that a buffer is
2793 * being evicted, so the ARC should clean up. If this arc buf
2794 * is not yet in the evicted state, it will be put there.
2797 arc_buf_evict(arc_buf_t
*buf
)
2800 kmutex_t
*hash_lock
;
2803 rw_enter(&buf
->b_lock
, RW_WRITER
);
2807 * We are in arc_do_user_evicts().
2809 ASSERT(buf
->b_data
== NULL
);
2810 rw_exit(&buf
->b_lock
);
2812 } else if (buf
->b_data
== NULL
) {
2813 arc_buf_t copy
= *buf
; /* structure assignment */
2815 * We are on the eviction list; process this buffer now
2816 * but let arc_do_user_evicts() do the reaping.
2818 buf
->b_efunc
= NULL
;
2819 rw_exit(&buf
->b_lock
);
2820 VERIFY(copy
.b_efunc(©
) == 0);
2823 hash_lock
= HDR_LOCK(hdr
);
2824 mutex_enter(hash_lock
);
2826 ASSERT(buf
->b_hdr
== hdr
);
2827 ASSERT3U(refcount_count(&hdr
->b_refcnt
), <, hdr
->b_datacnt
);
2828 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
2831 * Pull this buffer off of the hdr
2834 while (*bufp
!= buf
)
2835 bufp
= &(*bufp
)->b_next
;
2836 *bufp
= buf
->b_next
;
2838 ASSERT(buf
->b_data
!= NULL
);
2839 arc_buf_destroy(buf
, FALSE
, FALSE
);
2841 if (hdr
->b_datacnt
== 0) {
2842 arc_state_t
*old_state
= hdr
->b_state
;
2843 arc_state_t
*evicted_state
;
2845 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
2848 (old_state
== arc_mru
) ? arc_mru_ghost
: arc_mfu_ghost
;
2850 mutex_enter(&old_state
->arcs_mtx
);
2851 mutex_enter(&evicted_state
->arcs_mtx
);
2853 arc_change_state(evicted_state
, hdr
, hash_lock
);
2854 ASSERT(HDR_IN_HASH_TABLE(hdr
));
2855 hdr
->b_flags
|= ARC_IN_HASH_TABLE
;
2856 hdr
->b_flags
&= ~ARC_BUF_AVAILABLE
;
2858 mutex_exit(&evicted_state
->arcs_mtx
);
2859 mutex_exit(&old_state
->arcs_mtx
);
2861 mutex_exit(hash_lock
);
2862 rw_exit(&buf
->b_lock
);
2864 VERIFY(buf
->b_efunc(buf
) == 0);
2865 buf
->b_efunc
= NULL
;
2866 buf
->b_private
= NULL
;
2868 kmem_cache_free(buf_cache
, buf
);
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
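 *
 * Illustrative (hypothetical) caller sequence, for orientation only:
 *
 *	arc_release(buf, tag);		detach buf from the cached block
 *	ASSERT(arc_released(buf));
 *	... modify buf->b_data ...	safe: the buffer is now anonymous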
2879 arc_release(arc_buf_t
*buf
, void *tag
)
2882 kmutex_t
*hash_lock
;
2883 l2arc_buf_hdr_t
*l2hdr
;
2886 rw_enter(&buf
->b_lock
, RW_WRITER
);
2889 /* this buffer is not on any list */
2890 ASSERT(refcount_count(&hdr
->b_refcnt
) > 0);
2891 ASSERT(!(hdr
->b_flags
& ARC_STORED
));
2893 if (hdr
->b_state
== arc_anon
) {
2894 /* this buffer is already released */
2895 ASSERT3U(refcount_count(&hdr
->b_refcnt
), ==, 1);
2896 ASSERT(BUF_EMPTY(hdr
));
2897 ASSERT(buf
->b_efunc
== NULL
);
2899 rw_exit(&buf
->b_lock
);
2903 hash_lock
= HDR_LOCK(hdr
);
2904 mutex_enter(hash_lock
);
2906 l2hdr
= hdr
->b_l2hdr
;
2908 mutex_enter(&l2arc_buflist_mtx
);
2909 hdr
->b_l2hdr
= NULL
;
2910 buf_size
= hdr
->b_size
;
2914 * Do we have more than one buf?
2916 if (hdr
->b_datacnt
> 1) {
2917 arc_buf_hdr_t
*nhdr
;
2919 uint64_t blksz
= hdr
->b_size
;
2920 spa_t
*spa
= hdr
->b_spa
;
2921 arc_buf_contents_t type
= hdr
->b_type
;
2922 uint32_t flags
= hdr
->b_flags
;
2924 ASSERT(hdr
->b_buf
!= buf
|| buf
->b_next
!= NULL
);
2926 * Pull the data off of this buf and attach it to
2927 * a new anonymous buf.
2929 (void) remove_reference(hdr
, hash_lock
, tag
);
2931 while (*bufp
!= buf
)
2932 bufp
= &(*bufp
)->b_next
;
2933 *bufp
= (*bufp
)->b_next
;
2936 ASSERT3U(hdr
->b_state
->arcs_size
, >=, hdr
->b_size
);
2937 atomic_add_64(&hdr
->b_state
->arcs_size
, -hdr
->b_size
);
2938 if (refcount_is_zero(&hdr
->b_refcnt
)) {
2939 uint64_t *size
= &hdr
->b_state
->arcs_lsize
[hdr
->b_type
];
2940 ASSERT3U(*size
, >=, hdr
->b_size
);
2941 atomic_add_64(size
, -hdr
->b_size
);
2943 hdr
->b_datacnt
-= 1;
2944 arc_cksum_verify(buf
);
2946 mutex_exit(hash_lock
);
2948 nhdr
= kmem_cache_alloc(hdr_cache
, KM_PUSHPAGE
);
2949 nhdr
->b_size
= blksz
;
2951 nhdr
->b_type
= type
;
2953 nhdr
->b_state
= arc_anon
;
2954 nhdr
->b_arc_access
= 0;
2955 nhdr
->b_flags
= flags
& ARC_L2_WRITING
;
2956 nhdr
->b_l2hdr
= NULL
;
2957 nhdr
->b_datacnt
= 1;
2958 nhdr
->b_freeze_cksum
= NULL
;
2959 (void) refcount_add(&nhdr
->b_refcnt
, tag
);
2961 rw_exit(&buf
->b_lock
);
2962 atomic_add_64(&arc_anon
->arcs_size
, blksz
);
2964 rw_exit(&buf
->b_lock
);
2965 ASSERT(refcount_count(&hdr
->b_refcnt
) == 1);
2966 ASSERT(!list_link_active(&hdr
->b_arc_node
));
2967 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
2968 arc_change_state(arc_anon
, hdr
, hash_lock
);
2969 hdr
->b_arc_access
= 0;
2970 mutex_exit(hash_lock
);
2972 bzero(&hdr
->b_dva
, sizeof (dva_t
));
2977 buf
->b_efunc
= NULL
;
2978 buf
->b_private
= NULL
;
2981 list_remove(l2hdr
->b_dev
->l2ad_buflist
, hdr
);
2982 kmem_free(l2hdr
, sizeof (l2arc_buf_hdr_t
));
2983 ARCSTAT_INCR(arcstat_l2_size
, -buf_size
);
2984 mutex_exit(&l2arc_buflist_mtx
);
2989 arc_released(arc_buf_t
*buf
)
2993 rw_enter(&buf
->b_lock
, RW_READER
);
2994 released
= (buf
->b_data
!= NULL
&& buf
->b_hdr
->b_state
== arc_anon
);
2995 rw_exit(&buf
->b_lock
);
3000 arc_has_callback(arc_buf_t
*buf
)
3004 rw_enter(&buf
->b_lock
, RW_READER
);
3005 callback
= (buf
->b_efunc
!= NULL
);
3006 rw_exit(&buf
->b_lock
);
3012 arc_referenced(arc_buf_t
*buf
)
3016 rw_enter(&buf
->b_lock
, RW_READER
);
3017 referenced
= (refcount_count(&buf
->b_hdr
->b_refcnt
));
3018 rw_exit(&buf
->b_lock
);
3019 return (referenced
);
3024 arc_write_ready(zio_t
*zio
)
3026 arc_write_callback_t
*callback
= zio
->io_private
;
3027 arc_buf_t
*buf
= callback
->awcb_buf
;
3028 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3030 ASSERT(!refcount_is_zero(&buf
->b_hdr
->b_refcnt
));
3031 callback
->awcb_ready(zio
, buf
, callback
->awcb_private
);
3034 * If the IO is already in progress, then this is a re-write
3035 * attempt, so we need to thaw and re-compute the cksum.
3036 * It is the responsibility of the callback to handle the
3037 * accounting for any re-write attempt.
3039 if (HDR_IO_IN_PROGRESS(hdr
)) {
3040 mutex_enter(&hdr
->b_freeze_lock
);
3041 if (hdr
->b_freeze_cksum
!= NULL
) {
3042 kmem_free(hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
3043 hdr
->b_freeze_cksum
= NULL
;
3045 mutex_exit(&hdr
->b_freeze_lock
);
3047 arc_cksum_compute(buf
, B_FALSE
);
3048 hdr
->b_flags
|= ARC_IO_IN_PROGRESS
;
3052 arc_write_done(zio_t
*zio
)
3054 arc_write_callback_t
*callback
= zio
->io_private
;
3055 arc_buf_t
*buf
= callback
->awcb_buf
;
3056 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3060 hdr
->b_dva
= *BP_IDENTITY(zio
->io_bp
);
3061 hdr
->b_birth
= zio
->io_bp
->blk_birth
;
3062 hdr
->b_cksum0
= zio
->io_bp
->blk_cksum
.zc_word
[0];
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth-date/checksum.  The buffer
	 * must therefore remain anonymous (and uncached).
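	 *
	 * (Concretely, the BUF_EMPTY(hdr) test below is true in exactly this
	 * case: the zio never assigned the header a DVA or birth time.)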
3069 if (!BUF_EMPTY(hdr
)) {
3070 arc_buf_hdr_t
*exists
;
3071 kmutex_t
*hash_lock
;
3073 arc_cksum_verify(buf
);
3075 exists
= buf_hash_insert(hdr
, &hash_lock
);
3078 * This can only happen if we overwrite for
3079 * sync-to-convergence, because we remove
3080 * buffers from the hash table when we arc_free().
3082 ASSERT(zio
->io_flags
& ZIO_FLAG_IO_REWRITE
);
3083 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio
->io_bp_orig
),
3084 BP_IDENTITY(zio
->io_bp
)));
3085 ASSERT3U(zio
->io_bp_orig
.blk_birth
, ==,
3086 zio
->io_bp
->blk_birth
);
3088 ASSERT(refcount_is_zero(&exists
->b_refcnt
));
3089 arc_change_state(arc_anon
, exists
, hash_lock
);
3090 mutex_exit(hash_lock
);
3091 arc_hdr_destroy(exists
);
3092 exists
= buf_hash_insert(hdr
, &hash_lock
);
3093 ASSERT3P(exists
, ==, NULL
);
3095 hdr
->b_flags
&= ~ARC_IO_IN_PROGRESS
;
3096 /* if it's not anon, we are doing a scrub */
3097 if (hdr
->b_state
== arc_anon
)
3098 arc_access(hdr
, hash_lock
);
3099 mutex_exit(hash_lock
);
3100 } else if (callback
->awcb_done
== NULL
) {
3103 * This is an anonymous buffer with no user callback,
3104 * destroy it if there are no active references.
3106 mutex_enter(&arc_eviction_mtx
);
3107 destroy_hdr
= refcount_is_zero(&hdr
->b_refcnt
);
3108 hdr
->b_flags
&= ~ARC_IO_IN_PROGRESS
;
3109 mutex_exit(&arc_eviction_mtx
);
3111 arc_hdr_destroy(hdr
);
3113 hdr
->b_flags
&= ~ARC_IO_IN_PROGRESS
;
3115 hdr
->b_flags
&= ~ARC_STORED
;
3117 if (callback
->awcb_done
) {
3118 ASSERT(!refcount_is_zero(&hdr
->b_refcnt
));
3119 callback
->awcb_done(zio
, buf
, callback
->awcb_private
);
3122 kmem_free(callback
, sizeof (arc_write_callback_t
));
3126 write_policy(spa_t
*spa
, const writeprops_t
*wp
, zio_prop_t
*zp
)
3128 boolean_t ismd
= (wp
->wp_level
> 0 || dmu_ot
[wp
->wp_type
].ot_metadata
);
3130 /* Determine checksum setting */
3133 * Metadata always gets checksummed. If the data
3134 * checksum is multi-bit correctable, and it's not a
3135 * ZBT-style checksum, then it's suitable for metadata
3136 * as well. Otherwise, the metadata checksum defaults
3139 if (zio_checksum_table
[wp
->wp_oschecksum
].ci_correctable
&&
3140 !zio_checksum_table
[wp
->wp_oschecksum
].ci_zbt
)
3141 zp
->zp_checksum
= wp
->wp_oschecksum
;
3143 zp
->zp_checksum
= ZIO_CHECKSUM_FLETCHER_4
;
3145 zp
->zp_checksum
= zio_checksum_select(wp
->wp_dnchecksum
,
3149 /* Determine compression setting */
3152 * XXX -- we should design a compression algorithm
3153 * that specializes in arrays of bps.
3155 zp
->zp_compress
= zfs_mdcomp_disable
? ZIO_COMPRESS_EMPTY
:
3158 zp
->zp_compress
= zio_compress_select(wp
->wp_dncompress
,
3162 zp
->zp_type
= wp
->wp_type
;
3163 zp
->zp_level
= wp
->wp_level
;
3164 zp
->zp_ndvas
= MIN(wp
->wp_copies
+ ismd
, spa_max_replication(spa
));
3168 arc_write(zio_t
*pio
, spa_t
*spa
, const writeprops_t
*wp
,
3169 boolean_t l2arc
, uint64_t txg
, blkptr_t
*bp
, arc_buf_t
*buf
,
3170 arc_done_func_t
*ready
, arc_done_func_t
*done
, void *private, int priority
,
3171 int zio_flags
, const zbookmark_t
*zb
)
3173 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3174 arc_write_callback_t
*callback
;
3178 ASSERT(ready
!= NULL
);
3179 ASSERT(!HDR_IO_ERROR(hdr
));
3180 ASSERT((hdr
->b_flags
& ARC_IO_IN_PROGRESS
) == 0);
3181 ASSERT(hdr
->b_acb
== 0);
3183 hdr
->b_flags
|= ARC_L2CACHE
;
3184 callback
= kmem_zalloc(sizeof (arc_write_callback_t
), KM_SLEEP
);
3185 callback
->awcb_ready
= ready
;
3186 callback
->awcb_done
= done
;
3187 callback
->awcb_private
= private;
3188 callback
->awcb_buf
= buf
;
3190 write_policy(spa
, wp
, &zp
);
3191 zio
= zio_write(pio
, spa
, txg
, bp
, buf
->b_data
, hdr
->b_size
, &zp
,
3192 arc_write_ready
, arc_write_done
, callback
, priority
, zio_flags
, zb
);
3198 arc_free(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
3199 zio_done_func_t
*done
, void *private, uint32_t arc_flags
)
3202 kmutex_t
*hash_lock
;
3206 * If this buffer is in the cache, release it, so it
3209 ab
= buf_hash_find(spa
, BP_IDENTITY(bp
), bp
->blk_birth
, &hash_lock
);
3212 * The checksum of blocks to free is not always
3213 * preserved (eg. on the deadlist). However, if it is
3214 * nonzero, it should match what we have in the cache.
3216 ASSERT(bp
->blk_cksum
.zc_word
[0] == 0 ||
3217 bp
->blk_cksum
.zc_word
[0] == ab
->b_cksum0
||
3218 bp
->blk_fill
== BLK_FILL_ALREADY_FREED
);
3220 if (ab
->b_state
!= arc_anon
)
3221 arc_change_state(arc_anon
, ab
, hash_lock
);
3222 if (HDR_IO_IN_PROGRESS(ab
)) {
3224 * This should only happen when we prefetch.
3226 ASSERT(ab
->b_flags
& ARC_PREFETCH
);
3227 ASSERT3U(ab
->b_datacnt
, ==, 1);
3228 ab
->b_flags
|= ARC_FREED_IN_READ
;
3229 if (HDR_IN_HASH_TABLE(ab
))
3230 buf_hash_remove(ab
);
3231 ab
->b_arc_access
= 0;
3232 bzero(&ab
->b_dva
, sizeof (dva_t
));
3235 ab
->b_buf
->b_efunc
= NULL
;
3236 ab
->b_buf
->b_private
= NULL
;
3237 mutex_exit(hash_lock
);
3238 } else if (refcount_is_zero(&ab
->b_refcnt
)) {
3239 ab
->b_flags
|= ARC_FREE_IN_PROGRESS
;
3240 mutex_exit(hash_lock
);
3241 arc_hdr_destroy(ab
);
3242 ARCSTAT_BUMP(arcstat_deleted
);
3245 * We still have an active reference on this
3246 * buffer. This can happen, e.g., from
3247 * dbuf_unoverride().
3249 ASSERT(!HDR_IN_HASH_TABLE(ab
));
3250 ab
->b_arc_access
= 0;
3251 bzero(&ab
->b_dva
, sizeof (dva_t
));
3254 ab
->b_buf
->b_efunc
= NULL
;
3255 ab
->b_buf
->b_private
= NULL
;
3256 mutex_exit(hash_lock
);
3260 zio
= zio_free(pio
, spa
, txg
, bp
, done
, private, ZIO_FLAG_MUSTSUCCEED
);
3262 if (arc_flags
& ARC_WAIT
)
3263 return (zio_wait(zio
));
3265 ASSERT(arc_flags
& ARC_NOWAIT
);
3272 arc_memory_throttle(uint64_t reserve
, uint64_t txg
)
3275 uint64_t inflight_data
= arc_anon
->arcs_size
;
3276 uint64_t available_memory
= ptob(freemem
);
3277 static uint64_t page_load
= 0;
3278 static uint64_t last_txg
= 0;
3281 MIN(available_memory
, vmem_size(heap_arena
, VMEM_FREE
));
3282 if (available_memory
>= zfs_write_limit_max
)
3285 if (txg
> last_txg
) {
3290 * If we are in pageout, we know that memory is already tight,
3291 * the arc is already going to be evicting, so we just want to
3292 * continue to let page writes occur as quickly as possible.
3294 if (curproc
== proc_pageout
) {
3295 if (page_load
> MAX(ptob(minfree
), available_memory
) / 4)
3297 /* Note: reserve is inflated, so we deflate */
3298 page_load
+= reserve
/ 8;
3300 } else if (page_load
> 0 && arc_reclaim_needed()) {
3301 /* memory is low, delay before restarting */
3302 ARCSTAT_INCR(arcstat_memory_throttle_count
, 1);
3307 if (arc_size
> arc_c_min
) {
3308 uint64_t evictable_memory
=
3309 arc_mru
->arcs_lsize
[ARC_BUFC_DATA
] +
3310 arc_mru
->arcs_lsize
[ARC_BUFC_METADATA
] +
3311 arc_mfu
->arcs_lsize
[ARC_BUFC_DATA
] +
3312 arc_mfu
->arcs_lsize
[ARC_BUFC_METADATA
];
3313 available_memory
+= MIN(evictable_memory
, arc_size
- arc_c_min
);
3316 if (inflight_data
> available_memory
/ 4) {
3317 ARCSTAT_INCR(arcstat_memory_throttle_count
, 1);
3325 arc_tempreserve_clear(uint64_t reserve
)
3327 atomic_add_64(&arc_tempreserve
, -reserve
);
3328 ASSERT((int64_t)arc_tempreserve
>= 0);
3332 arc_tempreserve_space(uint64_t reserve
, uint64_t txg
)
3338 * Once in a while, fail for no reason. Everything should cope.
3340 if (spa_get_random(10000) == 0) {
3341 dprintf("forcing random failure\n");
3345 if (reserve
> arc_c
/4 && !arc_no_grow
)
3346 arc_c
= MIN(arc_c_max
, reserve
* 4);
3347 if (reserve
> arc_c
)
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
3355 if (error
= arc_memory_throttle(reserve
, txg
))
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
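	 *
	 * (Worked example of the check below: with arc_c at 4 GB, a
	 * reservation fails once reserve + arc_tempreserve + anonymous data
	 * exceeds 2 GB while anonymous data alone exceeds 1 GB.)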
3365 if (reserve
+ arc_tempreserve
+ arc_anon
->arcs_size
> arc_c
/ 2 &&
3366 arc_anon
->arcs_size
> arc_c
/ 4) {
3367 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3368 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3369 arc_tempreserve
>>10,
3370 arc_anon
->arcs_lsize
[ARC_BUFC_METADATA
]>>10,
3371 arc_anon
->arcs_lsize
[ARC_BUFC_DATA
]>>10,
3372 reserve
>>10, arc_c
>>10);
3375 atomic_add_64(&arc_tempreserve
, reserve
);
3379 #if defined(__NetBSD__) && defined(_KERNEL)
3380 /* Reclaim hook registered to uvm for reclaiming KVM and memory */
3382 arc_uvm_reclaim_hook(void)
3385 if (mutex_tryenter(&arc_reclaim_thr_lock
)) {
3386 cv_broadcast(&arc_reclaim_thr_cv
);
3387 mutex_exit(&arc_reclaim_thr_lock
);
3392 arc_kva_reclaim_callback(struct callback_entry
*ce
, void *obj
, void *arg
)
3396 if (mutex_tryenter(&arc_reclaim_thr_lock
)) {
3397 cv_broadcast(&arc_reclaim_thr_cv
);
3398 mutex_exit(&arc_reclaim_thr_lock
);
3401 return CALLBACK_CHAIN_CONTINUE
;
3404 #endif /* __NetBSD__ */
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = physmem * PAGESIZE / 8;

	 * On architectures where the physical memory can be larger
	 * than the addressable space (intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(arc_c / 4, 64<<20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (arc_c * 8 >= 1<<30)
		arc_c_max = (arc_c * 8) - (1<<30);
		arc_c_max = arc_c_min;
	arc_c_max = MAX(arc_c * 6, arc_c_max);
3437 * Allow the tunables to override our calculations if they are
3438 * reasonable (ie. over 64MB)
3440 if (zfs_arc_max
> 64<<20 && zfs_arc_max
< physmem
* PAGESIZE
)
3441 arc_c_max
= zfs_arc_max
;
3442 if (zfs_arc_min
> 64<<20 && zfs_arc_min
<= arc_c_max
)
3443 arc_c_min
= zfs_arc_min
;
3446 arc_p
= (arc_c
>> 1);
3448 /* limit meta-data to 1/4 of the arc capacity */
3449 arc_meta_limit
= arc_c_max
/ 4;
3451 /* Allow the tunable to override if it is reasonable */
3452 if (zfs_arc_meta_limit
> 0 && zfs_arc_meta_limit
<= arc_c_max
)
3453 arc_meta_limit
= zfs_arc_meta_limit
;
3455 if (arc_c_min
< arc_meta_limit
/ 2 && zfs_arc_min
== 0)
3456 arc_c_min
= arc_meta_limit
/ 2;
3458 /* if kmem_flags are set, lets try to use less memory */
3459 if (kmem_debugging())
3461 if (arc_c
< arc_c_min
)
3464 arc_anon
= &ARC_anon
;
3466 arc_mru_ghost
= &ARC_mru_ghost
;
3468 arc_mfu_ghost
= &ARC_mfu_ghost
;
3469 arc_l2c_only
= &ARC_l2c_only
;
3472 mutex_init(&arc_anon
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3473 mutex_init(&arc_mru
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3474 mutex_init(&arc_mru_ghost
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3475 mutex_init(&arc_mfu
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3476 mutex_init(&arc_mfu_ghost
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3477 mutex_init(&arc_l2c_only
->arcs_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3479 list_create(&arc_mru
->arcs_list
[ARC_BUFC_METADATA
],
3480 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3481 list_create(&arc_mru
->arcs_list
[ARC_BUFC_DATA
],
3482 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3483 list_create(&arc_mru_ghost
->arcs_list
[ARC_BUFC_METADATA
],
3484 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3485 list_create(&arc_mru_ghost
->arcs_list
[ARC_BUFC_DATA
],
3486 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3487 list_create(&arc_mfu
->arcs_list
[ARC_BUFC_METADATA
],
3488 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3489 list_create(&arc_mfu
->arcs_list
[ARC_BUFC_DATA
],
3490 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3491 list_create(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_METADATA
],
3492 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3493 list_create(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_DATA
],
3494 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3495 list_create(&arc_l2c_only
->arcs_list
[ARC_BUFC_METADATA
],
3496 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3497 list_create(&arc_l2c_only
->arcs_list
[ARC_BUFC_DATA
],
3498 sizeof (arc_buf_hdr_t
), offsetof(arc_buf_hdr_t
, b_arc_node
));
3502 arc_thread_exit
= 0;
3503 arc_eviction_list
= NULL
;
3504 mutex_init(&arc_eviction_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
3505 bzero(&arc_eviction_hdr
, sizeof (arc_buf_hdr_t
));
3507 arc_ksp
= kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED
,
3508 sizeof (arc_stats
) / sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
);
3510 if (arc_ksp
!= NULL
) {
3511 arc_ksp
->ks_data
= &arc_stats
;
3512 kstat_install(arc_ksp
);
3515 (void) thread_create(NULL
, 0, arc_reclaim_thread
, NULL
, 0, &p0
,
3516 TS_RUN
, maxclsyspri
);
3518 #if defined(__NetBSD__) && defined(_KERNEL)
3519 arc_hook
.uvm_reclaim_hook
= &arc_uvm_reclaim_hook
;
3521 uvm_reclaim_hook_add(&arc_hook
);
3522 callback_register(&vm_map_to_kernel(kernel_map
)->vmk_reclaim_callback
,
3523 &arc_kva_reclaim_entry
, NULL
, arc_kva_reclaim_callback
);
3530 if (zfs_write_limit_max
== 0)
3531 zfs_write_limit_max
= ptob(physmem
) >> zfs_write_limit_shift
;
3533 zfs_write_limit_shift
= 0;
3534 mutex_init(&zfs_write_limit_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
3540 mutex_enter(&arc_reclaim_thr_lock
);
3541 arc_thread_exit
= 1;
3542 while (arc_thread_exit
!= 0)
3543 cv_wait(&arc_reclaim_thr_cv
, &arc_reclaim_thr_lock
);
3544 mutex_exit(&arc_reclaim_thr_lock
);
3550 if (arc_ksp
!= NULL
) {
3551 kstat_delete(arc_ksp
);
3555 mutex_destroy(&arc_eviction_mtx
);
3556 mutex_destroy(&arc_reclaim_thr_lock
);
3557 cv_destroy(&arc_reclaim_thr_cv
);
3559 list_destroy(&arc_mru
->arcs_list
[ARC_BUFC_METADATA
]);
3560 list_destroy(&arc_mru_ghost
->arcs_list
[ARC_BUFC_METADATA
]);
3561 list_destroy(&arc_mfu
->arcs_list
[ARC_BUFC_METADATA
]);
3562 list_destroy(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_METADATA
]);
3563 list_destroy(&arc_mru
->arcs_list
[ARC_BUFC_DATA
]);
3564 list_destroy(&arc_mru_ghost
->arcs_list
[ARC_BUFC_DATA
]);
3565 list_destroy(&arc_mfu
->arcs_list
[ARC_BUFC_DATA
]);
3566 list_destroy(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_DATA
]);
3568 mutex_destroy(&arc_anon
->arcs_mtx
);
3569 mutex_destroy(&arc_mru
->arcs_mtx
);
3570 mutex_destroy(&arc_mru_ghost
->arcs_mtx
);
3571 mutex_destroy(&arc_mfu
->arcs_mtx
);
3572 mutex_destroy(&arc_mfu_ghost
->arcs_mtx
);
3573 mutex_destroy(&arc_l2c_only
->arcs_mtx
);
3575 mutex_destroy(&zfs_write_limit_lock
);
3577 #if defined(__NetBSD__) && defined(_KERNEL)
3578 uvm_reclaim_hook_del(&arc_hook
);
3579 callback_unregister(&vm_map_to_kernel(kernel_map
)->vmk_reclaim_callback
,
3580 &arc_kva_reclaim_entry
);
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *	[ASCII diagram elided: the ARC sits on top; l2arc_feed_thread()
 *	 pushes buffers down to the L2ARC devices while arc_read() serves
 *	 hits from the ARC, the vdev caches, the L2ARC devices, and the
 *	 pool disks.]
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
3629 * Some L2ARC device types exhibit extremely slow write performance.
3630 * To accommodate for this there are some significant differences between
3631 * the L2ARC and traditional cache design:
3633 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3634 * the ARC behave as usual, freeing buffers and placing headers on ghost
3635 * lists. The ARC does not send buffers to the L2ARC during eviction as
3636 * this would add inflated write latencies for all ARC memory pressure.
3638 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3639 * It does this by periodically scanning buffers from the eviction-end of
3640 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3641 * not already there. It scans until a headroom of buffers is satisfied,
3642 * which itself is a buffer for ARC eviction. The thread that does this is
3643 * l2arc_feed_thread(), illustrated below; example sizes are included to
3644 * provide a better sense of ratio than this diagram:
3647 * +---------------------+----------+
3648 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3649 * +---------------------+----------+ | o L2ARC eligible
3650 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3651 * +---------------------+----------+ |
3652 * 15.9 Gbytes ^ 32 Mbytes |
3654 * l2arc_feed_thread()
3656 * l2arc write hand <--[oooo]--'
3660 * +==============================+
3661 * L2ARC dev |####|#|###|###| |####| ... |
3662 * +==============================+
3665 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3666 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3667 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3668 * safe to say that this is an uncommon case, since buffers at the end of
3669 * the ARC lists have moved there due to inactivity.
3671 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3672 * then the L2ARC simply misses copying some buffers. This serves as a
3673 * pressure valve to prevent heavy read workloads from both stalling the ARC
3674 * with waits and clogging the L2ARC with writes. This also helps prevent
3675 * the potential for the L2ARC to churn if it attempts to cache content too
3676 * quickly, such as during backups of the entire pool.
3678 * 5. After system boot and before the ARC has filled main memory, there are
3679 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3680 * lists can remain mostly static. Instead of searching from tail of these
3681 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3682 * for eligible buffers, greatly increasing its chance of finding them.
3684 * The L2ARC device write speed is also boosted during this time so that
3685 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3686 * there are no L2ARC reads, and no fear of degrading read performance
3687 * through increased writes.
3689 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3690 * the vdev queue can aggregate them into larger and fewer writes. Each
3691 * device is written to in a rotor fashion, sweeping writes through
3692 * available space then repeating.
3694 * 7. The L2ARC does not store dirty content. It never needs to flush
3695 * write buffers back to disk based storage.
3697 * 8. If an ARC buffer is written (and dirtied) which also exists in the
3698 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3700 * The performance of the L2ARC can be tweaked by a number of tunables, which
3701 * may be necessary for different workloads:
3703 * l2arc_write_max max write bytes per interval
3704 * l2arc_write_boost extra write bytes during device warmup
3705 * l2arc_noprefetch skip caching prefetched buffers
3706 * l2arc_headroom number of max device writes to precache
3707 * l2arc_feed_secs seconds between L2ARC writing
3709 * Tunables may be removed or added as future performance improvements are
3710 * integrated, and also may become zpool properties.
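 *
 * For example (interpreting the tunables above; illustrative, not a
 * statement of default values): each feed interval writes at most
 * l2arc_write_max bytes, plus l2arc_write_boost while a device is still
 * warming up, so a warm device fills at roughly l2arc_write_max /
 * l2arc_feed_secs bytes per second.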
3714 l2arc_hdr_stat_add(void)
3716 ARCSTAT_INCR(arcstat_l2_hdr_size
, HDR_SIZE
+ L2HDR_SIZE
);
3717 ARCSTAT_INCR(arcstat_hdr_size
, -HDR_SIZE
);
3721 l2arc_hdr_stat_remove(void)
3723 ARCSTAT_INCR(arcstat_l2_hdr_size
, -(HDR_SIZE
+ L2HDR_SIZE
));
3724 ARCSTAT_INCR(arcstat_hdr_size
, HDR_SIZE
);
3728 * Cycle through L2ARC devices. This is how L2ARC load balances.
3729 * If a device is returned, this also returns holding the spa config lock.
3731 static l2arc_dev_t
*
3732 l2arc_dev_get_next(void)
3734 l2arc_dev_t
*first
, *next
= NULL
;
3737 * Lock out the removal of spas (spa_namespace_lock), then removal
3738 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3739 * both locks will be dropped and a spa config lock held instead.
3741 mutex_enter(&spa_namespace_lock
);
3742 mutex_enter(&l2arc_dev_mtx
);
3744 /* if there are no vdevs, there is nothing to do */
3745 if (l2arc_ndev
== 0)
3749 next
= l2arc_dev_last
;
3751 /* loop around the list looking for a non-faulted vdev */
3753 next
= list_head(l2arc_dev_list
);
3755 next
= list_next(l2arc_dev_list
, next
);
3757 next
= list_head(l2arc_dev_list
);
3760 /* if we have come back to the start, bail out */
3763 else if (next
== first
)
3766 } while (vdev_is_dead(next
->l2ad_vdev
));
3768 /* if we were unable to find any usable vdevs, return NULL */
3769 if (vdev_is_dead(next
->l2ad_vdev
))
3772 l2arc_dev_last
= next
;
3775 mutex_exit(&l2arc_dev_mtx
);
3778 * Grab the config lock to prevent the 'next' device from being
3779 * removed while we are writing to it.
3782 spa_config_enter(next
->l2ad_spa
, SCL_L2ARC
, next
, RW_READER
);
3783 mutex_exit(&spa_namespace_lock
);
3789 * Free buffers that were tagged for destruction.
3792 l2arc_do_free_on_write()
3795 l2arc_data_free_t
*df
, *df_prev
;
3797 mutex_enter(&l2arc_free_on_write_mtx
);
3798 buflist
= l2arc_free_on_write
;
3800 for (df
= list_tail(buflist
); df
; df
= df_prev
) {
3801 df_prev
= list_prev(buflist
, df
);
3802 ASSERT(df
->l2df_data
!= NULL
);
3803 ASSERT(df
->l2df_func
!= NULL
);
3804 df
->l2df_func(df
->l2df_data
, df
->l2df_size
);
3805 list_remove(buflist
, df
);
3806 kmem_free(df
, sizeof (l2arc_data_free_t
));
3809 mutex_exit(&l2arc_free_on_write_mtx
);
3813 * A write to a cache device has completed. Update all headers to allow
3814 * reads from these buffers to begin.
3817 l2arc_write_done(zio_t
*zio
)
3819 l2arc_write_callback_t
*cb
;
3822 arc_buf_hdr_t
*head
, *ab
, *ab_prev
;
3823 l2arc_buf_hdr_t
*abl2
;
3824 kmutex_t
*hash_lock
;
3826 cb
= zio
->io_private
;
3828 dev
= cb
->l2wcb_dev
;
3829 ASSERT(dev
!= NULL
);
3830 head
= cb
->l2wcb_head
;
3831 ASSERT(head
!= NULL
);
3832 buflist
= dev
->l2ad_buflist
;
3833 ASSERT(buflist
!= NULL
);
3834 DTRACE_PROBE2(l2arc__iodone
, zio_t
*, zio
,
3835 l2arc_write_callback_t
*, cb
);
3837 if (zio
->io_error
!= 0)
3838 ARCSTAT_BUMP(arcstat_l2_writes_error
);
3840 mutex_enter(&l2arc_buflist_mtx
);
3843 * All writes completed, or an error was hit.
3845 for (ab
= list_prev(buflist
, head
); ab
; ab
= ab_prev
) {
3846 ab_prev
= list_prev(buflist
, ab
);
3848 hash_lock
= HDR_LOCK(ab
);
3849 if (!mutex_tryenter(hash_lock
)) {
3851 * This buffer misses out. It may be in a stage
3852 * of eviction. Its ARC_L2_WRITING flag will be
3853 * left set, denying reads to this buffer.
3855 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss
);
3859 if (zio
->io_error
!= 0) {
3861 * Error - drop L2ARC entry.
3863 list_remove(buflist
, ab
);
3866 kmem_free(abl2
, sizeof (l2arc_buf_hdr_t
));
3867 ARCSTAT_INCR(arcstat_l2_size
, -ab
->b_size
);
3871 * Allow ARC to begin reads to this L2ARC entry.
3873 ab
->b_flags
&= ~ARC_L2_WRITING
;
3875 mutex_exit(hash_lock
);
3878 atomic_inc_64(&l2arc_writes_done
);
3879 list_remove(buflist
, head
);
3880 kmem_cache_free(hdr_cache
, head
);
3881 mutex_exit(&l2arc_buflist_mtx
);
3883 l2arc_do_free_on_write();
3885 kmem_free(cb
, sizeof (l2arc_write_callback_t
));
3889 * A read to a cache device completed. Validate buffer contents before
3890 * handing over to the regular ARC routines.
3893 l2arc_read_done(zio_t
*zio
)
3895 l2arc_read_callback_t
*cb
;
3898 kmutex_t
*hash_lock
;
3901 ASSERT(zio
->io_vd
!= NULL
);
3902 ASSERT(zio
->io_flags
& ZIO_FLAG_DONT_PROPAGATE
);
3904 spa_config_exit(zio
->io_spa
, SCL_L2ARC
, zio
->io_vd
);
3906 cb
= zio
->io_private
;
3908 buf
= cb
->l2rcb_buf
;
3909 ASSERT(buf
!= NULL
);
3911 ASSERT(hdr
!= NULL
);
3913 hash_lock
= HDR_LOCK(hdr
);
3914 mutex_enter(hash_lock
);
3917 * Check this survived the L2ARC journey.
3919 equal
= arc_cksum_equal(buf
);
3920 if (equal
&& zio
->io_error
== 0 && !HDR_L2_EVICTED(hdr
)) {
3921 mutex_exit(hash_lock
);
3922 zio
->io_private
= buf
;
3923 zio
->io_bp_copy
= cb
->l2rcb_bp
; /* XXX fix in L2ARC 2.0 */
3924 zio
->io_bp
= &zio
->io_bp_copy
; /* XXX fix in L2ARC 2.0 */
3927 mutex_exit(hash_lock
);
3929 * Buffer didn't survive caching. Increment stats and
3930 * reissue to the original storage device.
3932 if (zio
->io_error
!= 0) {
3933 ARCSTAT_BUMP(arcstat_l2_io_error
);
3935 zio
->io_error
= EIO
;
3938 ARCSTAT_BUMP(arcstat_l2_cksum_bad
);
3941 * If there's no waiter, issue an async i/o to the primary
3942 * storage now. If there *is* a waiter, the caller must
3943 * issue the i/o in a context where it's OK to block.
3945 if (zio
->io_waiter
== NULL
)
3946 zio_nowait(zio_read(zio
->io_parent
,
3947 cb
->l2rcb_spa
, &cb
->l2rcb_bp
,
3948 buf
->b_data
, zio
->io_size
, arc_read_done
, buf
,
3949 zio
->io_priority
, cb
->l2rcb_flags
, &cb
->l2rcb_zb
));
3952 kmem_free(cb
, sizeof (l2arc_read_callback_t
));
3956 * This is the list priority from which the L2ARC will search for pages to
3957 * cache. This is used within loops (0..3) to cycle through lists in the
3958 * desired order. This order can have a significant effect on cache
3961 * Currently the metadata lists are hit first, MFU then MRU, followed by
3962 * the data lists. This function returns a locked list, and also returns
3966 l2arc_list_locked(int list_num
, kmutex_t
**lock
)
3970 ASSERT(list_num
>= 0 && list_num
<= 3);
3974 list
= &arc_mfu
->arcs_list
[ARC_BUFC_METADATA
];
3975 *lock
= &arc_mfu
->arcs_mtx
;
3978 list
= &arc_mru
->arcs_list
[ARC_BUFC_METADATA
];
3979 *lock
= &arc_mru
->arcs_mtx
;
3982 list
= &arc_mfu
->arcs_list
[ARC_BUFC_DATA
];
3983 *lock
= &arc_mfu
->arcs_mtx
;
3986 list
= &arc_mru
->arcs_list
[ARC_BUFC_DATA
];
3987 *lock
= &arc_mru
->arcs_mtx
;
3991 ASSERT(!(MUTEX_HELD(*lock
)));
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, or it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	l2arc_buf_hdr_t *abl2;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = dev->l2ad_buflist;

	if (buflist == NULL)
		return;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&l2arc_buflist_mtx);
	for (ab = list_tail(buflist); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&l2arc_buflist_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(ab)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, ab);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && ab->b_l2hdr != NULL &&
		    (ab->b_l2hdr->b_daddr > taddr ||
		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (HDR_FREE_IN_PROGRESS(ab)) {
			/*
			 * Already on the path to destruction.
			 */
			mutex_exit(hash_lock);
			continue;
		}

		if (ab->b_state == arc_l2c_only) {
			ASSERT(!HDR_L2_READING(ab));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, ab, hash_lock);
			arc_hdr_destroy(ab);
		} else {
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(ab)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				ab->b_flags |= ARC_L2_EVICTED;
			}

			/*
			 * Tell ARC this no longer exists in L2ARC.
			 */
			if (ab->b_l2hdr != NULL) {
				abl2 = ab->b_l2hdr;
				ab->b_l2hdr = NULL;
				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
			}
			list_remove(buflist, ab);

			/*
			 * This may have been leftover after a
			 * failed write.
			 */
			ab->b_flags &= ~ARC_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&l2arc_buflist_mtx);

	spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
	dev->l2ad_evict = taddr;
}
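/*
 * Illustrative summary only: the two ways l2arc_evict() is invoked later
 * in this file.
 *
 *	l2arc_evict(dev, size, B_FALSE);	feed cycle: clear 'size' bytes
 *						ahead of the write hand before
 *						l2arc_write_buffers()
 *	l2arc_evict(dev, 0, B_TRUE);		device removal: flush every
 *						buffer tracked in l2ad_buflist
 */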
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static void
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (int try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_spa != spa) {
				mutex_exit(hash_lock);
				continue;
			}

			if (ab->b_l2hdr != NULL) {
				/*
				 * Already in L2ARC.
				 */
				mutex_exit(hash_lock);
				continue;
			}

			if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_buf == NULL) {
				DTRACE_PROBE1(l2arc__buf__null, void *, ab);
				mutex_exit(hash_lock);
				continue;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return;
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		spa_l2cache_space_update(dev->l2ad_vdev, 0,
		    dev->l2ad_end - dev->l2ad_hand);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	(void) zio_wait(pio);
}
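/*
 * A minimal sketch of the dummy-header technique above, assuming
 * l2arc_write_done() (defined earlier in this file) uses the
 * ARC_L2_WRITE_HEAD marker to delimit this write pass: the marker is
 * pushed onto the head of l2ad_buflist first, then every buffer issued in
 * this pass is pushed ahead of it, so the completion callback can start at
 * the list head and stop when it reaches 'head' without searching:
 *
 *	head of l2ad_buflist ->  [ab_N] ... [ab_1] [head] [older entries]
 *	                          this write pass   ^marker
 */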
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		/*
		 * Pause for l2arc_feed_secs seconds between writes.
		 */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    (hz * l2arc_feed_secs));
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = dev->l2ad_write;
		if (arc_warm == B_FALSE)
			size += dev->l2ad_boost;

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		l2arc_write_buffers(spa, dev, size);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
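/*
 * A rough, illustrative bound rather than a statement from the original
 * source: each pass writes at most dev->l2ad_write bytes (plus
 * dev->l2ad_boost while arc_warm is still B_FALSE), and passes are
 * l2arc_feed_secs seconds apart, so a single cache device fills at
 * approximately
 *
 *	(l2arc_write_max [+ l2arc_write_boost]) / l2arc_feed_secs  bytes/sec
 *
 * since l2ad_write and l2ad_boost are initialized from those tunables in
 * l2arc_add_vdev() below.
 */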
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = start;
	adddev->l2ad_end = end;
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
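/*
 * Illustrative caller pattern only; the real callers live in the spa/vdev
 * code outside this file.  A caller that has validated and opened a cache
 * vdev would register it roughly as follows, where 'start' and 'end' are
 * assumed to delimit the usable region of the device:
 *
 *	if (!l2arc_vdev_present(vd))
 *		l2arc_add_vdev(spa, vd, start, end);
 */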
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
	if (!(spa_mode & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}
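/*
 * Note on the shutdown handshake above: l2arc_stop() sets
 * l2arc_thread_exit and waits on l2arc_feed_thr_cv.  The feed thread's
 * while (l2arc_thread_exit == 0) loop then falls through; on its way out
 * the thread clears the flag and broadcasts the same cv, which releases
 * the cv_wait() loop here.
 */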