// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */
#include "packer.h"

#include <linux/atomic.h>
#include <linux/blkdev.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"
#include "string-utils.h"

#include "admin-state.h"
#include "completion.h"
#include "constants.h"
#include "data-vio.h"
#include "dedupe.h"
#include "encodings.h"
#include "io-submitter.h"
#include "physical-zone.h"
#include "status-codes.h"
#include "vdo.h"
#include "vio.h"
static const struct version_number COMPRESSED_BLOCK_1_0 = {
	.major_version = 1,
	.minor_version = 0,
};

#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
/**
 * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
 *                                       block.
 * @mapping_state [in] The mapping state for the look up.
 * @compressed_block [in] The compressed block that was read from disk.
 * @fragment_offset [out] The offset of the fragment within a compressed block.
 * @fragment_size [out] The size of the fragment.
 *
 * Return: VDO_SUCCESS if a valid compressed fragment is found; VDO_INVALID_FRAGMENT otherwise.
 */
int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
				      struct compressed_block *block,
				      u16 *fragment_offset, u16 *fragment_size)
{
	u16 compressed_size;
	u16 offset = 0;
	unsigned int i;
	u8 slot;
	struct version_number version;

	if (!vdo_is_state_compressed(mapping_state))
		return VDO_INVALID_FRAGMENT;

	version = vdo_unpack_version_number(block->header.version);
	if (!vdo_are_same_version(version, COMPRESSED_BLOCK_1_0))
		return VDO_INVALID_FRAGMENT;

	slot = mapping_state - VDO_MAPPING_STATE_COMPRESSED_BASE;
	if (slot >= VDO_MAX_COMPRESSION_SLOTS)
		return VDO_INVALID_FRAGMENT;

	compressed_size = __le16_to_cpu(block->header.sizes[slot]);
	for (i = 0; i < slot; i++) {
		offset += __le16_to_cpu(block->header.sizes[i]);
		if (offset >= VDO_COMPRESSED_BLOCK_DATA_SIZE)
			return VDO_INVALID_FRAGMENT;
	}

	if ((offset + compressed_size) > VDO_COMPRESSED_BLOCK_DATA_SIZE)
		return VDO_INVALID_FRAGMENT;

	*fragment_offset = offset;
	*fragment_size = compressed_size;
	return VDO_SUCCESS;
}
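/*
 * Example of the lookup above (values are illustrative only): with header sizes
 * [100, 50, 200, ...] and a mapping state selecting slot 2, the loop accumulates
 * offset = 100 + 50 = 150, so the caller gets *fragment_offset == 150 and
 * *fragment_size == 200, provided both fit within VDO_COMPRESSED_BLOCK_DATA_SIZE.
 */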
/**
 * assert_on_packer_thread() - Check that we are on the packer thread.
 * @packer: The packer.
 * @caller: The function which is asserting.
 */
static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
{
	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
			    "%s() called from packer thread", caller);
}
/**
 * insert_in_sorted_list() - Insert a bin into the list.
 * @packer: The packer.
 * @bin: The bin to move to its sorted position.
 *
 * The list is in ascending order of free space. Since all bins are already in the list, this
 * actually moves the bin to the correct position in the list.
 */
static void insert_in_sorted_list(struct packer *packer, struct packer_bin *bin)
{
	struct packer_bin *active_bin;

	list_for_each_entry(active_bin, &packer->bins, list)
		if (active_bin->free_space > bin->free_space) {
			list_move_tail(&bin->list, &active_bin->list);
			return;
		}

	list_move_tail(&bin->list, &packer->bins);
}
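/*
 * Note: list_move_tail(&bin->list, &active_bin->list) places the bin immediately before the
 * first bin with more free space, which is its sorted position; if no such bin exists, the
 * bin has the most free space and is moved to the tail of packer->bins.
 */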
/**
 * make_bin() - Allocate a bin and put it into the packer's list.
 * @packer: The packer.
 */
static int __must_check make_bin(struct packer *packer)
{
	struct packer_bin *bin;
	int result;

	result = vdo_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS,
				       struct vio *, __func__, &bin);
	if (result != VDO_SUCCESS)
		return result;

	bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
	INIT_LIST_HEAD(&bin->list);
	list_add_tail(&bin->list, &packer->bins);
	return VDO_SUCCESS;
}
/**
 * vdo_make_packer() - Make a new block packer.
 *
 * @vdo: The vdo to which this packer belongs.
 * @bin_count: The number of partial bins to keep in memory.
 * @packer_ptr: A pointer to hold the new packer.
 *
 * Return: VDO_SUCCESS or an error code.
 */
int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **packer_ptr)
{
	struct packer *packer;
	block_count_t i;
	int result;

	result = vdo_allocate(1, struct packer, __func__, &packer);
	if (result != VDO_SUCCESS)
		return result;

	packer->thread_id = vdo->thread_config.packer_thread;
	packer->size = bin_count;
	INIT_LIST_HEAD(&packer->bins);
	vdo_set_admin_state_code(&packer->state, VDO_ADMIN_STATE_NORMAL_OPERATION);

	for (i = 0; i < bin_count; i++) {
		result = make_bin(packer);
		if (result != VDO_SUCCESS) {
			vdo_free_packer(packer);
			return result;
		}
	}

	/*
	 * The canceled bin can hold up to half the number of user vios. Every canceled vio in the
	 * bin must have a canceler for which it is waiting, and any canceler will only have
	 * canceled one lock holder at a time.
	 */
	result = vdo_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2,
				       struct vio *, __func__, &packer->canceled_bin);
	if (result != VDO_SUCCESS) {
		vdo_free_packer(packer);
		return result;
	}

	result = vdo_make_default_thread(vdo, packer->thread_id);
	if (result != VDO_SUCCESS) {
		vdo_free_packer(packer);
		return result;
	}

	*packer_ptr = packer;
	return VDO_SUCCESS;
}
/**
 * vdo_free_packer() - Free a block packer.
 * @packer: The packer to free.
 */
void vdo_free_packer(struct packer *packer)
{
	struct packer_bin *bin, *tmp;

	if (packer == NULL)
		return;

	list_for_each_entry_safe(bin, tmp, &packer->bins, list) {
		list_del_init(&bin->list);
		vdo_free(bin);
	}

	vdo_free(vdo_forget(packer->canceled_bin));
	vdo_free(packer);
}
/**
 * get_packer_from_data_vio() - Get the packer from a data_vio.
 * @data_vio: The data_vio.
 *
 * Return: The packer from the VDO to which the data_vio belongs.
 */
static inline struct packer *get_packer_from_data_vio(struct data_vio *data_vio)
{
	return vdo_from_data_vio(data_vio)->packer;
}
/**
 * vdo_get_packer_statistics() - Get the current statistics from the packer.
 * @packer: The packer to query.
 *
 * Return: a copy of the current statistics for the packer.
 */
struct packer_statistics vdo_get_packer_statistics(const struct packer *packer)
{
	const struct packer_statistics *stats = &packer->statistics;

	return (struct packer_statistics) {
		.compressed_fragments_written = READ_ONCE(stats->compressed_fragments_written),
		.compressed_blocks_written = READ_ONCE(stats->compressed_blocks_written),
		.compressed_fragments_in_packer = READ_ONCE(stats->compressed_fragments_in_packer),
	};
}
/**
 * abort_packing() - Abort packing a data_vio.
 * @data_vio: The data_vio to abort.
 */
static void abort_packing(struct data_vio *data_vio)
{
	struct packer *packer = get_packer_from_data_vio(data_vio);

	WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
		   packer->statistics.compressed_fragments_in_packer - 1);

	write_data_vio(data_vio);
}
/**
 * release_compressed_write_waiter() - Update a data_vio for which a successful compressed write
 *                                     has completed and send it on its way.
 * @data_vio: The data_vio to release.
 * @allocation: The allocation to which the compressed block was written.
 */
static void release_compressed_write_waiter(struct data_vio *data_vio,
					    struct allocation *allocation)
{
	data_vio->new_mapped = (struct zoned_pbn) {
		.pbn = allocation->pbn,
		.zone = allocation->zone,
		.state = data_vio->compression.slot + VDO_MAPPING_STATE_COMPRESSED_BASE,
	};

	vdo_share_compressed_write_lock(data_vio, allocation->lock);
	update_metadata_for_data_vio_write(data_vio, allocation->lock);
}
/**
 * finish_compressed_write() - Finish a compressed block write.
 * @completion: The compressed write completion.
 *
 * This callback is registered in continue_after_allocation().
 */
static void finish_compressed_write(struct vdo_completion *completion)
{
	struct data_vio *agent = as_data_vio(completion);
	struct data_vio *client, *next;

	assert_data_vio_in_allocated_zone(agent);

	/*
	 * Process all the non-agent waiters first to ensure that the pbn lock can not be released
	 * until all of them have had a chance to journal their increfs.
	 */
	for (client = agent->compression.next_in_batch; client != NULL; client = next) {
		next = client->compression.next_in_batch;
		release_compressed_write_waiter(client, &agent->allocation);
	}

	completion->error_handler = handle_data_vio_error;
	release_compressed_write_waiter(agent, &agent->allocation);
}
static void handle_compressed_write_error(struct vdo_completion *completion)
{
	struct data_vio *agent = as_data_vio(completion);
	struct allocation *allocation = &agent->allocation;
	struct data_vio *client, *next;

	if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id))
		return;

	update_vio_error_stats(as_vio(completion),
			       "Completing compressed write vio for physical block %llu with error",
			       (unsigned long long) allocation->pbn);

	for (client = agent->compression.next_in_batch; client != NULL; client = next) {
		next = client->compression.next_in_batch;
		write_data_vio(client);
	}

	/* Now that we've released the batch from the packer, forget the error and continue on. */
	vdo_reset_completion(completion);
	completion->error_handler = handle_data_vio_error;
	write_data_vio(agent);
}
/**
 * add_to_bin() - Put a data_vio in a specific packer_bin in which it will definitely fit.
 * @bin: The bin in which to put the data_vio.
 * @data_vio: The data_vio to add.
 */
static void add_to_bin(struct packer_bin *bin, struct data_vio *data_vio)
{
	data_vio->compression.bin = bin;
	data_vio->compression.slot = bin->slots_used;
	bin->incoming[bin->slots_used++] = data_vio;
}
/**
 * remove_from_bin() - Get the next data_vio whose compression has not been canceled from a bin.
 * @packer: The packer.
 * @bin: The bin from which to get a data_vio.
 *
 * Any canceled data_vios will be moved to the canceled bin.
 *
 * Return: An uncanceled data_vio from the bin or NULL if there are none.
 */
static struct data_vio *remove_from_bin(struct packer *packer, struct packer_bin *bin)
{
	while (bin->slots_used > 0) {
		struct data_vio *data_vio = bin->incoming[--bin->slots_used];

		if (!advance_data_vio_compression_stage(data_vio).may_not_compress) {
			data_vio->compression.bin = NULL;
			return data_vio;
		}

		add_to_bin(packer->canceled_bin, data_vio);
	}

	/* The bin is now empty. */
	bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
	return NULL;
}
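/*
 * Note: data_vios leave the bin in LIFO order; any whose compression has been canceled are
 * diverted to packer->canceled_bin so their cancelers can find them, and once the bin is
 * empty its free_space is reset to a full data block.
 */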
/**
 * initialize_compressed_block() - Initialize a compressed block.
 * @block: The compressed block to initialize.
 * @size: The size of the agent's fragment.
 *
 * This method initializes the compressed block in the compressed write agent. Because the
 * compressor already put the agent's compressed fragment at the start of the compressed block's
 * data field, it needn't be copied. So all we need do is initialize the header and set the size of
 * the agent's fragment.
 */
static void initialize_compressed_block(struct compressed_block *block, u16 size)
{
	/*
	 * Make sure the block layout isn't accidentally changed by changing the length of the
	 * block header.
	 */
	BUILD_BUG_ON(sizeof(struct compressed_block_header) != COMPRESSED_BLOCK_1_0_SIZE);

	block->header.version = vdo_pack_version_number(COMPRESSED_BLOCK_1_0);
	block->header.sizes[0] = __cpu_to_le16(size);
}
/**
 * pack_fragment() - Pack a data_vio's fragment into the compressed block in which it is already
 *                   known to fit.
 * @compression: The agent's compression_state to pack in to.
 * @data_vio: The data_vio to pack.
 * @offset: The offset into the compressed block at which to pack the fragment.
 * @slot: The slot the fragment will occupy in the compressed block.
 * @block: The compressed block which will be written out when batch is fully packed.
 *
 * Return: The new amount of space used.
 */
static block_size_t __must_check pack_fragment(struct compression_state *compression,
					       struct data_vio *data_vio,
					       block_size_t offset, slot_number_t slot,
					       struct compressed_block *block)
{
	struct compression_state *to_pack = &data_vio->compression;
	char *fragment = to_pack->block->data;

	to_pack->next_in_batch = compression->next_in_batch;
	compression->next_in_batch = data_vio;
	to_pack->slot = slot;
	block->header.sizes[slot] = __cpu_to_le16(to_pack->size);
	memcpy(&block->data[offset], fragment, to_pack->size);
	return (offset + to_pack->size);
}
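/*
 * Note: each packed fragment is pushed onto the agent's compression.next_in_batch list so the
 * agent can later release every client in the batch; the fragment's size is recorded in the
 * block header and its bytes are copied to the running offset in block->data.
 */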
/**
 * compressed_write_end_io() - The bio_end_io for a compressed block write.
 * @bio: The bio for the compressed write.
 */
static void compressed_write_end_io(struct bio *bio)
{
	struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);

	vdo_count_completed_bios(bio);
	set_data_vio_allocated_zone_callback(data_vio, finish_compressed_write);
	continue_data_vio_with_error(data_vio, blk_status_to_errno(bio->bi_status));
}
/**
 * write_bin() - Write out a bin.
 * @packer: The packer.
 * @bin: The bin to write.
 */
static void write_bin(struct packer *packer, struct packer_bin *bin)
{
	int result;
	block_size_t offset;
	slot_number_t slot = 1;
	struct compression_state *compression;
	struct compressed_block *block;
	struct data_vio *agent = remove_from_bin(packer, bin);
	struct data_vio *client;
	struct packer_statistics *stats;

	if (agent == NULL)
		return;

	compression = &agent->compression;
	compression->slot = 0;
	block = compression->block;
	initialize_compressed_block(block, compression->size);
	offset = compression->size;

	while ((client = remove_from_bin(packer, bin)) != NULL)
		offset = pack_fragment(compression, client, offset, slot++, block);

	/*
	 * If the batch contains only a single vio, then we save nothing by saving the compressed
	 * form. Continue processing the single vio in the batch.
	 */
	if (slot == 1) {
		abort_packing(agent);
		return;
	}

	if (slot < VDO_MAX_COMPRESSION_SLOTS) {
		/* Clear out the sizes of the unused slots */
		memset(&block->header.sizes[slot], 0,
		       (VDO_MAX_COMPRESSION_SLOTS - slot) * sizeof(__le16));
	}

	agent->vio.completion.error_handler = handle_compressed_write_error;
	if (vdo_is_read_only(vdo_from_data_vio(agent))) {
		continue_data_vio_with_error(agent, VDO_READ_ONLY);
		return;
	}

	result = vio_reset_bio(&agent->vio, (char *) block, compressed_write_end_io,
			       REQ_OP_WRITE, agent->allocation.pbn);
	if (result != VDO_SUCCESS) {
		continue_data_vio_with_error(agent, result);
		return;
	}

	/*
	 * Once the compressed write is submitted, the fragments are no longer in the packer, so
	 * update the stats now.
	 */
	stats = &packer->statistics;
	WRITE_ONCE(stats->compressed_fragments_in_packer,
		   (stats->compressed_fragments_in_packer - slot));
	WRITE_ONCE(stats->compressed_fragments_written,
		   (stats->compressed_fragments_written + slot));
	WRITE_ONCE(stats->compressed_blocks_written,
		   stats->compressed_blocks_written + 1);

	vdo_submit_data_vio(agent);
}
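/*
 * Note: the agent's own fragment already sits at the start of the block (see
 * initialize_compressed_block()), so packing of clients starts at slot 1; when the loop ends,
 * slot equals the number of fragments in the batch, which is why a batch of one is aborted
 * rather than written.
 */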
/**
 * add_data_vio_to_packer_bin() - Add a data_vio to a bin's incoming queue.
 * @packer: The packer.
 * @bin: The bin to which to add the data_vio.
 * @data_vio: The data_vio to add to the bin's queue.
 *
 * Adds a data_vio to a bin's incoming queue, handles logical space change, and calls physical
 * space processor.
 */
static void add_data_vio_to_packer_bin(struct packer *packer, struct packer_bin *bin,
				       struct data_vio *data_vio)
{
	/* If the selected bin doesn't have room, start a new batch to make room. */
	if (bin->free_space < data_vio->compression.size)
		write_bin(packer, bin);

	add_to_bin(bin, data_vio);
	bin->free_space -= data_vio->compression.size;

	/* If we happen to exactly fill the bin, start a new batch. */
	if ((bin->slots_used == VDO_MAX_COMPRESSION_SLOTS) ||
	    (bin->free_space == 0))
		write_bin(packer, bin);

	/* Now that we've finished changing the free space, restore the sort order. */
	insert_in_sorted_list(packer, bin);
}
/**
 * select_bin() - Select the bin that should be used to pack the compressed data in a data_vio with
 *                other data_vios.
 * @packer: The packer.
 * @data_vio: The data_vio.
 *
 * Return: The bin in which to pack the data_vio, or NULL if it should not be packed.
 */
static struct packer_bin * __must_check select_bin(struct packer *packer,
						   struct data_vio *data_vio)
{
	/*
	 * First best fit: select the bin with the least free space that has enough room for the
	 * compressed data in the data_vio.
	 */
	struct packer_bin *bin, *fullest_bin;

	list_for_each_entry(bin, &packer->bins, list) {
		if (bin->free_space >= data_vio->compression.size)
			return bin;
	}

	/*
	 * None of the bins have enough space for the data_vio. We're not allowed to create new
	 * bins, so we have to overflow one of the existing bins. It's pretty intuitive to select
	 * the fullest bin, since that "wastes" the least amount of free space in the compressed
	 * block. But if the space currently used in the fullest bin is smaller than the compressed
	 * size of the incoming block, it seems wrong to force that bin to write when giving up on
	 * compressing the incoming data_vio would likewise "waste" the least amount of free space.
	 */
	fullest_bin = list_first_entry(&packer->bins, struct packer_bin, list);
	if (data_vio->compression.size >=
	    (VDO_COMPRESSED_BLOCK_DATA_SIZE - fullest_bin->free_space))
		return NULL;

	/*
	 * The fullest bin doesn't have room, but writing it out and starting a new batch with the
	 * incoming data_vio will increase the packer's free space.
	 */
	return fullest_bin;
}
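/*
 * Example of the overflow rule above: if the incoming fragment is at least as large as the
 * data already packed in the fullest bin, NULL is returned and the data_vio is written
 * uncompressed, since forcing that bin out would waste at least as much space; otherwise the
 * fullest bin is returned so it can be written out and the incoming fragment can start a new
 * batch.
 */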
/**
 * vdo_attempt_packing() - Attempt to rewrite the data in this data_vio as part of a compressed
 *                         block.
 * @data_vio: The data_vio to pack.
 */
void vdo_attempt_packing(struct data_vio *data_vio)
{
	int result;
	struct packer_bin *bin;
	struct data_vio_compression_status status = get_data_vio_compression_status(data_vio);
	struct packer *packer = get_packer_from_data_vio(data_vio);

	assert_on_packer_thread(packer, __func__);

	result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
			    "attempt to pack data_vio not ready for packing, stage: %u",
			    status.stage);
	if (result != VDO_SUCCESS)
		return;

	/*
	 * Increment the counter whether or not this data_vio will be packed, since
	 * abort_packing() always decrements it.
	 */
	WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
		   packer->statistics.compressed_fragments_in_packer + 1);

	/*
	 * If packing of this data_vio is disallowed for administrative reasons, give up before
	 * making any state changes.
	 */
	if (!vdo_is_state_normal(&packer->state) ||
	    (data_vio->flush_generation < packer->flush_generation)) {
		abort_packing(data_vio);
		return;
	}

	/*
	 * The advance_data_vio_compression_stage() check here verifies that the data_vio is
	 * allowed to be compressed (if it has already been canceled, we'll fall out here). Once
	 * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin
	 * before any more requests can be processed by the packer thread. Otherwise, a canceling
	 * data_vio could attempt to remove the canceled data_vio from the packer and fail to
	 * rendezvous with it. Thus, we must call select_bin() first to ensure that we will
	 * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage.
	 */
	bin = select_bin(packer, data_vio);
	if ((bin == NULL) ||
	    (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_PACKING)) {
		abort_packing(data_vio);
		return;
	}

	add_data_vio_to_packer_bin(packer, bin, data_vio);
}
/**
 * check_for_drain_complete() - Check whether the packer has drained.
 * @packer: The packer.
 */
static void check_for_drain_complete(struct packer *packer)
{
	if (vdo_is_state_draining(&packer->state) && (packer->canceled_bin->slots_used == 0))
		vdo_finish_draining(&packer->state);
}
/**
 * write_all_non_empty_bins() - Write out all non-empty bins on behalf of a flush or suspend.
 * @packer: The packer being flushed.
 */
static void write_all_non_empty_bins(struct packer *packer)
{
	struct packer_bin *bin;

	list_for_each_entry(bin, &packer->bins, list)
		write_bin(packer, bin);
	/*
	 * We don't need to re-sort the bin here since this loop will make every bin have
	 * the same amount of free space, so every ordering is sorted.
	 */

	check_for_drain_complete(packer);
}
/**
 * vdo_flush_packer() - Request that the packer flush asynchronously.
 * @packer: The packer to flush.
 *
 * All bins with at least two compressed data blocks will be written out, and any solitary pending
 * VIOs will be released from the packer. While flushing is in progress, any VIOs submitted to
 * vdo_attempt_packing() will be continued immediately without attempting to pack them.
 */
void vdo_flush_packer(struct packer *packer)
{
	assert_on_packer_thread(packer, __func__);
	if (vdo_is_state_normal(&packer->state))
		write_all_non_empty_bins(packer);
}
/**
 * vdo_remove_lock_holder_from_packer() - Remove a lock holder from the packer.
 * @completion: The data_vio which needs a lock held by a data_vio in the packer. The data_vio's
 *              compression.lock_holder field will point to the data_vio to remove.
 */
void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
{
	struct data_vio *data_vio = as_data_vio(completion);
	struct packer *packer = get_packer_from_data_vio(data_vio);
	struct data_vio *lock_holder;
	struct packer_bin *bin;
	slot_number_t slot;

	assert_data_vio_in_packer_zone(data_vio);

	lock_holder = vdo_forget(data_vio->compression.lock_holder);
	bin = lock_holder->compression.bin;
	VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");

	slot = lock_holder->compression.slot;
	bin->slots_used--;
	if (slot < bin->slots_used) {
		bin->incoming[slot] = bin->incoming[bin->slots_used];
		bin->incoming[slot]->compression.slot = slot;
	}

	lock_holder->compression.bin = NULL;
	lock_holder->compression.slot = 0;

	if (bin != packer->canceled_bin) {
		bin->free_space += lock_holder->compression.size;
		insert_in_sorted_list(packer, bin);
	}

	abort_packing(lock_holder);
	check_for_drain_complete(packer);
}
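/*
 * Note: the removal above is a swap-with-last: after slots_used is decremented, the final
 * occupant of the bin's incoming array is moved into the vacated slot (and its
 * compression.slot updated) so the array stays dense without shifting every entry.
 */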
/**
 * vdo_increment_packer_flush_generation() - Increment the flush generation in the packer.
 * @packer: The packer.
 *
 * This will also cause the packer to flush so that any VIOs from previous generations will exit
 * the packer.
 */
void vdo_increment_packer_flush_generation(struct packer *packer)
{
	assert_on_packer_thread(packer, __func__);
	packer->flush_generation++;
	vdo_flush_packer(packer);
}
/**
 * initiate_drain() - Initiate a drain.
 *
 * Implements vdo_admin_initiator_fn.
 */
static void initiate_drain(struct admin_state *state)
{
	struct packer *packer = container_of(state, struct packer, state);

	write_all_non_empty_bins(packer);
}
/**
 * vdo_drain_packer() - Drain the packer by preventing any more VIOs from entering the packer and
 *                      then flushing.
 * @packer: The packer to drain.
 * @completion: The completion to finish when the packer has drained.
 */
void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion)
{
	assert_on_packer_thread(packer, __func__);
	vdo_start_draining(&packer->state, VDO_ADMIN_STATE_SUSPENDING, completion,
			   initiate_drain);
}
/**
 * vdo_resume_packer() - Resume a packer which has been suspended.
 * @packer: The packer to resume.
 * @parent: The completion to finish when the packer has resumed.
 */
void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent)
{
	assert_on_packer_thread(packer, __func__);
	vdo_continue_completion(parent, vdo_resume_if_quiescent(&packer->state));
}
static void dump_packer_bin(const struct packer_bin *bin, bool canceled)
{
	if (bin->slots_used == 0)
		/* Don't dump empty bins. */
		return;

	vdo_log_info("  %sBin slots_used=%u free_space=%zu",
		     (canceled ? "Canceled" : ""), bin->slots_used, bin->free_space);

	/*
	 * FIXME: dump vios in bin->incoming? The vios should have been dumped from the vio pool.
	 * Maybe just dump their addresses so it's clear they're here?
	 */
}
/**
 * vdo_dump_packer() - Dump the packer.
 * @packer: The packer.
 *
 * Context: dumps in a thread-unsafe fashion.
 */
void vdo_dump_packer(const struct packer *packer)
{
	struct packer_bin *bin;

	vdo_log_info("packer");
	vdo_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu",
		     (unsigned long long) packer->flush_generation,
		     vdo_get_admin_state_code(&packer->state)->name,
		     (unsigned long long) packer->size);

	list_for_each_entry(bin, &packer->bins, list)
		dump_packer_bin(bin, false);

	dump_packer_bin(packer->canceled_bin, true);
}