1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright 2023 Red Hat
9 #include <linux/atomic.h>
10 #include <linux/blk_types.h>
11 #include <linux/completion.h>
12 #include <linux/dm-kcopyd.h>
13 #include <linux/list.h>
14 #include <linux/spinlock.h>
16 #include "admin-state.h"
17 #include "encodings.h"
18 #include "funnel-workqueue.h"
20 #include "physical-zone.h"
21 #include "statistics.h"
22 #include "thread-registry.h"
26 /* Notifications are allowed but not in progress */
28 /* A notification is in progress */
30 /* Notifications are not allowed */
32 /* A notification has completed */
37 * typedef vdo_read_only_notification_fn - A function to notify a listener that the VDO has gone
39 * @listener: The object to notify.
40 * @parent: The completion to notify in order to acknowledge the notification.
42 typedef void (*vdo_read_only_notification_fn
)(void *listener
, struct vdo_completion
*parent
);
45 * An object to be notified when the VDO enters read-only mode
47 struct read_only_listener
{
50 /* The method to call to notify the listener */
51 vdo_read_only_notification_fn notify
;
52 /* A pointer to the next listener */
53 struct read_only_listener
*next
;
58 thread_id_t thread_id
;
59 struct vdo_work_queue
*queue
;
61 * Each thread maintains its own notion of whether the VDO is read-only so that the
62 * read-only state can be checked from any base thread without worrying about
63 * synchronization or thread safety. This does mean that knowledge of the VDO going
64 * read-only does not occur simultaneously across the VDO's threads, but that does not seem
65 * to cause any problems.
69 * A list of objects waiting to be notified on this thread that the VDO has entered
72 struct read_only_listener
*listeners
;
73 struct registered_thread allocating_thread
;
76 /* Keep struct bio statistics atomically */
77 struct atomic_bio_stats
{
78 atomic64_t read
; /* Number of not REQ_WRITE bios */
79 atomic64_t write
; /* Number of REQ_WRITE bios */
80 atomic64_t discard
; /* Number of REQ_DISCARD bios */
81 atomic64_t flush
; /* Number of REQ_FLUSH bios */
82 atomic64_t empty_flush
; /* Number of REQ_PREFLUSH bios without data */
83 atomic64_t fua
; /* Number of REQ_FUA bios */
86 /* Counters are atomic since updates can arrive concurrently from arbitrary threads. */
87 struct atomic_statistics
{
88 atomic64_t bios_submitted
;
89 atomic64_t bios_completed
;
91 atomic64_t invalid_advice_pbn_count
;
92 atomic64_t no_space_error_count
;
93 atomic64_t read_only_error_count
;
94 struct atomic_bio_stats bios_in
;
95 struct atomic_bio_stats bios_in_partial
;
96 struct atomic_bio_stats bios_out
;
97 struct atomic_bio_stats bios_out_completed
;
98 struct atomic_bio_stats bios_acknowledged
;
99 struct atomic_bio_stats bios_acknowledged_partial
;
100 struct atomic_bio_stats bios_meta
;
101 struct atomic_bio_stats bios_meta_completed
;
102 struct atomic_bio_stats bios_journal
;
103 struct atomic_bio_stats bios_journal_completed
;
104 struct atomic_bio_stats bios_page_cache
;
105 struct atomic_bio_stats bios_page_cache_completed
;
108 struct read_only_notifier
{
109 /* The completion for entering read-only mode */
110 struct vdo_completion completion
;
111 /* A completion waiting for notifications to be drained or enabled */
112 struct vdo_completion
*waiter
;
113 /* Lock to protect the next two fields */
115 /* The code of the error which put the VDO into read-only mode */
117 /* The current state of the notifier (values described above) */
118 enum notifier_state state
;
122 * The thread ID returned when the current thread is not a vdo thread, or can not be determined
123 * (usually due to being at interrupt context).
125 #define VDO_INVALID_THREAD_ID ((thread_id_t) -1)
127 struct thread_config
{
128 zone_count_t logical_zone_count
;
129 zone_count_t physical_zone_count
;
130 zone_count_t hash_zone_count
;
131 thread_count_t bio_thread_count
;
132 thread_count_t thread_count
;
133 thread_id_t admin_thread
;
134 thread_id_t journal_thread
;
135 thread_id_t packer_thread
;
136 thread_id_t dedupe_thread
;
137 thread_id_t bio_ack_thread
;
138 thread_id_t cpu_thread
;
139 thread_id_t
*logical_threads
;
140 thread_id_t
*physical_threads
;
141 thread_id_t
*hash_zone_threads
;
142 thread_id_t
*bio_threads
;
145 struct thread_count_config
;
147 struct vdo_super_block
{
148 /* The vio for reading and writing the super block to disk */
150 /* A buffer to hold the super block */
152 /* Whether this super block may not be written */
156 struct data_vio_pool
;
158 struct vdo_administrator
{
159 struct vdo_completion completion
;
160 struct admin_state state
;
163 struct completion callback_sync
;
167 char thread_name_prefix
[MAX_VDO_WORK_QUEUE_NAME_LEN
];
168 struct vdo_thread
*threads
;
169 vdo_action_fn action
;
170 struct vdo_completion
*completion
;
171 struct vio_tracer
*vio_tracer
;
173 /* The atomic version of the state of this vdo */
175 /* The full state of all components */
176 struct vdo_component_states states
;
178 * A counter value to attach to thread names and log messages to identify the individual
181 unsigned int instance
;
182 /* The read-only notifier */
183 struct read_only_notifier read_only_notifier
;
184 /* The load-time configuration of this vdo */
185 struct device_config
*device_config
;
186 /* The thread mapping */
187 struct thread_config thread_config
;
189 /* The super block */
190 struct vdo_super_block super_block
;
192 /* The partitioning of the underlying storage */
193 struct layout layout
;
194 struct layout next_layout
;
195 struct dm_kcopyd_client
*partition_copier
;
198 struct block_map
*block_map
;
200 /* The journal for block map recovery */
201 struct recovery_journal
*recovery_journal
;
204 struct slab_depot
*depot
;
206 /* The compressed-block packer */
207 struct packer
*packer
;
208 /* Whether incoming data should be compressed */
211 /* The handler for flush requests */
212 struct flusher
*flusher
;
214 /* The state the vdo was in when loaded (primarily for unit tests) */
215 enum vdo_state load_state
;
217 /* The logical zones of this vdo */
218 struct logical_zones
*logical_zones
;
220 /* The physical zones of this vdo */
221 struct physical_zones
*physical_zones
;
223 /* The hash lock zones of this vdo */
224 struct hash_zones
*hash_zones
;
226 /* Bio submission manager used for sending bios to the storage device. */
227 struct io_submitter
*io_submitter
;
229 /* The pool of data_vios for servicing incoming bios */
230 struct data_vio_pool
*data_vio_pool
;
232 /* The manager for administrative operations */
233 struct vdo_administrator admin
;
235 /* Flags controlling administrative operations */
236 const struct admin_state_code
*suspend_type
;
237 bool allocations_allowed
;
238 bool dump_on_shutdown
;
239 atomic_t processing_message
;
243 * Atomic stats counters
245 struct atomic_statistics stats
;
246 /* Used to gather statistics without allocating memory */
247 struct vdo_statistics stats_buffer
;
248 /* Protects the stats_buffer */
249 struct mutex stats_mutex
;
251 /* A list of all device_configs referencing this vdo */
252 struct list_head device_config_list
;
254 /* This VDO's list entry for the device registry */
255 struct list_head registration
;
257 /* Underlying block device info. */
258 u64 starting_sector_offset
;
259 struct volume_geometry geometry
;
261 /* N blobs of context data for LZ4 code, one per CPU thread. */
262 char **compression_context
;
266 * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
267 * for acknowledging received and processed bios.
270 * Note that this directly controls the handling of write operations, but the compile-time flag
271 * VDO_USE_BIO_ACK_QUEUE_FOR_READ is also checked for read operations.
273 * Return: Whether a bio-acknowledgement work queue is in use.
275 static inline bool vdo_uses_bio_ack_queue(struct vdo
*vdo
)
277 return vdo
->device_config
->thread_counts
.bio_ack_threads
> 0;
281 * typedef vdo_filter_fn - Method type for vdo matching methods.
283 * A filter function returns false if the vdo doesn't match.
285 typedef bool (*vdo_filter_fn
)(struct vdo
*vdo
, const void *context
);
287 void vdo_initialize_device_registry_once(void);
288 struct vdo
* __must_check
vdo_find_matching(vdo_filter_fn filter
, const void *context
);
290 int __must_check
vdo_make_thread(struct vdo
*vdo
, thread_id_t thread_id
,
291 const struct vdo_work_queue_type
*type
,
292 unsigned int queue_count
, void *contexts
[]);
294 static inline int __must_check
vdo_make_default_thread(struct vdo
*vdo
,
295 thread_id_t thread_id
)
297 return vdo_make_thread(vdo
, thread_id
, NULL
, 1, NULL
);
300 int __must_check
vdo_make(unsigned int instance
, struct device_config
*config
,
301 char **reason
, struct vdo
**vdo_ptr
);
303 void vdo_destroy(struct vdo
*vdo
);
305 void vdo_load_super_block(struct vdo
*vdo
, struct vdo_completion
*parent
);
307 struct block_device
* __must_check
vdo_get_backing_device(const struct vdo
*vdo
);
309 const char * __must_check
vdo_get_device_name(const struct dm_target
*target
);
311 int __must_check
vdo_synchronous_flush(struct vdo
*vdo
);
313 const struct admin_state_code
* __must_check
vdo_get_admin_state(const struct vdo
*vdo
);
315 bool vdo_set_compressing(struct vdo
*vdo
, bool enable
);
317 bool vdo_get_compressing(struct vdo
*vdo
);
319 void vdo_fetch_statistics(struct vdo
*vdo
, struct vdo_statistics
*stats
);
321 thread_id_t
vdo_get_callback_thread_id(void);
323 enum vdo_state __must_check
vdo_get_state(const struct vdo
*vdo
);
325 void vdo_set_state(struct vdo
*vdo
, enum vdo_state state
);
327 void vdo_save_components(struct vdo
*vdo
, struct vdo_completion
*parent
);
329 int vdo_register_read_only_listener(struct vdo
*vdo
, void *listener
,
330 vdo_read_only_notification_fn notification
,
331 thread_id_t thread_id
);
333 int vdo_enable_read_only_entry(struct vdo
*vdo
);
335 void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion
*parent
);
337 void vdo_allow_read_only_mode_entry(struct vdo_completion
*parent
);
339 void vdo_enter_read_only_mode(struct vdo
*vdo
, int error_code
);
341 bool __must_check
vdo_is_read_only(struct vdo
*vdo
);
343 bool __must_check
vdo_in_read_only_mode(const struct vdo
*vdo
);
345 bool __must_check
vdo_in_recovery_mode(const struct vdo
*vdo
);
347 void vdo_enter_recovery_mode(struct vdo
*vdo
);
349 void vdo_assert_on_admin_thread(const struct vdo
*vdo
, const char *name
);
351 void vdo_assert_on_logical_zone_thread(const struct vdo
*vdo
, zone_count_t logical_zone
,
354 void vdo_assert_on_physical_zone_thread(const struct vdo
*vdo
, zone_count_t physical_zone
,
357 int __must_check
vdo_get_physical_zone(const struct vdo
*vdo
, physical_block_number_t pbn
,
358 struct physical_zone
**zone_ptr
);
360 void vdo_dump_status(const struct vdo
*vdo
);