/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}

/*
 * read_collect.c
 */
void netfs_read_collection_worker(struct work_struct *work);
void netfs_wake_read_collector(struct netfs_io_request *rreq);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
void netfs_wait_for_pause(struct netfs_io_request *rreq);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while(0)
#define netfs_stat_d(x) do {} while(0)
#endif
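
/*
 * Illustrative only: paths that want to be counted bump the matching atomic,
 * e.g. a read submission might do
 *
 *	netfs_stat(&netfs_n_rh_download);
 *	...
 *	netfs_stat(&netfs_n_rh_download_done);
 *
 * and netfs_stats_show() later renders them all through /proc.  With
 * CONFIG_NETFS_STATS disabled, these calls compile away entirely.
 */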

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
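
/*
 * Illustrative call sequence (hypothetical caller, not a real function in
 * this file): the writethrough helpers above form a begin/advance/end
 * protocol driven from a buffered-write loop, roughly:
 *
 *	wreq = netfs_begin_writethrough(iocb, len);
 *	while (<more data to copy>) {
 *		<copy into *folio>
 *		netfs_advance_writethrough(wreq, wbc, folio, copied,
 *					   to_page_end, &writethrough_cache);
 *	}
 *	ret = netfs_end_writethrough(wreq, wbc, writethrough_cache);
 */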

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}
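
/*
 * Example (illustrative): a filesystem would typically consult this before
 * electing to go through the cache, e.g.:
 *
 *	if (netfs_is_cache_enabled(netfs_inode(inode)))
 *		<submit the I/O via the cache>
 */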

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
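
/*
 * Hypothetical sketch, purely for illustration: how a filesystem might pin a
 * group while handing it to a dirty folio and drop that pin when the folio
 * is cleaned.  Note that NETFS_FOLIO_COPY_TO_CACHE is a sentinel, not a real
 * group, which is why the helpers above skip it.
 */
static inline void netfs_example_group_usage(struct netfs_group *group)
{
	struct netfs_group *g = netfs_get_group(group);

	/* ... attach g as the folio's netfs group while it is dirty ... */

	netfs_put_group(g);	/* the final put calls g->free(g) */
}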

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
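
/*
 * Illustrative sketch (hypothetical helper, not part of the real API): the
 * acquire/release pairing above means that whoever observes a state also
 * sees all stores made before that state was published.  A backend bringing
 * a cache online might transition it like so:
 */
static inline bool fscache_example_activate(struct fscache_cache *cache)
{
	/* Succeeds only if nothing raced the cache out of PREPARING. */
	return fscache_set_cache_state_maybe(cache, FSCACHE_CACHE_IS_PREPARING,
					     FSCACHE_CACHE_IS_ACTIVE);
}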

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);

#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
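
/*
 * Example usage (illustrative): functions bracket themselves with these so
 * that the log shows call flow when debugging is turned on, e.g.:
 *
 *	_enter("R=%08x", rreq->debug_id);
 *	...
 *	_leave(" = %zd", ret);
 */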

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
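
/*
 * Example usage (illustrative): these are sprinkled through the I/O paths to
 * trap accounting bugs early, e.g.:
 *
 *	ASSERT(rreq->origin == NETFS_WRITEBACK);
 *	ASSERTCMP(subreq->transferred, <=, subreq->len);
 */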