#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>

/*
 * Some lock dependencies:
 *
 *	mdsc->snap_flush_lock
 *	mdsc->cap_delay_lock
 */

struct ceph_fs_client;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
	struct ceph_mds_reply_inode *in;
	struct ceph_dir_layout dir_layout;
};

struct ceph_mds_reply_dir_entry {
	struct ceph_mds_reply_lease	*lease;
	struct ceph_mds_reply_info_in	inode;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head	*head;

	struct ceph_mds_reply_info_in	diri, targeti;
	struct ceph_mds_reply_dirfrag	*dirfrag;
	struct ceph_mds_reply_lease	*dlease;

	/* for fcntl F_GETLK results */
	struct ceph_filelock		*filelock_reply;

	/* for readdir results */
	struct ceph_mds_reply_dirfrag	*dir_dir;
	struct ceph_mds_reply_dir_entry	*dir_entries;

	/* for create results */

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};

/*
 * cap releases are batched and sent to the MDS en masse.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE -				\
				sizeof(struct ceph_mds_cap_release)) /	\
			       sizeof(struct ceph_mds_cap_item))
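
/*
 * For a rough sense of scale (a sketch only; the exact figure depends
 * on the architecture's PAGE_SIZE and the wire-format struct sizes in
 * include/linux/ceph/ceph_fs.h): with a 4 KiB page, a release header
 * of a few bytes, and cap items of a few tens of bytes each,
 *
 *	(PAGE_SIZE - sizeof(struct ceph_mds_cap_release))
 *		/ sizeof(struct ceph_mds_cap_item)
 *
 * works out to on the order of a hundred cap releases batched into a
 * single page-sized message.
 */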

/*
 * state associated with each MDS<->client session
 */
enum {
	CEPH_MDS_SESSION_NEW = 1,
	CEPH_MDS_SESSION_OPENING = 2,
	CEPH_MDS_SESSION_OPEN = 3,
	CEPH_MDS_SESSION_HUNG = 4,
	CEPH_MDS_SESSION_CLOSING = 5,
	CEPH_MDS_SESSION_RESTARTING = 6,
	CEPH_MDS_SESSION_RECONNECTING = 7,
	CEPH_MDS_SESSION_REJECTED = 8,
};

struct ceph_mds_session {
	struct ceph_mds_client *s_mdsc;
	unsigned long     s_ttl;      /* time until mds kills us */
	u64               s_seq;      /* incoming msg seq # */
	struct mutex      s_mutex;    /* serialize session messages */

	struct ceph_connection s_con;

	struct ceph_auth_handshake s_auth;

	/* protected by s_gen_ttl_lock */
	spinlock_t        s_gen_ttl_lock;
	u32               s_cap_gen;  /* inc each time we get mds stale msg */
	unsigned long     s_cap_ttl;  /* when session caps expire */

	/* protected by s_cap_lock */
	spinlock_t        s_cap_lock;
	struct list_head  s_caps;     /* all caps issued by this session */
	int               s_nr_caps, s_trim_caps;
	int               s_num_cap_releases;
	struct list_head  s_cap_releases; /* waiting cap_release messages */
	struct ceph_cap  *s_cap_iterator;

	/* protected by mutex */
	struct list_head  s_cap_flushing;    /* inodes w/ flushing caps */
	unsigned long     s_renew_requested; /* last time we sent a renew req */

	atomic_t          s_ref;
	struct list_head  s_waiting;  /* waiting requests */
	struct list_head  s_unsafe;   /* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
	USE_ANY_MDS,
	USE_RANDOM_MDS,
	USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
					      struct ceph_mds_request *req);
/*
 * wait for request completion callback
 */
typedef int (*ceph_mds_request_wait_callback_t) (struct ceph_mds_client *mdsc,
						  struct ceph_mds_request *req);
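
/*
 * A minimal sketch (hypothetical, for illustration only) of a function
 * matching ceph_mds_request_callback_t; r_callback, when set, runs once
 * the request completes:
 *
 *	static void example_request_cb(struct ceph_mds_client *mdsc,
 *				       struct ceph_mds_request *req)
 *	{
 *		pr_debug("mds request %llu (op %d) completed\n",
 *			 req->r_tid, req->r_op);
 *	}
 */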

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
	u64 r_tid;                   /* transaction id */
	struct rb_node r_node;
	struct ceph_mds_client *r_mdsc;

	int r_op;                    /* mds op code */

	/* operation on what? */
	struct inode *r_inode;              /* arg1 */
	struct dentry *r_dentry;            /* arg1 */
	struct dentry *r_old_dentry;        /* arg2: rename from or link from */
	struct inode *r_old_dentry_dir;     /* arg2: old dentry's parent dir */
	char *r_path1, *r_path2;
	struct ceph_vino r_ino1, r_ino2;

	struct inode *r_parent;             /* parent dir inode */
	struct inode *r_target_inode;       /* resulting inode */

#define CEPH_MDS_R_DIRECT_IS_HASH	(1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED		(2) /* call was aborted */
#define CEPH_MDS_R_GOT_UNSAFE		(3) /* got an unsafe reply */
#define CEPH_MDS_R_GOT_SAFE		(4) /* got a safe reply */
#define CEPH_MDS_R_GOT_RESULT		(5) /* got a result */
#define CEPH_MDS_R_DID_PREPOPULATE	(6) /* prepopulated readdir */
#define CEPH_MDS_R_PARENT_LOCKED	(7) /* is r_parent->i_rwsem wlocked? */
	unsigned long	r_req_flags;

	struct mutex r_fill_mutex;

	union ceph_mds_request_args r_args;
	int r_fmode;        /* file mode, if expecting cap */
	struct timespec r_stamp;

	/* for choosing which mds to send this request to */
	u32 r_direct_hash;  /* choose dir frag based on this dentry hash */

	/* data payload is used for xattr ops */
	struct ceph_pagelist *r_pagelist;

	/* what caps shall we drop? */
	int r_inode_drop, r_inode_unless;
	int r_dentry_drop, r_dentry_unless;
	int r_old_dentry_drop, r_old_dentry_unless;
	struct inode *r_old_inode;
	int r_old_inode_drop, r_old_inode_unless;

	struct ceph_msg  *r_request;  /* original request */
	int r_request_release_offset;
	struct ceph_msg  *r_reply;
	struct ceph_mds_reply_info_parsed r_reply_info;
	struct page *r_locked_page;

	unsigned long r_timeout;  /* optional. jiffies, 0 is "wait forever" */
	unsigned long r_started;  /* start time to measure timeout against */
	unsigned long r_request_started; /* start time for mds request only,
					    used to measure lease durations */

	/* link unsafe requests to parent directory, for fsync */
	struct inode	*r_unsafe_dir;
	struct list_head r_unsafe_dir_item;

	/* unsafe requests that modify the target inode */
	struct list_head r_unsafe_target_item;

	struct ceph_mds_session *r_session;

	int               r_attempts;   /* resend attempts */
	int               r_num_fwd;    /* number of forward attempts */
	int               r_resend_mds; /* mds to resend to next, if any */
	u32               r_sent_on_mseq; /* cap mseq request was sent at */

	struct kref       r_kref;
	struct list_head  r_wait;
	struct completion r_completion;
	struct completion r_safe_completion;
	ceph_mds_request_callback_t r_callback;
	ceph_mds_request_wait_callback_t r_wait_for_completion;
	struct list_head  r_unsafe_item;  /* per-session unsafe list item */

	long long	  r_dir_release_cnt;
	long long	  r_dir_ordered_cnt;
	int		  r_readdir_cache_idx;
	u32		  r_readdir_offset;

	struct ceph_cap_reservation r_caps_reservation;
};
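
/*
 * The CEPH_MDS_R_* values above are bit numbers used with the atomic
 * bitops on r_req_flags, e.g. (illustrative):
 *
 *	set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
 *	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
 *		...
 */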

struct ceph_pool_perm {
	struct rb_node node;
	int perm;
	s64 pool;
	size_t pool_ns_len;
	char pool_ns[];
};

struct ceph_mds_client {
	struct ceph_fs_client   *fsc;
	struct mutex            mutex;         /* all nested structures */

	struct ceph_mdsmap      *mdsmap;
	struct completion       safe_umount_waiters;
	wait_queue_head_t       session_close_wq;
	struct list_head        waiting_for_map;

	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
	atomic_t                num_sessions;
	int                     max_sessions;  /* len of s_mds_sessions */
	int                     stopping;      /* true if shutting down */

	/*
	 * snap_rwsem will cover cap linkage into snaprealms, and
	 * realm snap contexts.  (later, we can do per-realm snap
	 * contexts locks..)  the empty list contains realms with no
	 * references (implying they contain no inodes with caps) that
	 * should be destroyed.
	 */
	struct rw_semaphore     snap_rwsem;
	struct rb_root          snap_realms;
	struct list_head        snap_empty;
	spinlock_t              snap_empty_lock;  /* protect snap_empty */

	u64                     last_tid;      /* most recent mds request */
	u64                     oldest_tid;    /* oldest incomplete mds request,
						  excluding setfilelock requests */
	struct rb_root          request_tree;  /* pending mds requests */
	struct delayed_work     delayed_work;  /* delayed work */
	unsigned long    last_renew_caps;  /* last time we renewed our caps */
	struct list_head cap_delay_list;   /* caps with delayed release */
	spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
	struct list_head snap_flush_list;  /* cap_snaps ready to flush */
	spinlock_t       snap_flush_lock;

	u64               last_cap_flush_tid;
	struct list_head  cap_flush_list;
	struct list_head  cap_dirty;        /* inodes with dirty caps */
	struct list_head  cap_dirty_migrating; /* ...that are migration... */
	int               num_cap_flushing; /* # caps we are flushing */
	spinlock_t        cap_dirty_lock;   /* protects above items */
	wait_queue_head_t cap_flushing_wq;

	/*
	 * Maintain a global pool of preallocated struct ceph_caps, referenced
	 * by struct ceph_caps_reservations.  This ensures that we preallocate
	 * memory needed to successfully process an MDS response.  (If an MDS
	 * sends us cap information and we fail to process it, we will have
	 * problems due to the client and MDS being out of sync.)
	 *
	 * Reservations are 'owned' by a ceph_cap_reservation context.
	 */
	spinlock_t       caps_list_lock;
	struct list_head caps_list;          /* unused (reserved or unreserved) */
	int              caps_total_count;   /* total caps allocated */
	int              caps_use_count;     /* in use */
	int              caps_reserve_count; /* unused, reserved */
	int              caps_avail_count;   /* unused, unreserved */
	int              caps_min_count;     /* keep at least this many
						(unreserved) */

	spinlock_t        dentry_lru_lock;
	struct list_head  dentry_lru;

	struct rw_semaphore     pool_perm_rwsem;
	struct rb_root          pool_perm_tree;
};

extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
	atomic_inc(&s->s_ref);
	return s;
}
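
/*
 * Reference-counting usage sketch (illustrative; exact locking depends
 * on the call site).  A caller that needs a session beyond the scope
 * of mdsc->mutex takes a reference and drops it when done:
 *
 *	mutex_lock(&mdsc->mutex);
 *	s = __ceph_lookup_mds_session(mdsc, mds); // referenced session or NULL
 *	mutex_unlock(&mdsc->mutex);
 *	if (s) {
 *		... use the session ...
 *		ceph_put_mds_session(s);
 *	}
 */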

extern const char *ceph_session_state_name(int s);

extern void ceph_put_mds_session(struct ceph_mds_session *s);

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
			     struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
					   struct inode *dir);
extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
				     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
				struct inode *dir,
				struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
	kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
	kref_put(&req->r_kref, ceph_mdsc_release_request);
}
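
/*
 * Request lifecycle sketch (based on typical callers in fs/ceph/, e.g.
 * getattr; error handling abbreviated and field setup simplified):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_args.getattr.mask = cpu_to_le32(mask);
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 *
 * ceph_mdsc_do_request() waits for the reply (honoring r_timeout when
 * set); ceph_mdsc_put_request() drops the caller's reference, and the
 * request is freed once all references are gone.
 */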

extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
				  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
				     struct inode *inode,
				     struct dentry *dentry, char action,
				     u32 seq);

extern void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc,
				    struct ceph_msg *msg);
extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
				   struct ceph_msg *msg);

extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
						  struct ceph_mds_session *session);

#endif