/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;

	s64 lh_local_total;
	s64 lh_local_free;
	s64 lh_local_dinodes;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

#define GBF_FULL 1

/**
 * Clone bitmaps (bi_clone):
 *
 * - When a block is freed, we remember the previous state of the block in the
 *   clone bitmap, and only mark the block as free in the real bitmap.
 *
 * - When looking for a block to allocate, we check for a free block in the
 *   clone bitmap, and if no clone bitmap exists, in the real bitmap.
 *
 * - For allocating a block, we mark it as allocated in the real bitmap, and if
 *   a clone bitmap exists, also in the clone bitmap.
 *
 * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
 *   to make the clone bitmap reflect the current allocation state.
 *   (Alternatively, we could remove the clone bitmap.)
 *
 * The clone bitmaps are in-core only and are never written to disk.
 *
 * These steps ensure that blocks which have been freed in a transaction cannot
 * be reallocated in that same transaction.  (See the illustrative sketch after
 * struct gfs2_bitmap below.)
 */
struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_bytes;
	u32 bi_blocks;
};

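/*
 * Illustrative sketch of the clone bitmap rules above.  This is pseudocode,
 * not part of the header; the real bitmap manipulation lives in rgrp.c and
 * the helper names used here are hypothetical:
 *
 *	free_block(bi, block):
 *		if (!bi->bi_clone)
 *			bi->bi_clone = copy of the real bitmap;  // remember old state
 *		mark block FREE in the real bitmap only;
 *
 *	alloc_block(bi):
 *		search bi->bi_clone (or the real bitmap if no clone exists)
 *		for a FREE block;
 *		mark it ALLOCATED in the real bitmap and, if a clone exists,
 *		in the clone bitmap as well;
 *
 *	log_flush:
 *		copy the real bitmap over bi->bi_clone;  // the two converge again
 *
 * Because freed blocks remain "in use" in the clone bitmap until the next
 * log_flush, they cannot be handed out again within the same transaction.
 */
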
struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	u32 offset;		/* The offset is bitmap relative */
	int bii;		/* Bitmap index */
};

static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
		rbm->offset;
}

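/*
 * Worked example (illustrative): GFS2_NBBY is 4 (four 2-bit block states per
 * bitmap byte), so with rd_data0 = 1000, bi_start = 16 bytes and offset = 5,
 * gfs2_rbm_to_block() returns 1000 + 16 * 4 + 5 = 1069.
 */
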
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
	       (rbm1->offset == rbm2->offset);
}

enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)

struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets this to keep the sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. while recover_block == recover_start.
 */
enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};

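/*
 * Illustrative sketch of how the bits above are used under ls_recover_spin
 * (a simplified rendering of the recover_prep pattern described in the
 * comment; the authoritative code lives in lock_dlm.c):
 *
 *	spin_lock(&ls->ls_recover_spin);
 *	ls->ls_recover_block = ls->ls_recover_start;
 *	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 *	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
 *	spin_unlock(&ls->ls_recover_spin);
 */
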
/*
 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
 * the struct; padding at the end is fine.
 */
struct lm_lockname {
	u64 ln_number;
	struct gfs2_sbd *ln_sbd;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) &&	\
	 ((name1)->ln_type == (name2)->ln_type) &&	\
	 ((name1)->ln_sbd == (name2)->ln_sbd))

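/*
 * For reference (illustrative, assuming the glock hash table setup in
 * glock.c): the key is hashed and compared as a flat byte range, which is
 * why holes inside struct lm_lockname must be avoided.  The rhashtable
 * parameters look roughly like this:
 *
 *	static const struct rhashtable_params ht_parms = {
 *		.key_len = offsetofend(struct lm_lockname, ln_type),
 *		.key_offset = offsetof(struct gfs2_glock, gl_name),
 *		.head_offset = offsetof(struct gfs2_glock, gl_node),
 *	};
 */
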
struct gfs2_glock_operations {
	int (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
			const char *fs_id_buf);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	void (*go_free)(struct gfs2_glock *gl);
	const int go_subclass;
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB    2 /* Lock Value Block attached */
#define GLOF_LRU    4 /* LRU managed */
#define GLOF_NONDISK 8 /* not I/O related */
};

enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
	HIF_FIRST		= 7,
	HIF_WAIT		= 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	u16 gh_flags;
	u16 gh_state;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

struct gfs2_qadata { /* quota allocation data */
	/* Quota stuff */
	struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int qa_qd_num;
	int qa_ref;
};

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a multi-block reservation,
           sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info.
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use.
   Step 4. Bits are assigned from the rgrp based on either the reservation
           or wherever it can.
*/

struct gfs2_blkreserv {
	struct rb_node rs_node;	/* link to other block reservations */
	struct gfs2_rbm rs_rbm;	/* Start of reservation */
	u32 rs_free;		/* how many blocks are still free */
};

/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};

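/*
 * Typical use (illustrative sketch; gfs2_inplace_reserve() is declared in
 * rgrp.h, and the variable names here are made up):
 *
 *	struct gfs2_alloc_parms ap = { .target = blks_wanted, .aflags = 0 };
 *	int error = gfs2_inplace_reserve(ip, &ap);
 */
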
enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
	GLF_PENDING_DELETE		= 17,
	GLF_FREEING			= 18, /* Wait for glock to be freed */
};

struct gfs2_glock {
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;

	/* State fields protected by gl_lockref.lock */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	/* For iopen glocks only */
	struct {
		struct delayed_work gl_delete;
		u64 gl_no_formal_ino;
	};
	struct rcu_head gl_rcu;
	struct rhash_head gl_node;
};

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_FREE_VFS_INODE	= 5,
	GIF_GLOP_PENDING	= 6,
	GIF_DEFERRED_DELETE	= 7,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_qadata *i_qadata;	/* quota allocation data */
	struct gfs2_holder i_rgd_gh;
	struct gfs2_blkreserv i_res;	/* rgrp multi-block reservation */
	u64 i_goal;			/* goal block for allocations */
	atomic_t i_sizehint;		/* hint of the write size */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
	u16 i_rahead;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}

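/*
 * Example (illustrative): given a VFS inode, the GFS2-specific state is
 * reached with
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);
 */
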
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
	QDF_QMSG_QUIET		= 4,
};

struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};

enum {
	TR_TOUCHED = 1,
	TR_ATTACHED = 2,
	TR_ALLOCED = 3,
};

struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned long tr_flags;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};

struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	u32 jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;

	u64 jd_no_addr;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3

struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	unsigned int ar_loccookie:1;		/* use location based readdir
						   cookies */
	s32 ar_commit;				/* Commit interval */
	s32 ar_statfs_quantum;			/* The fast statfs interval */
	s32 ar_quota_quantum;			/* The quota interval */
	s32 ar_statfs_percent;			/* The % change to force sync */
};

struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num; /* Numerator */
	unsigned int gt_quota_scale_den; /* Denominator */
	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_WITHDRAWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
	SDF_FORCE_AIL_FLUSH	= 9,
	SDF_FS_FROZEN		= 10,
	SDF_WITHDRAWING		= 11, /* Will withdraw eventually */
	SDF_WITHDRAW_IN_PROG	= 12, /* Withdraw is in progress */
	SDF_REMOTE_WITHDRAW	= 13, /* Performing remote recovery */
	SDF_WITHDRAW_RECOVERY	= 14, /* Wait for journal recovery when we are
					 withdrawing */
};

enum gfs2_freeze_state {
	SFS_UNFROZEN		= 0,
	SFS_STARTING_FREEZE	= 1,
	SFS_FROZEN		= 2,
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;   /* These two are deprecated, */
	int ls_recover_jid_status; /* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
	struct dlm_lksb ls_control_lksb; /* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin; /* protects following fields */
	unsigned long ls_recover_flags; /* DFL_ */
	uint32_t ls_recover_mount; /* gen in first recover_done cb */
	uint32_t ls_recover_start; /* gen in last recover_done cb */
	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result; /* result of last jid recovery */
};

struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};

/* List of local (per node) statfs inodes */
struct local_statfs_inode {
	struct list_head si_list;
	struct inode *si_sc_inode;
	unsigned int si_jid; /* journal id this statfs inode corresponds to */
};

struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	struct completion sd_kobj_unregister;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;	/* Number of pointers in a dinode */
	u32 sd_inptrs;	/* Number of pointers in an indirect block */
	u32 sd_ldptrs;	/* Number of pointers in a log descriptor block */
	u32 sd_jbsize;	/* Size of a journaled data block */
	u32 sd_hash_bsize; /* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_glock_wait;
	wait_queue_head_t sd_async_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct list_head sd_sc_inodes_list;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;
	struct gfs2_glock *sd_jinode_gl;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;
	int sd_log_committed_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_revokes;
	struct list_head sd_log_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	atomic_t sd_log_blks_needed;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error; /* First log error */
	wait_queue_head_t sd_withdraw_wait;

	atomic_t sd_reserving_log;
	wait_queue_head_t sd_reserving_log_wait;

	unsigned int sd_log_flush_head;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	atomic_t sd_freeze_state;
	struct mutex sd_freeze_mutex;

	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	unsigned long sd_glock_dqs_held;
};

static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}

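/*
 * Example (illustrative; lock_dlm.c is where the counters are actually
 * bumped): when a dlm request is issued for a glock, both the per-glock and
 * the per-superblock statistics are incremented:
 *
 *	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
 *	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 */
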
extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);

static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
	return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
}

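/*
 * Worked example (illustrative): with the common 4096-byte block size and a
 * 232-byte on-disk dinode header, a "stuffed" inode can hold up to
 * 4096 - 232 = 3864 bytes of data directly in the dinode block.
 */
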
#endif /* __INCORE_DOT_H__ */