fs/gfs2/incore.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include <linux/mutex.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;

	s64 lh_local_total;
	s64 lh_local_free;
	s64 lh_local_dinodes;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

#define GBF_FULL 1
/**
 * Clone bitmaps (bi_clone):
 *
 * - When a block is freed, we remember the previous state of the block in the
 *   clone bitmap, and only mark the block as free in the real bitmap.
 *
 * - When looking for a block to allocate, we check for a free block in the
 *   clone bitmap, and if no clone bitmap exists, in the real bitmap.
 *
 * - For allocating a block, we mark it as allocated in the real bitmap, and if
 *   a clone bitmap exists, also in the clone bitmap.
 *
 * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
 *   to make the clone bitmap reflect the current allocation state.
 *   (Alternatively, we could remove the clone bitmap.)
 *
 * The clone bitmaps are in-core only, and are never written to disk.
 *
 * These steps ensure that blocks which have been freed in a transaction cannot
 * be reallocated in that same transaction.
 */

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_bytes;
	u32 bi_blocks;
};
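
/*
 * A minimal sketch of the lookup rule described above, for illustration only
 * (this helper is hypothetical and not part of gfs2): blocks freed in the
 * current transaction still look allocated in the clone bitmap, so allocation
 * searches the clone when one exists and falls back to the real bitmap
 * otherwise.  The real search code lives in rgrp.c and operates on 2-bit
 * per-block states rather than raw bytes.
 */
static inline const char *gfs2_example_alloc_bitmap(const struct gfs2_bitmap *bi)
{
	if (bi->bi_clone)	/* freed-this-transaction blocks still look busy here */
		return bi->bi_clone + bi->bi_offset;
	return bi->bi_bh->b_data + bi->bi_offset;
}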
struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_requested;		/* number of blocks in rd_rstree */
	u32 rd_reserved;		/* number of reserved blocks */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct mutex rd_mutex;
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32
/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */

enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};
/*
 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
 * the struct; padding at the end is fine.
 */
struct lm_lockname {
	u64 ln_number;
	struct gfs2_sbd *ln_sbd;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) &&	\
	 ((name1)->ln_type == (name2)->ln_type) &&	\
	 ((name1)->ln_sbd == (name2)->ln_sbd))
struct gfs2_glock_operations {
	int (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh)(struct gfs2_glock *gl);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_instantiate) (struct gfs2_glock *gl);
	int (*go_held)(struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
			const char *fs_id_buf);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	void (*go_unlocked)(struct gfs2_glock *gl);
	const int go_subclass;
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB    2 /* Lock Value Block attached */
#define GLOF_NONDISK 8 /* not I/O related */
};
enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
	HIF_WAIT		= 10,
};
struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	u16 gh_flags;
	u16 gh_state;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

struct gfs2_qadata { /* quota allocation data */
	/* Quota stuff */
	struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int qa_qd_num;
	int qa_ref;
};
/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a mb, sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
	   or wherever it can.
*/

struct gfs2_blkreserv {
	struct rb_node rs_node;		/* node within rd_rstree */
	struct gfs2_rgrpd *rs_rgd;
	u64 rs_start;
	u32 rs_requested;
	u32 rs_reserved;		/* number of reserved blocks */
};
/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};
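
/*
 * A minimal usage sketch, for illustration only (this helper and its values
 * are hypothetical, not part of gfs2): callers typically fill in a
 * gfs2_alloc_parms on the stack before asking for a reservation, with
 * @target set to the ideal request size and the remaining fields left for
 * the reservation and quota code to refine.
 */
static inline void gfs2_example_init_alloc_parms(struct gfs2_alloc_parms *ap,
						 unsigned int blocks)
{
	ap->target = blocks;	/* how many blocks we would like */
	ap->min_target = 0;	/* 0: no explicit minimum (assumption) */
	ap->aflags = 0;		/* no special allocation behaviour */
	ap->allowed = 0;	/* typically updated by the quota checks (assumption) */
}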
enum {
	GLF_LOCK			= 1,
	GLF_INSTANTIATE_NEEDED		= 2, /* needs instantiate */
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_HAVE_REPLY			= 9,
	GLF_INITIAL			= 10,
	GLF_HAVE_FROZEN_REPLY		= 11,
	GLF_INSTANTIATE_IN_PROG		= 12, /* instantiate happening now */
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
	GLF_UNLOCKED			= 16, /* Wait for glock to be unlocked */
	GLF_TRY_TO_EVICT		= 17, /* iopen glocks only */
	GLF_VERIFY_DELETE		= 18, /* iopen glocks only */
};
struct gfs2_glock {
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;

	/* State fields protected by gl_lockref.lock */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	/* For iopen glocks only */
	struct {
		struct delayed_work gl_delete;
		u64 gl_no_formal_ino;
	};
	struct rcu_head gl_rcu;
	struct rhash_head gl_node;
};
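
/*
 * A minimal sketch of why struct lm_lockname must not contain interior
 * padding (see the comment above lm_lockname): glocks are kept in an
 * rhashtable keyed by gl_name, and the key is hashed and compared as a flat
 * byte range ending at ln_type.  The real table parameters live in glock.c;
 * an illustrative equivalent (not the actual definition) would look like:
 */
static const struct rhashtable_params gfs2_example_glock_ht_parms __maybe_unused = {
	.key_len	= offsetofend(struct lm_lockname, ln_type),
	.key_offset	= offsetof(struct gfs2_glock, gl_name),
	.head_offset	= offsetof(struct gfs2_glock, gl_node),
};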
enum {
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_FREE_VFS_INODE	= 5,
	GIF_GLOP_PENDING	= 6,
	GIF_DEFER_DELETE	= 7,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl;
	struct gfs2_holder i_iopen_gh;
	struct gfs2_qadata *i_qadata;	/* quota allocation data */
	struct gfs2_holder i_rgd_gh;
	struct gfs2_blkreserv i_res;	/* rgrp multi-block reservation */
	u64 i_goal;			/* goal block for allocations */
	atomic_t i_sizehint;		/* hint of the write size */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
	u16 i_rahead;
};
/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}
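
/*
 * A minimal usage sketch, for illustration only (this helper is hypothetical,
 * not part of gfs2): code handed a VFS inode recovers the containing
 * gfs2_inode with GFS2_I(), and reaches the per-filesystem gfs2_sbd through
 * GFS2_SB() in the same way.
 */
static inline u64 gfs2_example_dinode_addr(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);	/* the embedded-struct "cast" */

	return ip->i_no_addr;			/* disk address of the dinode */
}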
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
	QDF_QMSG_QUIET		= 4,
};
struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_ref;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};

enum {
	TR_TOUCHED = 1,
	TR_ATTACHED = 2,
	TR_ONSTACK = 3,
};
struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned long tr_flags;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};
struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	struct bio *jd_log_bio;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	u32 jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;

	u64 jd_no_addr;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};
#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2
#define GFS2_QUOTA_QUIET	3 /* on but not complaining */

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3
struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	unsigned int ar_got_rgrplvb:1;		/* Was the rgrplvb opt given? */
	unsigned int ar_loccookie:1;		/* use location based readdir
						   cookies */
	s32 ar_commit;				/* Commit interval */
	s32 ar_statfs_quantum;			/* The fast statfs interval */
	s32 ar_quota_quantum;			/* The quota interval */
	s32 ar_statfs_percent;			/* The % change to force sync */
};
struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};
enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_WITHDRAWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
	SDF_FORCE_AIL_FLUSH	= 9,
	SDF_FREEZE_INITIATOR	= 10,
	SDF_WITHDRAWING		= 11, /* Will withdraw eventually */
	SDF_WITHDRAW_IN_PROG	= 12, /* Withdraw is in progress */
	SDF_REMOTE_WITHDRAW	= 13, /* Performing remote recovery */
	SDF_WITHDRAW_RECOVERY	= 14, /* Wait for journal recovery when we are
					 withdrawing */
	SDF_KILL		= 15,
	SDF_EVICTING		= 16,
	SDF_FROZEN		= 17,
};

#define GFS2_FSNAME_LEN		256
struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};
/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;	/* These two are deprecated, */
	int ls_recover_jid_status;	/* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb;	/* mounted_lock */
	struct dlm_lksb ls_control_lksb;	/* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE];	/* control_lock lvb */
	struct completion ls_sync_wait;		/* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin;	/* protects following fields */
	unsigned long ls_recover_flags;	/* DFL_ */
	uint32_t ls_recover_mount;	/* gen in first recover_done cb */
	uint32_t ls_recover_start;	/* gen in last recover_done cb */
	uint32_t ls_recover_block;	/* copy recover_start in last recover_prep */
	uint32_t ls_recover_size;	/* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit;	/* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result;	/* result of last jid recovery */
};
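
/*
 * A minimal sketch of the DFL_BLOCK_LOCKS locking rule documented above the
 * DFL_* flags, for illustration only (this helper is hypothetical, not part
 * of gfs2): ls_recover_spin is held whenever that bit is changed or
 * ls_recover_block/ls_recover_start are read or written, which is roughly
 * what the dlm recover_prep callback does.
 */
static inline void gdlm_example_recover_prep(struct lm_lockstruct *ls)
{
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;	/* recovery window opens */
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}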
struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};

/* List of local (per node) statfs inodes */
struct local_statfs_inode {
	struct list_head si_list;
	struct inode *si_sc_inode;
	unsigned int si_jid; /* journal id this statfs inode corresponds to */
};
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	struct completion sd_kobj_unregister;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;		/* Number of pointers in a dinode */
	u32 sd_inptrs;		/* Number of pointers in an indirect block */
	u32 sd_ldptrs;		/* Number of pointers in a log descriptor block */
	u32 sd_jbsize;		/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_kill_wait;
	wait_queue_head_t sd_async_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct list_head sd_sc_inodes_list;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;
	struct gfs2_glock *sd_jinode_gl;

	struct gfs2_holder sd_sc_gh;
	struct buffer_head *sd_sc_bh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;

	/* Workqueue stuff */

	struct workqueue_struct *sd_glock_wq;
	struct workqueue_struct *sd_delete_wq;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_revokes;
	struct list_head sd_log_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	atomic_t sd_log_blks_needed;
	atomic_t sd_log_revokes_available;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error; /* First log error */
	wait_queue_head_t sd_withdraw_wait;

	unsigned int sd_log_tail;
	unsigned int sd_log_flush_tail;
	unsigned int sd_log_head;
	unsigned int sd_log_flush_head;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	struct mutex sd_freeze_mutex;
	struct list_head sd_dead_glocks;

	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	unsigned long sd_glock_dqs_held;
};
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}
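
/*
 * A minimal usage sketch, for illustration only (this helper is hypothetical,
 * not part of gfs2): the per-glock and the per-cpu/per-superblock counters
 * are normally bumped together, e.g. when counting dlm requests, so both
 * views of the same event stay in step.
 */
static inline void gfs2_example_count_dlm_request(struct gfs2_glock *gl)
{
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);	/* counter on this glock */
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);	/* per-cpu counter for the sb */
}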
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);

static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
	return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
}

#endif /* __INCORE_DOT_H__ */