/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};
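/*
 * Illustrative sketch (not part of GFS2 itself): each log element type
 * supplies one of these operation tables, typically as a const object in
 * lops.c.  The names below are hypothetical and only show the shape such a
 * table takes:
 *
 *	static const struct gfs2_log_operations example_lops = {
 *		.lo_before_commit = example_lo_before_commit,
 *		.lo_after_commit  = example_lo_after_commit,
 *		.lo_name          = "example",
 *	};
 */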
#define GBF_FULL 1

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
	u32 bi_blocks;
};

struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	u32 offset;		/* The offset is bitmap relative */
	int bii;		/* Bitmap index */
};

static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
	       rbm->offset;
}
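/*
 * Worked example (illustrative numbers only): GFS2_NBBY is the number of
 * blocks described by one bitmap byte (four, since each block takes a two-bit
 * state).  So for a resource group whose data starts at block 65536, an rbm
 * pointing into a bitmap with bi_start of 10 bytes and an offset of 3 resolves
 * to
 *
 *	65536 + 10 * GFS2_NBBY + 3 = 65536 + 40 + 3 = 65579
 *
 * i.e. disk block 65579, which is 43 blocks past rd_data0.
 */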
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
	       (rbm1->offset == rbm2->offset);
}

enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
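/*
 * Note: BUFFER_FNS() and TAS_BUFFER_FNS() come from <linux/buffer_head.h>,
 * so the declarations above generate the usual buffer-flag accessors, along
 * the lines of
 *
 *	set_buffer_pinned(bh);
 *	clear_buffer_pinned(bh);
 *	if (buffer_escaped(bh))
 *		...
 *	if (test_set_buffer_pinned(bh))		(from TAS_BUFFER_FNS)
 *		...
 */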
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */

enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};
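/*
 * Illustrative sketch (not taken verbatim from lock_dlm.c): these values are
 * bit numbers for the unsigned long ls_recover_flags word below, so they are
 * manipulated with the atomic bitops, with ls_recover_spin held where the
 * comment above requires it, roughly:
 *
 *	spin_lock(&ls->ls_recover_spin);
 *	ls->ls_recover_block = ls->ls_recover_start;
 *	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 *	spin_unlock(&ls->ls_recover_spin);
 *
 *	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
 *		...	(use locks only for journal recovery)
 */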
/*
 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
 * the struct; padding at the end is fine.
 */
struct lm_lockname {
	u64 ln_number;
	struct gfs2_sbd *ln_sbd;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) &&	\
	 ((name1)->ln_type == (name2)->ln_type) &&	\
	 ((name1)->ln_sbd == (name2)->ln_sbd))
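/*
 * Example key (illustrative only): glock lookup hashes on this struct, so a
 * lookup builds a fully-populated key along the lines of
 *
 *	struct lm_lockname name = {
 *		.ln_number = blkno,
 *		.ln_type   = LM_TYPE_INODE,
 *		.ln_sbd    = sdp,
 *	};
 *
 * Keeping the struct hole-free matters because the hash is computed over the
 * raw bytes of the key.
 */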
struct gfs2_glock_operations {
	void (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1
#define GLOF_LVB    2
#define GLOF_LRU    4
};

enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER	= 6,  /* Set for gh that "holds" the glock */
	HIF_FIRST	= 7,
	HIF_WAIT	= 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	u16 gh_flags;
	u16 gh_state;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

struct gfs2_qadata { /* quota allocation data */
	/* Quota stuff */
	struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int qa_qd_num;
};

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a multi-block reservation,
	   sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
	   or wherever it can.

   (A rough call sequence is sketched after struct gfs2_blkreserv below.)
*/

struct gfs2_blkreserv {
	/* components used during write (step 1): */
	atomic_t rs_sizehint;         /* hint of the write size */

	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
	struct rb_node rs_node;       /* link to other block reservations */
	struct gfs2_rbm rs_rbm;       /* Start of reservation */
	u32 rs_free;                  /* how many blocks are still free */
	u64 rs_inum;                  /* Inode number for reservation */
};
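/*
 * Illustrative lifecycle sketch (not verbatim kernel code): during a write,
 * the file code records its size hint and later reserves and allocates blocks
 * against it, roughly in this order:
 *
 *	atomic_set(&ip->i_res.rs_sizehint, expected_blocks);	(step 1)
 *	gfs2_inplace_reserve(ip, &ap);				(steps 2-3)
 *	gfs2_alloc_blocks(ip, &block, &nblocks, false, NULL);	(step 4)
 *
 * The exact call names and signatures live in rgrp.c/rgrp.h and may differ
 * between kernel versions.
 */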
/**
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};
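/*
 * Usage sketch (illustrative, not verbatim): allocation sites fill this in on
 * the stack and hand it to the reservation code, e.g.
 *
 *	struct gfs2_alloc_parms ap = {
 *		.target = data_blocks + ind_blocks,
 *		.aflags = 0,
 *	};
 *	error = gfs2_inplace_reserve(ip, &ap);
 *
 * where gfs2_inplace_reserve() is declared in rgrp.h.
 */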
enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_QUEUED			= 12,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
	GLF_INODE_CREATING		= 16, /* Inode creation occurring */
};

struct gfs2_glock {
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;

	/* State fields protected by gl_lockref.lock */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	union {
		/* For inode and iopen glocks only */
		struct work_struct gl_delete;
		/* For rgrp glocks only */
		struct {
			loff_t start;
			loff_t end;
		} gl_vm;
	};
	struct rcu_head gl_rcu;
	struct rhash_head gl_node;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_ORDERED		= 4,
	GIF_FREE_VFS_INODE	= 5,
	GIF_GLOP_PENDING	= 6,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl; /* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh; /* for prepare/commit_write only */
	struct gfs2_qadata *i_qadata; /* quota allocation data */
	struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
	struct gfs2_rgrpd *i_rgd;
	u64 i_goal;	/* goal block for allocations */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
	u16 i_rahead;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}
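/*
 * Usage example (illustrative only): code that is handed a VFS inode recovers
 * the GFS2-specific inode and super block data with these helpers, e.g.
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);
 *	u32 bsize = sdp->sd_sb.sb_bsize;
 *
 * GFS2_I() relies on i_inode being the first member, so the container_of()
 * amounts to a no-op pointer adjustment.
 */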
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
	QDF_QMSG_QUIET		= 4,
};

struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};

enum {
	TR_TOUCHED = 1,
	TR_ATTACHED = 2,
	TR_ALLOCED = 3,
};

struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned long tr_flags;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};

struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	unsigned int jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3
struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	unsigned int ar_loccookie:1;		/* use location based readdir
						   cookies */
	int ar_commit;				/* Commit interval */
	int ar_statfs_quantum;			/* The fast statfs interval */
	int ar_quota_quantum;			/* The quota interval */
	int ar_statfs_percent;			/* The % change to force sync */
};

struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num; /* Numerator */
	unsigned int gt_quota_scale_den; /* Denominator */
	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
	SDF_FORCE_AIL_FLUSH	= 9,
};

enum gfs2_freeze_state {
	SFS_UNFROZEN		= 0,
	SFS_STARTING_FREEZE	= 1,
	SFS_FROZEN		= 2,
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;   /* These two are deprecated, */
	int ls_recover_jid_status; /* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
	struct dlm_lksb ls_control_lksb; /* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin; /* protects following fields */
	unsigned long ls_recover_flags; /* DFL_ */
	uint32_t ls_recover_mount; /* gen in first recover_done cb */
	uint32_t ls_recover_start; /* gen in last recover_done cb */
	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result; /* result of last jid recovery */
};

struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;	/* Number of pointers in a dinode */
	u32 sd_inptrs;	/* Number of pointers in an indirect block */
	u32 sd_jbsize;	/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_jheight; /* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;
	int sd_log_commited_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	atomic_t sd_log_blks_needed;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error;

	atomic_t sd_reserving_log;
	wait_queue_head_t sd_reserving_log_wait;

	unsigned int sd_log_flush_head;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	atomic_t sd_freeze_state;
	struct mutex sd_freeze_mutex;

	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	struct dentry *debugfs_dentry_glocks;
	struct dentry *debugfs_dentry_glstats;
	struct dentry *debugfs_dentry_sbstats;
};

static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}
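/*
 * Usage sketch (illustrative only): the glock and lock_dlm code bump these
 * counters around DLM requests and holder queueing, e.g.
 *
 *	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);	(per-glock counter)
 *	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);	(per-cpu, per-glock-type counter)
 *
 * gfs2_sbstats_inc() disables preemption so that the this_cpu_ptr() access
 * and the increment happen on the same CPU.
 */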
extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);

static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
	return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
}
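/*
 * A "stuffed" inode keeps its data in the dinode block itself, so the most
 * data that can be stuffed is one filesystem block minus the on-disk dinode
 * header.  For example, assuming 4096-byte blocks and a 232-byte struct
 * gfs2_dinode, that would be 3864 bytes (illustrative figures; the real
 * values come from sd_sb.sb_bsize and sizeof(struct gfs2_dinode)).
 */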
#endif /* __INCORE_DOT_H__ */