/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
	u32 lh_hash;
};
/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

#define GBF_FULL 1

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
	u32 bi_blocks;
};
struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	u32 offset;		/* The offset is bitmap relative */
	int bii;		/* Bitmap index */
};
static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
	       rbm->offset;
}
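/*
 * Editorial worked example, not part of the original header: GFS2_NBBY (from
 * gfs2_ondisk.h) is the number of blocks described by one bitmap byte, i.e. 4
 * with two bits per block.  So, illustratively, an rbm whose rgrp's data area
 * starts at rd_data0 = 1000, whose bitmap buffer begins bi_start = 8 bytes
 * into the rgrp bitmaps, and whose offset is 5 resolves to block
 * 1000 + 8 * 4 + 5 = 1037.
 */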
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
	       (rbm1->offset == rbm2->offset);
}

enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
	BH_Zeronew = BH_PrivateStart + 2,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
BUFFER_FNS(Zeronew, zeronew)
TAS_BUFFER_FNS(Zeronew, zeronew)
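/*
 * Editorial note, not in the original header: BUFFER_FNS() and
 * TAS_BUFFER_FNS() come from <linux/buffer_head.h> and expand to small inline
 * accessors for the buffer_head state bits declared above, e.g.
 * set_buffer_pinned(), clear_buffer_pinned() and buffer_pinned() from the
 * first pair, plus the test-and-set/test-and-clear variants
 * test_set_buffer_pinned() and test_clear_buffer_pinned().
 */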
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */
enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};
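/*
 * Illustrative sketch, not from the original header: these values index the
 * ls_recover_flags word in struct lm_lockstruct (defined below), so they are
 * typically used with the bit helpers on that field, with ls_recover_spin
 * held when the comment above requires it, e.g.:
 *
 *	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
 *		...only journal-recovery locking is allowed for now...
 */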
struct lm_lockname {
	struct gfs2_sbd *ln_sbd;
	u64 ln_number;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) &&	\
	 ((name1)->ln_type == (name2)->ln_type) &&	\
	 ((name1)->ln_sbd == (name2)->ln_sbd))

struct gfs2_glock_operations {
	void (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1
#define GLOF_LVB    2
#define GLOF_LRU    4
};

enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
	HIF_FIRST		= 7,
	HIF_WAIT		= 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2
/* Resource group multi-block reservation, in order of appearance:
   (a rough call-chain sketch follows this comment block)

   Step 1. Function prepares to write, allocates a mb, sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
	   or wherever it can.
*/
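/*
 * Illustrative call-chain sketch (editorial addition, not from the original
 * header); the function names are the gfs2 helpers of this era and are meant
 * only as orientation for the steps above, not as a contract:
 *
 *	gfs2_file_write_iter()
 *	  -> gfs2_size_hint()        sets rs_sizehint              (step 1)
 *	gfs2_write_begin()
 *	  -> gfs2_inplace_reserve()  targets and locks an rgrp     (steps 2-3)
 *	gfs2_alloc_blocks()          assigns bits from that rgrp   (step 4)
 */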
struct gfs2_blkreserv {
	/* components used during write (step 1): */
	atomic_t rs_sizehint;         /* hint of the write size */

	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
	struct rb_node rs_node;       /* link to other block reservations */
	struct gfs2_rbm rs_rbm;       /* Start of reservation */
	u32 rs_free;                  /* how many blocks are still free */
	u64 rs_inum;                  /* Inode number for reservation */

	/* ancillary quota stuff */
	struct gfs2_quota_data *rs_qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder rs_qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int rs_qa_qd_num;
};

/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};

enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_QUEUED			= 12,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
};
struct gfs2_glock {
	struct hlist_bl_node gl_list;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;

	/* State fields protected by gl_lockref.lock */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	union {
		/* For inode and iopen glocks only */
		struct work_struct gl_delete;
		/* For rgrp glocks only */
		struct {
			loff_t start;
			loff_t end;
		} gl_vm;
	};
	struct rhash_head gl_node;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_ORDERED		= 4,
	GIF_FREE_VFS_INODE	= 5,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_blkreserv *i_res;	/* rgrp multi-block reservation */
	struct gfs2_rgrpd *i_rgd;
	u64 i_goal;			/* goal block for allocations */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
};
/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}
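/*
 * Editorial usage note, not in the original header: these two helpers are the
 * usual way back from a VFS inode to the gfs2-specific state, e.g.
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);
 *
 * GFS2_I() works because i_inode is the first member of struct gfs2_inode, so
 * container_of() is effectively a pointer cast; GFS2_SB() simply reads the
 * per-mount gfs2_sbd stored in sb->s_fs_info.
 */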
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
	QDF_QMSG_QUIET		= 4,
};

struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};

struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned int tr_touched:1;
	unsigned int tr_attached:1;
	unsigned int tr_alloced:1;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};

struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	unsigned int jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};
#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3

struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	int ar_commit;				/* Commit interval */
	int ar_statfs_quantum;			/* The fast statfs interval */
	int ar_quota_quantum;			/* The quota interval */
	int ar_statfs_percent;			/* The % change to force sync */
};

struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
};

enum gfs2_freeze_state {
	SFS_UNFROZEN		= 0,
	SFS_STARTING_FREEZE	= 1,
	SFS_FROZEN		= 2,
};
#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;   /* These two are deprecated, */
	int ls_recover_jid_status; /* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
	struct dlm_lksb ls_control_lksb; /* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin; /* protects following fields */
	unsigned long ls_recover_flags; /* DFL_ */
	uint32_t ls_recover_mount; /* gen in first recover_done cb */
	uint32_t ls_recover_start; /* gen in last recover_done cb */
	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result; /* result of last jid recovery */
};

struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;		/* Number of pointers in a dinode */
	u32 sd_inptrs;		/* Number of pointers in an indirect block */
	u32 sd_jbsize;		/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_jheight;	/* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;
	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;
	int sd_log_commited_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error;

	atomic_t sd_reserving_log;
	wait_queue_head_t sd_reserving_log_wait;

	unsigned int sd_log_flush_head;
	u64 sd_log_flush_wrapped;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	atomic_t sd_freeze_state;
	struct mutex sd_freeze_mutex;

	char sd_fsname[GFS2_FSNAME_LEN];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	struct dentry *debugfs_dentry_glocks;
	struct dentry *debugfs_dentry_glstats;
	struct dentry *debugfs_dentry_sbstats;
};
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}
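/*
 * Editorial usage note, not in the original header: callers typically bump
 * the per-glock counter and the per-cpu, per-glock-type counter together,
 * indexed by one of the GFS2_LKS_* values above, e.g. (illustrative only):
 *
 *	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
 *	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 *
 * gfs2_sbstats_inc() disables preemption so that the this_cpu_ptr() lookup
 * and the increment both hit the same CPU's gfs2_pcpu_lkstats bucket.
 */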
#endif /* __INCORE_DOT_H__ */