/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

#define GBF_FULL 1

/**
 * Clone bitmaps (bi_clone):
 *
 * - When a block is freed, we remember the previous state of the block in the
 *   clone bitmap, and only mark the block as free in the real bitmap.
 *
 * - When looking for a block to allocate, we check for a free block in the
 *   clone bitmap, and if no clone bitmap exists, in the real bitmap.
 *
 * - For allocating a block, we mark it as allocated in the real bitmap, and if
 *   a clone bitmap exists, also in the clone bitmap.
 *
 * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
 *   to make the clone bitmap reflect the current allocation state.
 *   (Alternatively, we could remove the clone bitmap.)
 *
 * The clone bitmaps are in-core only and are never written to disk.
 *
 * These steps ensure that blocks which have been freed in a transaction cannot
 * be reallocated in that same transaction.
 */
struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_bytes;
	u32 bi_blocks;
};
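
/*
 * Minimal sketch of the lookup rule documented above (illustrative only;
 * gfs2_bi_search_buffer is not an existing helper): when a clone bitmap
 * exists, it is the buffer consulted when searching for a block to allocate,
 * otherwise the real (on-disk) bitmap buffer is used.
 */
static inline const char *gfs2_bi_search_buffer(const struct gfs2_bitmap *bi)
{
	return bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
}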

struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
	u32 rd_extfail_pt;		/* extent failure point */
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	u32 offset;		/* The offset is bitmap relative */
	int bii;		/* Bitmap index */
};

static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_bits + rbm->bii;
}

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
		rbm->offset;
}

static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
	       (rbm1->offset == rbm2->offset);
}
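
/*
 * Worked example for gfs2_rbm_to_block() (illustrative only): GFS2_NBBY is
 * the number of 2-bit bitmap entries per byte, i.e. 4.  With rd_data0 = 1000
 * and a bitmap whose bi_start is 16 bytes, an rbm with offset 5 in that
 * bitmap addresses disk block 1000 + 16 * 4 + 5 = 1069.
 */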

enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)

struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;

	struct gfs2_trans *bd_tr;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery. Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete. To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time. The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */

enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};

/*
 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
 * the struct; padding at the end is fine.
 */
struct lm_lockname {
	u64 ln_number;
	struct gfs2_sbd *ln_sbd;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) && \
	 ((name1)->ln_type == (name2)->ln_type) && \
	 ((name1)->ln_sbd == (name2)->ln_sbd))

struct gfs2_glock_operations {
	void (*go_sync) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback)(struct gfs2_glock *gl, bool remote);
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1
#define GLOF_LVB    2
#define GLOF_LRU    4
};

enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER = 6,		/* Set for gh that "holds" the glock */
	HIF_FIRST = 7,
	HIF_WAIT = 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	u16 gh_flags;
	u16 gh_state;

	int gh_error;
	unsigned long gh_iflags; /* HIF_... */
	unsigned long gh_ip;
};

/* Number of quota types we support */
#define GFS2_MAXQUOTAS 2

struct gfs2_qadata { /* quota allocation data */
	/* Quota stuff */
	struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
	struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
	unsigned int qa_qd_num;
};

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a multi-block reservation,
           sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
           or wherever it can.
*/

struct gfs2_blkreserv {
	struct rb_node rs_node;	/* link to other block reservations */
	struct gfs2_rbm rs_rbm;	/* Start of reservation */
	u32 rs_free;		/* how many blocks are still free */
};
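
/*
 * Small illustrative sketch (gfs2_rs_start_block is not an existing helper):
 * a reservation covers rs_free blocks starting at the disk block addressed
 * by rs_rbm.
 */
static inline u64 gfs2_rs_start_block(const struct gfs2_blkreserv *rs)
{
	return gfs2_rbm_to_block(&rs->rs_rbm);
}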

/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};
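
/*
 * Usage sketch (illustrative only; "nblocks" stands in for a caller's
 * requested allocation size, it is not defined in this file):
 *
 *	struct gfs2_alloc_parms ap = { .target = nblocks, .aflags = 0 };
 */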

enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_QUEUED			= 12,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
	GLF_INODE_CREATING		= 16, /* Inode creation occurring */
};

struct gfs2_glock {
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

	struct lockref gl_lockref;

	/* State fields protected by gl_lockref.lock */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	union {
		/* For inode and iopen glocks only */
		struct work_struct gl_delete;
		/* For rgrp glocks only */
		struct {
			loff_t start;
			loff_t end;
		} gl_vm;
	};
	struct rcu_head gl_rcu;
	struct rhash_head gl_node;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
	GIF_ORDERED		= 4,
	GIF_FREE_VFS_INODE	= 5,
	GIF_GLOP_PENDING	= 6,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_qadata *i_qadata;	/* quota allocation data */
	struct gfs2_holder i_rgd_gh;
	struct gfs2_blkreserv i_res;	/* rgrp multi-block reservation */
	u64 i_goal;			/* goal block for allocations */
	atomic_t i_sizehint;		/* hint of the write size */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_ordered;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
	u16 i_rahead;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}
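
/*
 * Illustrative inverse of GFS2_I() (not an existing helper; gfs2 code simply
 * takes &ip->i_inode directly): because the VFS inode is embedded as the
 * first member, going back to it is just taking the member's address.
 */
static inline struct inode *gfs2_inode_to_vfs(struct gfs2_inode *ip)
{
	return &ip->i_inode;
}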

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}

struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
	QDF_QMSG_QUIET		= 4,
};

struct gfs2_quota_data {
	struct hlist_bl_node qd_hlist;
	struct list_head qd_list;
	struct kqid qd_id;
	struct gfs2_sbd *qd_sbd;
	struct lockref qd_lockref;
	struct list_head qd_lru;
	unsigned qd_hash;

	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	struct rcu_head qd_rcu;
};

enum {
	TR_TOUCHED = 1,
	TR_ATTACHED = 2,
	TR_ALLOCED = 3,
};

struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;
	unsigned long tr_flags;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;

	struct list_head tr_list;
	struct list_head tr_databuf;
	struct list_head tr_buf;

	unsigned int tr_first;
	struct list_head tr_ail1_list;
	struct list_head tr_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head list;

	unsigned int lblock; /* First logical block */
	u64 dblock; /* First disk block */
	u64 blocks;
};

struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	unsigned int nr_extents;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	unsigned int jd_blocks;
	int jd_recover_error;
	/* Replay stuff */

	unsigned int jd_found_blocks;
	unsigned int jd_found_revokes;
	unsigned int jd_replayed_blocks;

	struct list_head jd_revoke_list;
	unsigned int jd_replay_tail;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1 /* place holder for future feature */
#define GFS2_ERRORS_RO		2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC	3

struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	unsigned int ar_loccookie:1;		/* use location based readdir
						   cookies */
	int ar_commit;				/* Commit interval */
	int ar_statfs_quantum;			/* The fast statfs interval */
	int ar_quota_quantum;			/* The quota interval */
	int ar_statfs_percent;			/* The % change to force sync */
};

struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7, /* read only recovery */
	SDF_SKIP_DLM_UNLOCK	= 8,
	SDF_FORCE_AIL_FLUSH	= 9,
	SDF_AIL1_IO_ERROR	= 10,
};

enum gfs2_freeze_state {
	SFS_UNFROZEN		= 0,
	SFS_STARTING_FREEZE	= 1,
	SFS_FROZEN		= 2,
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;	/* These two are deprecated, */
	int ls_recover_jid_status;	/* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
	struct dlm_lksb ls_control_lksb; /* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
	char *ls_lvb_bits;

	spinlock_t ls_recover_spin; /* protects following fields */
	unsigned long ls_recover_flags; /* DFL_ */
	uint32_t ls_recover_mount; /* gen in first recover_done cb */
	uint32_t ls_recover_start; /* gen in last recover_done cb */
	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result; /* result of last jid recovery */
};
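
/*
 * Illustrative sketch only (gfs2_recovery_blocked is not an existing helper):
 * per the ls_recover_flags rules documented above, DFL_BLOCK_LOCKS is sampled
 * under ls_recover_spin when it must be consistent with ls_recover_block and
 * ls_recover_start.
 */
static inline bool gfs2_recovery_blocked(struct lm_lockstruct *ls)
{
	bool blocked;

	spin_lock(&ls->ls_recover_spin);
	blocked = test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	return blocked;
}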

struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};

struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;	/* Number of pointers in a dinode */
	u32 sd_inptrs;	/* Number of pointers in an indirect block */
	u32 sd_jbsize;	/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_blocks_per_bitmap;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_freeze_gl;
	struct work_struct sd_freeze_work;
	wait_queue_head_t sd_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct completion sd_wdack;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	struct completion sd_journal_ready;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	struct mutex sd_quota_sync_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned long *sd_quota_bitmap;
	spinlock_t sd_bitmap_lock;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	struct address_space sd_aspace;

	spinlock_t sd_log_lock;

	struct gfs2_trans *sd_log_tr;
	unsigned int sd_log_blks_reserved;
	int sd_log_commited_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_revoke;

	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_ordered;
	spinlock_t sd_ordered_lock;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	atomic_t sd_log_blks_needed;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error;

	atomic_t sd_reserving_log;
	wait_queue_head_t sd_reserving_log_wait;

	unsigned int sd_log_flush_head;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* For quiescing the filesystem */
	struct gfs2_holder sd_freeze_gh;
	atomic_t sd_freeze_state;
	struct mutex sd_freeze_mutex;

	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;    /* debugfs directory */
	struct dentry *debugfs_dentry_glocks;
	struct dentry *debugfs_dentry_glstats;
	struct dentry *debugfs_dentry_sbstats;
};

static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}
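
/*
 * Sketch only (gfs2_sbstats_read is not an existing interface): because the
 * increment above goes to a per-CPU copy, reading a statistic back out means
 * summing the copies on every possible CPU.
 */
static inline u64 gfs2_sbstats_read(const struct gfs2_sbd *sdp,
				    unsigned int gltype, int which)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[gltype].stats[which];
	return sum;
}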

extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);

static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
{
	return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
}
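
/*
 * Worked example (illustrative; the exact figure depends on the size of the
 * on-disk dinode): with 4096-byte filesystem blocks, a "stuffed" inode can
 * hold sb_bsize minus the on-disk dinode header, i.e. a little under 4 KiB
 * of file data directly in the inode block, with no indirect blocks.
 */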

#endif /* __INCORE_DOT_H__ */