[linux-2.6.32.60-moxart.git] / include/linux/iocontext.h
#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	void *key;
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;
};

/*
 * I/O subsystem state of the associated processes. It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct radix_tree_root radix_root;
	struct hlist_head cic_list;
	void *ioc_data;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
	 * a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}

struct task_struct;
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(struct task_struct *task)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif

#endif
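
A note on the sharing rule encoded in ioc_task_link() above: a task may only attach itself to an existing io_context while the refcount is still non-zero, because a refcount of zero means the context is already being torn down and taking a new reference would race with that teardown. The kernel expresses this with atomic_long_inc_not_zero(). The fragment below is a minimal user-space sketch of the same "increment only if non-zero" pattern using C11 atomics; it is not kernel code, and the names shared_ctx and ctx_get are purely illustrative.

/* Illustrative user-space analogue of ioc_task_link(); not part of iocontext.h. */
#include <stdatomic.h>
#include <stdio.h>

struct shared_ctx {
	atomic_long refcount;	/* 1 on creation, like io_context */
};

/* Take an extra reference only if the context is not already dying. */
static struct shared_ctx *ctx_get(struct shared_ctx *ctx)
{
	long old = atomic_load(&ctx->refcount);

	while (old != 0) {
		/* On failure the CAS reloads 'old', so the loop retries. */
		if (atomic_compare_exchange_weak(&ctx->refcount, &old, old + 1))
			return ctx;
	}
	return NULL;	/* refcount hit zero: sharing would race with teardown */
}

int main(void)
{
	struct shared_ctx ctx = { .refcount = 1 };

	if (ctx_get(&ctx))
		printf("shared, refcount is now %ld\n",
		       atomic_load(&ctx.refcount));

	atomic_store(&ctx.refcount, 0);	/* simulate the last put */
	if (!ctx_get(&ctx))
		printf("context going away, sharing refused\n");

	return 0;
}

In the kernel itself the same outcome is reached in one call, atomic_long_inc_not_zero(&ioc->refcount), and a successful link additionally bumps nr_tasks so exit_io_context() knows how many tasks still share the context.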