linux/fpc-iii.git: block/blk-mq.h
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
        struct {
                spinlock_t lock;
                struct list_head rq_list;
        } ____cacheline_aligned_in_smp;

        unsigned int cpu;
        unsigned int index_hw;

        /* incremented at dispatch time */
        unsigned long rq_dispatched[2];
        unsigned long rq_merged;

        /* incremented at completion time */
        unsigned long ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue *queue;
        struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
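
/*
 * Illustrative usage sketch (not part of this header): callers resolve the
 * hardware queue for a given CPU through the per-CPU mq_map table, typically
 * using the CPU of a software context they already hold, e.g.
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 * where q and ctx are assumed to be a valid request_queue and blk_mq_ctx
 * obtained elsewhere by the caller.
 */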
51 * sysfs helpers
53 extern void blk_mq_sysfs_init(struct request_queue *q);
54 extern int blk_mq_sysfs_register(struct request_queue *q);
55 extern void blk_mq_sysfs_unregister(struct request_queue *q);
56 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
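
/*
 * Illustrative usage sketch (assumed caller, not part of this header):
 * blk_mq_get_ctx() pins the caller to a CPU via get_cpu(), so every call
 * must be paired with blk_mq_put_ctx() to drop the preemption count again:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... use ctx, e.g. add a request to ctx->rq_list under ctx->lock ...
 *	blk_mq_put_ctx(ctx);
 *
 * Here q is assumed to be a request_queue already held by the caller.
 */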

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        unsigned int flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
                struct request_queue *q, unsigned int flags,
                struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
        data->q = q;
        data->flags = flags;
        data->ctx = ctx;
        data->hctx = hctx;
}
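
/*
 * Illustrative usage sketch (assumed caller, not part of this header): an
 * allocation path fills a blk_mq_alloc_data on the stack before handing it
 * to the request/tag allocator, e.g. with a flag such as BLK_MQ_REQ_NOWAIT:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
 *
 * where q, ctx and hctx are assumed to be held by the caller; ctx and hctx
 * are "input & output" fields, so the allocator may update them.
 */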

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}
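
/*
 * Illustrative usage sketch (assumed caller, not part of this header):
 * run/dispatch paths can use this check to skip hardware queues that have
 * no software contexts or tags attached, e.g.
 *
 *	if (!blk_mq_hw_queue_mapped(hctx))
 *		return;
 *	blk_mq_run_hw_queue(hctx, true);
 */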

#endif