#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

struct bt_wait_state {
	wait_queue_head_t wait;		/* tasks sleeping until a tag is freed */
} ____cacheline_aligned_in_smp;

#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))

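/*
 * Illustrative example, not part of the original header: the two macros
 * above split a tag into a bitmap word index and a bit within that word.
 * Assuming bits_per_word == 6 (64 tags tracked per word), tag 70 lands in
 * word 1 at bit 6:
 *
 *	TAG_TO_INDEX(bt, 70)	-> 70 >> 6	 == 1
 *	TAG_TO_BIT(bt, 70)	-> 70 & (64 - 1) == 6
 */
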
struct blk_mq_bitmap_tags {
	unsigned int wake_cnt;		/* tag releases batched before a waiter is woken */
	unsigned int bits_per_word;	/* log2 of the number of tags tracked per bitmap word */

	struct blk_align_bitmap *map;	/* the tag bitmap, split into cacheline-aligned words */

	struct bt_wait_state *bs;	/* wait queues for tasks sleeping on a free tag */
};

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_reserved_tags;	/* tags set aside for reserved requests */

	atomic_t active_queues;		/* queues currently using this shared tag set */

	struct blk_mq_bitmap_tags bitmap_tags;		/* bitmap for normal tags */
	struct blk_mq_bitmap_tags breserved_tags;	/* bitmap for reserved tags */

	struct list_head page_list;	/* backing pages for the preallocated requests */
};

extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

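/*
 * Illustrative sketch, not part of the original header: a tag map for a
 * queue depth of 64 with one tag held back for reserved commands might be
 * set up and torn down roughly like this (NUMA_NO_NODE is assumed here as
 * the allocation node):
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(64, 1, NUMA_NO_NODE);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 */
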
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);

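/*
 * Illustrative sketch, not part of the original header: a tag is obtained
 * from the alloc data and later released together with a "last tag" hint
 * so the next allocation can resume near where the previous one ended.
 * The names data, hctx, ctx and ctx->last_tag below stand in for the
 * caller's allocation data and queue contexts and are assumed, not taken
 * from this file:
 *
 *	unsigned int tag;
 *
 *	tag = blk_mq_get_tag(&data);
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return NULL;
 *	...
 *	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
 */
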
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);

enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MAX	= 64,
};

enum {
	BLK_MQ_TAG_FAIL		= -1U,
	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,
};

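/*
 * Illustrative note, not part of the original header: -1U is the all-ones
 * unsigned int, used as the "no tag available" sentinel, so every valid
 * tag compares strictly below it:
 *
 *	BLK_MQ_TAG_FAIL	== 0xffffffff
 *	BLK_MQ_TAG_MAX	== 0xfffffffe
 */
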
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

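/*
 * Illustrative sketch, not part of the original header: the busy/idle
 * wrappers only do work for hardware contexts that share tags; callers
 * bracket the period during which a queue has requests in flight so the
 * tag depth can be split fairly between active queues:
 *
 *	blk_mq_tag_busy(hctx);		// queue starts issuing requests
 *	...
 *	blk_mq_tag_idle(hctx);		// queue has gone quiet again
 */
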
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}

#endif