1 #ifndef INT_BLK_MQ_TAG_H
2 #define INT_BLK_MQ_TAG_H
11 struct bt_wait_state
{
13 wait_queue_head_t wait
;
14 } ____cacheline_aligned_in_smp
;
/*
 * Split a tag into its word index and bit offset within the sparse
 * bitmap: each word holds (1 << bits_per_word) tags.
 *
 * Use an unsigned constant for the shift: left-shifting signed 1 by 31
 * (a plausible bits_per_word upper bound on 32-bit words) is undefined
 * behavior; 1U is well-defined and yields the same mask for all valid
 * inputs.
 */
#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1U << (bt)->bits_per_word) - 1))
19 struct blk_mq_bitmap_tags
{
21 unsigned int wake_cnt
;
22 unsigned int bits_per_word
;
25 struct blk_align_bitmap
*map
;
28 struct bt_wait_state
*bs
;
32 * Tag address space map.
36 unsigned int nr_reserved_tags
;
38 atomic_t active_queues
;
40 struct blk_mq_bitmap_tags bitmap_tags
;
41 struct blk_mq_bitmap_tags breserved_tags
;
44 struct list_head page_list
;
48 extern struct blk_mq_tags
*blk_mq_init_tags(unsigned int nr_tags
, unsigned int reserved_tags
, int node
);
49 extern void blk_mq_free_tags(struct blk_mq_tags
*tags
);
51 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data
*data
);
52 extern void blk_mq_put_tag(struct blk_mq_hw_ctx
*hctx
, unsigned int tag
, unsigned int *last_tag
);
53 extern bool blk_mq_has_free_tags(struct blk_mq_tags
*tags
);
54 extern ssize_t
blk_mq_tag_sysfs_show(struct blk_mq_tags
*tags
, char *page
);
55 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags
*tags
, unsigned int *last_tag
);
56 extern int blk_mq_tag_update_depth(struct blk_mq_tags
*tags
, unsigned int depth
);
59 BLK_MQ_TAG_CACHE_MIN
= 1,
60 BLK_MQ_TAG_CACHE_MAX
= 64,
64 BLK_MQ_TAG_FAIL
= -1U,
65 BLK_MQ_TAG_MIN
= BLK_MQ_TAG_CACHE_MIN
,
66 BLK_MQ_TAG_MAX
= BLK_MQ_TAG_FAIL
- 1,
69 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx
*);
70 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx
*);
72 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx
*hctx
)
74 if (!(hctx
->flags
& BLK_MQ_F_TAG_SHARED
))
77 return __blk_mq_tag_busy(hctx
);
80 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx
*hctx
)
82 if (!(hctx
->flags
& BLK_MQ_F_TAG_SHARED
))
85 __blk_mq_tag_idle(hctx
);