/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_QEUEUE_H
#define _FUN_QEUEUE_H

#include <linux/interrupt.h>
#include <linux/io.h>

struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;

typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
                              const struct fun_cqe_info *info);

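/*
 * Illustrative sketch, not part of the original header: a minimal handler of
 * type cq_callback_t. The name example_cq_cb and the use of the opaque @data
 * cookie as a completion counter are assumptions made only for illustration;
 * a real driver would decode @msg and @info here.
 */
static inline void example_cq_cb(struct fun_queue *funq, void *data, void *msg,
                                 const struct fun_cqe_info *info)
{
        unsigned long *ncqes = data;    /* driver-private cookie */

        (*ncqes)++;                     /* just count completions */
}
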
/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
        dma_addr_t cq_dma_addr;
        dma_addr_t sq_dma_addr;
        dma_addr_t rq_dma_addr;

        u32 __iomem *sq_db;
        u32 __iomem *rq_db;

        void *sq_cmds;
        struct fun_eprq_rqbuf *rqes;
        struct fun_rq_info *rq_info;

        u32 sq_depth;
        u16 sq_tail;
        u16 rq_tail;
        u8 sqe_size_log2;
        u16 cqe_info_offset;

        u8 cq_intcoal_nentries;
        u8 sq_intcoal_nentries;

        /* SQ head writeback */
        volatile __be64 *sq_head;

        cq_callback_t cq_cb;
        void *cb_data;

        irq_handler_t irq_handler;
};

static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
        return funq->sq_cmds + (pos << funq->sqe_size_log2);
}

static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
        if (++tail == funq->sq_depth)
                tail = 0;
        funq->sq_tail = tail;
        writel(tail, funq->sq_db);
}

static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
                                                 void *cqe)
{
        return cqe + funq->cqe_info_offset;
}

static inline void funq_rq_post(struct fun_queue *funq)
{
        writel(funq->rq_tail, funq->rq_db);
}

struct fun_queue_alloc_req {
        u8 cq_intcoal_nentries;
        u8 sq_intcoal_nentries;
};

int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
                  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
                  u8 coal_nentries, u8 coal_usec, u32 irq_num,
                  u32 scan_start_id, u32 scan_end_id,
                  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
                  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
                  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
                  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
                  u32 *cqidp, u32 __iomem **dbp);

void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
                         size_t hw_desc_sz, size_t sw_desc_size, bool wb,
                         int numa_node, dma_addr_t *dma_addr, void **sw_va,
                         volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
                       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

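/*
 * Illustrative sketch, not part of the original header: allocating an SQ
 * descriptor ring with head writeback enabled. The depth of 1024, the 64-byte
 * descriptor size, the NUMA_NO_NODE placement (<linux/numa.h>) and the
 * function name are assumptions for illustration only.
 */
static inline void *example_alloc_sq_ring(struct device *dma_dev,
                                          dma_addr_t *dma_addr,
                                          volatile __be64 **head_wb)
{
        /* no software descriptor state, so sw_desc_size = 0 and sw_va = NULL */
        return fun_alloc_ring_mem(dma_dev, 1024, 64, 0, true, NUMA_NO_NODE,
                                  dma_addr, NULL, head_wb);
}
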
#define fun_destroy_sq(fdev, sqid) \
        fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
        fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))

struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
                                  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);

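/*
 * Illustrative sketch, not part of the original header: allocating a queue
 * group from a fun_queue_alloc_req. Only the interrupt-coalescing fields shown
 * above are initialized; the remaining request fields are left zero. The
 * function name and the value 2 are placeholders.
 */
static inline struct fun_queue *example_alloc_queue(struct fun_dev *fdev,
                                                    int qid)
{
        struct fun_queue_alloc_req req = {
                .cq_intcoal_nentries = 2,
                .sq_intcoal_nentries = 2,
        };

        return fun_alloc_queue(fdev, qid, &req);
}
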
static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
                                       void *cb_data)
{
        funq->cq_cb = cb;
        funq->cb_data = cb_data;
}

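/*
 * Illustrative sketch, not part of the original header: installing the
 * example_cq_cb handler from above with a per-queue completion counter as the
 * callback cookie. Both names are illustrative assumptions.
 */
static inline void example_install_cq_cb(struct fun_queue *funq,
                                         unsigned long *ncqes)
{
        fun_set_cq_callback(funq, example_cq_cb, ncqes);
}
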
int fun_create_rq(struct fun_queue *funq);

void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
                    irq_handler_t handler, void *data);

unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);

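/*
 * Illustrative sketch, not part of the original header: a hard-IRQ handler
 * that drains up to a fixed budget of completions, and how it might be
 * attached with fun_request_irq(). The handler name, the setup helper and the
 * budget of 64 are assumptions for illustration.
 */
static inline irqreturn_t example_cq_irq(int irq, void *data)
{
        struct fun_queue *funq = data;

        return fun_process_cq(funq, 64) ? IRQ_HANDLED : IRQ_NONE;
}

static inline int example_setup_irq(struct fun_queue *funq,
                                    const char *devname)
{
        return fun_request_irq(funq, devname, example_cq_irq, funq);
}
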
#endif /* _FUN_QEUEUE_H */