/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
/* dma_addr_t manip */
#define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val)	do { \
					(x).hi = DMA_HI_LE((val)); \
					(x).lo = DMA_LO_LE((val)); \
				} while (0)

#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo)	HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo)		HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_DMA_REGPAIR(regpair)	(HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair)	(HILO_64(regpair.hi, regpair.lo))
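
/* Illustrative sketch (not part of the original header): splitting a
 * dma_addr_t into a little-endian regpair and recombining it.  Assumes the
 * regpair layout ('hi'/'lo' __le32 members) from common_hsi.h.
 *
 *	struct regpair pair;
 *	dma_addr_t phys = 0x12345678abcdULL;
 *
 *	DMA_REGPAIR_LE(pair, phys);
 *	// pair.lo holds cpu_to_le32(0x5678abcd), pair.hi holds cpu_to_le32(0x1234)
 *
 *	u64 addr = HILO_64_REGPAIR(pair);	// addr == 0x12345678abcd again
 */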
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
struct qed_chain_next {
	struct regpair	next_phys;
	void		*next_virt;
};

struct qed_chain_pbl {
	dma_addr_t	p_phys_table;
	void		*p_virt_table;
	u16		prod_page_idx;
	u16		cons_page_idx;
};

struct qed_chain {
	void		*p_virt_addr;
	dma_addr_t	p_phys_addr;
	void		*p_prod_elem;
	void		*p_cons_elem;
	u16		page_cnt;
	enum qed_chain_mode	mode;
	enum qed_chain_use_mode	intended_use; /* used to produce/consume */
	u16		capacity; /*< number of _usable_ elements */
	u16		size; /* number of elements */
	u16		prod_idx;
	u16		cons_idx;
	u16		elem_per_page;
	u16		elem_per_page_mask;
	u16		elem_unusable;
	u16		usable_per_page;
	u16		elem_size;
	u16		next_page_mask;
	struct qed_chain_pbl	pbl;
};
#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
	((mode == QED_CHAIN_MODE_NEXT_PTR) ?	     \
	 (1 + ((sizeof(struct qed_chain_next) - 1) / \
	       (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
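
/* Worked example (illustrative, not part of the original header): for 8-byte
 * elements in QED_CHAIN_MODE_NEXT_PTR on a 64-bit build, where
 * sizeof(struct qed_chain_next) is 16:
 *
 *	ELEMS_PER_PAGE(8)			= 0x1000 / 8 = 512
 *	UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR)	= 1 + (16 - 1) / 8 = 2
 *	USABLE_ELEMS_PER_PAGE(8, NEXT_PTR)	= 512 - 2 = 510
 *	QED_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR)	= DIV_ROUND_UP(1024, 510) = 3
 *
 * i.e., the last two element slots of each page are reserved for the embedded
 * struct qed_chain_next.
 */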
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->cons_idx;
}
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	/* we don't need to truncate upon assignment, as we assign u32->u16 */
	used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
		(u32)p_chain->cons_idx;
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->prod_idx / p_chain->elem_per_page -
			p_chain->cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}
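
/* Numeric sketch (illustrative): the u16 indices are free-running and may
 * wrap.  With prod_idx = 5 and cons_idx = 0xfffb (the producer has wrapped
 * past 0xffff), used = (0x10000 + 5) - 0xfffb = 10, so
 * qed_chain_get_elem_left() reports capacity - 10 elements of room.
 */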
static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
{
	return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
}

static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
{
	return qed_chain_get_elem_left(p_chain) == 0;
}

static inline u16 qed_chain_get_elem_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->elem_per_page;
}

static inline u16 qed_chain_get_usable_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static inline u16 qed_chain_get_unusable_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
{
	return p_chain->size;
}

static inline dma_addr_t
qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl.p_phys_table;
}
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem,
		       u16 *idx_to_inc,
		       u16 *page_to_inc)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
	{
		struct qed_chain_next *p_next = *p_next_elem;

		*p_next_elem = p_next->next_virt;
		*idx_to_inc += p_chain->elem_unusable;
		break;
	}
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* It is assumed pages are sequential; the next element needs
		 * to change only when wrapping from the last page back to
		 * the first.
		 */
		if (++(*page_to_inc) == p_chain->page_cnt) {
			*page_to_inc = 0;
			*p_next_elem = p_chain->p_virt_addr;
		}
	}
}
#define is_unusable_idx(p, idx)	\
	(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
	((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define test_ans_skip(p, idx)				\
	do {						\
		if (is_unusable_idx(p, idx)) {		\
			(p)->idx += (p)->elem_unusable;	\
		}					\
	} while (0)
/**
 * @brief qed_chain_return_multi_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 * @param num
 */
static inline void
qed_chain_return_multi_produced(struct qed_chain *p_chain,
				u16 num)
{
	p_chain->cons_idx += num;
	test_ans_skip(p_chain, cons_idx);
}
/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	p_chain->cons_idx++;
	test_ans_skip(p_chain, cons_idx);
}
/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *ret = NULL;

	if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
	    p_chain->next_page_mask) {
		qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
				       &p_chain->prod_idx,
				       &p_chain->pbl.prod_page_idx);
	}

	ret = p_chain->p_prod_elem;
	p_chain->prod_idx++;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return ret;
}
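
/* Usage sketch (illustrative, not part of the original header): a typical
 * producer-side flow on a QED_CHAIN_USE_TO_PRODUCE chain.  'struct my_bd' and
 * 'my_fill_bd()' are hypothetical placeholders for the caller's descriptor
 * type and fill logic.
 *
 *	struct my_bd *bd;
 *
 *	if (!qed_chain_get_elem_left(p_chain))
 *		return -EBUSY;	// no room to produce
 *
 *	bd = qed_chain_produce(p_chain);
 *	my_fill_bd(bd);
 *
 *	// Once the FW has consumed the element, release it back:
 *	qed_chain_return_produced(p_chain);
 */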
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u16, chain capacity (number of usable BDs)
 */
static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}
/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */
static inline void
qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_ans_skip(p_chain, prod_idx);
	p_chain->prod_idx++;
}
/**
 * @brief qed_chain_consume -
 *
 * A Chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *ret = NULL;

	if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
	    p_chain->next_page_mask) {
		qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
				       &p_chain->cons_idx,
				       &p_chain->pbl.cons_page_idx);
	}

	ret = p_chain->p_cons_elem;
	p_chain->cons_idx++;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return ret;
}
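
/* Usage sketch (illustrative, not part of the original header): a consumer-
 * side flow where the FW writes completions into the chain and the driver
 * drains them up to a FW-reported producer index.  'hw_prod_idx' and
 * 'my_handle_cqe()' are hypothetical placeholders.
 *
 *	while (qed_chain_get_cons_idx(p_chain) != hw_prod_idx) {
 *		void *cqe = qed_chain_consume(p_chain);
 *
 *		my_handle_cqe(cqe);
 *		qed_chain_recycle_consumed(p_chain);
 *	}
 */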
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	int i;

	p_chain->prod_idx	= 0;
	p_chain->cons_idx	= 0;
	p_chain->p_cons_elem	= p_chain->p_virt_addr;
	p_chain->p_prod_elem	= p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
		p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
		/* Do nothing */
		break;

	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;
	}
}
/**
 * @brief qed_chain_init - Initializes a basic chain struct
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 */
static inline void qed_chain_init(struct qed_chain *p_chain,
				  void *p_virt_addr,
				  dma_addr_t p_phys_addr,
				  u16 page_cnt,
				  u8 elem_size,
				  enum qed_chain_use_mode intended_use,
				  enum qed_chain_mode mode)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr	= p_virt_addr;
	p_chain->p_phys_addr	= p_phys_addr;
	p_chain->elem_size	= elem_size;
	p_chain->page_cnt	= page_cnt;
	p_chain->mode		= mode;

	p_chain->intended_use		= intended_use;
	p_chain->elem_per_page		= ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page	=
		USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->capacity		= p_chain->usable_per_page * page_cnt;
	p_chain->size			= p_chain->elem_per_page * page_cnt;
	p_chain->elem_per_page_mask	= p_chain->elem_per_page - 1;

	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);

	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	if (mode == QED_CHAIN_MODE_NEXT_PTR) {
		struct qed_chain_next	*p_next;
		u16			i;

		for (i = 0; i < page_cnt - 1; i++) {
			/* Increment mem_phy to the next page. */
			p_phys_addr += QED_CHAIN_PAGE_SIZE;

			/* Initialize the physical address of the next page. */
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   (elem_size *
							    p_chain->usable_per_page));

			p_next->next_phys.lo	= DMA_LO_LE(p_phys_addr);
			p_next->next_phys.hi	= DMA_HI_LE(p_phys_addr);

			/* Initialize the virtual address of the next page. */
			p_next->next_virt = (void *)((u8 *)p_virt_addr +
						     QED_CHAIN_PAGE_SIZE);

			/* Move to the next page. */
			p_virt_addr = p_next->next_virt;
		}

		/* Last page's next should point to beginning of the chain */
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
						   (elem_size *
						    p_chain->usable_per_page));

		p_next->next_phys.lo	= DMA_LO_LE(p_chain->p_phys_addr);
		p_next->next_phys.hi	= DMA_HI_LE(p_chain->p_phys_addr);
		p_next->next_virt	= p_chain->p_virt_addr;
	}
	qed_chain_reset(p_chain);
}
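
/* Allocation sketch (illustrative, not part of the original header): backing
 * a next-ptr chain of 'num_elems' 8-byte elements with a single coherent DMA
 * allocation.  'dev' (a struct device *) and 'num_elems' are hypothetical
 * caller-provided values.
 *
 *	u16 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, 8, QED_CHAIN_MODE_NEXT_PTR);
 *	dma_addr_t p_phys;
 *	void *p_virt;
 *
 *	p_virt = dma_alloc_coherent(dev, page_cnt * QED_CHAIN_PAGE_SIZE,
 *				    &p_phys, GFP_KERNEL);
 *	if (!p_virt)
 *		return -ENOMEM;
 *
 *	qed_chain_init(p_chain, p_virt, p_phys, page_cnt, 8,
 *		       QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_NEXT_PTR);
 */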
/**
 * @brief qed_chain_pbl_init - Initializes a basic pbl chain
 *        struct
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param use_mode
 * @param p_phys_pbl	pointer to a pre-allocated side table
 *			which will hold physical page addresses.
 * @param p_virt_pbl	pointer to a pre-allocated side table
 *			which will hold virtual page addresses.
 */
static inline void
qed_chain_pbl_init(struct qed_chain *p_chain,
		   void *p_virt_addr,
		   dma_addr_t p_phys_addr,
		   u16 page_cnt,
		   u8 elem_size,
		   enum qed_chain_use_mode use_mode,
		   dma_addr_t p_phys_pbl,
		   dma_addr_t *p_virt_pbl)
{
	dma_addr_t *p_pbl_dma = p_virt_pbl;
	int i;

	qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
		       elem_size, use_mode, QED_CHAIN_MODE_PBL);

	p_chain->pbl.p_phys_table = p_phys_pbl;
	p_chain->pbl.p_virt_table = p_virt_pbl;

	/* Fill the PBL with physical addresses */
	for (i = 0; i < page_cnt; i++) {
		*p_pbl_dma = p_phys_addr;
		p_phys_addr += QED_CHAIN_PAGE_SIZE;
		p_pbl_dma++;
	}
}
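
/* Allocation sketch (illustrative): a PBL chain additionally needs a side
 * table holding one QED_CHAIN_PBL_ENTRY_SIZE-byte physical address per page,
 * itself DMA-mapped so the device can walk it.  'dev', 'p_virt', 'p_phys',
 * 'page_cnt' and 'elem_size' are hypothetical caller-provided values (see the
 * sketch following qed_chain_init above).
 *
 *	dma_addr_t pbl_phys;
 *	dma_addr_t *pbl_virt;
 *
 *	pbl_virt = dma_alloc_coherent(dev, page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
 *				      &pbl_phys, GFP_KERNEL);
 *	if (!pbl_virt)
 *		return -ENOMEM;
 *
 *	qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt, elem_size,
 *			   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			   pbl_phys, pbl_virt);
 */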
/**
 * @brief qed_chain_set_prod - sets the prod to the given
 *        value
 *
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u16 prod_idx,
				      void *p_prod_elem)
{
	p_chain->prod_idx	= prod_idx;
	p_chain->p_prod_elem	= p_prod_elem;
}
/**
 * @brief qed_chain_get_elem -
 *
 * get a pointer to an element represented by absolute idx
 *
 * @param p_chain
 * @assumption p_chain->size is a power of 2
 *
 * @return void*, a pointer to the requested element
 */
static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
					   u16 idx)
{
	void *ret = NULL;

	if (idx >= p_chain->size)
		return NULL;

	ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;

	return ret;
}
/**
 * @brief qed_chain_sge_inc_cons_prod
 *
 * for sge chains, producer isn't increased serially, the ring
 * is expected to be full at all times. Once elements are
 * consumed, they are immediately produced.
 *
 * @param p_chain
 * @param cnt
 *
 * @return inline void
 */
static inline void
qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
			    u16 cnt)
{
	p_chain->prod_idx += cnt;
	p_chain->cons_idx += cnt;
}
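
/* Usage sketch (illustrative): SGE-style rings are kept permanently full;
 * a buffer is rewritten in place at its absolute index and both indices are
 * then advanced together.  'my_refill_buffer()' is a hypothetical helper.
 *
 *	void *elem = qed_chain_sge_get_elem(p_chain, idx);
 *
 *	if (elem)
 *		my_refill_buffer(elem);
 *	qed_chain_sge_inc_cons_prod(p_chain, 1);
 */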