1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/types.h>
37 #include <asm/byteorder.h>
38 #include <linux/kernel.h>
39 #include <linux/list.h>
40 #include <linux/slab.h>
41 #include <linux/qed/common_hsi.h>
/* Layout of the chain's pages in memory. */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page (next ptr) is unrequired */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};
/* Intended usage pattern; determines how qed_chain_reset() primes the chain. */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
/* Width of the chain's producer/consumer/size counters. */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
68 struct qed_chain_next
{
69 struct regpair next_phys
;
73 struct qed_chain_pbl_u16
{
78 struct qed_chain_pbl_u32
{
83 struct qed_chain_ext_pbl
{
84 dma_addr_t p_pbl_phys
;
88 struct qed_chain_u16
{
89 /* Cyclic index of next element to produce/consme */
94 struct qed_chain_u32
{
95 /* Cyclic index of next element to produce/consme */
101 /* fastpath portion of the chain - required for commands such
102 * as produce / consume.
104 /* Point to next element to produce/consume */
108 /* Fastpath portions of the PBL [if exists] */
110 /* Table for keeping the virtual addresses of the chain pages,
111 * respectively to the physical addresses in the pbl table.
113 void **pp_virt_addr_tbl
;
116 struct qed_chain_pbl_u16 u16
;
117 struct qed_chain_pbl_u32 u32
;
122 struct qed_chain_u16 chain16
;
123 struct qed_chain_u32 chain32
;
126 /* Capacity counts only usable elements */
130 enum qed_chain_mode mode
;
132 /* Elements information for fast calculations */
134 u16 elem_per_page_mask
;
142 /* Slowpath of the chain - required for initialization and destruction,
143 * but isn't involved in regular functionality.
146 /* Base address of a pre-allocated buffer for pbl */
148 dma_addr_t p_phys_table
;
152 /* Address of first page of the chain - the address is required
153 * for fastpath operation [consume/produce] but only for the the SINGLE
154 * flavour which isn't considered fastpath [== SPQ].
157 dma_addr_t p_phys_addr
;
159 /* Total number of elements [for entire chain] */
/* Size of one PBL entry (a 64-bit DMA address) and of a chain page. */
#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)

/* Total elements that fit in one page, including any unusable trailer. */
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

/* Elements per page sacrificed to hold the next-pointer trailer;
 * zero unless the chain is in NEXT_PTR mode.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) - \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Number of pages needed to hold elem_cnt usable elements. */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
187 static inline u16
qed_chain_get_prod_idx(struct qed_chain
*p_chain
)
189 return p_chain
->u
.chain16
.prod_idx
;
192 static inline u16
qed_chain_get_cons_idx(struct qed_chain
*p_chain
)
194 return p_chain
->u
.chain16
.cons_idx
;
197 static inline u32
qed_chain_get_cons_idx_u32(struct qed_chain
*p_chain
)
199 return p_chain
->u
.chain32
.cons_idx
;
202 static inline u16
qed_chain_get_elem_left(struct qed_chain
*p_chain
)
206 used
= (u16
) (((u32
)0x10000 +
207 (u32
)p_chain
->u
.chain16
.prod_idx
) -
208 (u32
)p_chain
->u
.chain16
.cons_idx
);
209 if (p_chain
->mode
== QED_CHAIN_MODE_NEXT_PTR
)
210 used
-= p_chain
->u
.chain16
.prod_idx
/ p_chain
->elem_per_page
-
211 p_chain
->u
.chain16
.cons_idx
/ p_chain
->elem_per_page
;
213 return (u16
)(p_chain
->capacity
- used
);
216 static inline u32
qed_chain_get_elem_left_u32(struct qed_chain
*p_chain
)
220 used
= (u32
) (((u64
)0x100000000ULL
+
221 (u64
)p_chain
->u
.chain32
.prod_idx
) -
222 (u64
)p_chain
->u
.chain32
.cons_idx
);
223 if (p_chain
->mode
== QED_CHAIN_MODE_NEXT_PTR
)
224 used
-= p_chain
->u
.chain32
.prod_idx
/ p_chain
->elem_per_page
-
225 p_chain
->u
.chain32
.cons_idx
/ p_chain
->elem_per_page
;
227 return p_chain
->capacity
- used
;
230 static inline u16
qed_chain_get_usable_per_page(struct qed_chain
*p_chain
)
232 return p_chain
->usable_per_page
;
235 static inline u8
qed_chain_get_unusable_per_page(struct qed_chain
*p_chain
)
237 return p_chain
->elem_unusable
;
240 static inline u32
qed_chain_get_page_cnt(struct qed_chain
*p_chain
)
242 return p_chain
->page_cnt
;
245 static inline dma_addr_t
qed_chain_get_pbl_phys(struct qed_chain
*p_chain
)
247 return p_chain
->pbl_sp
.p_phys_table
;
251 * @brief qed_chain_advance_page -
253 * Advance the next element accros pages for a linked chain
261 qed_chain_advance_page(struct qed_chain
*p_chain
,
262 void **p_next_elem
, void *idx_to_inc
, void *page_to_inc
)
264 struct qed_chain_next
*p_next
= NULL
;
267 switch (p_chain
->mode
) {
268 case QED_CHAIN_MODE_NEXT_PTR
:
269 p_next
= *p_next_elem
;
270 *p_next_elem
= p_next
->next_virt
;
271 if (is_chain_u16(p_chain
))
272 *(u16
*)idx_to_inc
+= p_chain
->elem_unusable
;
274 *(u32
*)idx_to_inc
+= p_chain
->elem_unusable
;
276 case QED_CHAIN_MODE_SINGLE
:
277 *p_next_elem
= p_chain
->p_virt_addr
;
280 case QED_CHAIN_MODE_PBL
:
281 if (is_chain_u16(p_chain
)) {
282 if (++(*(u16
*)page_to_inc
) == p_chain
->page_cnt
)
283 *(u16
*)page_to_inc
= 0;
284 page_index
= *(u16
*)page_to_inc
;
286 if (++(*(u32
*)page_to_inc
) == p_chain
->page_cnt
)
287 *(u32
*)page_to_inc
= 0;
288 page_index
= *(u32
*)page_to_inc
;
290 *p_next_elem
= p_chain
->pbl
.pp_virt_addr_tbl
[page_index
];
/* True when idx sits exactly on a page's first unusable (trailer) element. */
#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* True when the element after idx would be a page's first trailer element. */
#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If idx landed on a trailer element, skip it past the unusable entries */
#define test_and_skip(p, idx)						   \
	do {								   \
		if (is_chain_u16(p)) {					   \
			if (is_unusable_idx(p, idx))			   \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {						   \
			if (is_unusable_idx_u32(p, idx))		   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}							   \
	} while (0)
319 * @brief qed_chain_return_produced -
321 * A chain in which the driver "Produces" elements should use this API
322 * to indicate previous produced elements are now consumed.
326 static inline void qed_chain_return_produced(struct qed_chain
*p_chain
)
328 if (is_chain_u16(p_chain
))
329 p_chain
->u
.chain16
.cons_idx
++;
331 p_chain
->u
.chain32
.cons_idx
++;
332 test_and_skip(p_chain
, cons_idx
);
336 * @brief qed_chain_produce -
338 * A chain in which the driver "Produces" elements should use this to get
339 * a pointer to the next element which can be "Produced". It's driver
340 * responsibility to validate that the chain has room for new element.
344 * @return void*, a pointer to next element
346 static inline void *qed_chain_produce(struct qed_chain
*p_chain
)
348 void *p_ret
= NULL
, *p_prod_idx
, *p_prod_page_idx
;
350 if (is_chain_u16(p_chain
)) {
351 if ((p_chain
->u
.chain16
.prod_idx
&
352 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
353 p_prod_idx
= &p_chain
->u
.chain16
.prod_idx
;
354 p_prod_page_idx
= &p_chain
->pbl
.c
.u16
.prod_page_idx
;
355 qed_chain_advance_page(p_chain
, &p_chain
->p_prod_elem
,
356 p_prod_idx
, p_prod_page_idx
);
358 p_chain
->u
.chain16
.prod_idx
++;
360 if ((p_chain
->u
.chain32
.prod_idx
&
361 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
362 p_prod_idx
= &p_chain
->u
.chain32
.prod_idx
;
363 p_prod_page_idx
= &p_chain
->pbl
.c
.u32
.prod_page_idx
;
364 qed_chain_advance_page(p_chain
, &p_chain
->p_prod_elem
,
365 p_prod_idx
, p_prod_page_idx
);
367 p_chain
->u
.chain32
.prod_idx
++;
370 p_ret
= p_chain
->p_prod_elem
;
371 p_chain
->p_prod_elem
= (void *)(((u8
*)p_chain
->p_prod_elem
) +
378 * @brief qed_chain_get_capacity -
380 * Get the maximum number of BDs in chain
385 * @return number of unusable BDs
387 static inline u32
qed_chain_get_capacity(struct qed_chain
*p_chain
)
389 return p_chain
->capacity
;
393 * @brief qed_chain_recycle_consumed -
395 * Returns an element which was previously consumed;
396 * Increments producers so they could be written to FW.
400 static inline void qed_chain_recycle_consumed(struct qed_chain
*p_chain
)
402 test_and_skip(p_chain
, prod_idx
);
403 if (is_chain_u16(p_chain
))
404 p_chain
->u
.chain16
.prod_idx
++;
406 p_chain
->u
.chain32
.prod_idx
++;
410 * @brief qed_chain_consume -
412 * A Chain in which the driver utilizes data written by a different source
413 * (i.e., FW) should use this to access passed buffers.
417 * @return void*, a pointer to the next buffer written
419 static inline void *qed_chain_consume(struct qed_chain
*p_chain
)
421 void *p_ret
= NULL
, *p_cons_idx
, *p_cons_page_idx
;
423 if (is_chain_u16(p_chain
)) {
424 if ((p_chain
->u
.chain16
.cons_idx
&
425 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
426 p_cons_idx
= &p_chain
->u
.chain16
.cons_idx
;
427 p_cons_page_idx
= &p_chain
->pbl
.c
.u16
.cons_page_idx
;
428 qed_chain_advance_page(p_chain
, &p_chain
->p_cons_elem
,
429 p_cons_idx
, p_cons_page_idx
);
431 p_chain
->u
.chain16
.cons_idx
++;
433 if ((p_chain
->u
.chain32
.cons_idx
&
434 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
435 p_cons_idx
= &p_chain
->u
.chain32
.cons_idx
;
436 p_cons_page_idx
= &p_chain
->pbl
.c
.u32
.cons_page_idx
;
437 qed_chain_advance_page(p_chain
, &p_chain
->p_cons_elem
,
438 p_cons_idx
, p_cons_page_idx
);
440 p_chain
->u
.chain32
.cons_idx
++;
443 p_ret
= p_chain
->p_cons_elem
;
444 p_chain
->p_cons_elem
= (void *)(((u8
*)p_chain
->p_cons_elem
) +
451 * @brief qed_chain_reset - Resets the chain to its start state
453 * @param p_chain pointer to a previously allocted chain
455 static inline void qed_chain_reset(struct qed_chain
*p_chain
)
459 if (is_chain_u16(p_chain
)) {
460 p_chain
->u
.chain16
.prod_idx
= 0;
461 p_chain
->u
.chain16
.cons_idx
= 0;
463 p_chain
->u
.chain32
.prod_idx
= 0;
464 p_chain
->u
.chain32
.cons_idx
= 0;
466 p_chain
->p_cons_elem
= p_chain
->p_virt_addr
;
467 p_chain
->p_prod_elem
= p_chain
->p_virt_addr
;
469 if (p_chain
->mode
== QED_CHAIN_MODE_PBL
) {
470 /* Use (page_cnt - 1) as a reset value for the prod/cons page's
471 * indices, to avoid unnecessary page advancing on the first
472 * call to qed_chain_produce/consume. Instead, the indices
473 * will be advanced to page_cnt and then will be wrapped to 0.
475 u32 reset_val
= p_chain
->page_cnt
- 1;
477 if (is_chain_u16(p_chain
)) {
478 p_chain
->pbl
.c
.u16
.prod_page_idx
= (u16
)reset_val
;
479 p_chain
->pbl
.c
.u16
.cons_page_idx
= (u16
)reset_val
;
481 p_chain
->pbl
.c
.u32
.prod_page_idx
= reset_val
;
482 p_chain
->pbl
.c
.u32
.cons_page_idx
= reset_val
;
486 switch (p_chain
->intended_use
) {
487 case QED_CHAIN_USE_TO_CONSUME
:
488 /* produce empty elements */
489 for (i
= 0; i
< p_chain
->capacity
; i
++)
490 qed_chain_recycle_consumed(p_chain
);
493 case QED_CHAIN_USE_TO_CONSUME_PRODUCE
:
494 case QED_CHAIN_USE_TO_PRODUCE
:
502 * @brief qed_chain_init - Initalizes a basic chain struct
506 * @param p_phys_addr physical address of allocated buffer's beginning
507 * @param page_cnt number of pages in the allocated buffer
508 * @param elem_size size of each element in the chain
509 * @param intended_use
512 static inline void qed_chain_init_params(struct qed_chain
*p_chain
,
515 enum qed_chain_use_mode intended_use
,
516 enum qed_chain_mode mode
,
517 enum qed_chain_cnt_type cnt_type
)
519 /* chain fixed parameters */
520 p_chain
->p_virt_addr
= NULL
;
521 p_chain
->p_phys_addr
= 0;
522 p_chain
->elem_size
= elem_size
;
523 p_chain
->intended_use
= (u8
)intended_use
;
524 p_chain
->mode
= mode
;
525 p_chain
->cnt_type
= (u8
)cnt_type
;
527 p_chain
->elem_per_page
= ELEMS_PER_PAGE(elem_size
);
528 p_chain
->usable_per_page
= USABLE_ELEMS_PER_PAGE(elem_size
, mode
);
529 p_chain
->elem_per_page_mask
= p_chain
->elem_per_page
- 1;
530 p_chain
->elem_unusable
= UNUSABLE_ELEMS_PER_PAGE(elem_size
, mode
);
531 p_chain
->next_page_mask
= (p_chain
->usable_per_page
&
532 p_chain
->elem_per_page_mask
);
534 p_chain
->page_cnt
= page_cnt
;
535 p_chain
->capacity
= p_chain
->usable_per_page
* page_cnt
;
536 p_chain
->size
= p_chain
->elem_per_page
* page_cnt
;
538 p_chain
->pbl_sp
.p_phys_table
= 0;
539 p_chain
->pbl_sp
.p_virt_table
= NULL
;
540 p_chain
->pbl
.pp_virt_addr_tbl
= NULL
;
544 * @brief qed_chain_init_mem -
546 * Initalizes a basic chain struct with its chain buffers
549 * @param p_virt_addr virtual address of allocated buffer's beginning
550 * @param p_phys_addr physical address of allocated buffer's beginning
553 static inline void qed_chain_init_mem(struct qed_chain
*p_chain
,
554 void *p_virt_addr
, dma_addr_t p_phys_addr
)
556 p_chain
->p_virt_addr
= p_virt_addr
;
557 p_chain
->p_phys_addr
= p_phys_addr
;
561 * @brief qed_chain_init_pbl_mem -
563 * Initalizes a basic chain struct with its pbl buffers
566 * @param p_virt_pbl pointer to a pre allocated side table which will hold
567 * virtual page addresses.
568 * @param p_phys_pbl pointer to a pre-allocated side table which will hold
569 * physical page addresses.
570 * @param pp_virt_addr_tbl
571 * pointer to a pre-allocated side table which will hold
572 * the virtual addresses of the chain pages.
575 static inline void qed_chain_init_pbl_mem(struct qed_chain
*p_chain
,
577 dma_addr_t p_phys_pbl
,
578 void **pp_virt_addr_tbl
)
580 p_chain
->pbl_sp
.p_phys_table
= p_phys_pbl
;
581 p_chain
->pbl_sp
.p_virt_table
= p_virt_pbl
;
582 p_chain
->pbl
.pp_virt_addr_tbl
= pp_virt_addr_tbl
;
586 * @brief qed_chain_init_next_ptr_elem -
588 * Initalizes a next pointer element
591 * @param p_virt_curr virtual address of a chain page of which the next
592 * pointer element is initialized
593 * @param p_virt_next virtual address of the next chain page
594 * @param p_phys_next physical address of the next chain page
598 qed_chain_init_next_ptr_elem(struct qed_chain
*p_chain
,
600 void *p_virt_next
, dma_addr_t p_phys_next
)
602 struct qed_chain_next
*p_next
;
605 size
= p_chain
->elem_size
* p_chain
->usable_per_page
;
606 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_curr
+ size
);
608 DMA_REGPAIR_LE(p_next
->next_phys
, p_phys_next
);
610 p_next
->next_virt
= p_virt_next
;
614 * @brief qed_chain_get_last_elem -
616 * Returns a pointer to the last element of the chain
622 static inline void *qed_chain_get_last_elem(struct qed_chain
*p_chain
)
624 struct qed_chain_next
*p_next
= NULL
;
625 void *p_virt_addr
= NULL
;
626 u32 size
, last_page_idx
;
628 if (!p_chain
->p_virt_addr
)
631 switch (p_chain
->mode
) {
632 case QED_CHAIN_MODE_NEXT_PTR
:
633 size
= p_chain
->elem_size
* p_chain
->usable_per_page
;
634 p_virt_addr
= p_chain
->p_virt_addr
;
635 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_addr
+ size
);
636 while (p_next
->next_virt
!= p_chain
->p_virt_addr
) {
637 p_virt_addr
= p_next
->next_virt
;
638 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_addr
+
642 case QED_CHAIN_MODE_SINGLE
:
643 p_virt_addr
= p_chain
->p_virt_addr
;
645 case QED_CHAIN_MODE_PBL
:
646 last_page_idx
= p_chain
->page_cnt
- 1;
647 p_virt_addr
= p_chain
->pbl
.pp_virt_addr_tbl
[last_page_idx
];
650 /* p_virt_addr points at this stage to the last page of the chain */
651 size
= p_chain
->elem_size
* (p_chain
->usable_per_page
- 1);
652 p_virt_addr
= (u8
*)p_virt_addr
+ size
;
658 * @brief qed_chain_set_prod - sets the prod to the given value
663 static inline void qed_chain_set_prod(struct qed_chain
*p_chain
,
664 u32 prod_idx
, void *p_prod_elem
)
666 if (is_chain_u16(p_chain
))
667 p_chain
->u
.chain16
.prod_idx
= (u16
) prod_idx
;
669 p_chain
->u
.chain32
.prod_idx
= prod_idx
;
670 p_chain
->p_prod_elem
= p_prod_elem
;
674 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
678 static inline void qed_chain_pbl_zero_mem(struct qed_chain
*p_chain
)
682 if (p_chain
->mode
!= QED_CHAIN_MODE_PBL
)
685 page_cnt
= qed_chain_get_page_cnt(p_chain
);
687 for (i
= 0; i
< page_cnt
; i
++)
688 memset(p_chain
->pbl
.pp_virt_addr_tbl
[i
], 0,
689 QED_CHAIN_PAGE_SIZE
);