/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 secs (500 passes with a 10 ms sleep each) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
struct qib_user_sdma_pkt {
        u8 naddr;               /* dimension of addr (1..3) ... */
        u32 counter;            /* sdma pkts queued counter for this entry */
        u64 added;              /* global descq number of entries */

        struct {
                u32 offset;                     /* offset for kvaddr, addr */
                u32 length;                     /* length in page */
                u8  put_page;                   /* should we put_page? */
                u8  dma_mapped;                 /* is page dma_mapped? */
                struct page *page;              /* may be NULL (coherent mem) */
                void *kvaddr;                   /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];              /* max pages, any more and we coalesce */

        struct list_head list;  /* list element */
};
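/*
 * How the addr[] slots are used: addr[0] holds the header/PBC fragment set
 * up by qib_user_sdma_init_header(); payload fragments are appended after
 * it by qib_user_sdma_pin_pages().  If a payload would need more page
 * fragments than addr[] can hold, qib_user_sdma_coalesce() copies it into a
 * single freshly allocated page instead, so naddr never exceeds
 * ARRAY_SIZE(addr).
 */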
struct qib_user_sdma_queue {
        /*
         * pkts sent to dma engine are queued on this
         * list head.  the type of the elements of this
         * list are struct qib_user_sdma_pkt...
         */
        struct list_head sent;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* as packets go on the queued queue, they are counted... */
        u32 counter;
        u32 sent_counter;

        /* dma page table */
        struct rb_root dma_pages_root;

        /* protect everything above... */
        struct mutex lock;
};
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
        struct qib_user_sdma_queue *pq =
                kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        INIT_LIST_HEAD(&pq->sent);

        mutex_init(&pq->lock);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct qib_user_sdma_pkt),
                                         0, 0, NULL);
        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           QIB_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        goto done;

err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;

done:
        return pq;
}
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, size_t offset, size_t len,
                                    int put_page, int dma_mapped,
                                    struct page *page,
                                    void *kvaddr, dma_addr_t dma_addr)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
}
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
                                      u32 counter, size_t offset,
                                      size_t len, int dma_mapped,
                                      struct page *page,
                                      void *kvaddr, dma_addr_t dma_addr)
{
        pkt->naddr = 1;
        pkt->counter = counter;
        qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
                                kvaddr, dma_addr);
}
/* we have too many pages in the iovec, coalesce them into a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;
        dma_addr_t dma_addr;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }

        qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
                                dma_addr);
        pkt->naddr = 2;

        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}
/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr  = (unsigned long) iov->iov_base;
        const unsigned long  len  = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}
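/*
 * Example (assuming 4K pages): iov_base = 0x10ffc and iov_len = 8 give
 * spage = 0x10000 and epage = 0x11000, so the element spans 1 + 1 = 2 pages.
 */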
/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
        const unsigned long offset = addr & ~PAGE_MASK;

        return ((offset + len) > PAGE_SIZE) ?
                (PAGE_SIZE - offset) : len;
}
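/*
 * Example (assuming 4K pages): addr = 0x10ffc and len = 8 give
 * offset = 0xffc, so the first fragment is limited to
 * PAGE_SIZE - offset = 4 bytes; the caller maps the remaining 4 bytes
 * from the following page on its next pass.
 */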
static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr)
                /* free coherent mem from cache... */
                dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
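/*
 * Note on the pages[2] array below: qib_user_sdma_queue_pkts() rejects any
 * payload iovec element longer than PAGE_SIZE, so a single element can
 * straddle at most two pages and two page pointers are sufficient.
 */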
/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
{
        struct page *pages[2];
        int j;
        int ret;

        ret = get_user_pages(current, current->mm, addr,
                             npages, 0, 1, pages, NULL);

        if (ret != npages) {
                int i;

                for (i = 0; i < ret; i++)
                        put_page(pages[i]);

                ret = -ENOMEM;
                goto done;
        }

        for (j = 0; j < npages; j++) {
                /* map the pages... */
                const int flen = qib_user_sdma_page_length(addr, tlen);
                dma_addr_t dma_addr =
                        dma_map_page(&dd->pcidev->dev,
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;

                if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }

                qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
                                        pages[j], kmap(pages[j]), dma_addr);

                pkt->naddr++;
                addr += flen;
                tlen -= flen;
        }

done:
        return ret;
}
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        for (idx = 0; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
        return ret;
}
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        int ret = 0;

        if (npages >= ARRAY_SIZE(pkt->addr))
                ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}
/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                int i;

                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

                kmem_cache_free(pq->pkt_slab, pkt);
        }
        INIT_LIST_HEAD(list);
}
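/*
 * Worked example of the PBC length bookkeeping done below: a 64-byte
 * header iovec gives nw = 16 and pktnwc = 15, since the first word of the
 * PBC is not counted.  If the PBC length field reads 31, the following
 * payload iovec elements must add up to exactly 16 more dwords (64 bytes),
 * otherwise the request is rejected.
 */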
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the number of iovec
 * entries consumed.  list must be empty initially, as, if there is
 * an error we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_user_sdma_queue *pq,
                                    struct list_head *list,
                                    const struct iovec *iov,
                                    unsigned long niov,
                                    int maxpkts)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        struct page *page = NULL;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        int dma_mapped = 0;

        while (idx < niov && npkts < maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
                int cfur;

                dma_mapped = 0;
                len = iov[idx].iov_len;
                nw = len >> 2;
                page = NULL;

                pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                if (!pkt) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_pkt;
                }

                if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
                        pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                             &dma_addr);
                else
                        pbc = NULL;

                if (!pbc) {
                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                ret = -ENOMEM;
                                goto free_pkt;
                        }
                        pbc = kmap(page);
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * This assignment is a bit strange.  it's because the
                 * pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * goes to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
                if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen ||
                            slen > PAGE_SIZE) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages++;
                        if ((faddr & PAGE_MASK) !=
                            ((faddr + slen - 1) & PAGE_MASK))
                                npages++;

                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }

                        dma_mapped = 1;
                }

                qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
                                          page, pbc, dma_addr);

                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                         iov + idx_save + 1,
                                                         nfrags, npages);
                        if (ret < 0)
                                goto free_pbc_dma;
                }

                counter++;
                npkts++;

                list_add_tail(&pkt->list, list);
        }

        ret = idx;
        goto done;

free_pbc_dma:
        if (dma_mapped)
                dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
        if (page) {
                kunmap(page);
                __free_page(page);
        } else
                dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
        kmem_cache_free(pq->pkt_slab, pkt);
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
                                               u32 c)
{
        pq->sent_counter = c;
}
/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
        int ret = 0;

        INIT_LIST_HEAD(&free_list);

        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
        }

        if (!list_empty(&free_list)) {
                u32 counter;

                pkt = list_entry(free_list.prev,
                                 struct qib_user_sdma_pkt, list);
                counter = pkt->counter;

                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
        if (!pq)
                return;

        kmem_cache_destroy(pq->pkt_slab);
        dma_pool_destroy(pq->header_cache);
        kfree(pq);
}
/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}
/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        int i;

        if (!pq)
                return;

        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
                if (list_empty(&pq->sent)) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
                msleep(10);
        }

        if (!list_empty(&pq->sent)) {
                struct list_head free_list;

                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                mutex_lock(&pq->lock);
                list_splice_init(&pq->sent, &free_list);
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}
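/*
 * Each hardware descriptor is two little-endian 64-bit words, built by the
 * helpers below: desc0 packs the low 32 bits of the DMA address into bits
 * 63:32, the 2-bit generation into bits 31:30, the dword count into bits
 * 26:16 and the dword buffer offset into bits 10:0; desc1 carries the upper
 * address bits (SDmaPhyAddr[47:32]).  Bits 12, 11/13 and 14 flag "first",
 * "last"/"dma head" and (in qib_user_sdma_push_pkts()) "large buffer"
 * respectively.
 */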
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        u8 tmpgen;

        tmpgen = ppd->sdma_generation;

        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((tmpgen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
                                              /* last */ /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
        if (idx == 0)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (idx == pkt->naddr - 1)
                descq0 = qib_sdma_make_last_desc0(descq0);

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}
/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist)
{
        struct qib_devdata *dd = ppd->dd;
        int ret = 0;
        unsigned long flags;
        u16 tail;
        u8 generation;
        u64 descq_added;

        if (list_empty(pktlist))
                return 0;

        if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
                return -ECOMM;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /* keep a copy for restoring purposes in case of problems */
        generation = ppd->sdma_generation;
        descq_added = ppd->sdma_descq_added;

        if (unlikely(!__qib_sdma_running(ppd))) {
                ret = -ECOMM;
                goto unlock;
        }

        tail = ppd->sdma_descq_tail;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                int i;
                unsigned ofs = 0;
                u16 dtail = tail;

                if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
                        goto unlock_check_tail;

                for (i = 0; i < pkt->naddr; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
                        ofs += pkt->addr[i].length >> 2;

                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
                                ++ppd->sdma_generation;
                        }
                }

                if ((ofs << 2) > ppd->ibmaxlen) {
                        ret = -EMSGSIZE;
                        goto unlock;
                }

                /*
                 * If the packet is >= 2KB mtu equivalent, we have to use
                 * the large buffers, and have to mark each descriptor as
                 * part of a large buffer packet.
                 */
                if (ofs > dd->piosize2kmax_dwords) {
                        for (i = 0; i < pkt->naddr; i++) {
                                ppd->sdma_descq[dtail].qw[0] |=
                                        cpu_to_le64(1ULL << 14);
                                if (++dtail == ppd->sdma_descq_cnt)
                                        dtail = 0;
                        }
                }

                ppd->sdma_descq_added += pkt->naddr;
                pkt->added = ppd->sdma_descq_added;
                list_move_tail(&pkt->list, &pq->sent);
                ret++;
        }

unlock_check_tail:
        /* advance the tail on the chip if necessary */
        if (ppd->sdma_descq_tail != tail)
                dd->f_sdma_update_tail(ppd, tail);

unlock:
        if (unlikely(ret < 0)) {
                ppd->sdma_generation = generation;
                ppd->sdma_descq_added = descq_added;
        }
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
        if (!qib_sdma_running(ppd))
                goto done_unlock;

        if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
        }

        while (dim) {
                const int mxp = 8;

                down_write(&current->mm->mmap_sem);
                ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
                up_write(&current->mm->mmap_sem);

                if (ret <= 0)
                        goto done_unlock;
                else {
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * Lazily clean hw queue.  the 4 is a guess of about
                         * how many sdma descriptors a packet will take (it
                         * doesn't have to be perfect).
                         */
                        if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
                                qib_user_sdma_hwqueue_clean(ppd);
                                qib_user_sdma_queue_clean(ppd, pq);
                        }

                        ret = qib_user_sdma_push_pkts(ppd, pq, &list);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += ret;
                                pq->counter += ret;

                                if (!list_empty(&list))
                                        goto done_unlock;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq)
{
        int ret = 0;

        mutex_lock(&pq->lock);
        qib_user_sdma_hwqueue_clean(ppd);
        ret = qib_user_sdma_queue_clean(ppd, pq);
        mutex_unlock(&pq->lock);

        return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
        return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
        return pq ? pq->counter : 0;
}