/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

static unsigned initial_pkt_count = 8;

static int user_sdma_send_pkts(struct user_sdma_request *req,
			       unsigned maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec);
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}
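
/*
 * defer_packet_queue() parks the packet queue on the engine's dmawait list
 * and flips pq->state to SDMA_PKT_Q_DEFERRED.  When descriptors free up,
 * the iowait wakeup path calls activate_packet_queue() below, which moves
 * the state back to SDMA_PKT_Q_ACTIVE and wakes any sender blocked in
 * hfi1_user_sdma_process_request().
 */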

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{
	int ret = -ENOMEM;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;

	if (!uctxt || !fd)
		return -EBADF;

	if (!hfi1_sdma_comp_ring_size)
		return -EINVAL;

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;

	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);
	pq->mm = fd->mm;

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
			   sizeof(*pq->reqs),
			   GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
				 sizeof(*pq->req_in_use),
				 GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    NULL);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
				 * hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto pq_mmu_fail;
	}

	fd->pq = pq;
	fd->cq = cq;

	return 0;

pq_mmu_fail:
	vfree(cq->comps);
cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);

	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
			       struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_user_sdma_pkt_q *pq;

	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);

	pq = fd->pq;
	if (pq) {
		if (pq->handler)
			hfi1_mmu_rb_unregister(pq->handler);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			(READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}
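
/*
 * Example: for dlid 0x1234 the hash is (0x12 ^ 0x34) & 0xFF = 0x26.  The
 * first request to hit that bucket is assigned the current value of 'next';
 * every later request whose dlid hashes to 0x26 reuses the same selector,
 * so traffic to the same destination tends to stay on the same SDMA engine.
 */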

int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	u16 pkey;
	u32 slid;
	int req_queued = 0;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count.  Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
					     info.comp_idx);
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->data_len = 0;
	req->pq = pq;
	req->cq = cq;
	req->status = -1;
	req->ahg_idx = -1;
	req->iov_idx = 0;
	req->sent = 0;
	req->seqnum = 0;
	req->seqcomp = 0;
	req->seqsubmitted = 0;
	req->tids = NULL;
	req->done = 0;
	req->has_error = 0;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
	slid = be16_to_cpu(req->hdr.lrh[3]);
	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
					       info.comp_idx, req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		req->iovs[i].offset = 0;
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov,
		       iovec + idx++,
		       sizeof(req->iovs[i].iov));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->data_iovs = i;
			req->status = ret;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
					 info.comp_idx, req->data_len);
	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * setup. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
		u32 *tmp;

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		tmp = memdup_user(iovec[idx].iov_base,
				  ntids * sizeof(*req->tids));
		if (IS_ERR(tmp)) {
			ret = PTR_ERR(tmp);
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			goto free_req;
		}
		req->tids = tmp;
		req->n_tids = ntids;
		req->tididx = 0;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
		req->ahg_idx = sdma_ahg_alloc(req->sde);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	atomic_inc(&pq->n_reqs);
	req_queued = 1;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY)) {
		req->status = ret;
		goto free_req;
	}

	/*
	 * It is possible that the SDMA engine would have processed all the
	 * submitted packets by the time we get here. Therefore, only set
	 * packet queue state to ACTIVE if there are still uncompleted
	 * requests.
	 */
	if (atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY) {
				req->status = ret;
				WRITE_ONCE(req->has_error, 1);
				if (READ_ONCE(req->seqcomp) ==
				    req->seqsubmitted - 1)
					goto free_req;
				return ret;
			}
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	user_sdma_free_request(req, true);
	if (req_queued)
		pq_update(pq);
	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes,
	 * therefore, when the data length request is less than 4 bytes, there's
	 * only one packet, and the packet data length is equal to that of the
	 * request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data that we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	trace_hfi1_sdma_user_compute_length(req->pq->dd,
					    req->pq->ctxt,
					    req->pq->subctxt,
					    req->info.comp_idx,
					    len);
	return len;
}

static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}
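
/*
 * Example: a 9-byte payload is padded by pad_len() to 12 bytes (the next
 * multiple of 4) before the LRH length is computed.  Working the formula
 * backwards is how compute_data_length() recovers the first packet's
 * payload: with the 8-byte PBC and the 4-byte ICRC,
 *   data = lrhlen - (sizeof(hdr) - sizeof(hdr.pbc)) - 4
 *        = lrhlen - (sizeof(hdr) - 4).
 */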

static int user_sdma_txadd_ahg(struct user_sdma_request *req,
			       struct user_sdma_txreq *tx,
			       u32 datalen)
{
	int ret;
	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	/*
	 * Copy the request header into the tx header
	 * because the HW needs a cacheline-aligned
	 * address.
	 * This copy can be optimized out if the hdr
	 * member of user_sdma_request were also
	 * cacheline aligned.
	 */
	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		tx->hdr.pbc[0] = cpu_to_le16(pbclen);
	}
	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
	if (ret)
		return ret;
	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
			      sizeof(tx->hdr) + datalen, req->ahg_idx,
			      0, NULL, 0, user_sdma_txreq_cb);
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
	if (ret)
		sdma_txclean(pq->dd, &tx->txreq);
	return ret;
}

static int user_sdma_txadd(struct user_sdma_request *req,
			   struct user_sdma_txreq *tx,
			   struct user_sdma_iovec *iovec, u32 datalen,
			   u32 *queued_ptr, u32 *data_sent_ptr,
			   u64 *iov_offset_ptr)
{
	int ret;
	unsigned int pageidx, len;
	unsigned long base, offset;
	u64 iov_offset = *iov_offset_ptr;
	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	base = (unsigned long)iovec->iov.iov_base;
	offset = offset_in_page(base + iovec->offset + iov_offset);
	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
		   PAGE_SHIFT);
	len = offset + req->info.fragsize > PAGE_SIZE ?
		PAGE_SIZE - offset : req->info.fragsize;
	len = min((datalen - queued), len);
	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
			      offset, len);
	if (ret) {
		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
		return ret;
	}
	iov_offset += len;
	queued += len;
	data_sent += len;
	if (unlikely(queued < datalen && pageidx == iovec->npages &&
		     req->iov_idx < req->data_iovs - 1)) {
		iovec->offset += iov_offset;
		iovec = &req->iovs[++req->iov_idx];
		iov_offset = 0;
	}

	*queued_ptr = queued;
	*data_sent_ptr = data_sent;
	*iov_offset_ptr = iov_offset;
	return ret;
}

static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
	int ret = 0;
	u16 count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (READ_ONCE(req->has_error))
		return -EFAULT;

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (READ_ONCE(req->has_error))
			return -EFAULT;

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		tx->busycount = 0;
		INIT_LIST_HEAD(&tx->list);

		/*
		 * For the last packet set the ACK request
		 * and disable header suppression.
		 */
		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
				      TXREQ_FLAGS_REQ_DISABLE_SH);

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);

			/*
			 * Disable header suppression for the payload <= 8DWS.
			 * If there is an uncorrectable error in the receive
			 * data FIFO when the received payload size is less than
			 * or equal to 8DWS then the RxDmaDataFifoRdUncErr is
			 * not reported.There is set RHF.EccErr if the header
			 * is not suppressed.
			 */
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			} else if (datalen <= 32) {
				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
			}
		}

		if (req->ahg_idx >= 0) {
			if (!req->seqnum) {
				ret = user_sdma_txadd_ahg(req, tx, datalen);
				if (ret)
					goto free_tx;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0)
					goto free_tx;
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			ret = user_sdma_txadd(req, tx, iovec, datalen,
					      &queued, &data_sent, &iov_offset);
			if (ret)
				goto free_txreq;
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		WRITE_ONCE(req->done, 1);
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (req->ahg_idx >= 0)
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}

static int pin_sdma_pages(struct user_sdma_request *req,
			  struct user_sdma_iovec *iovec,
			  struct sdma_mmu_node *node,
			  int npages)
{
	int pinned, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	memcpy(pages, node->pages, node->npages * sizeof(*pages));

	npages -= node->npages;
retry:
	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
				atomic_read(&pq->n_locked), npages)) {
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;
	}
	pinned = hfi1_acquire_user_pages(pq->mm,
					 ((unsigned long)iovec->iov.iov_base +
					 (node->npages * PAGE_SIZE)), npages, 0,
					 pages + node->npages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned != npages) {
		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
		return -EFAULT;
	}
	kfree(node->pages);
	node->rb.len = iovec->iov.iov_len;
	node->pages = pages;
	atomic_add(pinned, &pq->n_locked);
	return pinned;
}

static void unpin_sdma_pages(struct sdma_mmu_node *node)
{
	if (node->npages) {
		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
		atomic_sub(node->npages, &node->pq->n_locked);
	}
}

static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;
	struct iovec *iov;
	bool extracted;

	extracted =
		hfi1_mmu_rb_remove_unless_exact(pq->handler,
						(unsigned long)
						iovec->iov.iov_base,
						iovec->iov.iov_len, &rb_node);
	if (rb_node) {
		node = container_of(rb_node, struct sdma_mmu_node, rb);
		if (!extracted) {
			atomic_inc(&node->refcount);
			iovec->pages = node->pages;
			iovec->npages = node->npages;
			iovec->node = node;
			return 0;
		}
	}

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
	}

	iov = &iovec->iov;
	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
	if (node->npages < npages) {
		pinned = pin_sdma_pages(req, iovec, node, npages);
		if (pinned < 0) {
			ret = pinned;
			goto bail;
		}
		node->npages += pinned;
		npages = node->npages;
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
	if (ret) {
		iovec->node = NULL;
		goto bail;
	}
	return 0;
bail:
	unpin_sdma_pages(node);
	kfree(node);
	return ret;
}
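
/*
 * The pinned pages for each user iovec are cached in the MMU rb tree keyed
 * by the iovec's starting virtual address (node->rb.addr), so repeated
 * requests that reuse the same buffer can skip the pin step and simply take
 * a reference on the existing sdma_mmu_node.
 */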

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 *    - transfer size is multiple of 64bytes
	 *    - packet length is multiple of 4 bytes
	 *    - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			   KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 *     - offset is not larger than the TID size
		 *     - TIDCtrl values match between header and TID array
		 *     - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}

/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}

static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u8 omfactor; /* KDETH.OM */
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * Third packet
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
			KDETH_OM_SMALL_SHIFT;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH when DISABLE_SH flag is set */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		trace_hfi1_sdma_user_tid_info(
			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
			req->tidoffset, req->tidoffset >> omfactor,
			omfactor != KDETH_OM_SMALL_SHIFT);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset >> omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  omfactor != KDETH_OM_SMALL_SHIFT);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}
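
/*
 * The AHG path below avoids rewriting the whole header for every packet:
 * only the fields that change from packet to packet (PBC/LRH lengths,
 * BTH.PSN and the ACK bit, the KDETH offset and TID fields) are described
 * to the hardware as (header dword index, bit offset, width, value) tuples
 * built with ahg_header_set().
 */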

static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
{
	u32 ahg[AHG_KDETH_ARRAY_SIZE];
	int idx = 0;
	u8 omfactor; /* KDETH.OM */
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
	size_t array_size = ARRAY_SIZE(ahg);

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
		if (idx < 0)
			return idx;
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
				     (__force u16)cpu_to_be16(lrhlen >> 2));
		if (idx < 0)
			return idx;
	}

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		val32 |= 1UL << 31;
	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
			     (__force u16)cpu_to_be16(val32 >> 16));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
			     (__force u16)cpu_to_be16(val32 & 0xffff));
	if (idx < 0)
		return idx;
	/* KDETH.Offset */
	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
	if (idx < 0)
		return idx;
	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
			     (__force u16)cpu_to_le16(req->koffset >> 16));
	if (idx < 0)
		return idx;
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx])
				return -EINVAL;
			tidval = req->tids[req->tididx];
		}
		omfactor = ((EXP_TID_GET(tidval, LEN) *
			     PAGE_SIZE) >=
			    KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
			    KDETH_OM_SMALL_SHIFT;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		idx = ahg_header_set(
				ahg, idx, array_size, 7, 0, 16,
				((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
				((req->tidoffset >> omfactor)
				 & 0x7fff)));
		if (idx < 0)
			return idx;
		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));

		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		} else {
			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		}

		idx = ahg_header_set(ahg, idx, array_size,
				     7, 16, 14, (__force u16)val);
		if (idx < 0)
			return idx;
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, ahg, idx, tidval);
	sdma_txinit_ahg(&tx->txreq,
			SDMA_TXREQ_F_USE_AHG,
			datalen, req->ahg_idx, idx,
			ahg, sizeof(req->hdr),
			user_sdma_txreq_cb);

	return idx;
}

/*
 * SDMA tx request completion callback. Called when the SDMA progress
 * state machine gets notification that the SDMA descriptors for this
 * tx request have been processed by the DMA engine. Called in
 * interrupt context.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	u16 idx;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		WRITE_ONCE(req->has_error, 1);
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	/* If all the descriptors are done, complete the request */
	idx = req->info.comp_idx;
	if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
		if (req->seqcomp == req->info.npkts - 1) {
			req->status = 0;
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, COMPLETE, 0);
		}
	} else {
		if (status != SDMA_TXREQ_S_OK)
			req->status = status;
		if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
		    (READ_ONCE(req->done) ||
		     READ_ONCE(req->has_error))) {
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, ERROR, req->status);
		}
	}
}

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs)) {
		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
		wake_up(&pq->wait);
	}
}

static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
	int i;

	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}

	for (i = 0; i < req->data_iovs; i++) {
		struct sdma_mmu_node *node = req->iovs[i].node;

		if (!node)
			continue;

		if (unpin)
			hfi1_mmu_rb_remove(req->pq->handler,
					   &node->rb);
		else
			atomic_dec(&node->refcount);
	}

	kfree(req->tids);
	clear_bit(req->info.comp_idx, req->pq->req_in_use);
}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	smp_wmb(); /* make sure errcode is visible first */
	cq->comps[idx].status = state;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}
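
/*
 * The smp_wmb() above orders the errcode store before the status store, so
 * user space, which polls cq->comps[idx].status through the mmap'ed
 * completion ring, never observes a completed or error status without the
 * matching errcode already being visible.
 */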

static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	unpin_sdma_pages(node);
	kfree(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}