/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
	.invalidate = tid_rb_invalidate,
};
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}
		fd->use_mn = true;
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
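	/*
	 * Illustrative example (added, not from the original source):
	 * with expected_count = 2048 and subctxt_cnt = 3, each subctxt
	 * gets 2048 / 3 = 682 entries, and the remainder of 2 gives
	 * subctxts 0 and 1 one extra entry each (683 + 683 + 682 = 2048).
	 */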
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->use_mn) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}
void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	mutex_lock(&uctxt->exp_mutex);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	mutex_unlock(&uctxt->exp_mutex);

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}
/*
 * Release pinned receive buffer pages.
 *
 * @mapped - true if the pages have been DMA mapped. false otherwise.
 * @idx - Index of the first page to unpin.
 * @npages - Number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter),
 * their info will be passed via a struct tid_rb_node. If they haven't been
 * mapped, their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;
	struct mm_struct *mm;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
		mm = mm_from_tid_node(node);
	} else {
		pages = &tidbuf->pages[idx];
		mm = current->mm;
	}
	hfi1_release_user_pages(mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}
/*
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}
/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          the tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
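/*
 * Illustrative example (added, not part of the original comment): with
 * a group size of 8, a request for 21 pagesets programs two complete
 * groups (16 pagesets) under rule 1 and places the remaining 5 pagesets
 * into partially used groups under rule 2.
 */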
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_mutex);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned int tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned int pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    2 and program that size.
			 */
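			/*
			 * Worked example (added, illustrative, assuming
			 * 4 KiB pages): a run of 13 contiguous pages is
			 * split into sets of 8, 4, and 1 pages, each a
			 * valid power-of-two buffer size.
			 */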
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}
/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, pagesets (array of
 *	  struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *	     programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *	     entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

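		/*
		 * Explanatory note (added): pack the entry's offset from
		 * expected_base and its length in pages into the u32 TID
		 * value that is returned to user space via tidlist.
		 */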
		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->fdata = fd;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&node->notifier, current->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)
			goto out_unmap;
		/*
		 * FIXME: This is in the wrong order, the notifier should be
		 * established before the pages are pinned by pin_rcv_pages.
		 */
		mmu_interval_read_begin(&node->notifier);
	}
	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
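	/*
	 * Explanatory note (added): the last argument to hfi1_put_tid()
	 * is a power-of-two buffer-size code, ilog2(npages) + 1; npages
	 * is a power of two here by construction in find_phys_blocks().
	 */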
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->notifier.interval_tree.start, node->phys,
			       phys);
	return 0;

out_unmap:
	hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
		  node->rcventry, node->notifier.interval_tree.start,
		  node->phys, ret);
	pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);
	kfree(node);
	return -EFAULT;
}
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;
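	/*
	 * Explanatory note (added): the IDX field addresses a pair of
	 * RcvArray entries, so tididx is the pair's even entry index.
	 * tidctrl selects within the pair (1 = even entry, 2 = odd
	 * entry; 3, meaning both, was rejected above).
	 */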
	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (fd->use_mn)
		mmu_interval_notifier_remove(&node->notifier);
	cacheless_tid_rb_remove(fd, node);

	return 0;
}
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages,
				 node->notifier.interval_tree.start, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);

	kfree(node);
}
/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
							  uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				if (fd->use_mn)
					mmu_interval_notifier_remove(
						&node->notifier);
				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct tid_rb_node *node =
		container_of(mni, struct tid_rb_node, notifier);
	struct hfi1_filedata *fdata = node->fdata;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;

	if (node->freed)
		return true;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
				 node->notifier.interval_tree.start,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return true;
}
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}
);