/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/page.h>

#include "user_exp_rcv.h"
#include "trace.h"
#include "mmu_rb.h"
struct tid_group {
	struct list_head list;
	u32 base;
	u8 size;
	u8 used;
	u8 map;
};

struct tid_rb_node {
	struct mmu_rb_node mmu;
	unsigned long phys;
	struct tid_group *grp;
	u32 rcventry;
	dma_addr_t dma_addr;
	unsigned npages;
	struct page *pages[0];
};

struct tid_pageset {
	u16 idx;
	u16 count;
};
#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))

#define num_user_pages(vaddr, len)				       \
	(1 + (((((unsigned long)(vaddr) +			       \
		 (unsigned long)(len) - 1) & PAGE_MASK) -	       \
	       ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
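
/*
 * Worked example of num_user_pages() (illustrative only, not from the
 * original source; assumes 4 KiB pages): for vaddr = 0x1010 and
 * len = 0x2000 the last byte lives at 0x300f, so the macro computes
 * 1 + ((0x3000 - 0x1000) >> 12) = 3, i.e. the (unaligned) buffer touches
 * pages 0x1000, 0x2000 and 0x3000 even though it is only two pages of data.
 */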
static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
			    struct hfi1_filedata *);
static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
			      struct tid_group *, struct page **, unsigned);
static int tid_rb_insert(void *, struct mmu_rb_node *);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *, struct mmu_rb_node *);
static int tid_rb_invalidate(void *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
			    struct tid_pageset *, unsigned, u16, struct page **,
			    u32 *, unsigned *, unsigned *);
static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};
static inline u32 rcventry2tidinfo(u32 rcventry)
{
	u32 pair = rcventry & ~0x1;

	return EXP_TID_SET(IDX, pair >> 1) |
		EXP_TID_SET(CTRL, 1 << (rcventry - pair));
}
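
/*
 * Illustrative example (assumed values, not from the original source):
 * for a group-relative rcventry of 11, pair is 10, so the encoding is
 * IDX = 5 and CTRL = 1 << (11 - 10) = 2. unprogram_rcvarray() below
 * reverses this with tididx = IDX << 1 and rcventry = tididx +
 * (tidctrl - 1), recovering 10 + (2 - 1) = 11.
 */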
static inline void exp_tid_group_init(struct exp_tid_set *set)
{
	INIT_LIST_HEAD(&set->list);
	set->count = 0;
}
static inline void tid_group_remove(struct tid_group *grp,
				    struct exp_tid_set *set)
{
	list_del_init(&grp->list);
	set->count--;
}
static inline void tid_group_add_tail(struct tid_group *grp,
				      struct exp_tid_set *set)
{
	list_add_tail(&grp->list, &set->list);
	set->count++;
}
static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
{
	struct tid_group *grp =
		list_first_entry(&set->list, struct tid_group, list);

	list_del_init(&grp->list);
	set->count--;
	return grp;
}
static inline void tid_group_move(struct tid_group *group,
				  struct exp_tid_set *s1,
				  struct exp_tid_set *s2)
{
	tid_group_remove(group, s1);
	tid_group_add_tail(group, s2);
}
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned tidbase;
	int i, ret = 0;

	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);

	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		exp_tid_group_init(&uctxt->tid_group_list);
		exp_tid_group_init(&uctxt->tid_used_list);
		exp_tid_group_init(&uctxt->tid_full_list);

		tidbase = uctxt->expected_base;
		for (i = 0; i < uctxt->expected_count /
			     dd->rcv_entries.group_size; i++) {
			struct tid_group *grp;

			grp = kzalloc(sizeof(*grp), GFP_KERNEL);
			if (!grp) {
				/*
				 * If we fail here, the groups already
				 * allocated will be freed by the close
				 * call.
				 */
				ret = -ENOMEM;
				goto done;
			}
			grp->size = dd->rcv_entries.group_size;
			grp->base = tidbase;
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			tidbase += dd->rcv_entries.group_size;
		}
	}

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kzalloc(uctxt->expected_count *
					   sizeof(u32), GFP_KERNEL);
		if (!fd->invalid_tids) {
			ret = -ENOMEM;
			goto done;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);
done:
	return ret;
}
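
/*
 * Illustrative note on the per-process limit computed above (example
 * values, not from the original source): with expected_count = 2048 and
 * subctxt_cnt = 3, each process gets 2048 / 3 = 682 entries with a
 * remainder of 2, so subctxts 0 and 1 are bumped to 683 while subctxt 2
 * keeps 682 (683 + 683 + 682 = 2048).
 */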
int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_group *grp, *gptr;

	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return 0;

	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler)
		hfi1_mmu_rb_unregister(fd->handler);

	kfree(fd->invalid_tids);

	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
				 list) {
		list_del_init(&grp->list);
		kfree(grp);
	}
	hfi1_clear_tids(uctxt);

	kfree(fd->entry_to_rb);
	return 0;
}
/*
 * Write an "empty" RcvArray entry.
 * This function exists so the TID registration code can use it
 * to write to unused/unneeded entries and still take advantage
 * of the WC performance improvements. The HFI will ignore this
 * write to the RcvArray entry.
 */
static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
{
	/*
	 * Doing the WC fill writes only makes sense if the device is
	 * present and the RcvArray has been mapped as WC memory.
	 */
	if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
		writeq(0, dd->rcvarray_wc + (index * 8));
}
/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          the tid_used_list.
 *      2.3 For each group in the tid_used_list, program as many
 *          pagesets as can fit into the group. If the group becomes
 *          fully used, move it to tid_full_list.
 */
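
/*
 * Illustrative walk-through of the rules above (example values, not from
 * the original source), assuming a group size of 8: a request that yields
 * 21 pagesets programs 2 complete groups (16 pagesets) popped from
 * tid_group_list and moves them to tid_full_list; the remaining 5 pagesets
 * go into a partially used group on tid_used_list, taking a fresh group
 * from tid_group_list if no partially used group has room.
 */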
int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
		tididx = 0, mapped, mapped_pages = 0;
	unsigned long vaddr = tinfo->vaddr;
	struct page **pages = NULL;
	u32 *tidlist = NULL;
	struct tid_pageset *pagesets = NULL;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tinfo->length);
	if (!npages)
		return -EINVAL;

	if (npages > uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}

	pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
			   GFP_KERNEL);
	if (!pagesets)
		return -ENOMEM;

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto bail;
	}

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		ret = -ENOMEM;
		goto bail;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		ret = pinned;
		goto bail;
	}
	fd->tid_n_pinned += npages;

	/* Find sets of physically contiguous pages */
	npagesets = find_phys_blocks(pages, pinned, pagesets);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + npagesets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = npagesets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_lock);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fp, vaddr, grp, pagesets,
				       pageidx, dd->rcv_entries.group_size,
				       pages, tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fp, vaddr, grp, pagesets,
					       pageidx, use, pages, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				ret = -EFAULT;
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_lock);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fp, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned) {
		hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
					pinned - mapped_pages,
					false);
		fd->tid_n_pinned -= pinned - mapped_pages;
	}
bail:
	kfree(pagesets);
	kfree(pages);
	kfree(tidlist);
	return ret > 0 ? 0 : ret;
}
int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL);
	if (!tidinfo)
		return -ENOMEM;

	if (copy_from_user(tidinfo, (void __user *)(unsigned long)
			   tinfo->tidlist, sizeof(tidinfo[0]) *
			   tinfo->tidcnt)) {
		ret = -EFAULT;
		goto done;
	}

	mutex_lock(&uctxt->exp_lock);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_lock);
done:
	kfree(tidinfo);
	return ret;
}
int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
	u32 *array;
	int ret = 0;

	if (!fd->invalid_tids)
		return -EINVAL;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for an extended amount of time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}
static u32 find_phys_blocks(struct page **pages, unsigned npages,
			    struct tid_pageset *list)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down into
			 * sizes supported by the HW.
			 * There are two main constraints:
			 *   1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *      If the total set size is bigger than that
			 *      program only a MAX_EXPECTED_BUFFER chunk.
			 *   2. The buffer size has to be a power of two. If
			 *      it is not, round down to the closest power of
			 *      2 and program that size.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}
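
/*
 * Illustrative example (assumed values, not from the original source),
 * with 4 KiB pages and a MAX_EXPECTED_BUFFER of 2 MiB: a physically
 * contiguous run of 13 pages (52 KiB) is not a power of two, so the loop
 * above emits pagesets of 8 pages (32 KiB), 4 pages (16 KiB) and 1 page.
 */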
/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fp: pointer to the open file
 * @vaddr: starting user virtual address
 * @grp: RcvArray group
 * @sets: array of struct tid_pageset holding information on physically
 *        contiguous chunks from the user buffer
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @pages: an array of struct page * for the user buffer
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * the number of RcvArray entries programmed.
 */
static int program_rcvarray(struct file *fp, unsigned long vaddr,
			    struct tid_group *grp,
			    struct tid_pageset *sets,
			    unsigned start, u16 count, struct page **pages,
			    u32 *tidlist, unsigned *tididx, unsigned *pmapped)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = sets[setidx].count;
		pageidx = sets[setidx].idx;

		ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
					 rcventry, grp, pages + pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}
static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
			      u32 rcventry, struct tid_group *grp,
			      struct page **pages, unsigned npages)
{
	int ret;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = vaddr;
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}
static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	flush_wc();

	pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
			 PCI_DMA_FROMDEVICE);
	hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
	fd->tid_n_pinned -= node->npages;

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);

	kfree(node);
}
/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
							  uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}
/*
 * Always return 0 from this function. A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM. Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away. PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}
static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}