// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 */
#include <linux/intel-iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include "scif_main.h"

/* Used to skip ulimit checks for registrations with SCIF_MAP_KERNEL flag */
#define SCIF_MAP_ULIMIT 0x40

bool scif_ulimit_check = 1;
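/*
 * Note on SCIF_MAP_ULIMIT: this internal flag is ORed into map_flags for
 * user-space registrations (see scif_register() below) so that the pinned
 * pages are charged against the task's RLIMIT_MEMLOCK via mm->pinned_vm.
 * Kernel registrations (SCIF_MAP_KERNEL) skip that accounting.
 */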
/**
 * scif_rma_ep_init:
 * @ep: end point
 *
 * Initialize RMA per EP data structures.
 */
void scif_rma_ep_init(struct scif_endpt *ep)
{
	struct scif_endpt_rma_info *rma = &ep->rma_info;

	mutex_init(&rma->rma_lock);
	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
	spin_lock_init(&rma->tc_lock);
	mutex_init(&rma->mmn_lock);
	INIT_LIST_HEAD(&rma->reg_list);
	INIT_LIST_HEAD(&rma->remote_reg_list);
	atomic_set(&rma->tw_refcount, 0);
	atomic_set(&rma->tcw_refcount, 0);
	atomic_set(&rma->tcw_total_pages, 0);
	atomic_set(&rma->fence_refcount, 0);

	rma->async_list_del = 0;
	INIT_LIST_HEAD(&rma->mmn_list);
	INIT_LIST_HEAD(&rma->vma_list);
	init_waitqueue_head(&rma->markwq);
}
/**
 * scif_rma_ep_can_uninit:
 * @ep: end point
 *
 * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
 */
int scif_rma_ep_can_uninit(struct scif_endpt *ep)
{
	int ret = 0;

	mutex_lock(&ep->rma_info.rma_lock);
	/* Destroy RMA Info only if both lists are empty */
	if (list_empty(&ep->rma_info.reg_list) &&
	    list_empty(&ep->rma_info.remote_reg_list) &&
	    list_empty(&ep->rma_info.mmn_list) &&
	    !atomic_read(&ep->rma_info.tw_refcount) &&
	    !atomic_read(&ep->rma_info.tcw_refcount) &&
	    !atomic_read(&ep->rma_info.fence_refcount))
		ret = 1;
	mutex_unlock(&ep->rma_info.rma_lock);
	return ret;
}
/**
 * scif_create_pinned_pages:
 * @nr_pages: number of pages in window
 * @prot: read/write protection
 *
 * Allocate and prepare a set of pinned pages.
 */
static struct scif_pinned_pages *
scif_create_pinned_pages(int nr_pages, int prot)
{
	struct scif_pinned_pages *pin;

	pin = scif_zalloc(sizeof(*pin));
	if (!pin)
		return NULL;

	pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
	if (!pin->pages)
		goto error_free_pinned_pages;

	pin->prot = prot;
	pin->magic = SCIFEP_MAGIC;
	return pin;

error_free_pinned_pages:
	scif_free(pin, sizeof(*pin));
	return NULL;
}
/**
 * scif_destroy_pinned_pages:
 * @pin: A set of pinned pages.
 *
 * Deallocate resources for pinned pages.
 */
static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
{
	int j;
	int writeable = pin->prot & SCIF_PROT_WRITE;
	int kernel = SCIF_MAP_KERNEL & pin->map_flags;

	for (j = 0; j < pin->nr_pages; j++) {
		if (pin->pages[j] && !kernel) {
			if (writeable)
				SetPageDirty(pin->pages[j]);
			put_page(pin->pages[j]);
		}
	}

	scif_free(pin->pages,
		  pin->nr_pages * sizeof(*pin->pages));
	scif_free(pin, sizeof(*pin));
	return 0;
}
/**
 * scif_create_window:
 * @ep: end point
 * @nr_pages: number of pages
 * @offset: registration offset
 * @temp: true if a temporary window is being created
 *
 * Allocate and prepare a self registration window.
 */
struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
				       s64 offset, bool temp)
{
	struct scif_window *window;

	window = scif_zalloc(sizeof(*window));
	if (!window)
		return NULL;

	window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
	if (!window->dma_addr)
		goto error_free_window;

	window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
	if (!window->num_pages)
		goto error_free_window;

	window->offset = offset;
	window->ep = (u64)ep;
	window->magic = SCIFEP_MAGIC;
	window->reg_state = OP_IDLE;
	init_waitqueue_head(&window->regwq);
	window->unreg_state = OP_IDLE;
	init_waitqueue_head(&window->unregwq);
	INIT_LIST_HEAD(&window->list);
	window->type = SCIF_WINDOW_SELF;
	window->temp = temp;
	return window;

error_free_window:
	scif_free(window->dma_addr,
		  nr_pages * sizeof(*window->dma_addr));
	scif_free(window, sizeof(*window));
	return NULL;
}
/**
 * scif_destroy_incomplete_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
static void scif_destroy_incomplete_window(struct scif_endpt *ep,
					   struct scif_window *window)
{
	int err;
	int nr_pages = window->nr_pages;
	struct scif_allocmsg *alloc = &window->alloc_handle;
	struct scifmsg msg;

retry:
	/* Wait for a SCIF_ALLOC_GNT/REJ message */
	err = wait_event_timeout(alloc->allocwq,
				 alloc->state != OP_IN_PROGRESS,
				 SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;

	mutex_lock(&ep->rma_info.rma_lock);
	if (alloc->state == OP_COMPLETED) {
		msg.uop = SCIF_FREE_VIRT;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = window->alloc_handle.vaddr;
		msg.payload[2] = (u64)window;
		msg.payload[3] = SCIF_REGISTER;
		_scif_nodeqp_send(ep->remote_dev, &msg);
	}
	mutex_unlock(&ep->rma_info.rma_lock);

	scif_free_window_offset(ep, window, window->offset);
	scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
}
/**
 * scif_unmap_window:
 * @remote_dev: SCIF remote device
 * @window: registration window
 *
 * Delete any DMA mappings created for a registered self window
 */
void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
{
	int j;

	if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
		if (window->st) {
			dma_unmap_sg(&remote_dev->sdev->dev,
				     window->st->sgl, window->st->nents,
				     DMA_BIDIRECTIONAL);
			sg_free_table(window->st);
			kfree(window->st);
			window->st = NULL;
		}
	} else {
		for (j = 0; j < window->nr_contig_chunks; j++) {
			if (window->dma_addr[j]) {
				scif_unmap_single(window->dma_addr[j],
						  remote_dev,
						  window->num_pages[j] <<
						  PAGE_SHIFT);
				window->dma_addr[j] = 0x0;
			}
		}
	}
}
static inline struct mm_struct *__scif_acquire_mm(void)
{
	if (scif_ulimit_check)
		return get_task_mm(current);
	return NULL;
}

static inline void __scif_release_mm(struct mm_struct *mm)
{
	if (mm)
		mmput(mm);
}

static inline int
__scif_dec_pinned_vm_lock(struct mm_struct *mm,
			  int nr_pages)
{
	if (!mm || !nr_pages || !scif_ulimit_check)
		return 0;

	atomic64_sub(nr_pages, &mm->pinned_vm);
	return 0;
}

static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
					     int nr_pages)
{
	unsigned long locked, lock_limit;

	if (!mm || !nr_pages || !scif_ulimit_check)
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = atomic64_add_return(nr_pages, &mm->pinned_vm);

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(nr_pages, &mm->pinned_vm);
		dev_err(scif_info.mdev.this_device,
			"locked(%lu) > lock_limit(%lu)\n",
			locked, lock_limit);
		return -ENOMEM;
	}
	return 0;
}
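/*
 * The helpers above implement the ulimit accounting: pinned pages are added
 * to mm->pinned_vm and compared against RLIMIT_MEMLOCK, with CAP_IPC_LOCK
 * allowed to exceed the limit, and __scif_dec_pinned_vm_lock() undoes the
 * charge when pages are unpinned or a registration is torn down. All of it
 * is a no-op when scif_ulimit_check is clear or the mapping has no mm
 * (kernel mappings).
 */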
/**
 * scif_destroy_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
{
	int j;
	struct scif_pinned_pages *pinned_pages = window->pinned_pages;
	int nr_pages = window->nr_pages;

	if (!window->temp && window->mm) {
		__scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
		__scif_release_mm(window->mm);
		window->mm = NULL;
	}

	scif_free_window_offset(ep, window, window->offset);
	scif_unmap_window(ep->remote_dev, window);
	/*
	 * Decrement references for this set of pinned pages from
	 * this window.
	 */
	j = atomic_sub_return(1, &pinned_pages->ref_count);
	if (j < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d incorrect ref count %d\n",
			__func__, __LINE__, j);
	/*
	 * If the ref count for pinned_pages is zero then someone
	 * has already called scif_unpin_pages() for it and we should
	 * destroy the page cache.
	 */
	if (!j)
		scif_destroy_pinned_pages(window->pinned_pages);
	scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
	return 0;
}
/**
 * scif_create_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Allocate and prepare lookup entries for the remote
 * end to copy over the physical addresses.
 * Returns 0 on success and appropriate errno on failure.
 */
static int scif_create_remote_lookup(struct scif_dev *remote_dev,
				     struct scif_window *window)
{
	int i, j, err = 0;
	int nr_pages = window->nr_pages;
	bool vmalloc_dma_phys, vmalloc_num_pages;

	/* Map window */
	err = scif_map_single(&window->mapped_offset,
			      window, remote_dev, sizeof(*window));
	if (err)
		goto error_window;

	/* Compute the number of lookup entries. 21 == 2MB Shift */
	window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
				  ((2) * 1024 * 1024)) >> 21;

	window->dma_addr_lookup.lookup =
		scif_alloc_coherent(&window->dma_addr_lookup.offset,
				    remote_dev, window->nr_lookup *
				    sizeof(*window->dma_addr_lookup.lookup),
				    GFP_KERNEL | __GFP_ZERO);
	if (!window->dma_addr_lookup.lookup) {
		err = -ENOMEM;
		goto error_window;
	}

	window->num_pages_lookup.lookup =
		scif_alloc_coherent(&window->num_pages_lookup.offset,
				    remote_dev, window->nr_lookup *
				    sizeof(*window->num_pages_lookup.lookup),
				    GFP_KERNEL | __GFP_ZERO);
	if (!window->num_pages_lookup.lookup) {
		err = -ENOMEM;
		goto error_window;
	}

	vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
	vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);

	/* Now map each of the pages containing physical addresses */
	for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
		err = scif_map_page(&window->dma_addr_lookup.lookup[j],
				    vmalloc_dma_phys ?
				    vmalloc_to_page(&window->dma_addr[i]) :
				    virt_to_page(&window->dma_addr[i]),
				    remote_dev);
		if (err)
			goto error_window;
		err = scif_map_page(&window->num_pages_lookup.lookup[j],
				    vmalloc_num_pages ?
				    vmalloc_to_page(&window->num_pages[i]) :
				    virt_to_page(&window->num_pages[i]),
				    remote_dev);
		if (err)
			goto error_window;
	}
	return 0;
error_window:
	return err;
}
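/*
 * The lookup tables built above let the registering peer fill in this remote
 * window's page lists: dma_addr[] and num_pages[] live in regular kernel
 * memory here, so each page of those arrays (SCIF_NR_ADDR_IN_PAGE entries)
 * is DMA mapped and its bus address published through the coherent
 * lookup.lookup[] array. scif_prep_remote_window() on the other side
 * ioremaps those entries and copies the page lists across. nr_lookup is the
 * window size rounded up to 2MB (hence the shift by 21).
 */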
/**
 * scif_destroy_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Destroy lookup entries used for the remote
 * end to copy over the physical addresses.
 */
static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
				       struct scif_window *window)
{
	int i, j;

	if (window->nr_lookup) {
		struct scif_rma_lookup *lup = &window->dma_addr_lookup;
		struct scif_rma_lookup *npup = &window->num_pages_lookup;

		for (i = 0, j = 0; i < window->nr_pages;
			i += SCIF_NR_ADDR_IN_PAGE, j++) {
			if (lup->lookup && lup->lookup[j])
				scif_unmap_single(lup->lookup[j],
						  remote_dev, PAGE_SIZE);
			if (npup->lookup && npup->lookup[j])
				scif_unmap_single(npup->lookup[j],
						  remote_dev, PAGE_SIZE);
		}
		if (lup->lookup)
			scif_free_coherent(lup->lookup, lup->offset,
					   remote_dev, window->nr_lookup *
					   sizeof(*lup->lookup));
		if (npup->lookup)
			scif_free_coherent(npup->lookup, npup->offset,
					   remote_dev, window->nr_lookup *
					   sizeof(*npup->lookup));
		if (window->mapped_offset)
			scif_unmap_single(window->mapped_offset,
					  remote_dev, sizeof(*window));
		window->nr_lookup = 0;
	}
}
/**
 * scif_create_remote_window:
 * @scifdev: SCIF device
 * @nr_pages: number of pages in window
 *
 * Allocate and prepare a remote registration window.
 */
static struct scif_window *
scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
{
	struct scif_window *window;

	window = scif_zalloc(sizeof(*window));
	if (!window)
		return NULL;

	window->magic = SCIFEP_MAGIC;
	window->nr_pages = nr_pages;

	window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
	if (!window->dma_addr)
		goto error_window;

	window->num_pages = scif_zalloc(nr_pages *
					sizeof(*window->num_pages));
	if (!window->num_pages)
		goto error_window;

	if (scif_create_remote_lookup(scifdev, window))
		goto error_window;

	window->type = SCIF_WINDOW_PEER;
	window->unreg_state = OP_IDLE;
	INIT_LIST_HEAD(&window->list);
	return window;
error_window:
	scif_destroy_remote_window(window);
	return NULL;
}
/**
 * scif_destroy_remote_window:
 * @window: remote registration window
 *
 * Deallocate resources for remote window.
 */
void
scif_destroy_remote_window(struct scif_window *window)
{
	scif_free(window->dma_addr, window->nr_pages *
		  sizeof(*window->dma_addr));
	scif_free(window->num_pages, window->nr_pages *
		  sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
}
/*
 * scif_iommu_map: create DMA mappings if the IOMMU is enabled
 * @remote_dev: SCIF remote device
 * @window: remote registration window
 *
 * Map the physical pages using dma_map_sg(..) and then detect the number
 * of contiguous DMA mappings allocated
 */
static int scif_iommu_map(struct scif_dev *remote_dev,
			  struct scif_window *window)
{
	struct scatterlist *sg;
	int i, err = 0;
	scif_pinned_pages_t pin = window->pinned_pages;

	window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
	if (!window->st)
		return -ENOMEM;

	err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
	if (err)
		return err;

	for_each_sg(window->st->sgl, sg, window->st->nents, i)
		sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);

	err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
			 window->st->nents, DMA_BIDIRECTIONAL);
	if (!err)
		return -ENOMEM;
	/* Detect contiguous ranges of DMA mappings */
	sg = window->st->sgl;
	for (i = 0; sg; i++) {
		dma_addr_t last_da;

		window->dma_addr[i] = sg_dma_address(sg);
		window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
		last_da = sg_dma_address(sg) + sg_dma_len(sg);
		while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
			window->num_pages[i] +=
				(sg_dma_len(sg) >> PAGE_SHIFT);
			last_da = window->dma_addr[i] +
				(window->num_pages[i] << PAGE_SHIFT);
		}
		window->nr_contig_chunks++;
	}
	return 0;
}
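/*
 * With an IOMMU the mapped scatterlist may coalesce physically discontiguous
 * pages into contiguous DMA ranges; the loop above walks the scatterlist and
 * records one (dma_addr[i], num_pages[i]) pair per contiguous run, so that
 * later DMA programming uses the minimum number of descriptors.
 */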
/**
 * scif_map_window:
 * @remote_dev: SCIF remote device
 * @window: self registration window
 *
 * Map pages of a window into the aperture/PCI.
 * Also determine addresses required for DMA.
 */
int
scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
{
	int i, j, k, err = 0, nr_contig_pages;
	scif_pinned_pages_t pin;
	phys_addr_t phys_prev, phys_curr;

	pin = window->pinned_pages;

	if (intel_iommu_enabled && !scifdev_self(remote_dev))
		return scif_iommu_map(remote_dev, window);

	for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
		phys_prev = page_to_phys(pin->pages[i]);
		nr_contig_pages = 1;

		/* Detect physically contiguous chunks */
		for (k = i + 1; k < window->nr_pages; k++) {
			phys_curr = page_to_phys(pin->pages[k]);
			if (phys_curr != (phys_prev + PAGE_SIZE))
				break;
			phys_prev = phys_curr;
			nr_contig_pages++;
		}
		window->num_pages[j] = nr_contig_pages;
		window->nr_contig_chunks++;
		if (scif_is_mgmt_node()) {
			/*
			 * Management node has to deal with SMPT on X100 and
			 * hence the DMA mapping is required
			 */
			err = scif_map_single(&window->dma_addr[j],
					      phys_to_virt(page_to_phys(
							   pin->pages[i])),
					      remote_dev,
					      nr_contig_pages << PAGE_SHIFT);
			if (err)
				return err;
		} else {
			window->dma_addr[j] = page_to_phys(pin->pages[i]);
		}
	}
	return err;
}
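/*
 * scif_map_window() thus produces the same chunk representation as
 * scif_iommu_map(): physically contiguous page runs are detected by
 * comparing successive page_to_phys() values, and only the management node
 * creates real DMA mappings (it must go through the SMPT on X100); on the
 * card the physical address itself is usable as a DMA address.
 */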
/**
 * scif_send_scif_unregister:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_UNREGISTER message.
 */
static int scif_send_scif_unregister(struct scif_endpt *ep,
				     struct scif_window *window)
{
	struct scifmsg msg;

	msg.uop = SCIF_UNREGISTER;
	msg.payload[0] = window->alloc_handle.vaddr;
	msg.payload[1] = (u64)window;
	return scif_nodeqp_send(ep->remote_dev, &msg);
}
/**
 * scif_unregister_window:
 * @window: self registration window
 *
 * Send an unregistration request and wait for a response.
 */
int scif_unregister_window(struct scif_window *window)
{
	int err = 0;
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;
	bool send_msg = false;

	switch (window->unreg_state) {
	case OP_IDLE:
		window->unreg_state = OP_IN_PROGRESS;
		send_msg = true;
		fallthrough;
	case OP_IN_PROGRESS:
	{
		scif_get_window(window, 1);
		mutex_unlock(&ep->rma_info.rma_lock);
		if (send_msg) {
			err = scif_send_scif_unregister(ep, window);
			if (err) {
				window->unreg_state = OP_COMPLETED;
				goto done;
			}
		} else {
			/* Return ENXIO since unregistration is in progress */
			mutex_lock(&ep->rma_info.rma_lock);
			return -ENXIO;
		}
retry:
		/* Wait for a SCIF_UNREGISTER_(N)ACK message */
		err = wait_event_timeout(window->unregwq,
					 window->unreg_state != OP_IN_PROGRESS,
					 SCIF_NODE_ALIVE_TIMEOUT);
		if (!err && scifdev_alive(ep))
			goto retry;
		if (!err) {
			err = -ENODEV;
			window->unreg_state = OP_COMPLETED;
			dev_err(scif_info.mdev.this_device,
				"%s %d err %d\n", __func__, __LINE__, err);
		}
		if (err > 0)
			err = 0;
done:
		mutex_lock(&ep->rma_info.rma_lock);
		scif_put_window(window, 1);
		break;
	}
	case OP_FAILED:
		if (!scifdev_alive(ep)) {
			err = -ENODEV;
			window->unreg_state = OP_COMPLETED;
		}
		break;
	case OP_COMPLETED:
		break;
	default:
		err = -ENODEV;
		break;
	}

	if (window->unreg_state == OP_COMPLETED && window->ref_count)
		scif_put_window(window, window->nr_pages);

	if (!window->ref_count) {
		atomic_inc(&ep->rma_info.tw_refcount);
		list_del_init(&window->list);
		scif_free_window_offset(ep, window, window->offset);
		mutex_unlock(&ep->rma_info.rma_lock);
		if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&
		    scifdev_alive(ep))
			scif_drain_dma_intr(ep->remote_dev->sdev,
					    ep->rma_info.dma_chan);
		else if (!__scif_dec_pinned_vm_lock(window->mm,
						    window->nr_pages))
			__scif_release_mm(window->mm);
		scif_queue_for_cleanup(window, &scif_info.rma);
		mutex_lock(&ep->rma_info.rma_lock);
	}
	return err;
}
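/*
 * Unregistration handshake implemented above: the window moves from OP_IDLE
 * to OP_IN_PROGRESS when a SCIF_UNREGISTER message is sent, and to
 * OP_COMPLETED when the peer answers with SCIF_UNREGISTER_(N)ACK (see
 * scif_recv_unreg_ack()/scif_recv_unreg_nack() below). Once the reference
 * count drops to zero the window is removed from the registration list and
 * handed to the cleanup thread via scif_queue_for_cleanup().
 */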
/**
 * scif_send_alloc_request:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request
 */
static int scif_send_alloc_request(struct scif_endpt *ep,
				   struct scif_window *window)
{
	struct scifmsg msg;
	struct scif_allocmsg *alloc = &window->alloc_handle;

	/* Set up the Alloc Handle */
	alloc->state = OP_IN_PROGRESS;
	init_waitqueue_head(&alloc->allocwq);

	/* Send out an allocation request */
	msg.uop = SCIF_ALLOC_REQ;
	msg.payload[1] = window->nr_pages;
	msg.payload[2] = (u64)&window->alloc_handle;
	return _scif_nodeqp_send(ep->remote_dev, &msg);
}
/**
 * scif_prep_remote_window:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request, wait for an allocation response,
 * and prepares the remote window by copying over the page lists
 */
static int scif_prep_remote_window(struct scif_endpt *ep,
				   struct scif_window *window)
{
	struct scifmsg msg;
	struct scif_window *remote_window;
	struct scif_allocmsg *alloc = &window->alloc_handle;
	dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
	int i = 0, j = 0;
	int nr_contig_chunks, loop_nr_contig_chunks;
	int remaining_nr_contig_chunks, nr_lookup;
	int err, map_err;

	map_err = scif_map_window(ep->remote_dev, window);
	if (map_err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d map_err %d\n", __func__, __LINE__, map_err);
	remaining_nr_contig_chunks = window->nr_contig_chunks;
	nr_contig_chunks = window->nr_contig_chunks;
retry:
	/* Wait for a SCIF_ALLOC_GNT/REJ message */
	err = wait_event_timeout(alloc->allocwq,
				 alloc->state != OP_IN_PROGRESS,
				 SCIF_NODE_ALIVE_TIMEOUT);
	mutex_lock(&ep->rma_info.rma_lock);
	/* Synchronize with the thread waking up allocwq */
	mutex_unlock(&ep->rma_info.rma_lock);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		return -ENODEV;
	if (err > 0)
		err = 0;

	/* Bail out. The remote end rejected this request */
	if (alloc->state == OP_FAILED)
		return -ENOMEM;

	if (map_err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, map_err);
		msg.uop = SCIF_FREE_VIRT;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = window->alloc_handle.vaddr;
		msg.payload[2] = (u64)window;
		msg.payload[3] = SCIF_REGISTER;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = _scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		return err;
	}

	remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
				     ep->remote_dev);

	/* Compute the number of lookup entries. 21 == 2MB Shift */
	nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
		    >> ilog2(SCIF_NR_ADDR_IN_PAGE);

	dma_phys_lookup =
		scif_ioremap(remote_window->dma_addr_lookup.offset,
			     nr_lookup *
			     sizeof(*remote_window->dma_addr_lookup.lookup),
			     ep->remote_dev);
	num_pages_lookup =
		scif_ioremap(remote_window->num_pages_lookup.offset,
			     nr_lookup *
			     sizeof(*remote_window->num_pages_lookup.lookup),
			     ep->remote_dev);

	while (remaining_nr_contig_chunks) {
		loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
					      (int)SCIF_NR_ADDR_IN_PAGE);
		/* #1/2 - Copy physical addresses over to the remote side */

		/* #2/2 - Copy DMA addresses (addresses that are fed into the
		 * DMA engine) We transfer bus addresses which are then
		 * converted into a MIC physical address on the remote
		 * side if it is a MIC, if the remote node is a mgmt node we
		 * transfer the MIC physical address
		 */
		tmp = scif_ioremap(dma_phys_lookup[j],
				   loop_nr_contig_chunks *
				   sizeof(*window->dma_addr),
				   ep->remote_dev);
		tmp1 = scif_ioremap(num_pages_lookup[j],
				    loop_nr_contig_chunks *
				    sizeof(*window->num_pages),
				    ep->remote_dev);
		if (scif_is_mgmt_node()) {
			memcpy_toio((void __force __iomem *)tmp,
				    &window->dma_addr[i], loop_nr_contig_chunks
				    * sizeof(*window->dma_addr));
			memcpy_toio((void __force __iomem *)tmp1,
				    &window->num_pages[i], loop_nr_contig_chunks
				    * sizeof(*window->num_pages));
		} else {
			if (scifdev_is_p2p(ep->remote_dev)) {
				/*
				 * add remote node's base address for this node
				 * to convert it into a MIC address
				 */
				int m;
				dma_addr_t dma_addr;

				for (m = 0; m < loop_nr_contig_chunks; m++) {
					dma_addr = window->dma_addr[i + m] +
						ep->remote_dev->base_addr;
					writeq(dma_addr,
					       (void __force __iomem *)&tmp[m]);
				}
				memcpy_toio((void __force __iomem *)tmp1,
					    &window->num_pages[i],
					    loop_nr_contig_chunks
					    * sizeof(*window->num_pages));
			} else {
				/* Mgmt node or loopback - transfer DMA
				 * addresses as is, this is the same as a
				 * MIC physical address (we use the dma_addr
				 * and not the phys_addr array since the
				 * phys_addr is only setup if there is a mmap()
				 * request from the mgmt node)
				 */
				memcpy_toio((void __force __iomem *)tmp,
					    &window->dma_addr[i],
					    loop_nr_contig_chunks *
					    sizeof(*window->dma_addr));
				memcpy_toio((void __force __iomem *)tmp1,
					    &window->num_pages[i],
					    loop_nr_contig_chunks *
					    sizeof(*window->num_pages));
			}
		}
		remaining_nr_contig_chunks -= loop_nr_contig_chunks;
		i += loop_nr_contig_chunks;
		j++;
		scif_iounmap(tmp, loop_nr_contig_chunks *
			     sizeof(*window->dma_addr), ep->remote_dev);
		scif_iounmap(tmp1, loop_nr_contig_chunks *
			     sizeof(*window->num_pages), ep->remote_dev);
	}

	/* Prepare the remote window for the peer */
	remote_window->peer_window = (u64)window;
	remote_window->offset = window->offset;
	remote_window->prot = window->prot;
	remote_window->nr_contig_chunks = nr_contig_chunks;
	remote_window->ep = ep->remote_ep;
	scif_iounmap(num_pages_lookup,
		     nr_lookup *
		     sizeof(*remote_window->num_pages_lookup.lookup),
		     ep->remote_dev);
	scif_iounmap(dma_phys_lookup,
		     nr_lookup *
		     sizeof(*remote_window->dma_addr_lookup.lookup),
		     ep->remote_dev);
	scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
	window->peer_window = alloc->vaddr;
	return err;
}
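/*
 * Summary of the allocation handshake used above: scif_send_alloc_request()
 * sends SCIF_ALLOC_REQ, the peer creates a remote window (scif_alloc_req())
 * and answers with SCIF_ALLOC_GNT or SCIF_ALLOC_REJ, and
 * scif_prep_remote_window() then ioremaps the granted window and copies the
 * DMA address and page-count lists into the peer's lookup pages, converting
 * addresses to MIC physical addresses for P2P peers by adding base_addr.
 */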
/**
 * scif_send_scif_register:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_REGISTER message if EP is connected and wait for a
 * SCIF_REGISTER_(N)ACK message else send a SCIF_FREE_VIRT
 * message so that the peer can free its remote window allocated earlier.
 */
static int scif_send_scif_register(struct scif_endpt *ep,
				   struct scif_window *window)
{
	int err = 0;
	struct scifmsg msg;

	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = window->alloc_handle.vaddr;
	msg.payload[2] = (u64)window;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED) {
		msg.uop = SCIF_REGISTER;
		window->reg_state = OP_IN_PROGRESS;
		err = _scif_nodeqp_send(ep->remote_dev, &msg);
		spin_unlock(&ep->lock);
		if (!err) {
retry:
			/* Wait for a SCIF_REGISTER_(N)ACK message */
			err = wait_event_timeout(window->regwq,
						 window->reg_state !=
						 OP_IN_PROGRESS,
						 SCIF_NODE_ALIVE_TIMEOUT);
			if (!err && scifdev_alive(ep))
				goto retry;
			err = !err ? -ENODEV : 0;
			if (window->reg_state == OP_FAILED)
				err = -ENOTCONN;
		}
	} else {
		msg.uop = SCIF_FREE_VIRT;
		msg.payload[3] = SCIF_REGISTER;
		err = _scif_nodeqp_send(ep->remote_dev, &msg);
		spin_unlock(&ep->lock);
		if (!err)
			err = -ENOTCONN;
	}
	return err;
}
/**
 * scif_get_window_offset:
 * @ep: end point descriptor
 * @flags: flags
 * @offset: offset hint
 * @num_pages: number of pages
 * @out_offset: computed offset returned by reference.
 *
 * Compute/Claim a new offset for this EP.
 */
int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
			   int num_pages, s64 *out_offset)
{
	s64 page_index;
	struct iova *iova_ptr;
	int err = 0;

	if (flags & SCIF_MAP_FIXED) {
		page_index = SCIF_IOVA_PFN(offset);
		iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
					page_index + num_pages - 1);
		if (!iova_ptr)
			err = -EADDRINUSE;
	} else {
		iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
				      SCIF_DMA_63BIT_PFN - 1, 0);
		if (!iova_ptr)
			err = -ENOMEM;
	}
	if (!err)
		*out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
	return err;
}
/**
 * scif_free_window_offset:
 * @ep: end point descriptor
 * @window: registration window
 * @offset: Offset to be freed
 *
 * Free offset for this EP. The callee is supposed to grab
 * the RMA mutex before calling this API.
 */
void scif_free_window_offset(struct scif_endpt *ep,
			     struct scif_window *window, s64 offset)
{
	if ((window && !window->offset_freed) || !window) {
		free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
		if (window)
			window->offset_freed = true;
	}
}
/**
 * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Remote side is requesting a memory allocation.
 */
void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
{
	int err;
	struct scif_window *window = NULL;
	int nr_pages = msg->payload[1];

	window = scif_create_remote_window(scifdev, nr_pages);
	if (!window) {
		err = -ENOMEM;
		goto error;
	}

	/* The peer's allocation request is granted */
	msg->uop = SCIF_ALLOC_GNT;
	msg->payload[0] = (u64)window;
	msg->payload[1] = window->mapped_offset;
	err = scif_nodeqp_send(scifdev, msg);
	if (err)
		scif_destroy_remote_window(window);
	return;
error:
	/* The peer's allocation request is rejected */
	dev_err(&scifdev->sdev->dev,
		"%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
		__func__, __LINE__, err, window, nr_pages);
	msg->uop = SCIF_ALLOC_REJ;
	scif_nodeqp_send(scifdev, msg);
}
/**
 * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Remote side responded to a memory allocation.
 */
void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
	struct scif_window *window = container_of(handle, struct scif_window,
						  alloc_handle);
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	handle->vaddr = msg->payload[0];
	handle->phys_addr = msg->payload[1];
	if (msg->uop == SCIF_ALLOC_GNT)
		handle->state = OP_COMPLETED;
	else
		handle->state = OP_FAILED;
	wake_up(&handle->allocwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
/**
 * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Free up memory kmalloc'd earlier.
 */
void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window = (struct scif_window *)msg->payload[1];

	scif_destroy_remote_window(window);
}
static void
scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
{
	int j;
	struct scif_hw_dev *sdev = dev->sdev;
	phys_addr_t apt_base = 0;

	/*
	 * Add the aperture base if the DMA address is not card relative
	 * since the DMA addresses need to be an offset into the bar
	 */
	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
	    sdev->aper && !sdev->card_rel_da)
		apt_base = sdev->aper->pa;
	else
		return;

	for (j = 0; j < window->nr_contig_chunks; j++) {
		if (window->num_pages[j])
			window->dma_addr[j] += apt_base;
		else
			break;
	}
}
/**
 * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Update remote window list with a new registered window.
 */
void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED) {
		msg->uop = SCIF_REGISTER_ACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		scif_fixup_aper_base(ep->remote_dev, window);
		/* No further failures expected. Insert new window */
		scif_insert_window(window, &ep->rma_info.remote_reg_list);
	} else {
		msg->uop = SCIF_REGISTER_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
	}
	spin_unlock(&ep->lock);
	mutex_unlock(&ep->rma_info.rma_lock);
	/* free up any lookup resources now that page lists are transferred */
	scif_destroy_remote_lookup(ep->remote_dev, window);
	/*
	 * We could not insert the window but we need to
	 * destroy the window.
	 */
	if (msg->uop == SCIF_REGISTER_NACK)
		scif_destroy_remote_window(window);
}
/**
 * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Remove window from remote registration list;
 */
void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_rma_req req;
	struct scif_window *window = NULL;
	struct scif_window *recv_window =
		(struct scif_window *)msg->payload[0];
	struct scif_endpt *ep;
	int del_window = 0;

	ep = (struct scif_endpt *)recv_window->ep;
	req.out_window = &window;
	req.offset = recv_window->offset;
	req.prot = 0;
	req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
	req.type = SCIF_WINDOW_FULL;
	req.head = &ep->rma_info.remote_reg_list;
	msg->payload[0] = ep->remote_ep;

	mutex_lock(&ep->rma_info.rma_lock);
	/* Does a valid window exist? */
	if (scif_query_window(&req)) {
		dev_err(&scifdev->sdev->dev,
			"%s %d -ENXIO\n", __func__, __LINE__);
		msg->uop = SCIF_UNREGISTER_ACK;
		goto error;
	}
	if (window) {
		if (window->ref_count)
			scif_put_window(window, window->nr_pages);
		else
			dev_err(&scifdev->sdev->dev,
				"%s %d ref count should be +ve\n",
				__func__, __LINE__);
		window->unreg_state = OP_COMPLETED;
		if (!window->ref_count) {
			msg->uop = SCIF_UNREGISTER_ACK;
			atomic_inc(&ep->rma_info.tw_refcount);
			ep->rma_info.async_list_del = 1;
			list_del_init(&window->list);
			del_window = 1;
		} else {
			/* NACK! There are valid references to this window */
			msg->uop = SCIF_UNREGISTER_NACK;
		}
	} else {
		/* The window did not make its way to the list at all. ACK */
		msg->uop = SCIF_UNREGISTER_ACK;
		scif_destroy_remote_window(recv_window);
	}
error:
	mutex_unlock(&ep->rma_info.rma_lock);
	if (del_window)
		scif_drain_dma_intr(ep->remote_dev->sdev,
				    ep->rma_info.dma_chan);
	scif_nodeqp_send(ep->remote_dev, msg);
	if (del_window)
		scif_queue_for_cleanup(window, &scif_info.rma);
}
/**
 * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to complete registration.
 */
void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[2];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->reg_state = OP_COMPLETED;
	wake_up(&window->regwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
/**
 * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to inform it that registration
 * cannot be completed.
 */
void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[2];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->reg_state = OP_FAILED;
	wake_up(&window->regwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
/**
 * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to complete unregistration.
 */
void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->unreg_state = OP_COMPLETED;
	wake_up(&window->unregwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
/**
 * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to inform it that unregistration
 * cannot be completed immediately.
 */
void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->unreg_state = OP_FAILED;
	wake_up(&window->unregwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
		     int map_flags, scif_pinned_pages_t *pages)
{
	struct scif_pinned_pages *pinned_pages;
	int nr_pages, err = 0, i;
	bool vmalloc_addr = false;
	bool try_upgrade = false;
	int prot = *out_prot;
	int ulimit = 0;
	struct mm_struct *mm = NULL;

	/* Unsupported flags */
	if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
		return -EINVAL;
	ulimit = !!(map_flags & SCIF_MAP_ULIMIT);

	/* Unsupported protection requested */
	if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
		return -EINVAL;

	/* addr/len must be page aligned. len should be non zero */
	if (!len ||
	    (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
	    (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
		return -EINVAL;

	nr_pages = len >> PAGE_SHIFT;

	/* Allocate a set of pinned pages */
	pinned_pages = scif_create_pinned_pages(nr_pages, prot);
	if (!pinned_pages)
		return -ENOMEM;

	if (map_flags & SCIF_MAP_KERNEL) {
		if (is_vmalloc_addr(addr))
			vmalloc_addr = true;

		for (i = 0; i < nr_pages; i++) {
			if (vmalloc_addr)
				pinned_pages->pages[i] =
					vmalloc_to_page(addr + (i * PAGE_SIZE));
			else
				pinned_pages->pages[i] =
					virt_to_page(addr + (i * PAGE_SIZE));
		}
		pinned_pages->nr_pages = nr_pages;
		pinned_pages->map_flags = SCIF_MAP_KERNEL;
	} else {
		/*
		 * SCIF supports registration caching. If a registration has
		 * been requested with read only permissions, then we try
		 * to pin the pages with RW permissions so that a subsequent
		 * transfer with RW permission can hit the cache instead of
		 * invalidating it. If the upgrade fails with RW then we
		 * revert back to R permission and retry
		 */
		if (prot == SCIF_PROT_READ)
			try_upgrade = true;
		prot |= SCIF_PROT_WRITE;
retry:
		mm = current->mm;
		if (ulimit) {
			err = __scif_check_inc_pinned_vm(mm, nr_pages);
			if (err) {
				pinned_pages->nr_pages = 0;
				goto error_unmap;
			}
		}

		pinned_pages->nr_pages = get_user_pages_fast(
				(u64)addr,
				nr_pages,
				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
				pinned_pages->pages);
		if (nr_pages != pinned_pages->nr_pages) {
			if (try_upgrade) {
				if (ulimit)
					__scif_dec_pinned_vm_lock(mm, nr_pages);
				/* Roll back any pinned pages */
				for (i = 0; i < pinned_pages->nr_pages; i++) {
					if (pinned_pages->pages[i])
						put_page(
						pinned_pages->pages[i]);
				}
				prot &= ~SCIF_PROT_WRITE;
				try_upgrade = false;
				goto retry;
			}
		}
		pinned_pages->map_flags = 0;
	}

	if (pinned_pages->nr_pages < nr_pages) {
		err = -EFAULT;
		pinned_pages->nr_pages = nr_pages;
		goto dec_pinned;
	}

	*out_prot = prot;
	atomic_set(&pinned_pages->ref_count, 1);
	*pages = pinned_pages;
	return err;
dec_pinned:
	if (ulimit)
		__scif_dec_pinned_vm_lock(mm, nr_pages);
	/* Something went wrong! Rollback */
error_unmap:
	pinned_pages->nr_pages = nr_pages;
	scif_destroy_pinned_pages(pinned_pages);
	*pages = NULL;
	dev_dbg(scif_info.mdev.this_device,
		"%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
	return err;
}
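/*
 * __scif_pin_pages() notes: kernel buffers are translated page by page with
 * vmalloc_to_page()/virt_to_page(), while user buffers go through
 * get_user_pages_fast(). Read-only user registrations are first attempted
 * with write permission so that registration caching can reuse the pinning
 * for later RW transfers; if that upgrade fails the pages are released and
 * pinning is retried read-only.
 */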
int scif_pin_pages(void *addr, size_t len, int prot,
		   int map_flags, scif_pinned_pages_t *pages)
{
	return __scif_pin_pages(addr, len, &prot, map_flags, pages);
}
EXPORT_SYMBOL_GPL(scif_pin_pages);
int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
{
	int err = 0, ret;

	if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
		return -EINVAL;

	ret = atomic_sub_return(1, &pinned_pages->ref_count);
	if (ret < 0) {
		dev_err(scif_info.mdev.this_device,
			"%s %d scif_unpin_pages called without pinning? rc %d\n",
			__func__, __LINE__, ret);
		return -EINVAL;
	}
	/*
	 * Destroy the window if the ref count for this set of pinned
	 * pages has dropped to zero. If it is positive then there is
	 * a valid registered window which is backed by these pages and
	 * it will be destroyed once all such windows are unregistered.
	 */
	if (!ret)
		err = scif_destroy_pinned_pages(pinned_pages);

	return err;
}
EXPORT_SYMBOL_GPL(scif_unpin_pages);
static inline void
scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
{
	mutex_lock(&ep->rma_info.rma_lock);
	scif_insert_window(window, &ep->rma_info.reg_list);
	mutex_unlock(&ep->rma_info.rma_lock);
}
off_t scif_register_pinned_pages(scif_epd_t epd,
				 scif_pinned_pages_t pinned_pages,
				 off_t offset, int map_flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	s64 computed_offset;
	struct scif_window *window;
	int err;
	size_t len;
	struct device *spdev;

	/* Unsupported flags */
	if (map_flags & ~SCIF_MAP_FIXED)
		return -EINVAL;

	len = pinned_pages->nr_pages << PAGE_SHIFT;

	/*
	 * Offset is not page aligned/negative or offset+len
	 * wraps around with SCIF_MAP_FIXED.
	 */
	if ((map_flags & SCIF_MAP_FIXED) &&
	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset)))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * It is an error to pass pinned_pages to scif_register_pinned_pages()
	 * after calling scif_unpin_pages().
	 */
	if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
		return -EINVAL;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, map_flags, offset,
				     len, &computed_offset);
	if (err) {
		atomic_sub(1, &pinned_pages->ref_count);
		return err;
	}

	/* Allocate and prepare self registration window */
	window = scif_create_window(ep, pinned_pages->nr_pages,
				    computed_offset, false);
	if (!window) {
		atomic_sub(1, &pinned_pages->ref_count);
		scif_free_window_offset(ep, NULL, computed_offset);
		return -ENOMEM;
	}

	window->pinned_pages = pinned_pages;
	window->nr_pages = pinned_pages->nr_pages;
	window->prot = pinned_pages->prot;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		scif_destroy_window(ep, window);
		return err;
	}
	err = scif_send_alloc_request(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Prepare the remote registration window */
	err = scif_prep_remote_window(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Tell the peer about the new window */
	err = scif_send_scif_register(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	scif_put_peer_dev(spdev);
	/* No further failures expected. Insert new window */
	scif_insert_local_window(window, ep);
	return computed_offset;
error_unmap:
	scif_destroy_window(ep, window);
	scif_put_peer_dev(spdev);
	dev_err(&ep->remote_dev->sdev->dev,
		"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
		    int prot, int map_flags)
{
	scif_pinned_pages_t pinned_pages;
	off_t err;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	s64 computed_offset;
	struct scif_window *window;
	struct mm_struct *mm = NULL;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
		epd, addr, len, offset, prot, map_flags);
	/* Unsupported flags */
	if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
		return -EINVAL;

	/*
	 * Offset is not page aligned/negative or offset+len
	 * wraps around with SCIF_MAP_FIXED.
	 */
	if ((map_flags & SCIF_MAP_FIXED) &&
	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset)))
		return -EINVAL;

	/* Unsupported protection requested */
	if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
		return -EINVAL;

	/* addr/len must be page aligned. len should be non zero */
	if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
	    (ALIGN(len, PAGE_SIZE) != len))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, map_flags, offset,
				     len >> PAGE_SHIFT, &computed_offset);
	if (err)
		return err;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		scif_free_window_offset(ep, NULL, computed_offset);
		return err;
	}
	/* Allocate and prepare self registration window */
	window = scif_create_window(ep, len >> PAGE_SHIFT,
				    computed_offset, false);
	if (!window) {
		scif_free_window_offset(ep, NULL, computed_offset);
		scif_put_peer_dev(spdev);
		return -ENOMEM;
	}

	window->nr_pages = len >> PAGE_SHIFT;

	err = scif_send_alloc_request(ep, window);
	if (err) {
		scif_destroy_incomplete_window(ep, window);
		scif_put_peer_dev(spdev);
		return err;
	}

	if (!(map_flags & SCIF_MAP_KERNEL)) {
		mm = __scif_acquire_mm();
		map_flags |= SCIF_MAP_ULIMIT;
	}
	/* Pin down the pages */
	err = __scif_pin_pages(addr, len, &prot,
			       map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
			       &pinned_pages);
	if (err) {
		scif_destroy_incomplete_window(ep, window);
		__scif_release_mm(mm);
		goto error;
	}

	window->pinned_pages = pinned_pages;
	window->prot = pinned_pages->prot;
	window->mm = mm;

	/* Prepare the remote registration window */
	err = scif_prep_remote_window(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %ld\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Tell the peer about the new window */
	err = scif_send_scif_register(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %ld\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	scif_put_peer_dev(spdev);
	/* No further failures expected. Insert new window */
	scif_insert_local_window(window, ep);
	dev_dbg(&ep->remote_dev->sdev->dev,
		"SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
		epd, addr, len, computed_offset);
	return computed_offset;
error_unmap:
	scif_destroy_window(ep, window);
error:
	scif_put_peer_dev(spdev);
	dev_err(&ep->remote_dev->sdev->dev,
		"%s %d err %ld\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_register);
int
scif_unregister(scif_epd_t epd, off_t offset, size_t len)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	int nr_pages, err;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n",
		ep, offset, len);
	/* len must be page aligned. len should be non zero */
	if (!len ||
	    (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
		return -EINVAL;

	/* Offset is not page aligned or offset+len wraps around */
	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	nr_pages = len >> PAGE_SHIFT;

	req.out_window = &window;
	req.offset = offset;
	req.prot = 0;
	req.nr_bytes = len;
	req.type = SCIF_WINDOW_FULL;
	req.head = &ep->rma_info.reg_list;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}
	mutex_lock(&ep->rma_info.rma_lock);
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error;
	}
	/* Unregister all the windows in this range */
	err = scif_rma_list_unregister(window, offset, nr_pages);
	if (err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
error:
	mutex_unlock(&ep->rma_info.rma_lock);
	scif_put_peer_dev(spdev);
	return err;
}
EXPORT_SYMBOL_GPL(scif_unregister);