1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>

/*
 * scif_insert_tcw:
 *
 * Insert a temp window to the temp registration list sorted by va_for_temp.
 * RMA lock must be held.
 */
void scif_insert_tcw(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL;
	struct scif_window *prev = list_entry(head, struct scif_window, list);
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	/* Compare with tail and if the entry is new tail add it to the end */
	if (!list_empty(head)) {
		curr = list_entry(head->prev, struct scif_window, list);
		if (curr->va_for_temp < window->va_for_temp) {
			list_add_tail(&window->list, head);
			return;
		}
	}
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->va_for_temp > window->va_for_temp)
			break;
		prev = curr;
	}
	list_add(&window->list, &prev->list);
}
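
/*
 * Usage sketch (illustrative only; the temp list head name below is an
 * assumption, not taken from this file):
 *
 *	mutex_lock(&ep->rma_info.rma_lock);
 *	scif_insert_tcw(window, &ep->rma_info.tcw_reg_list);
 *	mutex_unlock(&ep->rma_info.rma_lock);
 *
 * Checking the tail first makes appending in ascending va_for_temp order,
 * the common case for a cache, O(1) instead of a full list walk.
 */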

/*
 * scif_insert_window:
 *
 * Insert a window to the self registration list sorted by offset.
 * RMA lock must be held.
 */
void scif_insert_window(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL, *prev = NULL;
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->offset > window->offset)
			break;
		prev = curr;
	}
	if (!prev)
		list_add(&window->list, head);
	else
		list_add(&window->list, &prev->list);
	scif_set_window_ref(window, window->nr_pages);
}
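
/*
 * Debug sketch (illustrative, not driver code): a hypothetical helper to
 * assert the sort invariant that scif_insert_window() maintains, assuming
 * the caller holds the RMA lock:
 *
 *	static void scif_check_reg_list_sorted(struct list_head *head)
 *	{
 *		struct scif_window *curr, *prev = NULL;
 *
 *		list_for_each_entry(curr, head, list) {
 *			WARN_ON(prev && prev->offset > curr->offset);
 *			prev = curr;
 *		}
 *	}
 *
 * scif_query_window() depends on this ordering to detect holes early.
 */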

/*
 * scif_query_tcw:
 *
 * Query the temp cached registration list of ep for an overlapping window.
 * On a permission mismatch, destroy the previous window. If permissions
 * match and the overlap is partial, destroy the window but return the new
 * range. RMA lock must be held.
 */
int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
{
	struct list_head *item, *temp, *head = req->head;
	struct scif_window *window;
	u64 start_va_window, start_va_req = req->va_for_temp;
	u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;

	if (!req->nr_bytes)
		return -EINVAL;
	/*
	 * Avoid traversing the entire list to find out that there
	 * is no entry that matches
	 */
	if (!list_empty(head)) {
		window = list_last_entry(head, struct scif_window, list);
		end_va_window = window->va_for_temp +
			(window->nr_pages << PAGE_SHIFT);
		if (start_va_req > end_va_window)
			return -ENXIO;
	}
	list_for_each_safe(item, temp, head) {
		window = list_entry(item, struct scif_window, list);
		start_va_window = window->va_for_temp;
		end_va_window = window->va_for_temp +
			(window->nr_pages << PAGE_SHIFT);
		if (start_va_req < start_va_window &&
		    end_va_req < start_va_window)
			break;
		if (start_va_req >= end_va_window)
			continue;
		if ((window->prot & req->prot) == req->prot) {
			if (start_va_req >= start_va_window &&
			    end_va_req <= end_va_window) {
				*req->out_window = window;
				return 0;
			}
			/* Expand the request to cover the new range */
			if (start_va_req < start_va_window) {
				req->nr_bytes +=
					start_va_window - start_va_req;
				req->va_for_temp = start_va_window;
			}
			if (end_va_req >= end_va_window)
				req->nr_bytes += end_va_window - end_va_req;
		}
		/* Destroy the old window to create a new one */
		__scif_rma_destroy_tcw_helper(window);
		break;
	}
	return -ENXIO;
}
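
/*
 * Illustrative behaviour (hypothetical numbers): with a cached window at
 * [0x2000, 0x6000) whose protections cover req->prot, a request with
 * va_for_temp = 0x3000 and nr_bytes = 0x1000 is fully contained, so
 * *req->out_window is set and 0 is returned (a cache hit). A partially
 * overlapping request instead destroys the cached entry, adjusts req to
 * the new range, and returns -ENXIO so the caller registers a fresh
 * temp window.
 */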

/*
 * scif_query_window:
 *
 * Query the registration list and check if a valid contiguous
 * range of windows exists.
 * RMA lock must be held.
 */
int scif_query_window(struct scif_rma_req *req)
{
	struct list_head *item;
	struct scif_window *window;
	s64 end_offset, offset = req->offset;
	u64 tmp_min, nr_bytes_left = req->nr_bytes;

	if (!nr_bytes_left)
		return -EINVAL;

	list_for_each(item, req->head) {
		window = list_entry(item, struct scif_window, list);
		end_offset = window->offset +
			(window->nr_pages << PAGE_SHIFT);
		if (offset < window->offset)
			/* Offset not found! */
			return -ENXIO;
		if (offset >= end_offset)
			continue;
		/* Check read/write protections. */
		if ((window->prot & req->prot) != req->prot)
			return -EPERM;
		if (nr_bytes_left == req->nr_bytes)
			/* Store the first window */
			*req->out_window = window;
		tmp_min = min((u64)end_offset - offset, nr_bytes_left);
		nr_bytes_left -= tmp_min;
		offset += tmp_min;
		/*
		 * Range requested encompasses
		 * multiple windows contiguously.
		 */
		if (!nr_bytes_left) {
			/* Done for partial window */
			if (req->type == SCIF_WINDOW_PARTIAL ||
			    req->type == SCIF_WINDOW_SINGLE)
				return 0;
			/* Extra logic for full windows */
			if (offset == end_offset)
				/* Spanning multiple whole windows */
				return 0;
			/* Not spanning multiple whole windows */
			return -ENXIO;
		}
		if (req->type == SCIF_WINDOW_SINGLE)
			break;
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d ENXIO\n", __func__, __LINE__);
	return -ENXIO;
}
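
/*
 * Illustrative behaviour (hypothetical numbers): with two contiguous
 * windows at offsets [0x0, 0x4000) and [0x4000, 0x8000), a
 * SCIF_WINDOW_PARTIAL request for offset 0x1000 and nr_bytes 0x5000
 * drains nr_bytes_left across both entries and returns 0. A hole between
 * the windows would instead trip the offset < window->offset check and
 * return -ENXIO.
 */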

/*
 * scif_rma_list_unregister:
 *
 * Traverse the self registration list starting from window:
 * 1) Call scif_unregister_window(..)
 * RMA lock must be held.
 */
int scif_rma_list_unregister(struct scif_window *window,
			     s64 offset, int nr_pages)
{
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;
	struct list_head *head = &ep->rma_info.reg_list;
	s64 end_offset;
	int err = 0;
	int loop_nr_pages;
	struct scif_window *_window;

	list_for_each_entry_safe_from(window, _window, head, list) {
		end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
		loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
				    nr_pages);
		err = scif_unregister_window(window);
		if (err)
			return err;
		nr_pages -= loop_nr_pages;
		offset += (loop_nr_pages << PAGE_SHIFT);
		if (!nr_pages)
			break;
	}
	return 0;
}
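
/*
 * Worked example (hypothetical numbers): unregistering nr_pages = 6
 * starting two pages into a 4-page window consumes loop_nr_pages = 2
 * there and 4 from the next window, draining nr_pages to 0 and ending
 * the walk. Note that every window the range touches is unregistered
 * whole via scif_unregister_window().
 */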

/*
 * scif_unmap_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Delete any DMA mappings created
 */
void scif_unmap_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;

	mutex_lock(&ep->rma_info.rma_lock);
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		scif_unmap_window(ep->remote_dev, window);
	}
	mutex_unlock(&ep->rma_info.rma_lock);
}
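
/*
 * Usage sketch (illustrative): typically called during endpoint teardown,
 * before the registration list itself is released:
 *
 *	scif_unmap_all_windows(epd);
 *
 * Unlike scif_unregister_all_windows() below, this only deletes DMA
 * mappings; the windows themselves stay on the registration list.
 */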

/*
 * scif_unregister_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Call scif_unregister_window(..)
 * RMA lock must be held.
 */
int scif_unregister_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;
	int err = 0;

	mutex_lock(&ep->rma_info.rma_lock);
retry:
	item = NULL;
	tmp = NULL;
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		ep->rma_info.async_list_del = 0;
		err = scif_unregister_window(window);
		if (err)
			dev_err(scif_info.mdev.this_device,
				"%s %d err %d\n",
				__func__, __LINE__, err);
		/*
		 * Need to restart list traversal if there has been
		 * an asynchronous list entry deletion.
		 */
		if (READ_ONCE(ep->rma_info.async_list_del))
			goto retry;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	if (!list_empty(&ep->rma_info.mmn_list)) {
		spin_lock(&scif_info.rmalock);
		list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
		spin_unlock(&scif_info.rmalock);
		schedule_work(&scif_info.mmu_notif_work);
	}
	return err;
}
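
/*
 * A minimal sketch of the restart pattern above (illustrative): clear a
 * flag before each potentially blocking call and re-check it afterwards;
 * if another context deleted list entries in between, the iterator state
 * is stale and the walk must restart:
 *
 *	retry:
 *		list_for_each_safe(item, tmp, head) {
 *			flag = 0;
 *			blocking_teardown(item);
 *			if (READ_ONCE(flag))
 *				goto retry;
 *		}
 *
 * flag and blocking_teardown() are placeholders; here the flag is
 * rma_info.async_list_del, presumably set when scif_unregister_window()
 * blocks and the list changes underneath us.
 */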