// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"
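
/*
 * Allocate and initialize a context for the given AFU: reserve a PASID
 * in the AFU's range and take a reference on the AFU for the lifetime
 * of the context.
 */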
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
/*
 * Callback for when a translation fault triggers an error
 * data:	a pointer to the context which triggered the fault
 * addr:	the address that triggered the error
 * dsisr:	the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;
	unsigned long pidr = 0;
	struct pci_dev *dev;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (mm)
		pidr = mm->context.id;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
			amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}
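
/*
 * Fault handler for the per-process MMIO area: insert the PFN of the
 * page of the context's MMIO slice matching the fault offset. The
 * context must be attached.
 */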
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}
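
/*
 * Page fault handler for the context's mmap: dispatch to the
 * per-process MMIO or AFU interrupt handler based on the offset.
 */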
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}
static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};
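
/*
 * Validate an mmap request targeting an AFU interrupt trigger page:
 * a single, write-only page backing a valid interrupt.
 */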
static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmaped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	return 0;
}
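
/*
 * Validate that an mmap request for per-process MMIO stays within the
 * context's MMIO stride.
 */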
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}
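
/*
 * mmap handler for the context: validate the requested range and set
 * up the VMA; pages are installed lazily by ocxl_mmap_fault().
 */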
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
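
/*
 * Detach the context: ask the AFU to terminate the PASID, then remove
 * its Process Element entry from the SPA.
 */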
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * PASID remains allocated.
		 *
		 * A link reset will be required to cleanup the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);
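
/*
 * Force-detach every context of the AFU and tear down any active mmio
 * mappings, e.g. when the driver is unbound while contexts are in use.
 */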
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
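
/*
 * Free a context: release its PASID and AFU interrupts, drop the AFU
 * reference taken at allocation, and free the structure.
 */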
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);