// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
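
/*
 * Rough sketch of the expected call sequence for the exported context
 * API (the callers named here are illustrative, typically the
 * character-device layer or an external driver using this interface):
 *
 *	ocxl_context_alloc(&ctx, afu, mapping);		// open
 *	ocxl_context_attach(ctx, amr, current->mm);	// attach ioctl
 *	ocxl_context_mmap(ctx, vma);			// mmap
 *	ocxl_context_detach(ctx);			// release
 *	ocxl_context_free(ctx);
 */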

/*
 * Callback for when a translation fault triggers an error
 * data:	a pointer to the context which triggered the fault
 * addr:	the address that triggered the error
 * dsisr:	the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}
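
/*
 * Attach the context to its AFU's OpenCAPI link: resolve the PID from
 * the given mm (if any), add a Process Element for the context's PASID
 * with xsl_fault_error() as the translation-fault callback, and mark
 * the context ATTACHED. Fails with -EIO if the context is not in the
 * OPENED state.
 */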
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;
	unsigned long pidr = 0;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (mm)
		pidr = mm->context.id;

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
			amr, mm, xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);
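
/*
 * Fault handlers for the per-context mmap area. Offsets below the
 * AFU's irq_base_offset fault in the context's slice of the per-PASID
 * MMIO range; offsets at or above it fault in the trigger page of the
 * corresponding AFU interrupt.
 */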

static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
			pasid_off * ctx->afu->config.pp_mmio_stride +
			offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}

static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);

	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmaped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}
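
/*
 * Reject MMIO mappings that would extend past the context's per-PASID
 * MMIO area, which is config.pp_mmio_stride bytes long.
 */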
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}
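
/*
 * Validate the requested range against the region being mapped (MMIO
 * or AFU IRQ trigger page), then set up a non-cached, PFN-mapped VMA
 * whose pages are faulted in by ocxl_mmap_fault().
 */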
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
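
/*
 * Detach the context from the AFU: ask the AFU to terminate the PASID,
 * then remove the Process Element from the SPA. If the termination
 * times out, the PE is left in place and the error is propagated so
 * the caller keeps the context (and its PASID) alive.
 */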
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * PASID remains allocated.
		 *
		 * A link reset will be required to cleanup the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
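
/*
 * Free the context and release its PASID. The context must have been
 * detached first; per the note in ocxl_context_detach(), it must not
 * be freed at all if PASID termination timed out.
 */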
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);