// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

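/* Allocate a zeroed context; callers must initialize it with ocxl_context_init() */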
struct ocxl_context *ocxl_context_alloc(void)
{
	return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}

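/*
 * Initialize a context: allocate a PASID within the AFU's range, set up
 * the context locks, wait queue and IRQ IDR, and take a reference on the
 * AFU for the lifetime of the context.
 */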
int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	return 0;
}

/*
 * Callback for when a translation fault triggers an error
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}

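/*
 * Attach the calling process to the context: add a Process Element for
 * (PASID, mm) through the link so the AFU can access the process address
 * space. xsl_fault_error() is registered to report translation faults.
 */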
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
	int rc;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
			current->mm->context.id, ctx->tidr, amr, current->mm,
			xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}

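/*
 * Fault handler helper: map the trigger page of an AFU interrupt at the
 * faulting address. The offset within the mapping identifies the IRQ.
 */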
static int map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;

	trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	vm_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

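/*
 * Fault handler helper: map one page of the context's per-process MMIO
 * area. The context must be attached, as the area is indexed by PASID.
 */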
static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
			pasid_off * ctx->afu->config.pp_mmio_stride +
			offset;

	vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return VM_FAULT_NOPAGE;
}

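/*
 * Fault handler for the context mmap: offsets below irq_base_offset fall
 * in the per-process MMIO area, offsets above it map AFU IRQ trigger pages.
 */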
static int ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	int rc;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		rc = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		rc = map_afu_irq(vma, vmf->address, offset, ctx);
	return rc;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

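/*
 * Validate a request to mmap an AFU IRQ trigger page: exactly one page,
 * a valid IRQ offset, and write-only access.
 */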
static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}

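/*
 * Validate a request to mmap the per-process MMIO area: the mapping must
 * fit within the AFU's per-process MMIO stride.
 */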
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

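/*
 * mmap entry point for a context: validate the requested range, then set
 * the VMA up for non-cached PFN mappings faulted in on demand by ocxl_vmops.
 */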
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}

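/*
 * Detach a context: ask the AFU to terminate the PASID, then remove its
 * Process Element from the SPA. See the comment below about termination
 * timeouts.
 */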
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we time out waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * PASID remains allocated.
		 *
		 * A link reset will be required to clean up the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&ctx->afu->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}

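/*
 * Force-detach all contexts of an AFU and tear down their mmio mappings,
 * e.g. when the driver is unbound while contexts are still in use.
 */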
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

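/*
 * Free a context: release its PASID, free its AFU IRQs and drop the
 * reference on the AFU taken in ocxl_context_init().
 */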
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_init */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}