// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

struct ocxl_context *ocxl_context_alloc(void)
{
	return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}

int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	return 0;
}
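
/*
 * Typical context lifecycle, as implemented by this file (sketch):
 * ocxl_context_alloc() -> ocxl_context_init() -> ocxl_context_attach()
 * -> ocxl_context_mmap() ... -> ocxl_context_detach() -> ocxl_context_free().
 * The driver's file interface (outside this file) is assumed to drive these
 * calls from its open/ioctl/mmap/release handlers.
 */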

/*
 * Callback for when a translation fault triggers an error
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}
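
/*
 * Note: xsl_fault_error() only records the fault and wakes any waiter on
 * events_wq; the code that actually reports the error to userspace
 * (presumably the driver's read/poll file operations) lives outside this
 * file.
 */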

int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
	int rc;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
			current->mm->context.id, 0, amr, current->mm,
			xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
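
/*
 * ocxl_link_add_pe() registers a Process Element for this PASID in the
 * link's Shared Process Area (SPA), binding the context to the current
 * task's address space (current->mm) so the device can issue translated
 * memory accesses on its behalf; amr is the PowerPC Authority Mask Register
 * value to apply. See ocxl_context_detach() for the removal path.
 */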

static int map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;

	trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	vm_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}
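
/*
 * map_afu_irq() above and map_pp_mmio() below return VM_FAULT_NOPAGE
 * because vm_insert_pfn() installs the PTE directly; there is no struct
 * page for the core fault handler to insert.
 */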

static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return VM_FAULT_NOPAGE;
}
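
/*
 * Per-process MMIO layout (hypothetical numbers, for illustration only):
 * with pasid_base = 0x100, pp_mmio_stride = 0x10000 and ctx->pasid = 0x103,
 * pasid_off is 3 and the context's window starts at
 * pp_mmio_start + 3 * 0x10000; offset is the position within that window.
 */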

static int ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	int rc;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		rc = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		rc = map_afu_irq(vma, vmf->address, offset, ctx);
	return rc;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};
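
/*
 * Only .fault is implemented: pages are installed lazily, on first access,
 * which also lets map_pp_mmio() re-check the context state at fault time.
 */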

static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}
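
/*
 * Clearing VM_MAYREAD and VM_MAYEXEC prevents a later mprotect() from
 * upgrading the trigger-page mapping to readable or executable.
 */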

static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
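
/*
 * Expected userspace usage (sketch, assuming fd comes from the driver's
 * character device): map the per-process MMIO area at file offset 0, and
 * the interrupt trigger page write-only at its irq offset, e.g.:
 *
 *	mmio = mmap(NULL, stride, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	trig = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, irq_offset);
 *
 * where stride and irq_offset are assumed to be obtained through the
 * driver's ioctl interface.
 */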

int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * the PASID remains allocated.
		 *
		 * A link reset will be required to cleanup the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&ctx->afu->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}

void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
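
/*
 * Note: unmap_mapping_range(mapping, 0, 0, 1) zaps the entire range mapped
 * from the file (holelen 0 means "to the end"), with even_cows set so any
 * private copies are torn down as well.
 */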

void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_init */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
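
/*
 * Note: ocxl_context_free() assumes the context detached cleanly; as noted
 * in ocxl_context_detach(), a caller that got -EBUSY back is expected not
 * to free the context, so the PE and PASID stay allocated until a link
 * reset.
 */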