/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"
#include "trace.h"
/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(ctx->afu->adapter);

        return cxl_ack_irq(ctx, 0, errstat);
}
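
/* Per-slice error interrupt: report the AFU error state and clear SERR_An */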
irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 fir_slice, errstat, serr, afu_debug;

        WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
        dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
        dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return IRQ_HANDLED;
}
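
/* Adapter-wide error interrupt: report the PSL error IVTE and FIR state */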
static irqreturn_t cxl_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 fir1, fir2, err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);

        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(adapter);

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

        dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);

        return IRQ_HANDLED;
}
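
/* Defer fault handling to fault_work, which can sleep to take mm references */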
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);
        return IRQ_HANDLED;
}
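
/* Main PSL interrupt handler, called once the target context has been found */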
static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
{
        struct cxl_context *ctx = data;
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL_DSISR_An_DS) {
                /*
                 * We don't inherently need to sleep to handle this, but we do
                 * need to get a ref to the task's mm, which we can't do from
                 * irq context without the potential for a deadlock since it
                 * takes the task_lock. An alternate option would be to keep a
                 * reference to the task's mm the entire time it has cxl open,
                 * but to do that we need to solve the issue where we hold a
                 * ref to the mm, but the mm can hold a ref to the fd after an
                 * mmap preventing anything from being cleaned up.
                 */
                pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL_DSISR_An_M)
                pr_devel("CXL interrupt: PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_P)
                pr_devel("CXL interrupt: Storage protection violation\n");
        if (dsisr & CXL_PSL_DSISR_An_A)
                pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
        if (dsisr & CXL_PSL_DSISR_An_S)
                pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
        if (dsisr & CXL_PSL_DSISR_An_K)
                pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

        if (dsisr & CXL_PSL_DSISR_An_DM) {
                /*
                 * In some cases we might be able to handle the fault
                 * immediately if hash_page would succeed, but we still need
                 * the task's mm, which as above we can't get without a lock
                 */
                pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }
        if (dsisr & CXL_PSL_DSISR_An_ST)
                WARN(1, "CXL interrupt: Segment Table PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_UR)
                pr_devel("CXL interrupt: AURP PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_PE)
                return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
        if (dsisr & CXL_PSL_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error %.16llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
                                            "undelivered to pe %i: %.16llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = 1;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}
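
/* Ack an interrupt that could not be delivered so the PSL can make progress */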
static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}
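
/* Demultiplex the shared PSL interrupt to the context named by the PE handle */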
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
        int ret;

        if ((ret = cxl_get_irq(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
                return fail_psl_irq(afu, &irq_info);
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                ret = cxl_irq(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
                " %.16llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);
        return fail_psl_irq(afu, &irq_info);
}
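
/* AFU interrupt: map the hwirq back to an AFU irq number and wake any waiters */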
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
        struct cxl_context *ctx = data;
        irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
        int irq_off, afu_irq = 1;
        __u16 range;
        int r;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                irq_off = hwirq - ctx->irqs.offset[r];
                range = ctx->irqs.range[r];
                if (irq_off >= 0 && irq_off < range) {
                        afu_irq += irq_off;
                        break;
                }
                afu_irq += range;
        }
        if (unlikely(r >= CXL_IRQ_RANGES)) {
                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                     ctx->pe, irq, hwirq);
                return IRQ_HANDLED;
        }

        trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
        pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
                 afu_irq, ctx->pe, irq, hwirq);

        if (unlikely(!ctx->irq_bitmap)) {
                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                return IRQ_HANDLED;
        }
        spin_lock(&ctx->lock);
        set_bit(afu_irq - 1, ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->lock);

        wake_up_all(&ctx->wq);

        return IRQ_HANDLED;
}
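
/* Map a hardware irq to a Linux virq and install a handler; returns 0 on failure */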
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie, const char *name)
{
        unsigned int virq;
        int result;

        virq = irq_create_mapping(NULL, hwirq);
        if (!virq) {
                dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
                return 0;
        }

        cxl_setup_irq(adapter, hwirq, virq);

        pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

        result = request_irq(virq, handler, 0, name, cookie);
        if (result) {
                dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
                return 0;
        }

        return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
        free_irq(virq, cookie);
        irq_dispose_mapping(virq);
}
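
/* Allocate one hardware irq and map it, returning both the hwirq and the virq */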
static int cxl_register_one_irq(struct cxl *adapter,
                                irq_handler_t handler,
                                void *cookie,
                                irq_hw_number_t *dest_hwirq,
                                unsigned int *dest_virq,
                                const char *name)
{
        int hwirq, virq;

        if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
                return hwirq;

        if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
                goto err;

        *dest_hwirq = hwirq;
        *dest_virq = virq;

        return 0;

err:
        cxl_release_one_irq(adapter, hwirq);
        return -ENOMEM;
}
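
/* Register the adapter-wide error interrupt and point CXL_PSL_ErrIVTE at it */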
int cxl_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
                                       &adapter->err_hwirq,
                                       &adapter->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

        return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->err_virq, adapter);
        cxl_release_one_irq(adapter, adapter->err_hwirq);
        kfree(adapter->irq_name);
}
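
/* Register the per-slice error interrupt and route it via CXL_PSL_SERR_An */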
int cxl_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
}
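
/* Register the multiplexed PSL interrupt for this AFU */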
int cxl_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
                                       &afu->psl_hwirq, &afu->psl_virq,
                                       afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
        cxl_unmap_irq(afu->psl_virq, afu);
        cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
        kfree(afu->psl_irq_name);
}
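
/* Free the irq names allocated by afu_register_irqs() */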
void afu_irq_name_free(struct cxl_context *ctx)
{
        struct cxl_irq_name *irq_name, *tmp;

        list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
                kfree(irq_name->name);
                list_del(&irq_name->list);
                kfree(irq_name);
        }
}
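
/* Allocate irq ranges, names and handler mappings for a context's AFU interrupts */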
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
        irq_hw_number_t hwirq;
        int rc, r, i, j = 1;
        struct cxl_irq_name *irq_name;

        if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
                return rc;

        /* Multiplexed PSL Interrupt */
        ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
        ctx->irqs.range[0] = 1;

        ctx->irq_count = count;
        ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                                  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
        if (!ctx->irq_bitmap)
                return -ENOMEM;

        /*
         * Allocate names first. If any fail, bail out before allocating
         * actual hardware IRQs.
         */
        INIT_LIST_HEAD(&ctx->irq_names);
        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                for (i = 0; i < ctx->irqs.range[r]; i++) {
                        irq_name = kmalloc(sizeof(struct cxl_irq_name),
                                           GFP_KERNEL);
                        if (!irq_name)
                                goto out;
                        irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
                                                   dev_name(&ctx->afu->dev),
                                                   ctx->pe, j);
                        if (!irq_name->name) {
                                kfree(irq_name);
                                goto out;
                        }
                        /* Add to tail so the next loop gets the correct order */
                        list_add_tail(&irq_name->list, &ctx->irq_names);
                        j++;
                }
        }

        /* We've allocated all memory now, so let's do the irq allocations */
        irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        cxl_map_irq(ctx->afu->adapter, hwirq,
                                    cxl_irq_afu, ctx, irq_name->name);
                        irq_name = list_next_entry(irq_name, list);
                }
        }

        return 0;

out:
        afu_irq_name_free(ctx);
        return -ENOMEM;
}
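
/* Undo afu_register_irqs(): unmap the handlers, free names, release irq ranges */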
void afu_release_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, ctx);
                }
        }

        afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}