/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

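/*
 * afu_control() drives the AFU enable/disable/reset state machine: it ORs
 * the requested command bits into AFU_Cntl_An, then polls that register
 * until the masked status matches the expected result, the timeout
 * expires, or the device link goes down.
 */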
static int afu_control(struct cxl_afu *afu, u64 command,
                       u64 result, u64 mask, bool enabled)
{
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        spin_lock(&afu->afu_cntl_lock);
        pr_devel("AFU command starting: %llx\n", command);

        trace_cxl_afu_ctrl(afu, command);

        cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        while ((AFU_Cntl & mask) != result) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }

                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        afu->enabled = enabled;
                        rc = -EIO;
                        goto out;
                }

                pr_devel_ratelimited("AFU control... (0x%016llx)\n",
                                     AFU_Cntl | command);
                cpu_relax();
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        }
        pr_devel("AFU command complete: %llx\n", command);
        afu->enabled = enabled;
out:
        trace_cxl_afu_ctrl_done(afu, command, rc);
        spin_unlock(&afu->afu_cntl_lock);

        return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
        pr_devel("AFU enable request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_E,
                           CXL_AFU_Cntl_An_ES_Enabled,
                           CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
        pr_devel("AFU disable request\n");

        return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
        pr_devel("AFU reset request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_RA,
                           CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                           false);
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Refusing to enable afu while link down!\n");
                return -EIO;
        }
        if (afu->enabled)
                return 0;
        return afu_enable(afu);
}

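/*
 * Purge outstanding PSL transactions for this slice: set the purge control
 * bit, poll until the purge status leaves the "pending" state, and ack any
 * faults the PSL reports while the purge drains. The AFU should already be
 * disabled; if it is not, we warn and disable it here.
 */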
int cxl_psl_purge(struct cxl_afu *afu)
{
        u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        u64 dsisr, dar;
        u64 start, end;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

        pr_devel("PSL purge request\n");

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
                rc = -EIO;
                goto out;
        }

        if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                WARN(1, "psl_purge request while AFU not disabled!\n");
                cxl_afu_disable(afu);
        }

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
        start = local_clock();
        PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
                        == CXL_PSL_SCNTL_An_Ps_Pending) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        rc = -EIO;
                        goto out;
                }

                dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
                pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
                if (dsisr & CXL_PSL_DSISR_TRANS) {
                        dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
                        dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
                } else if (dsisr) {
                        dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
                } else {
                        cpu_relax();
                }
                PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        }
        end = local_clock();
        pr_devel("PSL purged in %lld ns\n", end - start);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
        trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
        return rc;
}

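/*
 * Worked out: expanding the CAIA formula quoted below gives
 *   sizeof(SPA) = 128*(n+4) + 8*n + 256 = 136*n + 768 bytes
 * so solving for the number of supported processes n:
 *   n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17
 * which is the expression returned. Integer division rounds n down,
 * which errs on the safe side.
 */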
static int spa_max_procs(int spa_size)
{
        /*
         * From the CAIA:
         *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
         * Most of that junk is really just an overly-complicated way of saying
         * the last 256 bytes are __aligned(128), so it's really:
         *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
         * and
         *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
         * so
         *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
         * Ignore the alignment (which is safe in this case as long as we are
         * careful with our rounding) and solve for n:
         */
        return ((spa_size / 8) - 96) / 17;
}

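/*
 * Allocate the scheduled process area as a power-of-two number of pages,
 * doubling the allocation order until the SPA can hold at least
 * afu->num_procs process elements.
 */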
int cxl_alloc_spa(struct cxl_afu *afu)
{
        /* Work out how many pages to allocate */
        afu->native->spa_order = 0;
        do {
                afu->native->spa_order++;
                afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
                afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
        } while (afu->native->spa_max_procs < afu->num_procs);

        WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */

        if (!(afu->native->spa = (struct cxl_process_element *)
              __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
                pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
                return -ENOMEM;
        }
        pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
                 1 << afu->native->spa_order, afu->native->spa_max_procs,
                 afu->num_procs);

        return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
        u64 spap;

        afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
                                         ((afu->native->spa_max_procs + 3) * 128));

        spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
        spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
        spap |= CXL_PSL_SPAP_V;
        pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
                 afu->native->spa, afu->native->spa_max_procs,
                 afu->native->sw_command_status, spap);
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
        if (afu->native->spa) {
                free_pages((unsigned long) afu->native->spa,
                           afu->native->spa_order);
                afu->native->spa = NULL;
        }
}

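/*
 * Adapter-wide TLB and SLB invalidation: select all AFUs, kick off each
 * invalidation, and poll until the PSL clears the pending bit, timing out
 * or bailing if the link goes down.
 */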
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("CXL adapter wide TLBIA & SLBIA\n");

        cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

        cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }

        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
        int rc;

        /* 1. Disable SSTP by writing 0 to SSTP1[V] */
        cxl_p2n_write(afu, CXL_SSTP1_An, 0);

        /* 2. Invalidate all SLB entries */
        if ((rc = cxl_afu_slbia(afu)))
                return rc;

        /* 3. Set SSTP0_An */
        cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

        /* 4. Set SSTP1_An */
        cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

        return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
        struct cxl *adapter = ctx->afu->adapter;
        u64 slbia;

        WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

        cxl_p1_write(adapter, CXL_PSL_LBISEL,
                     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
                     be32_to_cpu(ctx->elem->lpid));
        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

        while (1) {
                if (!cxl_ops->link_ok(adapter, NULL))
                        break;
                slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
                if (!(slbia & CXL_TLB_SLB_P))
                        break;
                cpu_relax();
        }
}

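/*
 * Issue a process element command (add/terminate/remove) through the PSL
 * link-list command register, then poll the software command status word
 * in the SPA until the PSL echoes the command back as complete.
 */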
static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
{
        u64 state;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_llcmd(ctx, cmd);

        WARN_ON(!ctx->afu->enabled);

        ctx->elem->software_state = cpu_to_be32(pe_state);
        smp_wmb();
        *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
                        rc = -EIO;
                        goto out;
                }
                state = be64_to_cpup(ctx->afu->native->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
                        rc = -1;
                        goto out;
                }
                if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
                    (cmd | (cmd >> 16) | ctx->pe))
                        break;
                /*
                 * The command won't finish in the PSL if there are
                 * outstanding DSIs. Hence we need to yield here in
                 * case there are outstanding DSIs that we need to
                 * service. Tuning possibility: we could wait for a
                 * while before scheduling.
                 */
                schedule();
        }
out:
        trace_cxl_llcmd_done(ctx, cmd, rc);
        return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
        if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
                ctx->pe_inserted = true;
        pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        /* fast path terminate if it's already invalid */
        if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
                return rc;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
        /* We could be asked to terminate when the hw is down. That
         * should always succeed: it's not running if the hw has gone
         * away and is being reset.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
                                            CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
        ctx->elem->software_state = 0; /* Remove Valid bit */
        pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

        /* We could be asked to remove when the hw is down. Again, if
         * the hw is down, the PE is gone, so we succeed.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

        if (!rc)
                ctx->pe_inserted = false;
        slb_invalid(ctx);
        pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);

        return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
        if (!ctx->afu->pp_size || ctx->master) {
                ctx->psn_phys = ctx->afu->psn_phys;
                ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
                ctx->psn_phys = ctx->afu->psn_phys +
                        (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
                ctx->psn_size = ctx->afu->pp_size;
        }
}

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU directed mode\n");

        afu->num_procs = afu->max_procs_virtualised;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DIRECTED;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

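/*
 * Build the PSL State Register value for this context: endianness, master
 * vs. slave process, kernel (hypervisor state) vs. user (problem state),
 * and 32- vs. 64-bit user addressing.
 */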
static u64 calculate_sr(struct cxl_context *ctx)
{
        u64 sr = 0;

        set_endian(sr);
        if (ctx->master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                sr |= CXL_PSL_SR_An_TC;
        if (ctx->kernel) {
                sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
                sr |= CXL_PSL_SR_An_HV;
        } else {
                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
                sr &= ~(CXL_PSL_SR_An_HV);
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
        return sr;
}

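/*
 * Fill in this context's process element in the SPA (PID, segment table
 * pointers, IVTE offsets and ranges, AMR, WED), make sure the AFU is
 * enabled, then ask the PSL to link the element in.
 */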
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int r, result;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

        pid = current->pid;
        if (ctx->kernel)
                pid = 0;
        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */
        ctx->elem->common.aurp0 = 0; /* disable */
        ctx->elem->common.aurp1 = 0; /* disable */

        cxl_prefault(ctx, wed);

        ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
        ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        /* first guy needs to enable */
        if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
                return result;

        return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU directed mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        cxl_ops->afu_reset(afu);
        cxl_afu_disable(afu);
        cxl_psl_purge(afu);

        return 0;
}

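/*
 * Dedicated process mode gives the whole AFU to a single context, so the
 * per-process state is programmed straight into the PSL registers rather
 * than through a scheduled process area.
 */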
static int activate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

        cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
        cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
        cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

        cxl_p2n_write(afu, CXL_CSRP_An, 0);  /* disable */
        cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
        cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */

        afu->current_mode = CXL_MODE_DEDICATED;
        afu->num_procs = 1;

        return cxl_chardev_d_afu_add(afu);
}

static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        u64 pid;
        int rc;

        pid = (u64)current->pid << 32;
        if (ctx->kernel)
                pid = 0;
        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

        if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                return rc;

        cxl_prefault(ctx, wed);

        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
                      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
                       ((u64)ctx->irqs.offset[3] & 0xffff));
        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
                      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
                       ((u64)ctx->irqs.range[3] & 0xffff));

        cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

        /* master only context for dedicated */
        cxl_assign_psn_space(ctx);

        if ((rc = cxl_ops->afu_reset(afu)))
                return rc;

        cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

        return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating dedicated process mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_chardev_afu_remove(afu);

        return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return deactivate_dedicated_process(afu);
        return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Device link is down, refusing to activate!\n");
                return -EIO;
        }

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return activate_dedicated_process(afu);

        return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
                                 u64 wed, u64 amr)
{
        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                WARN(1, "Device link is down, refusing to attach process!\n");
                return -EIO;
        }

        ctx->kernel = kernel;
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return attach_afu_directed(ctx, wed, amr);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return attach_dedicated(ctx, wed, amr);

        return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
        cxl_ops->afu_reset(ctx->afu);
        cxl_afu_disable(ctx->afu);
        cxl_psl_purge(ctx->afu);
        return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (terminate_process_element(ctx))
                return -1;
        if (remove_process_element(ctx))
                return -1;

        return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
        trace_cxl_detach(ctx);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return detach_process_native_dedicated(ctx);

        return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
        u64 pidtid;

        /* If the adapter has gone away, we can't get any meaningful
         * information.
         */
        if (!cxl_ops->link_ok(afu->adapter, afu))
                return -EIO;

        info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
        info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
        pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
        info->pid = pidtid >> 32;
        info->tid = pidtid & 0xffffffff;
        info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        info->proc_handle = 0;

        return 0;
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
                                                 u64 dsisr, u64 errstat)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(ctx->afu->adapter);

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}

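/*
 * All per-context PSL interrupts for this slice arrive here; the PE handle
 * register tells us which context the fault belongs to, and the contexts
 * IDR maps that handle back to the struct cxl_context.
 */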
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
        int ret;

        if ((ret = native_get_irq_info(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
                return fail_psl_irq(afu, &irq_info);
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                ret = cxl_irq(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
                " %016llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);
        return fail_psl_irq(afu, &irq_info);
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 fir_slice, errstat, serr, afu_debug;

        WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
        dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return IRQ_HANDLED;
}

static irqreturn_t native_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 fir1, fir2, err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(adapter);

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);

        return IRQ_HANDLED;
}

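/*
 * Register the adapter-wide error interrupt and point the PSL error IVTE
 * at it.
 */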
int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
                                       &adapter->native->err_hwirq,
                                       &adapter->native->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

        return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
        if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
        if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;

        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
                                       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
                                       afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
        if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;

        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
        u64 dsisr;

        pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

        /* Clear PSL_DSISR[PE] */
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

        /* Write 1s to clear error status bits */
        cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        trace_cxl_psl_irq_ack(ctx, tfc);
        if (tfc)
                cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
        if (psl_reset_mask)
                recover_psl_err(ctx->afu, psl_reset_mask);

        return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
        return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
                                      enum cxl_attrs type)
{
        return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}

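/*
 * The config record space only supports naturally aligned 32- and 64-bit
 * accesses directly, so the 8- and 16-bit variants below are built on an
 * aligned 32-bit read, with a read-modify-write for the sub-word stores.
 */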
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xffff;
        return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xff;
        return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                 (cr * afu->crs_len) + off, in);
        return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        WARN_ON(shift == 24);
        mask = 0xffff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        mask = 0xff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
        .module = THIS_MODULE,
        .adapter_reset = cxl_pci_reset,
        .alloc_one_irq = cxl_pci_alloc_one_irq,
        .release_one_irq = cxl_pci_release_one_irq,
        .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
        .release_irq_ranges = cxl_pci_release_irq_ranges,
        .setup_irq = cxl_pci_setup_irq,
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .support_attributes = native_support_attributes,
        .link_ok = cxl_adapter_link_ok,
        .release_afu = cxl_pci_release_afu,
        .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
        .afu_check_and_enable = native_afu_check_and_enable,
        .afu_activate_mode = native_afu_activate_mode,
        .afu_deactivate_mode = native_afu_deactivate_mode,
        .afu_reset = native_afu_reset,
        .afu_cr_read8 = native_afu_cr_read8,
        .afu_cr_read16 = native_afu_cr_read16,
        .afu_cr_read32 = native_afu_cr_read32,
        .afu_cr_read64 = native_afu_cr_read64,
        .afu_cr_write8 = native_afu_cr_write8,
        .afu_cr_write16 = native_afu_cr_write16,
        .afu_cr_write32 = native_afu_cr_write32,
        .read_adapter_vpd = cxl_pci_read_adapter_vpd,
};