/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
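/*
 * afu_control() drives the AFU state machine through the AFU_Cntl_An MMIO
 * register: it sets the requested command bits (clearing any bits in
 * 'clear'), then polls until the masked status equals 'result' or
 * CXL_TIMEOUT expires. The helpers below use it to enable, disable and
 * reset the AFU; afu_cntl_lock serialises concurrent control requests.
 */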
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}
static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}
static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
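/*
 * Working that algebra through (a sketch of the arithmetic, not text from
 * the CAIA): ignoring alignment, sizeof(SPA) = 128*(n+4) + 8*n + 256 =
 * 136*n + 768, so n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17,
 * which is the expression above. For example, a single 4K page gives
 * ((4096 / 8) - 96) / 17 = 416 / 17 = 24 process elements, and indeed
 * 136 * 24 + 768 = 4032 <= 4096 while n = 25 would already need 4168 bytes.
 */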
int cxl_alloc_spa(struct cxl_afu *afu)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}
static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					 ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
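/*
 * A rough gloss on the value written above (inferred from the field masks
 * used here, not from the CAIA text): spap packs the physical address of
 * the SPA, a size code derived from spa_size, and the valid bit
 * CXL_PSL_SPAP_V, so a single MMIO store to CXL_PSL_SPAP_An both locates
 * and enables the scheduled process area for this AFU slice.
 */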
static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("Flushing data cache\n");

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
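/*
 * The three helpers below wrap do_process_element_cmd() for the ADD,
 * TERMINATE and REMOVE linked-list commands. The handshake is: update the
 * software state in the process element, publish the expected command word
 * in sw_command_status, kick the PSL via CXL_PSL_LLCMD_An, then poll
 * sw_command_status until the PSL echoes the command back (or reports
 * ~0ULL as an error). All of them serialise on the SPA mutex.
 */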
static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}
void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		if (!ctx->real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	return sr;
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}
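/*
 * attach_afu_directed() below fills in the context's process element in
 * the SPA (PID, segment table pointers, state register, WED, AMR and
 * IVTEs), prefaults according to the WED where configured, makes sure the
 * AFU is enabled, and finally makes the PE visible to the PSL with an ADD
 * llcmd via add_process_element().
 */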
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

static void update_ivtes_dedicated(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}
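/*
 * To make the packing above concrete (example values only): with
 * irqs.offset = {0x100, 0x200, 0x300, 0x400} the single 64-bit write to
 * CXL_PSL_IVTE_Offset_An carries 0x0100020003000400, i.e. each of the four
 * IRQ ranges contributes one 16-bit field, most significant field first;
 * CXL_PSL_IVTE_Limit_An is packed the same way from irqs.range[].
 */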
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	update_ivtes_dedicated(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return update_ivtes_dedicated(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}
void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}
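/*
 * The multiplexed PSL interrupt below is shared by every context on the
 * slice: the handler reads CXL_PSL_PEHandle_An to find which process
 * element took the fault, looks the context up in contexts_idr and hands
 * the details gathered by native_get_irq_info() to cxl_irq(). If no
 * matching context is found, the fault is acked via fail_psl_irq() and a
 * warning is logged.
 */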
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
}
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}
int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
}
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}
static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};