/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

/* driver-local headers needed for the cxl structures, hcall wrappers and tracepoints used below */
#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3
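
/*
 * Forward EEH-style events to the drivers of the virtual PCI functions
 * behind this AFU's vPHB. Each event maps onto the matching
 * pci_error_handlers callback (error_detected, slot_reset, resume).
 */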
static void pci_error_handlers(struct cxl_afu *afu,
			       int bus_error_event,
			       pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}
static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}
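
/*
 * Collect VPD either for the adapter (adapter != NULL) or for an AFU
 * (afu != NULL) by handing the hypervisor a scatter/gather list of
 * zeroed pages, then copying back as much data as the hcall reported.
 */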
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		for (i = 0; i < entries && len; i++) {
			if (len < SG_BUFFER_SIZE)
				tocopy = len;
			else
				tocopy = SG_BUFFER_SIZE;
			memcpy(buf, vpd_buf[i], tocopy);
			buf += tocopy;
			len -= tocopy;
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}
static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}
static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq_psl8(irq, ctx, &irq_info);
	return rc;
}
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	if (!afu)
		return -EIO;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}
static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr, afu_error, dsisr;

	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			 rc);

	return IRQ_HANDLED;
}
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					       0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				 *irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}
static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
		    (irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				 irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}
static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					   pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					   pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	return rc;
}
static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}
static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
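
/*
 * Allocate 'num' interrupts, split across up to CXL_IRQ_RANGES
 * contiguous ranges. Each range is taken at the largest size that
 * still fits; if the full count cannot be satisfied, everything
 * allocated so far is released again.
 */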
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				  struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}
static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				     struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				    tfc >> 32, (psl_reset_mask != 0));
}
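
/*
 * AFU interrupts are masked while the process element is being
 * attached and re-enabled once the attach hcall has succeeded
 * (see attach_afu_directed() below).
 */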
static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}
static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
				u64 offset, u64 *val)
{
	unsigned long cr;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			      virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		*val = *((u8 *)cr);
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}
static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}
static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}
static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			      u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}
static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}
static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
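
/*
 * Build a process element describing the context (translation flags,
 * PID, segment table pointers, interrupt bitmap) and hand it to the
 * hypervisor. On success the MMIO window returned by the attach hcall
 * becomes the context's problem state area.
 */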
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	u64 mmio_addr, mmio_size;
	u64 flags = 0;
	int rc = 0, r, i;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
	      get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags		   = cpu_to_be64(flags);
	elem->common.tid	   = cpu_to_be32(0); /* Unused */
	elem->common.pid	   = cpu_to_be32(pid);
	elem->common.csrp	   = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp0  = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp1  = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.u.psl8.sstp0  = cpu_to_be64(ctx->sstp0);
	elem->common.u.psl8.sstp1  = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				  &ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
		    ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}
static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}
static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}
static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}
static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}
ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					 loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);
	return rc;
}
static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}
static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
		    (strcmp(attr_name, "load_image_on_perst") == 0) ||
		    (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
		    (strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	default:
		break;
	}

	return true;
}
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}
static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}
static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}
static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}
static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}
static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}
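
/*
 * Poll the AFU error state reported by the hypervisor and drive the
 * attached drivers through the matching error/reset/resume callbacks.
 * Called from the delayed work in afu_handle_errstate() below.
 */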
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				   pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					   pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				   pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}
static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err)
		schedule_delayed_work(&afu_guest->work_err,
				      msecs_to_jiffies(3000));
}
static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu && (!afu_read_error_state(afu, &state))) {
		if (state == H_STATE_NORMAL)
			return true;
	}

	return false;
}
static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
			       adapter->adapter_num,
			       slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}
static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest) {
		if (adapter->guest->irq_avail) {
			for (i = 0; i < adapter->guest->irq_nranges; i++) {
				cur = &adapter->guest->irq_avail[i];
				kfree(cur->bitmap);
			}
			kfree(adapter->guest->irq_avail);
		}
		kfree(adapter->guest->status);
		kfree(adapter->guest);
	}
	cxl_remove_adapter_nr(adapter);
	kfree(adapter);
}
static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING:Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}
ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}
void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}
static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}
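
/*
 * Probe the virtual adapter from its device-tree node: read the
 * handle and properties, then create the adapter char device and
 * sysfs entries. The AFU slices are initialized separately through
 * cxl_guest_init_afu().
 */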
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* release the context lock as the adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}
void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}
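
/*
 * Backend operations used by the common cxl code when running as a
 * PowerVM guest. Entries left NULL (e.g. update_ivtes) have no guest
 * implementation.
 */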
const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.update_ivtes = NULL,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};