// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"
static int fsm_io_helper(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        union orb *orb;
        int ccode;
        __u8 lpm;
        unsigned long flags;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
        if (!orb) {
                ret = -EIO;
                goto out;
        }

        VFIO_CCW_TRACE_EVENT(5, "stIO");
        VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

        /* Issue "Start Subchannel" */
        ccode = ssch(sch->schid, orb);

        VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
                ret = 0;
                private->state = VFIO_CCW_STATE_CP_PENDING;
                break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
                ret = -EBUSY;
                break;
        case 3:         /* Device/path not operational */
                lpm = orb->cmd.lpm;
                if (lpm != 0)
                        sch->lpm &= ~lpm;
                else
                        sch->lpm = 0;

                if (cio_update_schib(sch))
                        ret = -ENODEV;
                else
                        ret = sch->lpm ? -EACCES : -ENODEV;
                break;
        default:
                ret = ccode;
        }
out:
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}
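
/*
 * Note: sch->lock is held across both cp_get_orb() and ssch() above, so
 * (assuming cio's interrupt path takes the same subchannel lock) the ACTL
 * update and the transition to VFIO_CCW_STATE_CP_PENDING cannot race with
 * a fresh interrupt for this subchannel.
 */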
static int fsm_do_halt(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        unsigned long flags;
        int ccode;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        VFIO_CCW_TRACE_EVENT(2, "haltIO");
        VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

        /* Issue "Halt Subchannel" */
        ccode = hsch(sch->schid);

        VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
                ret = 0;
                break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
                ret = -EBUSY;
                break;
        case 3:         /* Device not operational */
                ret = -ENODEV;
                break;
        default:
                ret = ccode;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}
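
/*
 * fsm_do_halt() and fsm_do_clear() are only reached via fsm_async_request()
 * below, i.e. when userspace writes VFIO_CCW_ASYNC_CMD_HSCH or
 * VFIO_CCW_ASYNC_CMD_CSCH to the async command region.
 */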
static int fsm_do_clear(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        unsigned long flags;
        int ccode;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);

        VFIO_CCW_TRACE_EVENT(2, "clearIO");
        VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

        /* Issue "Clear Subchannel" */
        ccode = csch(sch->schid);

        VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
                /* TODO: check what else we might need to clear */
                ret = 0;
                break;
        case 3:         /* Device not operational */
                ret = -ENODEV;
                break;
        default:
                ret = ccode;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}
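
/*
 * Unlike ssch/hsch, csch is accepted even on a busy or status-pending
 * subchannel (the architecture defines only condition codes 0 and 3 for
 * it), which is why fsm_do_clear() has no -EBUSY leg.
 */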
static void fsm_notoper(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        VFIO_CCW_TRACE_EVENT(2, "notoper");
        VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

        /*
         * TODO:
         * Probably we should send the machine check to the guest.
         */
        css_sched_sch_todo(sch, SCH_TODO_UNREG);
        private->state = VFIO_CCW_STATE_NOT_OPER;
}
/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
}
static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
{
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
        private->io_region->ret_code = -EIO;
}
static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        private->io_region->ret_code = -EBUSY;
}
static void fsm_io_retry(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
{
        private->io_region->ret_code = -EAGAIN;
}
static void fsm_async_error(struct vfio_ccw_private *private,
                            enum vfio_ccw_event event)
{
        struct ccw_cmd_region *cmd_region = private->cmd_region;

        pr_err("vfio-ccw: FSM: %s request from state:%d\n",
               cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
               cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
               "<unknown>", private->state);
        cmd_region->ret_code = -EIO;
}
static void fsm_async_retry(struct vfio_ccw_private *private,
                            enum vfio_ccw_event event)
{
        private->cmd_region->ret_code = -EAGAIN;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private,
                             enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        /*
         * An interrupt in a disabled state means a previous disable was not
         * successful - should not happen, but we try to disable again.
         */
        cio_disable_subchannel(sch);
}
inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
        return p->sch->schid;
}
/*
 * Deal with the ccw command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
                           enum vfio_ccw_event event)
{
        union orb *orb;
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";
        struct subchannel_id schid = get_schid(private);

        private->state = VFIO_CCW_STATE_CP_PROCESSING;
        memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

        if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
                orb = (union orb *)io_region->orb_area;

                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
                        VFIO_CCW_MSG_EVENT(2,
                                           "%pUl (%x.%x.%04x): transport mode\n",
                                           mdev_uuid(mdev), schid.cssid,
                                           schid.ssid, schid.sch_no);
                        errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
                if (io_region->ret_code) {
                        VFIO_CCW_MSG_EVENT(2,
                                           "%pUl (%x.%x.%04x): cp_init=%d\n",
                                           mdev_uuid(mdev), schid.cssid,
                                           schid.ssid, schid.sch_no,
                                           io_region->ret_code);
                        errstr = "cp init";
                        goto err_out;
                }

                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
                        VFIO_CCW_MSG_EVENT(2,
                                           "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
                                           mdev_uuid(mdev), schid.cssid,
                                           schid.ssid, schid.sch_no,
                                           io_region->ret_code);
                        errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }

                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
                        VFIO_CCW_MSG_EVENT(2,
                                           "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
                                           mdev_uuid(mdev), schid.cssid,
                                           schid.ssid, schid.sch_no,
                                           io_region->ret_code);
                        errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
                return;
        } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
                VFIO_CCW_MSG_EVENT(2,
                                   "%pUl (%x.%x.%04x): halt on io_region\n",
                                   mdev_uuid(mdev), schid.cssid,
                                   schid.ssid, schid.sch_no);
                /* halt is handled via the async cmd region */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
                VFIO_CCW_MSG_EVENT(2,
                                   "%pUl (%x.%x.%04x): clear on io_region\n",
                                   mdev_uuid(mdev), schid.cssid,
                                   schid.ssid, schid.sch_no);
                /* clear is handled via the async cmd region */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        }

err_out:
        trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
                                      io_region->ret_code, errstr);
}
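
/*
 * Rough picture of the start path handled above, assuming the usual mdev
 * region-write entry point in vfio_ccw_ops.c:
 *
 *      write to the I/O region
 *        -> vfio_ccw_mdev_write()
 *        -> vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ)
 *        -> fsm_io_request(): cp_init() + cp_prefetch() + fsm_io_helper()
 */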
/*
 * Deal with an async request from userspace.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
                              enum vfio_ccw_event event)
{
        struct ccw_cmd_region *cmd_region = private->cmd_region;

        switch (cmd_region->command) {
        case VFIO_CCW_ASYNC_CMD_HSCH:
                cmd_region->ret_code = fsm_do_halt(private);
                break;
        case VFIO_CCW_ASYNC_CMD_CSCH:
                cmd_region->ret_code = fsm_do_clear(private);
                break;
        default:
                /* should not happen? */
                cmd_region->ret_code = -EINVAL;
        }

        trace_vfio_ccw_fsm_async_request(get_schid(private),
                                         cmd_region->command,
                                         cmd_region->ret_code);
}
/*
 * Got an interrupt for a normal io (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
        struct irb *irb = this_cpu_ptr(&cio_irb);

        VFIO_CCW_TRACE_EVENT(6, "IRQ");
        VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

        memcpy(&private->irb, irb, sizeof(*irb));

        queue_work(vfio_ccw_work_q, &private->io_work);

        if (private->completion)
                complete(private->completion);
}
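
/*
 * Only the IRB snapshot happens here in interrupt context; copying status
 * back to the I/O region and signaling userspace is deferred to the
 * io_work handler on vfio_ccw_work_q (vfio_ccw_sch_io_todo() in
 * vfio_ccw_drv.c).
 */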
/*
 * Device statemachine
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
        [VFIO_CCW_STATE_NOT_OPER] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_nop,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_error,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_error,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_disabled_irq,
        },
        [VFIO_CCW_STATE_STANDBY] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_error,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_error,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_IDLE] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_request,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_request,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_CP_PROCESSING] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_retry,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_retry,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
        [VFIO_CCW_STATE_CP_PENDING] = {
                [VFIO_CCW_EVENT_NOT_OPER]       = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]         = fsm_io_busy,
                [VFIO_CCW_EVENT_ASYNC_REQ]      = fsm_async_request,
                [VFIO_CCW_EVENT_INTERRUPT]      = fsm_irq,
        },
};
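
/*
 * Dispatch sketch: events are routed through this table by a small inline
 * helper (vfio_ccw_fsm_event() in vfio_ccw_private.h), roughly:
 *
 *      static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
 *                                            int event)
 *      {
 *              vfio_ccw_jumptable[private->state][event](private, event);
 *      }
 *
 * so every state/event pair must name a handler; fsm_nop() covers the one
 * transition where nothing needs to happen.
 */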