/*
 * Core functions to access the SCLP interface.
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Completion used to flush the request queue on suspend */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;
/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}
/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
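
/*
 * Example (illustrative sketch, not part of the driver): a caller that owns
 * a page-aligned SCCB could submit a request through sclp_add_request() and
 * wait for its callback roughly as follows. MY_SCLP_CMDW, my_sccb and
 * my_req_cb are hypothetical placeholders; real users are the event-type
 * drivers built on top of this core.
 *
 *	static void my_req_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	struct sclp_req req;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.command = MY_SCLP_CMDW;	// hypothetical command word
 *	req.sccb = my_sccb;		// caller-provided, page-aligned SCCB
 *	req.status = SCLP_REQ_FILLED;
 *	req.callback = my_req_cb;
 *	req.callback_data = &done;
 *	if (sclp_add_request(&req) == 0)
 *		wait_for_completion(&done);
 */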
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(unsigned int ext_int_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
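
/*
 * Worked example (assuming HZ == 100): the s390 TOD clock advances by 4096
 * units per microsecond, so one second corresponds to roughly 2^32 TOD
 * units. A timer interval of SCLP_RETRY_INTERVAL * HZ == 3000 jiffies is
 * therefore converted to (3000 / 100) << 32, i.e. a TOD delta of about
 * 30 seconds; sub-second remainders are truncated by the integer division.
 */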
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
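
/*
 * Example (illustrative sketch only): an event-type driver registers a
 * listener much like sclp_state_change_event above. The event mask and
 * callback below are placeholders; real users pick one of the EVTYP_*_MASK
 * values defined in sclp.h.
 *
 *	static void my_receiver_fn(struct evbuf_header *evbuf)
 *	{
 *		// consume the received event buffer
 *	}
 *
 *	static struct sclp_register my_event = {
 *		.receive_mask = EVTYP_MSG_MASK,	// placeholder event type
 *		.receiver_fn = my_receiver_fn,
 *	};
 *
 *	if (sclp_register(&my_event))
 *		pr_warn(SCLP_HEADER "cannot register event listener\n");
 */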
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
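
/*
 * Usage note: sclp_remove_processed() is exported for the event-type
 * drivers. Given an SCCB, it drops every event buffer whose "processed"
 * flag (bit 0x80 in evbuf->flags) is set and returns the number of event
 * buffers that are still unprocessed, so a return value of zero means the
 * whole SCCB has been consumed.
 */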
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_receive_mask;
	sccb_mask_t sclp_send_mask;
} __attribute__((packed));
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(unsigned int ext_int_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_interrupt(0x2401, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_register();
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_unregister();
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_interrupt(0x2401, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
/*
 * Suspend/resume SCLP notifier implementation
 */
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}
/*
 * Suspend/resume callbacks for platform device
 */
static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}
static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}
static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}
static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};
static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.owner	= THIS_MODULE,
		.pm	= &sclp_pm_ops,
	},
};

static struct platform_device *sclp_pdev;
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	service_subclass_irq_register();
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;
	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto fail_platform_driver_unregister;
	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);