/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

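/*
 * Illustrative usage (not part of the driver): both knobs above are set on
 * the kernel command line, e.g. "sclp_con_pages=10 sclp_con_drop=0"; the
 * values here are made up for the example.
 */
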
static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc = 4; /* Initialize for program check handling */

	asm volatile(
		"0:	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"1:	ipm	%0\n"
		"	srl	%0,28\n"
		"2:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 4)
		return -EINVAL;	/* instruction program-checked */
	if (cc == 3)
		return -EIO;	/* SCLP not operational */
	if (cc == 2)
		return -EBUSY;	/* SCLP busy, retry later */
	return 0;
}

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	/* Arm the per-request queue timeout, if one was specified */
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

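/*
 * Illustrative sketch (not from this file): the typical calling pattern for
 * sclp_add_request(). The callback and completion names are assumptions for
 * the example; the SCCB setup is elided.
 *
 *	static void my_req_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	struct sclp_req req = {};
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	req.command = ...;			// SCLP command word
 *	req.sccb = ...;				// page-aligned SCCB
 *	req.status = SCLP_REQ_FILLED;
 *	req.callback = my_req_cb;
 *	req.callback_data = &done;
 *	if (sclp_add_request(&req) == 0)
 *		wait_for_completion(&done);
 */
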
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

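/* Note: one second is 4.096e9 TOD ticks while 1 << 32 is ~4.295e9, so the
 * shift above overestimates an interval by roughly 5%; for the timeout
 * computed in sclp_sync_wait() below this errs on the safe side. */
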
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

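/*
 * Illustrative usage (sketch): callers busy-wait on a request's status while
 * sclp_sync_wait() services the interface, as sclp_init_mask() does below:
 *
 *	while (req->status != SCLP_REQ_DONE && req->status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 */
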
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8	validity_sclp_active_facility_mask : 1;
	u8	validity_sclp_receive_mask : 1;
	u8	validity_sclp_send_mask : 1;
	u8	validity_read_data_function_mask : 1;
	u16	_zeros : 12;
	u16	mask_length;
	u64	sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32	read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

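/*
 * Illustrative sketch (not from this file): a minimal event listener. The
 * mask choice and callback body are assumptions for the example.
 *
 *	static void my_receiver(struct evbuf_header *evbuf)
 *	{
 *		// consume the event buffer
 *	}
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MSG_MASK,	// hypothetical event type
 *		.receiver_fn = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);	// -EBUSY on mask collision
 *	...
 *	sclp_unregister(&my_listener);
 */
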
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

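/*
 * Illustrative usage (sketch): event senders call this on their SCCB after a
 * write event request to drop evbufs the SCLP flagged as processed (bit 0x80
 * in evbuf->flags) and resubmit if any remain, e.g.:
 *
 *	if (sclp_remove_processed((struct sccb_header *) sccb) != 0)
 *		...	// resubmit request for remaining event buffers
 */
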
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

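/*
 * Illustrative pairing (sketch): code that must quiesce the interface, e.g.
 * around a reboot, brackets the critical section like this:
 *
 *	if (sclp_deactivate() == 0) {
 *		...	// new requests are rejected here
 *		sclp_reactivate();
 *	}
 */
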
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);

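/*
 * The three attributes above surface as read-only sysfs files; with the
 * platform driver named "sclp" they would appear under
 * /sys/bus/platform/drivers/sclp/ (exact path assumed from the driver name).
 */
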
static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};

static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);