/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	       Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;

static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}

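/*
 * A note on addressing: the service call passes the SCCB by real address via
 * __pa(), and the driver keeps its SCCBs in the page-aligned, page-sized
 * buffers sclp_read_sccb/sclp_init_sccb declared above, presumably so that an
 * SCCB never crosses a page boundary.
 */
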
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

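/*
 * Note: the last argument handed to __sclp_set_request_timer() above becomes
 * the 'data' parameter of sclp_request_timeout(). A running request arms the
 * timer with 1 (force a reset of the running state via a NOP read event
 * request when it expires), while a busy interface re-arms it with 0 (simply
 * retry the queue after SCLP_BUSY_INTERVAL seconds).
 */
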
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

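/*
 * Illustrative sketch (not part of the driver): a typical caller fills a
 * struct sclp_req with a command, a prepared SCCB and a completion callback,
 * then submits it through sclp_add_request(). The names example_cb and
 * example_submit are placeholders.
 *
 *	static void example_cb(struct sclp_req *req, void *data)
 *	{
 *		// req->status is SCLP_REQ_DONE or SCLP_REQ_FAILED here;
 *		// the SCCB response code can be inspected via req->sccb.
 *	}
 *
 *	static int example_submit(void *sccb, sclp_cmdw_t command)
 *	{
 *		static struct sclp_req example_req;
 *
 *		memset(&example_req, 0, sizeof(example_req));
 *		example_req.command = command;
 *		example_req.sccb = sccb;	// page-aligned SCCB, header set up
 *		example_req.callback = example_cb;
 *		example_req.callback_data = NULL;
 *		return sclp_add_request(&example_req);
 *	}
 */
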
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

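/*
 * Note: on the TOD clock, bit 51 is incremented roughly every microsecond, so
 * 2^32 TOD units correspond to 2^20 microseconds, i.e. about 1.05 seconds.
 * Shifting whole seconds (jiffies / HZ) left by 32 is therefore only an
 * approximation of the interval, which is good enough for the coarse timeouts
 * used in sclp_sync_wait() below.
 */
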
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);

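/*
 * Note on the control register fiddling in sclp_sync_wait(): the OR with
 * 0x00000200 opens the service-signal external-interruption subclass in CR0
 * (the same bit that ctl_set_bit(0, 9) sets elsewhere in this file), while
 * the AND with 0xFFFFF3AC closes, among others, the clock-comparator and
 * CPU-timer subclasses, so the busy loop only ever sees the service signal
 * and no timer interrupts while it polls sclp_running_state.
 */
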
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

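/*
 * Illustrative sketch (not part of the driver): registering a listener for
 * one event type, modelled on the sclp_state_change_event registration above.
 * The event mask and callback names are placeholders.
 *
 *	static void example_receiver(struct evbuf_header *evbuf)
 *	{
 *		// called for every incoming event buffer of the chosen type
 *	}
 *
 *	static void example_state_change(struct sclp_register *reg)
 *	{
 *		// reg->sclp_receive_mask / reg->sclp_send_mask now reflect
 *		// what the interface actually granted
 *	}
 *
 *	static struct sclp_register example_event = {
 *		.receive_mask = EVTYP_STATECHANGE_MASK,	// placeholder mask
 *		.receiver_fn = example_receiver,
 *		.state_change_fn = example_state_change,
 *	};
 *
 *	// during setup:	rc = sclp_register(&example_event);
 *	// during teardown:	sclp_unregister(&example_event);
 */
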
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

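/*
 * Note: bit 0x80 in evbuf->flags marks an event buffer as processed. Given an
 * SCCB containing buffers A, B, C where only B carries the flag, the loop
 * above copies C over B and shrinks sccb->length by B's length, so the caller
 * is left with A and C packed back to back and a return value of 2
 * (unprocessed buffers).
 */
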
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

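/*
 * Note: the WRITE EVENT MASK request built above is a two-way exchange.
 * receive_mask/send_mask announce which event types this driver wants to
 * receive and send; the sclp_receive_mask/sclp_send_mask fields are cleared
 * here and filled in by the interface, and sclp_init_mask() below copies them
 * back into the global sclp_receive_mask/sclp_send_mask once the request
 * completes successfully.
 */
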
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

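/*
 * Note: sclp_deactivate() and sclp_reactivate() are meant to be used as a
 * pair by code that needs the interface quiesced for a while; while
 * deactivated, sclp_add_request() rejects everything except the internal init
 * request and incoming event buffers are not read. A caller would typically
 * do:
 *
 *	rc = sclp_deactivate();
 *	if (rc == 0) {
 *		// ... work that must not race with SCLP events ...
 *		sclp_reactivate();
 *	}
 */
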
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	return 0;
}

static __init int sclp_initcall(void)
{
	return sclp_init();
}

arch_initcall(sclp_initcall);