drivers/s390/char/sclp.c
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"
#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
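/*
 * Editor's note: as handled above, condition code 2 means the SCLP is still
 * busy with a previous command and 3 means it is not operational; condition
 * code 0 means the command was accepted.  The call only starts the request -
 * completion is signalled asynchronously by a service-signal external
 * interrupt (see sclp_interrupt_handler below).
 */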
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}
/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
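/*
 * Editor's sketch (illustrative, not part of this driver): a typical caller
 * fills in a struct sclp_req and waits for its callback.  The example_*
 * names, the completion and the page-aligned SCCB buffer are hypothetical.
 *
 *	static void example_callback(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	static int example_submit(struct sclp_req *req, void *sccb,
 *				  struct completion *done)
 *	{
 *		req->command = SCLP_CMDW_WRITE_EVENT_DATA;
 *		req->sccb = sccb;
 *		req->status = SCLP_REQ_FILLED;
 *		req->callback = example_callback;
 *		req->callback_data = done;
 *		return sclp_add_request(req);
 *	}
 */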
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(unsigned int ext_int_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
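/*
 * Editor's note: the TOD clock advances 4096 units per microsecond, so one
 * second corresponds to 4.096e9, or roughly 2^32, TOD units.  Shifting whole
 * seconds left by 32 therefore overestimates the interval by about 5%,
 * which errs on the side of a longer timeout.
 */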
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
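/*
 * Editor's note: sclp_sync_wait() busy-waits with timer interrupts switched
 * off and only the service-signal interruption subclass enabled in control
 * register 0, which is why an expired request timer has to be detected and
 * run by hand inside the polling loop above.
 */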
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
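/*
 * Editor's sketch (illustrative, not part of this driver): a minimal
 * listener registration, modeled on sclp_state_change_event above.  The
 * event mask value and handler name are hypothetical.
 *
 *	static void example_receiver(struct evbuf_header *evbuf)
 *	{
 *		... consume the event; called without sclp_lock held ...
 *	}
 *
 *	static struct sclp_register example_event = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn = example_receiver,
 *	};
 *
 *	rc = sclp_register(&example_event);
 */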
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
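/*
 * Editor's note: flag bit 0x80 marks an event buffer as processed.  The
 * compaction above works in place: a processed buffer is overwritten by the
 * remaining buffers behind it and sccb->length shrinks accordingly, so
 * evbuf is then already positioned on the next buffer to examine.
 */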
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_receive_mask;
	sccb_mask_t sclp_send_mask;
} __attribute__((packed));
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(unsigned int ext_int_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_interrupt(0x2401, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_register();
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_unregister();
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_interrupt(0x2401, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		/* Find the next listener to notify. The callback is invoked
		 * without sclp_lock held and may sleep, so the list is
		 * re-scanned from the start on each iteration;
		 * pm_event_posted marks entries already handled. */
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}
/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}
static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}
static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.owner	= THIS_MODULE,
		.pm	= &sclp_pm_ops,
	},
};

static struct platform_device *sclp_pdev;
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	service_subclass_irq_register();
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;
	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto fail_platform_driver_unregister;
	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);