/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */
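/*
 * Rough sketch of a session as driven by the code in this file (reconstructed
 * from the handshake/open paths below, not from the HVSI specification):
 *
 *   OS                                       service processor (FSP)
 *   --                                       -----------------------
 *   drain any stale input (e.g. old CLOSE)
 *   QUERY(VSV_SEND_VERSION_NUMBER)   ----->
 *                                    <-----  QUERY_RESPONSE
 *                                    <-----  QUERY(VSV_SEND_VERSION_NUMBER)
 *   QUERY_RESPONSE(HVSI_VERSION)     ----->  (protocol is now HVSI_OPEN)
 *   QUERY(VSV_SEND_MODEM_CTL_STATUS) ----->
 *                                    <-----  QUERY_RESPONSE(mctrl word)
 *   CONTROL(VSV_SET_MODEM_CTL, DTR)  ----->
 *   DATA packets then flow in both directions.
 */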
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#define HVSI_MAJOR	229
#define HVSI_MINOR	128
#define MAX_NR_HVSI_CONSOLES 4

#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12

/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
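/*
 * The "two 8-byte registers" above refers to the hcall-based console
 * transport behind hvc_get_chars()/hvc_put_chars(): each call appears to
 * move at most 16 bytes, which is presumably why HVSI_MAX_READ is 16 and
 * why outgoing data is chunked into HVSI_MAX_OUTGOING_DATA-sized packets.
 */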
struct hvsi_struct {
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	spinlock_t lock;
	int index;
	struct tty_struct *tty;
	int count;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	uint8_t *inbuf_end;
	int n_throttle;
	int n_outbuf;
	uint32_t vtermno;
	uint32_t virq;
	atomic_t seqno; /* HVSI packet sequence number */
	uint16_t mctrl;
	uint8_t state;  /* HVSI protocol state */
	uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
	uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);
enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

#define VS_DATA_PACKET_HEADER           0xff
#define VS_CONTROL_PACKET_HEADER        0xfe
#define VS_QUERY_PACKET_HEADER          0xfd
#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc

/* control verbs */
#define VSV_SET_MODEM_CTL    1 /* to service processor only */
#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
#define VSV_CLOSE_PROTOCOL   3

/* query verbs */
#define VSV_SEND_VERSION_NUMBER 1
#define VSV_SEND_MODEM_CTL_STATUS 2

/* yes, these masks are not consecutive. */
#define HVSI_TSDTR 0x01
#define HVSI_TSCD  0x20
struct hvsi_header {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno;
} __attribute__((packed));

struct hvsi_data {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno;
	uint8_t  data[HVSI_MAX_OUTGOING_DATA];
} __attribute__((packed));

struct hvsi_control {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno;
	uint16_t verb;
	/* optional depending on verb: */
	uint32_t word;
	uint32_t mask;
} __attribute__((packed));

struct hvsi_query {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno;
	uint16_t verb;
} __attribute__((packed));

struct hvsi_query_response {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno;
	uint16_t verb;
	uint16_t query_seqno;
	union {
		uint8_t  version;
		uint32_t mctrl_word;
	} u;
} __attribute__((packed));
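/*
 * Every packet starts with the common header above: 'type' is one of the
 * VS_*_PACKET_HEADER values, 'len' is the total packet length in bytes
 * (header included), and 'seqno' increments per packet sent.  Incoming
 * bytes are accumulated in hp->inbuf and carved into packets purely by
 * 'len' (see len_packet()/got_packet() below).  This summary is inferred
 * from the code in this file, not from the HVSI specification.
 */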
static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}
static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}
static inline void print_state(struct hvsi_struct *hp)
{
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = (hp->state < ARRAY_SIZE(state_names))
		? state_names[hp->state] : "UNKNOWN";

	pr_debug("hvsi%i: state = %s\n", hp->index, name);
}
static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}
static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}
static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}
static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}
static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */

	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */

	return 1;
}
/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);

	pr_debug("%s: %i chars remain\n", __func__, remaining);

	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);

	hp->inbuf_end = hp->inbuf + remaining;
}
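/*
 * Note (inferred from hvsi_load_chunk() below): read_to points just past the
 * last fully-consumed packet, so any trailing partial packet is slid to the
 * front of inbuf and completed by the next hvsi_read() chunk.
 */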
#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif
static void dump_hex(const uint8_t *data, int len)
{
	int i;

	printk("    ");
	for (i=0; i < len; i++)
		printk("%.2x", data[i]);

	printk("\n    ");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}
static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;

	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
			header->seqno);

	dump_hex(packet, header->len);
}
static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long got;

	got = hvc_get_chars(hp->vtermno, buf, count);

	return got;
}
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct **to_hangup, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;

	switch (header->verb) {
		case VSV_MODEM_CTL_UPDATE:
			if ((header->word & HVSI_TSCD) == 0) {
				/* CD went away; no more connection */
				pr_debug("hvsi%i: CD dropped\n", hp->index);
				hp->mctrl &= ~TIOCM_CD;
				/* If userland hasn't done an open(2) yet, hp->tty is NULL. */
				if (hp->tty && !(hp->tty->flags & CLOCAL))
					*to_hangup = hp->tty;
			}
			break;
		case VSV_CLOSE_PROTOCOL:
			pr_debug("hvsi%i: service processor came back\n", hp->index);
			if (hp->state != HVSI_CLOSED)
				*to_handshake = hp;
			break;
		default:
			printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
			dump_packet(packet);
			break;
	}
}
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;

	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_RESPONSE:
			__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
			break;
		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
			hp->mctrl = 0;
			if (resp->u.mctrl_word & HVSI_TSDTR)
				hp->mctrl |= TIOCM_DTR;
			if (resp->u.mctrl_word & HVSI_TSCD)
				hp->mctrl |= TIOCM_CD;
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
			dump_packet(packet);
			break;
	}
}
/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;

	packet.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query_response);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = VSV_SEND_VERSION_NUMBER;
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = query_seqno+1;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}

	return 0;
}
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;

	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_QUERY:
			hvsi_version_respond(hp, query->seqno);
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
			dump_packet(packet);
			break;
	}
}
static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;

	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(hp->tty, c, 0);
	}
}
/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * adjusted accordingly.
 */
#define TTY_THRESHOLD_THROTTLE 128
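/*
 * The bytes withheld here are not dropped: hvsi_recv_data() parks them in
 * hp->throttle_buf and they are pushed later by hvsi_send_overflow(), either
 * from hvsi_unthrottle() or from the next interrupt once the tty is no
 * longer throttled (see those functions below).
 */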
static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
		const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;

	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);

	if (datalen == 0)
		return NULL;

	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}

	hvsi_insert_chars(hp, data, datalen);

	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __func__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}

	return hp->tty;
}
/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
		struct tty_struct **hangup, struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;

	*flip = NULL;
	*hangup = NULL;
	*handshake = NULL;

	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __func__);
		return 0;
	}

	pr_debug("%s: got %i bytes\n", __func__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);

	hp->inbuf_end += chunklen;

	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;

		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}

		pr_debug("%s: handling %i-byte packet\n", __func__,
				len_packet(packet));
		dbg_dump_packet(packet);

		switch (header->type) {
			case VS_DATA_PACKET_HEADER:
				if (!is_open(hp))
					break;
				if (hp->tty == NULL)
					break; /* no tty buffer to put data in */
				*flip = hvsi_recv_data(hp, packet);
				break;
			case VS_CONTROL_PACKET_HEADER:
				hvsi_recv_control(hp, packet, hangup, handshake);
				break;
			case VS_QUERY_RESPONSE_PACKET_HEADER:
				hvsi_recv_response(hp, packet);
				break;
			case VS_QUERY_PACKET_HEADER:
				hvsi_recv_query(hp, packet);
				break;
			default:
				printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
						hp->index, header->type);
				dump_packet(packet);
				break;
		}

		packet += len_packet(packet);

		if (*hangup || *handshake) {
			pr_debug("%s: hangup or handshake\n", __func__);
			/*
			 * we need to send the hangup now before receiving any more data.
			 * If we get "data, hangup, data", we can't deliver the second
			 * data before the hangup.
			 */
			break;
		}
	}

	compact_inbuf(hp, packet);

	return 1;
}
static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __func__,
			hp->n_throttle);

	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}
/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct tty_struct *flip;
	struct tty_struct *hangup;
	struct hvsi_struct *handshake;
	unsigned long flags;
	int again = 1;

	pr_debug("%s\n", __func__);

	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);

		/*
		 * we have to call tty_flip_buffer_push() and tty_hangup() outside our
		 * spinlock. But we also have to keep going until we've read all the
		 * pending data.
		 */

		if (flip) {
			/* there was data put in the tty flip buffer */
			tty_flip_buffer_push(flip);
			flip = NULL;
		}

		if (hangup)
			tty_hangup(hangup);

		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->tty && hp->n_throttle
			&& (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
		/* we weren't hung up and we weren't throttled, so we can deliver the
		 * rest now */
		flip = hp->tty;
		hvsi_send_overflow(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	if (flip)
		tty_flip_buffer_push(flip);

	return IRQ_HANDLED;
}
/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

		if (hp->state == state)
			return 0;

		mdelay(5);
		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}
/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;

	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;

	return ret;
}
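/*
 * hvsi_wait points at poll_for_state() until hvsi_init() has requested the
 * irqs, at which point it is switched to wait_for_state(); the early boot
 * console therefore polls, while tty opens sleep on hp->stateq.
 */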
static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;

	packet.type = VS_QUERY_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = verb;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}

	return 0;
}
static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;

	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}

	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

	return 0;
}
/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;

	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = sizeof(struct hvsi_control);
	packet.verb = VSV_SET_MODEM_CTL;
	packet.mask = HVSI_TSDTR;

	if (mctrl & TIOCM_DTR)
		packet.word = HVSI_TSDTR;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}

	return 0;
}
static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	while (time_before(jiffies, end_jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}
static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;

	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);

	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;

	return 0;
}
static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);

	if (hvsi_handshake(hp) >= 0)
		return;

	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}
static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;

	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

	packet.type = VS_DATA_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);

	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (ret == packet.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}
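/*
 * Callers keep 'count' within HVSI_MAX_OUTGOING_DATA: hvsi_push() sends at
 * most N_OUTBUF bytes from hp->outbuf, and hvsi_console_print() chops its
 * output into HVSI_MAX_OUTGOING_DATA-sized pieces, so the BUG_ON above
 * should never fire in normal operation.
 */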
static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;

	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = 6;
	packet.verb = VSV_CLOSE_PROTOCOL;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
}
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int line = tty->index;
	int ret;

	pr_debug("%s\n", __func__);

	if (line < 0 || line >= hvsi_count)
		return -ENODEV;
	hp = &hvsi_ports[line];

	tty->driver_data = hp;

	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	spin_lock_irqsave(&hp->lock, flags);
	hp->tty = tty;
	hp->count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}
/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);

	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work_sync(&hp->writer);
	flush_work_sync(&hp->handshaker);

	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}
static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (tty_hung_up_p(filp))
		return;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->count == 0) {
		hp->tty = NULL;
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */

		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */
			spin_unlock_irqrestore(&hp->lock, flags);

			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);

			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);

			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);

			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);

			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
		       hp - hvsi_ports, hp->count);

	spin_unlock_irqrestore(&hp->lock, flags);
}
static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);

	hp->count = 0;
	hp->n_outbuf = 0;
	hp->tty = NULL;

	spin_unlock_irqrestore(&hp->lock, flags);
}
/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;

	if (hp->n_outbuf <= 0)
		return;

	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __func__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}
/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
	static long start_j = 0;

	if (start_j == 0)
		start_j = jiffies;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}

	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
				jiffies - start_j);
		start_j = 0;

		wake_up_all(&hp->emptyq);
		tty_wakeup(hp->tty);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);
}
static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return N_OUTBUF - hp->n_outbuf;
}

static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->n_outbuf;
}
static int hvsi_write(struct tty_struct *tty,
		const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __func__);
		goto out;
	}

	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(hp->tty));

		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;

		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}

	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
			total);

	return total;
}
/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	pr_debug("%s\n", __func__);

	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}
static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	int shouldflip = 0;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		shouldflip = 1;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	if (shouldflip)
		tty_flip_buffer_push(hp->tty);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}
static int hvsi_tiocmget(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->mctrl;
}
static int hvsi_tiocmset(struct tty_struct *tty,
		unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;

	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;

	spin_lock_irqsave(&hp->lock, flags);

	new_mctrl = (hp->mctrl & ~clear) | set;

	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return 0;
}
static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};
static int __init hvsi_init(void)
{
	int i;

	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;

	hvsi_driver->owner = THIS_MODULE;
	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);

	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret;

		ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */

	if (tty_register_driver(hvsi_driver))
		panic("Couldn't register hvsi console driver\n");

	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

	return 0;
}
device_initcall(hvsi_init);
/***** console (not tty) code: *****/
static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;

	if (!is_open(hp))
		return;

	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0; /* throw away characters on error */
			else
				i -= ret;
		}
	}
}
static struct tty_driver *hvsi_console_device(struct console *console,
		int *index)
{
	*index = console->index;
	return hvsi_driver;
}
static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp;
	int ret;

	if (console->index < 0 || console->index >= hvsi_count)
		return -1;
	hp = &hvsi_ports[console->index];

	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);

	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;

	hp->flags |= HVSI_CONSOLE;

	return 0;
}
static struct console hvsi_console = {
	.name	= "hvsi",
	.write	= hvsi_console_print,
	.device	= hvsi_console_device,
	.setup	= hvsi_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
			vty != NULL;
			vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
		struct hvsi_struct *hp;
		const uint32_t *vtermno, *irq;

		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = *vtermno;
		hp->virq = irq_create_mapping(NULL, irq[0]);
		if (hp->virq == NO_IRQ) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, irq[0]);
			continue;
		}

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_console);

	return 0;
}
console_initcall(hvsi_console_init);