Merge tag 'sched-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel...
[linux/fpc-iii.git] / arch / sparc / kernel / viohs.c
blob7db5aabe9708576109bd028c241532150339ff59
1 // SPDX-License-Identifier: GPL-2.0
2 /* viohs.c: LDOM Virtual I/O handshake helper layer.
4 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
5 */
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/string.h>
10 #include <linux/delay.h>
11 #include <linux/sched.h>
12 #include <linux/sched/clock.h>
13 #include <linux/slab.h>
15 #include <asm/ldc.h>
16 #include <asm/vio.h>
18 int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
20 int err, limit = 1000;
22 err = -EINVAL;
23 while (limit-- > 0) {
24 err = ldc_write(vio->lp, data, len);
25 if (!err || (err != -EAGAIN))
26 break;
27 udelay(1);
30 return err;
32 EXPORT_SYMBOL(vio_ldc_send);
34 static int send_ctrl(struct vio_driver_state *vio,
35 struct vio_msg_tag *tag, int len)
37 tag->sid = vio_send_sid(vio);
38 return vio_ldc_send(vio, tag, len);
41 static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
43 tag->type = type;
44 tag->stype = stype;
45 tag->stype_env = stype_env;
48 static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
50 struct vio_ver_info pkt;
52 vio->_local_sid = (u32) sched_clock();
54 memset(&pkt, 0, sizeof(pkt));
55 init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
56 pkt.major = major;
57 pkt.minor = minor;
58 pkt.dev_class = vio->dev_class;
60 viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
61 major, minor, vio->dev_class);
63 return send_ctrl(vio, &pkt.tag, sizeof(pkt));
66 static int start_handshake(struct vio_driver_state *vio)
68 int err;
70 viodbg(HS, "START HANDSHAKE\n");
72 vio->hs_state = VIO_HS_INVALID;
74 err = send_version(vio,
75 vio->ver_table[0].major,
76 vio->ver_table[0].minor);
77 if (err < 0)
78 return err;
80 return 0;
83 static void flush_rx_dring(struct vio_driver_state *vio)
85 struct vio_dring_state *dr;
86 u64 ident;
88 BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
90 dr = &vio->drings[VIO_DRIVER_RX_RING];
91 ident = dr->ident;
93 BUG_ON(!vio->desc_buf);
94 kfree(vio->desc_buf);
95 vio->desc_buf = NULL;
97 memset(dr, 0, sizeof(*dr));
98 dr->ident = ident;
101 void vio_link_state_change(struct vio_driver_state *vio, int event)
103 if (event == LDC_EVENT_UP) {
104 vio->hs_state = VIO_HS_INVALID;
106 switch (vio->dev_class) {
107 case VDEV_NETWORK:
108 case VDEV_NETWORK_SWITCH:
109 vio->dr_state = (VIO_DR_STATE_TXREQ |
110 VIO_DR_STATE_RXREQ);
111 break;
113 case VDEV_DISK:
114 vio->dr_state = VIO_DR_STATE_TXREQ;
115 break;
116 case VDEV_DISK_SERVER:
117 vio->dr_state = VIO_DR_STATE_RXREQ;
118 break;
120 start_handshake(vio);
121 } else if (event == LDC_EVENT_RESET) {
122 vio->hs_state = VIO_HS_INVALID;
124 if (vio->dr_state & VIO_DR_STATE_RXREG)
125 flush_rx_dring(vio);
127 vio->dr_state = 0x00;
128 memset(&vio->ver, 0, sizeof(vio->ver));
130 ldc_disconnect(vio->lp);
133 EXPORT_SYMBOL(vio_link_state_change);
135 static int handshake_failure(struct vio_driver_state *vio)
137 struct vio_dring_state *dr;
139 /* XXX Put policy here... Perhaps start a timer to fire
140 * XXX in 100 ms, which will bring the link up and retry
141 * XXX the handshake.
144 viodbg(HS, "HANDSHAKE FAILURE\n");
146 vio->dr_state &= ~(VIO_DR_STATE_TXREG |
147 VIO_DR_STATE_RXREG);
149 dr = &vio->drings[VIO_DRIVER_RX_RING];
150 memset(dr, 0, sizeof(*dr));
152 kfree(vio->desc_buf);
153 vio->desc_buf = NULL;
154 vio->desc_buf_len = 0;
156 vio->hs_state = VIO_HS_INVALID;
158 return -ECONNRESET;
161 static int process_unknown(struct vio_driver_state *vio, void *arg)
163 struct vio_msg_tag *pkt = arg;
165 viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
166 pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
168 printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
169 vio->vdev->channel_id);
171 ldc_disconnect(vio->lp);
173 return -ECONNRESET;
176 static int send_dreg(struct vio_driver_state *vio)
178 struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
179 union {
180 struct vio_dring_register pkt;
181 char all[sizeof(struct vio_dring_register) +
182 (sizeof(struct ldc_trans_cookie) *
183 VIO_MAX_RING_COOKIES)];
184 } u;
185 size_t bytes = sizeof(struct vio_dring_register) +
186 (sizeof(struct ldc_trans_cookie) *
187 dr->ncookies);
188 int i;
190 if (WARN_ON(bytes > sizeof(u)))
191 return -EINVAL;
193 memset(&u, 0, bytes);
194 init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
195 u.pkt.dring_ident = 0;
196 u.pkt.num_descr = dr->num_entries;
197 u.pkt.descr_size = dr->entry_size;
198 u.pkt.options = VIO_TX_DRING;
199 u.pkt.num_cookies = dr->ncookies;
201 viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
202 "ncookies[%u]\n",
203 u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
204 u.pkt.num_cookies);
206 for (i = 0; i < dr->ncookies; i++) {
207 u.pkt.cookies[i] = dr->cookies[i];
209 viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
211 (unsigned long long) u.pkt.cookies[i].cookie_addr,
212 (unsigned long long) u.pkt.cookies[i].cookie_size);
215 return send_ctrl(vio, &u.pkt.tag, bytes);
218 static int send_rdx(struct vio_driver_state *vio)
220 struct vio_rdx pkt;
222 memset(&pkt, 0, sizeof(pkt));
224 init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);
226 viodbg(HS, "SEND RDX INFO\n");
228 return send_ctrl(vio, &pkt.tag, sizeof(pkt));
231 static int send_attr(struct vio_driver_state *vio)
233 if (!vio->ops)
234 return -EINVAL;
236 return vio->ops->send_attr(vio);
239 static struct vio_version *find_by_major(struct vio_driver_state *vio,
240 u16 major)
242 struct vio_version *ret = NULL;
243 int i;
245 for (i = 0; i < vio->ver_table_entries; i++) {
246 struct vio_version *v = &vio->ver_table[i];
247 if (v->major <= major) {
248 ret = v;
249 break;
252 return ret;
255 static int process_ver_info(struct vio_driver_state *vio,
256 struct vio_ver_info *pkt)
258 struct vio_version *vap;
259 int err;
261 viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
262 pkt->major, pkt->minor, pkt->dev_class);
264 if (vio->hs_state != VIO_HS_INVALID) {
265 /* XXX Perhaps invoke start_handshake? XXX */
266 memset(&vio->ver, 0, sizeof(vio->ver));
267 vio->hs_state = VIO_HS_INVALID;
270 vap = find_by_major(vio, pkt->major);
272 vio->_peer_sid = pkt->tag.sid;
274 if (!vap) {
275 pkt->tag.stype = VIO_SUBTYPE_NACK;
276 pkt->major = 0;
277 pkt->minor = 0;
278 viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
279 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
280 } else if (vap->major != pkt->major) {
281 pkt->tag.stype = VIO_SUBTYPE_NACK;
282 pkt->major = vap->major;
283 pkt->minor = vap->minor;
284 viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
285 pkt->major, pkt->minor);
286 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
287 } else {
288 struct vio_version ver = {
289 .major = pkt->major,
290 .minor = pkt->minor,
292 if (ver.minor > vap->minor)
293 ver.minor = vap->minor;
294 pkt->minor = ver.minor;
295 pkt->tag.stype = VIO_SUBTYPE_ACK;
296 pkt->dev_class = vio->dev_class;
297 viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
298 pkt->major, pkt->minor);
299 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
300 if (err > 0) {
301 vio->ver = ver;
302 vio->hs_state = VIO_HS_GOTVERS;
305 if (err < 0)
306 return handshake_failure(vio);
308 return 0;
311 static int process_ver_ack(struct vio_driver_state *vio,
312 struct vio_ver_info *pkt)
314 viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
315 pkt->major, pkt->minor, pkt->dev_class);
317 if (vio->hs_state & VIO_HS_GOTVERS) {
318 if (vio->ver.major != pkt->major ||
319 vio->ver.minor != pkt->minor) {
320 pkt->tag.stype = VIO_SUBTYPE_NACK;
321 (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
322 return handshake_failure(vio);
324 } else {
325 vio->ver.major = pkt->major;
326 vio->ver.minor = pkt->minor;
327 vio->hs_state = VIO_HS_GOTVERS;
330 switch (vio->dev_class) {
331 case VDEV_NETWORK:
332 case VDEV_DISK:
333 if (send_attr(vio) < 0)
334 return handshake_failure(vio);
335 break;
337 default:
338 break;
341 return 0;
344 static int process_ver_nack(struct vio_driver_state *vio,
345 struct vio_ver_info *pkt)
347 struct vio_version *nver;
349 viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
350 pkt->major, pkt->minor, pkt->dev_class);
352 if (pkt->major == 0 && pkt->minor == 0)
353 return handshake_failure(vio);
354 nver = find_by_major(vio, pkt->major);
355 if (!nver)
356 return handshake_failure(vio);
358 if (send_version(vio, nver->major, nver->minor) < 0)
359 return handshake_failure(vio);
361 return 0;
364 static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
366 switch (pkt->tag.stype) {
367 case VIO_SUBTYPE_INFO:
368 return process_ver_info(vio, pkt);
370 case VIO_SUBTYPE_ACK:
371 return process_ver_ack(vio, pkt);
373 case VIO_SUBTYPE_NACK:
374 return process_ver_nack(vio, pkt);
376 default:
377 return handshake_failure(vio);
381 static int process_attr(struct vio_driver_state *vio, void *pkt)
383 int err;
385 if (!(vio->hs_state & VIO_HS_GOTVERS))
386 return handshake_failure(vio);
388 if (!vio->ops)
389 return 0;
391 err = vio->ops->handle_attr(vio, pkt);
392 if (err < 0) {
393 return handshake_failure(vio);
394 } else {
395 vio->hs_state |= VIO_HS_GOT_ATTR;
397 if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
398 !(vio->hs_state & VIO_HS_SENT_DREG)) {
399 if (send_dreg(vio) < 0)
400 return handshake_failure(vio);
402 vio->hs_state |= VIO_HS_SENT_DREG;
406 return 0;
409 static int all_drings_registered(struct vio_driver_state *vio)
411 int need_rx, need_tx;
413 need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
414 need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);
416 if (need_rx &&
417 !(vio->dr_state & VIO_DR_STATE_RXREG))
418 return 0;
420 if (need_tx &&
421 !(vio->dr_state & VIO_DR_STATE_TXREG))
422 return 0;
424 return 1;
427 static int process_dreg_info(struct vio_driver_state *vio,
428 struct vio_dring_register *pkt)
430 struct vio_dring_state *dr;
431 int i, len;
433 viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
434 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
435 (unsigned long long) pkt->dring_ident,
436 pkt->num_descr, pkt->descr_size, pkt->options,
437 pkt->num_cookies);
439 if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
440 goto send_nack;
442 if (vio->dr_state & VIO_DR_STATE_RXREG)
443 goto send_nack;
445 /* v1.6 and higher, ACK with desired, supported mode, or NACK */
446 if (vio_version_after_eq(vio, 1, 6)) {
447 if (!(pkt->options & VIO_TX_DRING))
448 goto send_nack;
449 pkt->options = VIO_TX_DRING;
452 BUG_ON(vio->desc_buf);
454 vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
455 if (!vio->desc_buf)
456 goto send_nack;
458 vio->desc_buf_len = pkt->descr_size;
460 dr = &vio->drings[VIO_DRIVER_RX_RING];
462 dr->num_entries = pkt->num_descr;
463 dr->entry_size = pkt->descr_size;
464 dr->ncookies = pkt->num_cookies;
465 for (i = 0; i < dr->ncookies; i++) {
466 dr->cookies[i] = pkt->cookies[i];
468 viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
470 (unsigned long long)
471 pkt->cookies[i].cookie_addr,
472 (unsigned long long)
473 pkt->cookies[i].cookie_size);
476 pkt->tag.stype = VIO_SUBTYPE_ACK;
477 pkt->dring_ident = ++dr->ident;
479 viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
480 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
481 (unsigned long long) pkt->dring_ident,
482 pkt->num_descr, pkt->descr_size, pkt->options,
483 pkt->num_cookies);
485 len = (sizeof(*pkt) +
486 (dr->ncookies * sizeof(struct ldc_trans_cookie)));
487 if (send_ctrl(vio, &pkt->tag, len) < 0)
488 goto send_nack;
490 vio->dr_state |= VIO_DR_STATE_RXREG;
492 return 0;
494 send_nack:
495 pkt->tag.stype = VIO_SUBTYPE_NACK;
496 viodbg(HS, "SEND DRING_REG NACK\n");
497 (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
499 return handshake_failure(vio);
502 static int process_dreg_ack(struct vio_driver_state *vio,
503 struct vio_dring_register *pkt)
505 struct vio_dring_state *dr;
507 viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
508 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
509 (unsigned long long) pkt->dring_ident,
510 pkt->num_descr, pkt->descr_size, pkt->options,
511 pkt->num_cookies);
513 dr = &vio->drings[VIO_DRIVER_TX_RING];
515 if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
516 return handshake_failure(vio);
518 dr->ident = pkt->dring_ident;
519 vio->dr_state |= VIO_DR_STATE_TXREG;
521 if (all_drings_registered(vio)) {
522 if (send_rdx(vio) < 0)
523 return handshake_failure(vio);
524 vio->hs_state = VIO_HS_SENT_RDX;
526 return 0;
529 static int process_dreg_nack(struct vio_driver_state *vio,
530 struct vio_dring_register *pkt)
532 viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
533 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
534 (unsigned long long) pkt->dring_ident,
535 pkt->num_descr, pkt->descr_size, pkt->options,
536 pkt->num_cookies);
538 return handshake_failure(vio);
541 static int process_dreg(struct vio_driver_state *vio,
542 struct vio_dring_register *pkt)
544 if (!(vio->hs_state & VIO_HS_GOTVERS))
545 return handshake_failure(vio);
547 switch (pkt->tag.stype) {
548 case VIO_SUBTYPE_INFO:
549 return process_dreg_info(vio, pkt);
551 case VIO_SUBTYPE_ACK:
552 return process_dreg_ack(vio, pkt);
554 case VIO_SUBTYPE_NACK:
555 return process_dreg_nack(vio, pkt);
557 default:
558 return handshake_failure(vio);
562 static int process_dunreg(struct vio_driver_state *vio,
563 struct vio_dring_unregister *pkt)
565 struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];
567 viodbg(HS, "GOT DRING_UNREG\n");
569 if (pkt->dring_ident != dr->ident)
570 return 0;
572 vio->dr_state &= ~VIO_DR_STATE_RXREG;
574 memset(dr, 0, sizeof(*dr));
576 kfree(vio->desc_buf);
577 vio->desc_buf = NULL;
578 vio->desc_buf_len = 0;
580 return 0;
583 static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
585 viodbg(HS, "GOT RDX INFO\n");
587 pkt->tag.stype = VIO_SUBTYPE_ACK;
588 viodbg(HS, "SEND RDX ACK\n");
589 if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
590 return handshake_failure(vio);
592 vio->hs_state |= VIO_HS_SENT_RDX_ACK;
593 return 0;
596 static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
598 viodbg(HS, "GOT RDX ACK\n");
600 if (!(vio->hs_state & VIO_HS_SENT_RDX))
601 return handshake_failure(vio);
603 vio->hs_state |= VIO_HS_GOT_RDX_ACK;
604 return 0;
607 static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
609 viodbg(HS, "GOT RDX NACK\n");
611 return handshake_failure(vio);
614 static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
616 if (!all_drings_registered(vio))
617 handshake_failure(vio);
619 switch (pkt->tag.stype) {
620 case VIO_SUBTYPE_INFO:
621 return process_rdx_info(vio, pkt);
623 case VIO_SUBTYPE_ACK:
624 return process_rdx_ack(vio, pkt);
626 case VIO_SUBTYPE_NACK:
627 return process_rdx_nack(vio, pkt);
629 default:
630 return handshake_failure(vio);
634 int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
636 struct vio_msg_tag *tag = pkt;
637 u8 prev_state = vio->hs_state;
638 int err;
640 switch (tag->stype_env) {
641 case VIO_VER_INFO:
642 err = process_ver(vio, pkt);
643 break;
645 case VIO_ATTR_INFO:
646 err = process_attr(vio, pkt);
647 break;
649 case VIO_DRING_REG:
650 err = process_dreg(vio, pkt);
651 break;
653 case VIO_DRING_UNREG:
654 err = process_dunreg(vio, pkt);
655 break;
657 case VIO_RDX:
658 err = process_rdx(vio, pkt);
659 break;
661 default:
662 err = process_unknown(vio, pkt);
663 break;
666 if (!err &&
667 vio->hs_state != prev_state &&
668 (vio->hs_state & VIO_HS_COMPLETE)) {
669 if (vio->ops)
670 vio->ops->handshake_complete(vio);
673 return err;
675 EXPORT_SYMBOL(vio_control_pkt_engine);
/* Connection-reset hook exported for drivers; intentionally a no-op
 * here (the actual reset handling lives in vio_link_state_change()).
 */
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
682 /* The issue is that the Solaris virtual disk server just mirrors the
683 * SID values it gets from the client peer. So we work around that
684 * here in vio_{validate,send}_sid() so that the drivers don't need
685 * to be aware of this crap.
687 int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
689 u32 sid;
691 /* Always let VERSION+INFO packets through unchecked, they
692 * define the new SID.
694 if (tp->type == VIO_TYPE_CTRL &&
695 tp->stype == VIO_SUBTYPE_INFO &&
696 tp->stype_env == VIO_VER_INFO)
697 return 0;
699 /* Ok, now figure out which SID to use. */
700 switch (vio->dev_class) {
701 case VDEV_NETWORK:
702 case VDEV_NETWORK_SWITCH:
703 case VDEV_DISK_SERVER:
704 default:
705 sid = vio->_peer_sid;
706 break;
708 case VDEV_DISK:
709 sid = vio->_local_sid;
710 break;
713 if (sid == tp->sid)
714 return 0;
715 viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
716 tp->sid, vio->_peer_sid, vio->_local_sid);
717 return -EINVAL;
719 EXPORT_SYMBOL(vio_validate_sid);
721 u32 vio_send_sid(struct vio_driver_state *vio)
723 switch (vio->dev_class) {
724 case VDEV_NETWORK:
725 case VDEV_NETWORK_SWITCH:
726 case VDEV_DISK:
727 default:
728 return vio->_local_sid;
730 case VDEV_DISK_SERVER:
731 return vio->_peer_sid;
734 EXPORT_SYMBOL(vio_send_sid);
736 int vio_ldc_alloc(struct vio_driver_state *vio,
737 struct ldc_channel_config *base_cfg,
738 void *event_arg)
740 struct ldc_channel_config cfg = *base_cfg;
741 struct ldc_channel *lp;
743 cfg.tx_irq = vio->vdev->tx_irq;
744 cfg.rx_irq = vio->vdev->rx_irq;
746 lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
747 if (IS_ERR(lp))
748 return PTR_ERR(lp);
750 vio->lp = lp;
752 return 0;
754 EXPORT_SYMBOL(vio_ldc_alloc);
756 void vio_ldc_free(struct vio_driver_state *vio)
758 ldc_free(vio->lp);
759 vio->lp = NULL;
761 kfree(vio->desc_buf);
762 vio->desc_buf = NULL;
763 vio->desc_buf_len = 0;
765 EXPORT_SYMBOL(vio_ldc_free);
767 void vio_port_up(struct vio_driver_state *vio)
769 unsigned long flags;
770 int err, state;
772 spin_lock_irqsave(&vio->lock, flags);
774 state = ldc_state(vio->lp);
776 err = 0;
777 if (state == LDC_STATE_INIT) {
778 err = ldc_bind(vio->lp);
779 if (err)
780 printk(KERN_WARNING "%s: Port %lu bind failed, "
781 "err=%d\n",
782 vio->name, vio->vdev->channel_id, err);
785 if (!err) {
786 if (ldc_mode(vio->lp) == LDC_MODE_RAW)
787 ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
788 else
789 err = ldc_connect(vio->lp);
791 if (err)
792 printk(KERN_WARNING "%s: Port %lu connect failed, "
793 "err=%d\n",
794 vio->name, vio->vdev->channel_id, err);
796 if (err) {
797 unsigned long expires = jiffies + HZ;
799 expires = round_jiffies(expires);
800 mod_timer(&vio->timer, expires);
803 spin_unlock_irqrestore(&vio->lock, flags);
805 EXPORT_SYMBOL(vio_port_up);
807 static void vio_port_timer(struct timer_list *t)
809 struct vio_driver_state *vio = from_timer(vio, t, timer);
811 vio_port_up(vio);
814 int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
815 u8 dev_class, struct vio_version *ver_table,
816 int ver_table_size, struct vio_driver_ops *ops,
817 char *name)
819 switch (dev_class) {
820 case VDEV_NETWORK:
821 case VDEV_NETWORK_SWITCH:
822 case VDEV_DISK:
823 case VDEV_DISK_SERVER:
824 case VDEV_CONSOLE_CON:
825 break;
827 default:
828 return -EINVAL;
831 if (dev_class == VDEV_NETWORK ||
832 dev_class == VDEV_NETWORK_SWITCH ||
833 dev_class == VDEV_DISK ||
834 dev_class == VDEV_DISK_SERVER) {
835 if (!ops || !ops->send_attr || !ops->handle_attr ||
836 !ops->handshake_complete)
837 return -EINVAL;
840 if (!ver_table || ver_table_size < 0)
841 return -EINVAL;
843 if (!name)
844 return -EINVAL;
846 spin_lock_init(&vio->lock);
848 vio->name = name;
850 vio->dev_class = dev_class;
851 vio->vdev = vdev;
853 vio->ver_table = ver_table;
854 vio->ver_table_entries = ver_table_size;
856 vio->ops = ops;
858 timer_setup(&vio->timer, vio_port_timer, 0);
860 return 0;
862 EXPORT_SYMBOL(vio_driver_init);