// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

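/* Write a raw message to the LDC channel, retrying only while the
 * channel reports -EAGAIN, with a 1us delay between attempts and a
 * bounded number of iterations.
 */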
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);

static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}

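/* LDC event callback: on LDC_EVENT_UP, record which descriptor rings
 * this device class is expected to register and (re)start the
 * handshake; on LDC_EVENT_RESET, flush any registered RX ring, clear
 * the negotiated state and disconnect the channel.
 */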
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);

static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here... Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}

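/* Build and send a DRING_REG INFO control message describing our TX
 * descriptor ring: number of descriptors, descriptor size, ring mode,
 * and the LDC transfer cookies that map the ring's memory.
 */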
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  VIO_MAX_RING_COOKIES)];
	} u;
	size_t bytes = sizeof(struct vio_dring_register) +
		       (sizeof(struct ldc_trans_cookie) *
			dr->ncookies);
	int i;

	if (WARN_ON(bytes > sizeof(u)))
		return -EINVAL;

	memset(&u, 0, bytes);
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, bytes);
}

static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	if (!vio->ops)
		return -EINVAL;

	return vio->ops->send_attr(vio);
}

static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

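/* Peer sent VERSION INFO: NACK with maj[0] min[0] if no suitable
 * version is in our table, NACK with our closest lower major if the
 * majors differ, otherwise ACK with the common major and the smaller
 * of the two minors and move to VIO_HS_GOTVERS.
 */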
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		pkt->dev_class = vio->dev_class;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	if (!vio->ops)
		return 0;

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}

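/* Peer sent DRING_REG INFO for its TX ring (our RX side): validate
 * the request, allocate a local descriptor copy buffer, record the
 * ring geometry and cookies, then ACK with a newly assigned ident.
 * Any failure NACKs the registration and resets the handshake.
 */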
static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}

static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

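/* Main dispatcher for incoming control packets during the handshake.
 * It routes each packet by stype_env and invokes the driver's
 * handshake_complete() callback when the handshake state changes and
 * now carries the VIO_HS_COMPLETE bit.
 */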
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}

	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE)) {
		if (vio->ops)
			vio->ops->handshake_complete(vio);
	}

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use.  */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);

int vio_ldc_alloc(struct vio_driver_state *vio,
		  struct ldc_channel_config *base_cfg,
		  void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

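/* Bring the LDC channel up: bind it if it is still in the INIT state,
 * then connect it (RAW mode channels are marked connected directly).
 * On failure, rearm vio->timer so vio_port_timer() retries roughly a
 * second later.
 */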
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		if (ldc_mode(vio->lp) == LDC_MODE_RAW)
			ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
		else
			err = ldc_connect(vio->lp);

		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(struct timer_list *t)
{
	struct vio_driver_state *vio = from_timer(vio, t, timer);

	vio_port_up(vio);
}

int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
	case VDEV_CONSOLE_CON:
		break;

	default:
		return -EINVAL;
	}

	if (dev_class == VDEV_NETWORK ||
	    dev_class == VDEV_NETWORK_SWITCH ||
	    dev_class == VDEV_DISK ||
	    dev_class == VDEV_DISK_SERVER) {
		if (!ops || !ops->send_attr || !ops->handle_attr ||
		    !ops->handshake_complete)
			return -EINVAL;
	}

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	timer_setup(&vio->timer, vio_port_timer, 0);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);