/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>
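
/* Push a packet down the LDC channel, busy-waiting for up to roughly
 * 1000 microseconds while ldc_write() keeps returning -EAGAIN.  The
 * last ldc_write() result is handed back to the caller.
 */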
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);
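
/* Control-message helpers: send_ctrl() stamps the outgoing session ID
 * into the tag before transmission, and init_tag() fills in the common
 * message tag fields.
 */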
static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}
static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}
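
/* Called by drivers from their LDC event handler.  On LDC_EVENT_UP the
 * descriptor ring requirements are derived from the device class and a
 * fresh handshake is started.  On LDC_EVENT_RESET all handshake and
 * dring state is torn down and the channel is disconnected.
 */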
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);
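
/* Common error path for the handshake state machine: forget any
 * registered drings, release the descriptor buffer, and drop back to
 * VIO_HS_INVALID.  Always returns -ECONNRESET for the caller to
 * propagate.
 */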
static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here...  Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}
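
/* Advertise our transmit descriptor ring to the peer.  The register
 * packet is followed by the LDC transfer cookies describing the ring
 * memory, so the message is built in a variable-length union on the
 * stack.
 */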
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  dr->ncookies)];
	} u;
	int i;

	memset(&u, 0, sizeof(u));
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}
static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	return vio->ops->send_attr(vio);
}

static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}
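
/* Handle a VERSION INFO packet from the peer.  We ACK when we support
 * the requested major version (clamping the minor to what we support),
 * NACK with the nearest lower major version we do support, or NACK
 * with maj[0] min[0] when no negotiation is possible.
 */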
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}
static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}
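
/* Attribute exchange: defer to the driver's handle_attr() op and, once
 * the attributes are accepted, register our TX dring if this device
 * class requires one and we have not done so yet.
 */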
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}
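
/* The peer wants to register its descriptor ring with us.  Validate
 * that we expect an RX ring, record the ring geometry and transfer
 * cookies, allocate a local copy buffer for one descriptor, and ACK
 * with a newly assigned dring identifier.  Any failure NACKs and
 * resets the handshake.
 */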
static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i, len;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	len = (sizeof(*pkt) +
	       (dr->ncookies * sizeof(struct ldc_trans_cookie)));
	if (send_ctrl(vio, &pkt->tag, len) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}
static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}
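
/* RDX (ready to exchange data) handling: an RDX INFO from the peer is
 * ACKed, an RDX ACK completes our side of the exchange, and an RDX
 * NACK aborts the handshake.
 */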
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}
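
/* Main dispatch for incoming control packets.  Drivers feed every
 * VIO_TYPE_CTRL message here; when a packet moves the handshake into
 * the VIO_HS_COMPLETE state the driver's handshake_complete() op is
 * invoked.
 */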
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}
	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE))
		vio->ops->handshake_complete(vio);

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use.  */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);
u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);
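
/* Allocate the LDC channel for this device, copying the caller's base
 * configuration and filling in the per-device TX/RX interrupts and
 * channel ID from the underlying vio_dev.
 */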
int vio_ldc_alloc(struct vio_driver_state *vio,
		  struct ldc_channel_config *base_cfg,
		  void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);
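
/* Bring the port up: bind the LDC channel if it is still in the INIT
 * state and then attempt to connect.  On failure the vio->timer is
 * re-armed roughly one second out so vio_port_timer() retries later.
 */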
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		err = ldc_connect(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(unsigned long _arg)
{
	struct vio_driver_state *vio = (struct vio_driver_state *) _arg;

	vio_port_up(vio);
}
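
/* One-time initialization of a vio_driver_state: validates the device
 * class, the mandatory driver ops, and the version table, then wires
 * up the retry timer used by vio_port_up().
 */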
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
		break;

	default:
		return -EINVAL;
	}

	if (!ops->send_attr ||
	    !ops->handle_attr ||
	    !ops->handshake_complete)
		return -EINVAL;

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);