/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
struct devcmd2_controller {
	struct vnic_wq_ctrl *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};
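/*
 * Note (added for clarity): devcmd2 issues commands through a work queue
 * ring (cmd_ring, posted via wq_ctrl->posted_index) and picks up completions
 * from a separate results_ring. 'color' and 'next_result' implement the
 * color-bit completion scheme used by vnic_dev_cmd2() below: the driver
 * flips its expected color each time next_result wraps, so a result slot is
 * valid only once firmware has written the current color into it.
 */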
enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
};
struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
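/*
 * Example: with VNIC_RES_STRIDE == 128, instance i of a stride-based
 * resource (WQ/RQ/CQ/INTR_CTRL) lives at res[type].vaddr + i * 128, which is
 * exactly the arithmetic vnic_dev_get_res() performs below.
 */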
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD2:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
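/*
 * Worked example of the sizing rules above: desc_count = 100, desc_size = 24
 * rounds up to 128 descriptors of 32 bytes, so size = 4096 bytes and
 * size_unaligned = 4096 + 512 = 4608 bytes, leaving room to realign the base.
 */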
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned,
		&ring->base_addr_unaligned, GFP_KERNEL);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
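/*
 * The over-allocation above pays for alignment: e.g. if the coherent buffer
 * lands at bus address 0x1040, ALIGN(0x1040, 512) = 0x1200, and descs is
 * advanced by the same 0x1c0 bytes so the CPU and device views stay in sync.
 */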
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	static const int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;
	u64 *a0 = &vdev->args[0];
	u64 *a1 = &vdev->args[1];

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 fetch_index;
	u32 new_posted;

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
		/* Hardware surprise removal: return error */
		pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);
		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == color) {
			if (result->error) {
				err = (int)result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("%s:Error %d devcmd %d\n",
						pci_name(vdev->pdev),
						err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* prevent reorder while reading result */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}
			return 0;
		}
	}

	pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));
	return -ETIMEDOUT;
}
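/*
 * Color-bit walkthrough for vnic_dev_cmd2(): result slots cycle through the
 * ring of result_size entries, and on each full wrap the driver flips its
 * expected color. Entries still carrying the previous pass's color are
 * therefore recognized as stale; the poll loop above only trusts a slot once
 * firmware has stamped it with the current color.
 */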
int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;

	vdev->devcmd_rtn = &vnic_dev_cmd1;
	return 0;
}
int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
		DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		pr_err("error in devcmd2 init\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index,
		0, 0);

	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
		DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result =
		(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring =
		(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
		VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &vnic_dev_cmd2;
	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}
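/*
 * Note the bootstrap above: CMD_INITIALIZE_DEVCMD2 is issued through
 * vnic_dev_cmd2() directly, before devcmd_rtn is switched over, because the
 * results ring address in args[0] must be handed to firmware using the very
 * mechanism being initialized. The error labels unwind in strict reverse
 * order of setup.
 */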
void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = &vnic_dev_cmd1;
}
int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}
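/*
 * Usage sketch (illustrative only): callers pass arguments and receive
 * results through the a0/a1 pair. For example, vnic_dev_open() below sends
 * its arg in a0, and vnic_dev_mac_addr() reads the MAC back out of a0 after
 * CMD_MAC_ADDR completes.
 */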
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
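/*
 * a1 packing example: for intr == 5, a1 = (5ULL << 32) + sizeof(struct
 * vnic_devcmd_notify). The interrupt number rides in bits 47:32 (0xffff
 * there means "no interrupt", see vnic_dev_notify_unset() below) and the
 * notify buffer size travels in the low bits.
 */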
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
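/*
 * The copy loop above is a lock-free snapshot: firmware stores the sum of
 * words 1..n-1 in word 0, so the driver re-copies the notify area until the
 * checksum matches, guaranteeing it never acts on a torn update.
 */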
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
	u64 a0 = new_default_vlan, a1 = 0;
	int wait = 1000;
	int old_vlan = 0;

	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify, vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void *p;

	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p) {
		pr_err("fnic: DEVCMD2 resource found!\n");
		err = vnic_dev_init_devcmd2(vdev);
	} else {
		pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
		err = vnic_dev_init_devcmd1(vdev);
	}

	return err;
}
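/*
 * Typical bring-up order (illustrative sketch only; function names are from
 * this file, the surrounding probe logic is hypothetical):
 *
 *	vdev = vnic_dev_register(NULL, priv, pdev, bar);
 *	if (!vdev)
 *		return -ENODEV;
 *	err = vnic_dev_cmd_init(vdev);	/* picks devcmd2 or legacy devcmd *​/
 *	if (!err)
 *		err = vnic_dev_open(vdev, 0);
 */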