// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"
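
/*
 * State for the posted-command (devcmd2) path: commands are posted on a
 * work queue and the firmware writes completions into a results ring.
 */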
struct devcmd2_controller {
	struct vnic_wq_ctrl *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		int wait);
};
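
/*
 * BAR0 begins with a resource directory: a header followed by entries
 * describing where each resource type lives in the BAR and how many
 * instances it has; per-instance register blocks are laid out
 * VNIC_RES_STRIDE bytes apart.
 */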
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);
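
	/*
	 * Walk the resource directory entry by entry until the
	 * end-of-list marker, recording the BAR0 address and count of
	 * each resource type this vNIC exposes.
	 */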
	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD2:
		case RES_TYPE_DEVCMD:
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n",
		vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count,
		vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count);

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count,
	unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned,
		&ring->base_addr_unaligned, GFP_KERNEL);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR "Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);
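
	/* One descriptor is kept unused so a full ring can be told
	 * apart from an empty one.
	 */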
	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	static const int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;
	u64 *a0 = &vdev->args[0];
	u64 *a1 = &vdev->args[1];

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;
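
	/*
	 * The write to the cmd register starts execution; poll the
	 * status register until the firmware clears STAT_BUSY or the
	 * caller's wait budget is exhausted.
	 */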
	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay;
	int err;
	u32 fetch_index;
	u32 posted;
	u32 new_posted;

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
		/* Hardware surprise removal: return error */
		pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);
		return -EBUSY;
	}
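
	/*
	 * Slot "posted" now belongs to this command: fill the descriptor
	 * in before publishing it to the firmware via posted_index.
	 */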
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}
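
	/*
	 * Completion is signalled by a color bit in the result slot: the
	 * firmware writes the color of the current pass, which flips each
	 * time the result ring wraps, so a matching color marks a fresh
	 * result.
	 */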
	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = -(int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("%s:Error %d devcmd %d\n",
						pci_name(vdev->pdev),
						err, _CMD_N(cmd));
				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* prevent reorder while reading result */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}

		udelay(100);
	}

	pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));
	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;

	vdev->devcmd_rtn = &vnic_dev_cmd1;
	return 0;
}

static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
			DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		pr_err("error in devcmd2 init");
		err = -ENODEV;
		goto err_free_wq;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index,
			fetch_index, 0, 0);

	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
			DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result =
		(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring =
		(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;
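
	/*
	 * Register the results ring with the firmware. This first command
	 * goes through the devcmd2 path directly; devcmd_rtn is switched
	 * over only once it succeeds.
	 */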
	err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &vnic_dev_cmd2;
	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = &vnic_dev_cmd1;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
	if (err)
		return err;
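
	/* The firmware returns the station address packed into the first
	 * ETH_ALEN bytes of a[].
	 */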
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a)[i];

	return 0;
}

void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}

void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);
}

void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
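
	/* a1 carries the interrupt number in bits 47:32 and the size of
	 * the notify block in the low bits.
	 */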
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;
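
	/*
	 * The firmware updates the notify block asynchronously, so keep
	 * re-copying it until the checksum over words[1..nwords) matches
	 * words[0], i.e. until a consistent snapshot is captured.
	 */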
	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
	u64 a0 = new_default_vlan, a1 = 0;
	int wait = 1000;
	int old_vlan = 0;

	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void *p;

	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p) {
		pr_err("fnic: DEVCMD2 resource found!\n");
		err = vnic_dev_init_devcmd2(vdev);
	} else {
		pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
		err = vnic_dev_init_devcmd1(vdev);
	}

	return err;
}