/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh  = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
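
/* Worked example (illustrative only, values are hypothetical): a caller asking
 * for desc_count = 100 and desc_size = 16 gets desc_count rounded up to 128,
 * so ring->size = 128 * 16 = 2048 bytes and ring->size_unaligned = 2048 + 512,
 * leaving vnic_dev_alloc_desc_ring() below room to 512-byte align the base.
 */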
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
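
/* The devcmd2 path below replaces the single register window used by
 * _vnic_dev_cmd() with a posted work queue: a command descriptor is written
 * into cmd_ring, the posted index is advanced, and (unless DEVCMD2_FNORESULT
 * is set) completion is polled from a results ring whose color bit flips on
 * every wrap of the ring.
 */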
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}
static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
		vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
				    err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}
void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}
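
/* Usage sketch (illustrative only): the wrappers below all follow the same
 * pattern of packing inputs into a0/a1 and issuing one command, e.g.
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;
 *	err = vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
 *
 * with a0/a1 carrying any command-specific results back to the caller.
 */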
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
					   &a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
					   &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");

	return err;
}
int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);

	return err;
}
int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);

	return err;
}
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr(vdev, "notify block %p still allocated\n",
			    vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
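
/* The checksummed snapshot taken by vnic_dev_notify_ready() is what the
 * link_state/port_speed/msglvl/mtu accessors further below return, so callers
 * always see a consistent copy of the firmware notify block rather than a
 * torn update.
 */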
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}
int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);
struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
int vnic_devcmd_init(struct vnic_dev *vdev)
{
	void __iomem *res;
	int err;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
				  err);
		else
			return 0;
	} else {
		vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
	}

	err = vnic_dev_init_devcmd1(vdev);
	if (err)
		vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);

	return err;
}
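
/* Typical bring-up sketch (assumed caller-side flow, not defined in this
 * file): a probe path maps BAR0, calls vnic_dev_register() to discover
 * resources, vnic_devcmd_init() to select the devcmd transport, and only
 * then issues commands such as vnic_dev_open() and vnic_dev_init().
 */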
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}
int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}
static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}
/* vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.
 *
 *	   This function stores the filter_id returned by the firmware in the
 *	   same variable before return;
 *
 *	   In case of DEL filter, the caller passes the RQ number. Return
 *	   value is irrelevant.
 * @data: filter data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter *data)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	struct filter_action *action;
	u64 tlv_size;

	if (cmd == CLSF_ADD) {
		tlv_size = sizeof(struct filter) +
			   sizeof(struct filter_action) +
			   2 * sizeof(struct filter_tlv);
		tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		*(struct filter *)&tlv->val = *data;

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    sizeof(struct filter));

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)&tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = *entry;

		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
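
/* Usage sketch (illustrative only; "flt" is a hypothetical, fully populated
 * struct filter): to steer that flow to receive queue 3 and later remove the
 * filter, a caller would do roughly
 *
 *	u16 entry = 3;
 *	err = vnic_dev_classifier(vdev, CLSF_ADD, &entry, &flt);
 *	// on success, entry now holds the firmware-assigned filter_id
 *	err = vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL);
 */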
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
	u64 *supported_versions, u64 *a1)
{
	u64 a0 = feature;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
	if (!ret)
		*supported_versions = a0;

	return ret;
}
int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* rss_hash_type is valid only when a0 is 1. Adapter which does not
	 * support CMD_CAPABILITY for rss_hash_type has a0 = 0
	 */
	if (err || (a0 != 1))
		return -EOPNOTSUPP;

	a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
	     NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;

	*rss_hash_type = (u8)a1;

	return 0;
}