1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
4 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
7 #include <linux/kernel.h>
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/pci.h>
11 #include <linux/delay.h>
12 #include <linux/if_ether.h>
14 #include "vnic_resource.h"
15 #include "vnic_devcmd.h"
18 #include "vnic_stats.h"
21 #define VNIC_MAX_RES_HDR_SIZE \
22 (sizeof(struct vnic_resource_header) + \
23 sizeof(struct vnic_resource) * RES_TYPE_MAX)
24 #define VNIC_RES_STRIDE 128
26 void *vnic_dev_priv(struct vnic_dev
*vdev
)
31 static int vnic_dev_discover_res(struct vnic_dev
*vdev
,
32 struct vnic_dev_bar
*bar
, unsigned int num_bars
)
34 struct vnic_resource_header __iomem
*rh
;
35 struct mgmt_barmap_hdr __iomem
*mrh
;
36 struct vnic_resource __iomem
*r
;
42 if (bar
->len
< VNIC_MAX_RES_HDR_SIZE
) {
43 vdev_err(vdev
, "vNIC BAR0 res hdr length error\n");
50 vdev_err(vdev
, "vNIC BAR0 res hdr not mem-mapped\n");
54 /* Check for mgmt vnic in addition to normal vnic */
55 if ((ioread32(&rh
->magic
) != VNIC_RES_MAGIC
) ||
56 (ioread32(&rh
->version
) != VNIC_RES_VERSION
)) {
57 if ((ioread32(&mrh
->magic
) != MGMTVNIC_MAGIC
) ||
58 (ioread32(&mrh
->version
) != MGMTVNIC_VERSION
)) {
59 vdev_err(vdev
, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
60 VNIC_RES_MAGIC
, VNIC_RES_VERSION
,
61 MGMTVNIC_MAGIC
, MGMTVNIC_VERSION
,
62 ioread32(&rh
->magic
), ioread32(&rh
->version
));
67 if (ioread32(&mrh
->magic
) == MGMTVNIC_MAGIC
)
68 r
= (struct vnic_resource __iomem
*)(mrh
+ 1);
70 r
= (struct vnic_resource __iomem
*)(rh
+ 1);
73 while ((type
= ioread8(&r
->type
)) != RES_TYPE_EOL
) {
75 u8 bar_num
= ioread8(&r
->bar
);
76 u32 bar_offset
= ioread32(&r
->bar_offset
);
77 u32 count
= ioread32(&r
->count
);
82 if (bar_num
>= num_bars
)
85 if (!bar
[bar_num
].len
|| !bar
[bar_num
].vaddr
)
92 case RES_TYPE_INTR_CTRL
:
93 /* each count is stride bytes long */
94 len
= count
* VNIC_RES_STRIDE
;
95 if (len
+ bar_offset
> bar
[bar_num
].len
) {
96 vdev_err(vdev
, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
97 type
, bar_offset
, len
,
102 case RES_TYPE_INTR_PBA_LEGACY
:
103 case RES_TYPE_DEVCMD
:
104 case RES_TYPE_DEVCMD2
:
111 vdev
->res
[type
].count
= count
;
112 vdev
->res
[type
].vaddr
= (char __iomem
*)bar
[bar_num
].vaddr
+
114 vdev
->res
[type
].bus_addr
= bar
[bar_num
].bus_addr
+ bar_offset
;
120 unsigned int vnic_dev_get_res_count(struct vnic_dev
*vdev
,
121 enum vnic_res_type type
)
123 return vdev
->res
[type
].count
;
125 EXPORT_SYMBOL(vnic_dev_get_res_count
);
127 void __iomem
*vnic_dev_get_res(struct vnic_dev
*vdev
, enum vnic_res_type type
,
130 if (!vdev
->res
[type
].vaddr
)
137 case RES_TYPE_INTR_CTRL
:
138 return (char __iomem
*)vdev
->res
[type
].vaddr
+
139 index
* VNIC_RES_STRIDE
;
141 return (char __iomem
*)vdev
->res
[type
].vaddr
;
144 EXPORT_SYMBOL(vnic_dev_get_res
);
146 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring
*ring
,
147 unsigned int desc_count
, unsigned int desc_size
)
150 /* Descriptor ring base address alignment in bytes*/
151 ring
->base_align
= VNIC_DESC_BASE_ALIGN
;
153 /* A count of 0 means the maximum descriptors */
155 desc_count
= VNIC_DESC_MAX_COUNT
;
157 /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
158 ring
->desc_count
= ALIGN(desc_count
, VNIC_DESC_COUNT_ALIGN
);
160 /* Descriptor size alignment in bytes */
161 ring
->desc_size
= ALIGN(desc_size
, VNIC_DESC_SIZE_ALIGN
);
163 ring
->size
= ring
->desc_count
* ring
->desc_size
;
164 ring
->size_unaligned
= ring
->size
+ ring
->base_align
;
166 return ring
->size_unaligned
;
169 void vnic_dev_clear_desc_ring(struct vnic_dev_ring
*ring
)
171 memset(ring
->descs
, 0, ring
->size
);
174 int vnic_dev_alloc_desc_ring(struct vnic_dev
*vdev
, struct vnic_dev_ring
*ring
,
175 unsigned int desc_count
, unsigned int desc_size
)
177 vnic_dev_desc_ring_size(ring
, desc_count
, desc_size
);
179 ring
->descs_unaligned
= dma_alloc_coherent(&vdev
->pdev
->dev
,
180 ring
->size_unaligned
,
181 &ring
->base_addr_unaligned
,
184 if (!ring
->descs_unaligned
) {
185 vdev_err(vdev
, "Failed to allocate ring (size=%d), aborting\n",
190 ring
->base_addr
= ALIGN(ring
->base_addr_unaligned
,
192 ring
->descs
= (u8
*)ring
->descs_unaligned
+
193 (ring
->base_addr
- ring
->base_addr_unaligned
);
195 vnic_dev_clear_desc_ring(ring
);
197 ring
->desc_avail
= ring
->desc_count
- 1;
202 void vnic_dev_free_desc_ring(struct vnic_dev
*vdev
, struct vnic_dev_ring
*ring
)
205 dma_free_coherent(&vdev
->pdev
->dev
, ring
->size_unaligned
,
206 ring
->descs_unaligned
,
207 ring
->base_addr_unaligned
);
212 static int _vnic_dev_cmd(struct vnic_dev
*vdev
, enum vnic_devcmd_cmd cmd
,
215 struct vnic_devcmd __iomem
*devcmd
= vdev
->devcmd
;
221 status
= ioread32(&devcmd
->status
);
222 if (status
== 0xFFFFFFFF) {
223 /* PCI-e target device is gone */
226 if (status
& STAT_BUSY
) {
227 vdev_neterr(vdev
, "Busy devcmd %d\n", _CMD_N(cmd
));
231 if (_CMD_DIR(cmd
) & _CMD_DIR_WRITE
) {
232 for (i
= 0; i
< VNIC_DEVCMD_NARGS
; i
++)
233 writeq(vdev
->args
[i
], &devcmd
->args
[i
]);
237 iowrite32(cmd
, &devcmd
->cmd
);
239 if ((_CMD_FLAGS(cmd
) & _CMD_FLAGS_NOWAIT
))
242 for (delay
= 0; delay
< wait
; delay
++) {
246 status
= ioread32(&devcmd
->status
);
247 if (status
== 0xFFFFFFFF) {
248 /* PCI-e target device is gone */
252 if (!(status
& STAT_BUSY
)) {
254 if (status
& STAT_ERROR
) {
255 err
= (int)readq(&devcmd
->args
[0]);
256 if (err
== ERR_EINVAL
&&
257 cmd
== CMD_CAPABILITY
)
259 if (err
!= ERR_ECMDUNKNOWN
||
260 cmd
!= CMD_CAPABILITY
)
261 vdev_neterr(vdev
, "Error %d devcmd %d\n",
266 if (_CMD_DIR(cmd
) & _CMD_DIR_READ
) {
268 for (i
= 0; i
< VNIC_DEVCMD_NARGS
; i
++)
269 vdev
->args
[i
] = readq(&devcmd
->args
[i
]);
276 vdev_neterr(vdev
, "Timedout devcmd %d\n", _CMD_N(cmd
));
280 static int _vnic_dev_cmd2(struct vnic_dev
*vdev
, enum vnic_devcmd_cmd cmd
,
283 struct devcmd2_controller
*dc2c
= vdev
->devcmd2
;
284 struct devcmd2_result
*result
;
288 u32 fetch_index
, new_posted
;
289 u32 posted
= dc2c
->posted
;
291 fetch_index
= ioread32(&dc2c
->wq_ctrl
->fetch_index
);
293 if (fetch_index
== 0xFFFFFFFF)
296 new_posted
= (posted
+ 1) % DEVCMD2_RING_SIZE
;
298 if (new_posted
== fetch_index
) {
299 vdev_neterr(vdev
, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
300 _CMD_N(cmd
), fetch_index
, posted
);
303 dc2c
->cmd_ring
[posted
].cmd
= cmd
;
304 dc2c
->cmd_ring
[posted
].flags
= 0;
306 if ((_CMD_FLAGS(cmd
) & _CMD_FLAGS_NOWAIT
))
307 dc2c
->cmd_ring
[posted
].flags
|= DEVCMD2_FNORESULT
;
308 if (_CMD_DIR(cmd
) & _CMD_DIR_WRITE
)
309 for (i
= 0; i
< VNIC_DEVCMD_NARGS
; i
++)
310 dc2c
->cmd_ring
[posted
].args
[i
] = vdev
->args
[i
];
312 /* Adding write memory barrier prevents compiler and/or CPU reordering,
313 * thus avoiding descriptor posting before descriptor is initialized.
314 * Otherwise, hardware can read stale descriptor fields.
317 iowrite32(new_posted
, &dc2c
->wq_ctrl
->posted_index
);
318 dc2c
->posted
= new_posted
;
320 if (dc2c
->cmd_ring
[posted
].flags
& DEVCMD2_FNORESULT
)
323 result
= dc2c
->result
+ dc2c
->next_result
;
327 if (dc2c
->next_result
== dc2c
->result_size
) {
328 dc2c
->next_result
= 0;
329 dc2c
->color
= dc2c
->color
? 0 : 1;
332 for (delay
= 0; delay
< wait
; delay
++) {
333 if (result
->color
== color
) {
336 if (err
!= ERR_ECMDUNKNOWN
||
337 cmd
!= CMD_CAPABILITY
)
338 vdev_neterr(vdev
, "Error %d devcmd %d\n",
342 if (_CMD_DIR(cmd
) & _CMD_DIR_READ
)
343 for (i
= 0; i
< VNIC_DEVCMD2_NARGS
; i
++)
344 vdev
->args
[i
] = result
->results
[i
];
351 vdev_neterr(vdev
, "devcmd %d timed out\n", _CMD_N(cmd
));
356 static int vnic_dev_init_devcmd1(struct vnic_dev
*vdev
)
358 vdev
->devcmd
= vnic_dev_get_res(vdev
, RES_TYPE_DEVCMD
, 0);
361 vdev
->devcmd_rtn
= _vnic_dev_cmd
;
366 static int vnic_dev_init_devcmd2(struct vnic_dev
*vdev
)
369 unsigned int fetch_index
;
374 vdev
->devcmd2
= kzalloc(sizeof(*vdev
->devcmd2
), GFP_KERNEL
);
378 vdev
->devcmd2
->color
= 1;
379 vdev
->devcmd2
->result_size
= DEVCMD2_RING_SIZE
;
380 err
= enic_wq_devcmd2_alloc(vdev
, &vdev
->devcmd2
->wq
, DEVCMD2_RING_SIZE
,
383 goto err_free_devcmd2
;
385 fetch_index
= ioread32(&vdev
->devcmd2
->wq
.ctrl
->fetch_index
);
386 if (fetch_index
== 0xFFFFFFFF) { /* check for hardware gone */
387 vdev_err(vdev
, "Fatal error in devcmd2 init - hardware surprise removal\n");
392 enic_wq_init_start(&vdev
->devcmd2
->wq
, 0, fetch_index
, fetch_index
, 0,
394 vdev
->devcmd2
->posted
= fetch_index
;
395 vnic_wq_enable(&vdev
->devcmd2
->wq
);
397 err
= vnic_dev_alloc_desc_ring(vdev
, &vdev
->devcmd2
->results_ring
,
398 DEVCMD2_RING_SIZE
, DEVCMD2_DESC_SIZE
);
402 vdev
->devcmd2
->result
= vdev
->devcmd2
->results_ring
.descs
;
403 vdev
->devcmd2
->cmd_ring
= vdev
->devcmd2
->wq
.ring
.descs
;
404 vdev
->devcmd2
->wq_ctrl
= vdev
->devcmd2
->wq
.ctrl
;
405 vdev
->args
[0] = (u64
)vdev
->devcmd2
->results_ring
.base_addr
|
407 vdev
->args
[1] = DEVCMD2_RING_SIZE
;
409 err
= _vnic_dev_cmd2(vdev
, CMD_INITIALIZE_DEVCMD2
, 1000);
411 goto err_free_desc_ring
;
413 vdev
->devcmd_rtn
= _vnic_dev_cmd2
;
418 vnic_dev_free_desc_ring(vdev
, &vdev
->devcmd2
->results_ring
);
420 vnic_wq_disable(&vdev
->devcmd2
->wq
);
422 vnic_wq_free(&vdev
->devcmd2
->wq
);
424 kfree(vdev
->devcmd2
);
425 vdev
->devcmd2
= NULL
;
430 static void vnic_dev_deinit_devcmd2(struct vnic_dev
*vdev
)
432 vnic_dev_free_desc_ring(vdev
, &vdev
->devcmd2
->results_ring
);
433 vnic_wq_disable(&vdev
->devcmd2
->wq
);
434 vnic_wq_free(&vdev
->devcmd2
->wq
);
435 kfree(vdev
->devcmd2
);
438 static int vnic_dev_cmd_proxy(struct vnic_dev
*vdev
,
439 enum vnic_devcmd_cmd proxy_cmd
, enum vnic_devcmd_cmd cmd
,
440 u64
*a0
, u64
*a1
, int wait
)
445 memset(vdev
->args
, 0, sizeof(vdev
->args
));
447 vdev
->args
[0] = vdev
->proxy_index
;
452 err
= vdev
->devcmd_rtn(vdev
, proxy_cmd
, wait
);
456 status
= (u32
)vdev
->args
[0];
457 if (status
& STAT_ERROR
) {
458 err
= (int)vdev
->args
[1];
459 if (err
!= ERR_ECMDUNKNOWN
||
460 cmd
!= CMD_CAPABILITY
)
461 vdev_neterr(vdev
, "Error %d proxy devcmd %d\n",
472 static int vnic_dev_cmd_no_proxy(struct vnic_dev
*vdev
,
473 enum vnic_devcmd_cmd cmd
, u64
*a0
, u64
*a1
, int wait
)
480 err
= vdev
->devcmd_rtn(vdev
, cmd
, wait
);
488 void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev
*vdev
, u16 index
)
490 vdev
->proxy
= PROXY_BY_INDEX
;
491 vdev
->proxy_index
= index
;
494 void vnic_dev_cmd_proxy_end(struct vnic_dev
*vdev
)
496 vdev
->proxy
= PROXY_NONE
;
497 vdev
->proxy_index
= 0;
500 int vnic_dev_cmd(struct vnic_dev
*vdev
, enum vnic_devcmd_cmd cmd
,
501 u64
*a0
, u64
*a1
, int wait
)
503 memset(vdev
->args
, 0, sizeof(vdev
->args
));
505 switch (vdev
->proxy
) {
507 return vnic_dev_cmd_proxy(vdev
, CMD_PROXY_BY_INDEX
, cmd
,
510 return vnic_dev_cmd_proxy(vdev
, CMD_PROXY_BY_BDF
, cmd
,
514 return vnic_dev_cmd_no_proxy(vdev
, cmd
, a0
, a1
, wait
);
518 static int vnic_dev_capable(struct vnic_dev
*vdev
, enum vnic_devcmd_cmd cmd
)
520 u64 a0
= (u32
)cmd
, a1
= 0;
524 err
= vnic_dev_cmd(vdev
, CMD_CAPABILITY
, &a0
, &a1
, wait
);
529 int vnic_dev_fw_info(struct vnic_dev
*vdev
,
530 struct vnic_devcmd_fw_info
**fw_info
)
536 if (!vdev
->fw_info
) {
537 vdev
->fw_info
= dma_alloc_coherent(&vdev
->pdev
->dev
,
538 sizeof(struct vnic_devcmd_fw_info
),
539 &vdev
->fw_info_pa
, GFP_ATOMIC
);
543 a0
= vdev
->fw_info_pa
;
544 a1
= sizeof(struct vnic_devcmd_fw_info
);
546 /* only get fw_info once and cache it */
547 if (vnic_dev_capable(vdev
, CMD_MCPU_FW_INFO
))
548 err
= vnic_dev_cmd(vdev
, CMD_MCPU_FW_INFO
,
551 err
= vnic_dev_cmd(vdev
, CMD_MCPU_FW_INFO_OLD
,
555 *fw_info
= vdev
->fw_info
;
560 int vnic_dev_spec(struct vnic_dev
*vdev
, unsigned int offset
, unsigned int size
,
570 err
= vnic_dev_cmd(vdev
, CMD_DEV_SPEC
, &a0
, &a1
, wait
);
573 case 1: *(u8
*)value
= (u8
)a0
; break;
574 case 2: *(u16
*)value
= (u16
)a0
; break;
575 case 4: *(u32
*)value
= (u32
)a0
; break;
576 case 8: *(u64
*)value
= a0
; break;
577 default: BUG(); break;
583 int vnic_dev_stats_dump(struct vnic_dev
*vdev
, struct vnic_stats
**stats
)
589 vdev
->stats
= dma_alloc_coherent(&vdev
->pdev
->dev
,
590 sizeof(struct vnic_stats
),
591 &vdev
->stats_pa
, GFP_ATOMIC
);
596 *stats
= vdev
->stats
;
598 a1
= sizeof(struct vnic_stats
);
600 return vnic_dev_cmd(vdev
, CMD_STATS_DUMP
, &a0
, &a1
, wait
);
603 int vnic_dev_close(struct vnic_dev
*vdev
)
607 return vnic_dev_cmd(vdev
, CMD_CLOSE
, &a0
, &a1
, wait
);
610 int vnic_dev_enable_wait(struct vnic_dev
*vdev
)
615 if (vnic_dev_capable(vdev
, CMD_ENABLE_WAIT
))
616 return vnic_dev_cmd(vdev
, CMD_ENABLE_WAIT
, &a0
, &a1
, wait
);
618 return vnic_dev_cmd(vdev
, CMD_ENABLE
, &a0
, &a1
, wait
);
621 int vnic_dev_disable(struct vnic_dev
*vdev
)
625 return vnic_dev_cmd(vdev
, CMD_DISABLE
, &a0
, &a1
, wait
);
628 int vnic_dev_open(struct vnic_dev
*vdev
, int arg
)
630 u64 a0
= (u32
)arg
, a1
= 0;
632 return vnic_dev_cmd(vdev
, CMD_OPEN
, &a0
, &a1
, wait
);
635 int vnic_dev_open_done(struct vnic_dev
*vdev
, int *done
)
643 err
= vnic_dev_cmd(vdev
, CMD_OPEN_STATUS
, &a0
, &a1
, wait
);
652 int vnic_dev_soft_reset(struct vnic_dev
*vdev
, int arg
)
654 u64 a0
= (u32
)arg
, a1
= 0;
656 return vnic_dev_cmd(vdev
, CMD_SOFT_RESET
, &a0
, &a1
, wait
);
659 int vnic_dev_soft_reset_done(struct vnic_dev
*vdev
, int *done
)
667 err
= vnic_dev_cmd(vdev
, CMD_SOFT_RESET_STATUS
, &a0
, &a1
, wait
);
676 int vnic_dev_hang_reset(struct vnic_dev
*vdev
, int arg
)
678 u64 a0
= (u32
)arg
, a1
= 0;
682 if (vnic_dev_capable(vdev
, CMD_HANG_RESET
)) {
683 return vnic_dev_cmd(vdev
, CMD_HANG_RESET
,
686 err
= vnic_dev_soft_reset(vdev
, arg
);
689 return vnic_dev_init(vdev
, 0);
693 int vnic_dev_hang_reset_done(struct vnic_dev
*vdev
, int *done
)
701 if (vnic_dev_capable(vdev
, CMD_HANG_RESET_STATUS
)) {
702 err
= vnic_dev_cmd(vdev
, CMD_HANG_RESET_STATUS
,
707 return vnic_dev_soft_reset_done(vdev
, done
);
715 int vnic_dev_hang_notify(struct vnic_dev
*vdev
)
719 return vnic_dev_cmd(vdev
, CMD_HANG_NOTIFY
, &a0
, &a1
, wait
);
722 int vnic_dev_get_mac_addr(struct vnic_dev
*vdev
, u8
*mac_addr
)
728 for (i
= 0; i
< ETH_ALEN
; i
++)
731 err
= vnic_dev_cmd(vdev
, CMD_GET_MAC_ADDR
, &a0
, &a1
, wait
);
735 for (i
= 0; i
< ETH_ALEN
; i
++)
736 mac_addr
[i
] = ((u8
*)&a0
)[i
];
741 int vnic_dev_packet_filter(struct vnic_dev
*vdev
, int directed
, int multicast
,
742 int broadcast
, int promisc
, int allmulti
)
748 a0
= (directed
? CMD_PFILTER_DIRECTED
: 0) |
749 (multicast
? CMD_PFILTER_MULTICAST
: 0) |
750 (broadcast
? CMD_PFILTER_BROADCAST
: 0) |
751 (promisc
? CMD_PFILTER_PROMISCUOUS
: 0) |
752 (allmulti
? CMD_PFILTER_ALL_MULTICAST
: 0);
754 err
= vnic_dev_cmd(vdev
, CMD_PACKET_FILTER
, &a0
, &a1
, wait
);
756 vdev_neterr(vdev
, "Can't set packet filter\n");
761 int vnic_dev_add_addr(struct vnic_dev
*vdev
, const u8
*addr
)
768 for (i
= 0; i
< ETH_ALEN
; i
++)
769 ((u8
*)&a0
)[i
] = addr
[i
];
771 err
= vnic_dev_cmd(vdev
, CMD_ADDR_ADD
, &a0
, &a1
, wait
);
773 vdev_neterr(vdev
, "Can't add addr [%pM], %d\n", addr
, err
);
778 int vnic_dev_del_addr(struct vnic_dev
*vdev
, const u8
*addr
)
785 for (i
= 0; i
< ETH_ALEN
; i
++)
786 ((u8
*)&a0
)[i
] = addr
[i
];
788 err
= vnic_dev_cmd(vdev
, CMD_ADDR_DEL
, &a0
, &a1
, wait
);
790 vdev_neterr(vdev
, "Can't del addr [%pM], %d\n", addr
, err
);
795 int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev
*vdev
,
796 u8 ig_vlan_rewrite_mode
)
798 u64 a0
= ig_vlan_rewrite_mode
, a1
= 0;
801 if (vnic_dev_capable(vdev
, CMD_IG_VLAN_REWRITE_MODE
))
802 return vnic_dev_cmd(vdev
, CMD_IG_VLAN_REWRITE_MODE
,
808 static int vnic_dev_notify_setcmd(struct vnic_dev
*vdev
,
809 void *notify_addr
, dma_addr_t notify_pa
, u16 intr
)
815 memset(notify_addr
, 0, sizeof(struct vnic_devcmd_notify
));
816 vdev
->notify
= notify_addr
;
817 vdev
->notify_pa
= notify_pa
;
820 a1
= ((u64
)intr
<< 32) & 0x0000ffff00000000ULL
;
821 a1
+= sizeof(struct vnic_devcmd_notify
);
823 r
= vnic_dev_cmd(vdev
, CMD_NOTIFY
, &a0
, &a1
, wait
);
824 vdev
->notify_sz
= (r
== 0) ? (u32
)a1
: 0;
828 int vnic_dev_notify_set(struct vnic_dev
*vdev
, u16 intr
)
831 dma_addr_t notify_pa
;
833 if (vdev
->notify
|| vdev
->notify_pa
) {
834 vdev_neterr(vdev
, "notify block %p still allocated\n",
839 notify_addr
= dma_alloc_coherent(&vdev
->pdev
->dev
,
840 sizeof(struct vnic_devcmd_notify
),
841 ¬ify_pa
, GFP_ATOMIC
);
845 return vnic_dev_notify_setcmd(vdev
, notify_addr
, notify_pa
, intr
);
848 static int vnic_dev_notify_unsetcmd(struct vnic_dev
*vdev
)
854 a0
= 0; /* paddr = 0 to unset notify buffer */
855 a1
= 0x0000ffff00000000ULL
; /* intr num = -1 to unreg for intr */
856 a1
+= sizeof(struct vnic_devcmd_notify
);
858 err
= vnic_dev_cmd(vdev
, CMD_NOTIFY
, &a0
, &a1
, wait
);
866 int vnic_dev_notify_unset(struct vnic_dev
*vdev
)
869 dma_free_coherent(&vdev
->pdev
->dev
,
870 sizeof(struct vnic_devcmd_notify
),
871 vdev
->notify
, vdev
->notify_pa
);
874 return vnic_dev_notify_unsetcmd(vdev
);
877 static int vnic_dev_notify_ready(struct vnic_dev
*vdev
)
880 unsigned int nwords
= vdev
->notify_sz
/ 4;
884 if (!vdev
->notify
|| !vdev
->notify_sz
)
889 memcpy(&vdev
->notify_copy
, vdev
->notify
, vdev
->notify_sz
);
890 words
= (u32
*)&vdev
->notify_copy
;
891 for (i
= 1; i
< nwords
; i
++)
893 } while (csum
!= words
[0]);
898 int vnic_dev_init(struct vnic_dev
*vdev
, int arg
)
900 u64 a0
= (u32
)arg
, a1
= 0;
904 if (vnic_dev_capable(vdev
, CMD_INIT
))
905 r
= vnic_dev_cmd(vdev
, CMD_INIT
, &a0
, &a1
, wait
);
907 vnic_dev_cmd(vdev
, CMD_INIT_v1
, &a0
, &a1
, wait
);
908 if (a0
& CMD_INITF_DEFAULT_MAC
) {
909 /* Emulate these for old CMD_INIT_v1 which
910 * didn't pass a0 so no CMD_INITF_*.
912 vnic_dev_cmd(vdev
, CMD_GET_MAC_ADDR
, &a0
, &a1
, wait
);
913 vnic_dev_cmd(vdev
, CMD_ADDR_ADD
, &a0
, &a1
, wait
);
919 int vnic_dev_deinit(struct vnic_dev
*vdev
)
924 return vnic_dev_cmd(vdev
, CMD_DEINIT
, &a0
, &a1
, wait
);
927 void vnic_dev_intr_coal_timer_info_default(struct vnic_dev
*vdev
)
929 /* Default: hardware intr coal timer is in units of 1.5 usecs */
930 vdev
->intr_coal_timer_info
.mul
= 2;
931 vdev
->intr_coal_timer_info
.div
= 3;
932 vdev
->intr_coal_timer_info
.max_usec
=
933 vnic_dev_intr_coal_timer_hw_to_usec(vdev
, 0xffff);
936 int vnic_dev_intr_coal_timer_info(struct vnic_dev
*vdev
)
941 memset(vdev
->args
, 0, sizeof(vdev
->args
));
943 if (vnic_dev_capable(vdev
, CMD_INTR_COAL_CONVERT
))
944 err
= vdev
->devcmd_rtn(vdev
, CMD_INTR_COAL_CONVERT
, wait
);
946 err
= ERR_ECMDUNKNOWN
;
948 /* Use defaults when firmware doesn't support the devcmd at all or
949 * supports it for only specific hardware
951 if ((err
== ERR_ECMDUNKNOWN
) ||
952 (!err
&& !(vdev
->args
[0] && vdev
->args
[1] && vdev
->args
[2]))) {
953 vdev_netwarn(vdev
, "Using default conversion factor for interrupt coalesce timer\n");
954 vnic_dev_intr_coal_timer_info_default(vdev
);
959 vdev
->intr_coal_timer_info
.mul
= (u32
) vdev
->args
[0];
960 vdev
->intr_coal_timer_info
.div
= (u32
) vdev
->args
[1];
961 vdev
->intr_coal_timer_info
.max_usec
= (u32
) vdev
->args
[2];
967 int vnic_dev_link_status(struct vnic_dev
*vdev
)
969 if (!vnic_dev_notify_ready(vdev
))
972 return vdev
->notify_copy
.link_state
;
975 u32
vnic_dev_port_speed(struct vnic_dev
*vdev
)
977 if (!vnic_dev_notify_ready(vdev
))
980 return vdev
->notify_copy
.port_speed
;
983 u32
vnic_dev_msg_lvl(struct vnic_dev
*vdev
)
985 if (!vnic_dev_notify_ready(vdev
))
988 return vdev
->notify_copy
.msglvl
;
991 u32
vnic_dev_mtu(struct vnic_dev
*vdev
)
993 if (!vnic_dev_notify_ready(vdev
))
996 return vdev
->notify_copy
.mtu
;
999 void vnic_dev_set_intr_mode(struct vnic_dev
*vdev
,
1000 enum vnic_dev_intr_mode intr_mode
)
1002 vdev
->intr_mode
= intr_mode
;
1005 enum vnic_dev_intr_mode
vnic_dev_get_intr_mode(
1006 struct vnic_dev
*vdev
)
1008 return vdev
->intr_mode
;
1011 u32
vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev
*vdev
, u32 usec
)
1013 return (usec
* vdev
->intr_coal_timer_info
.mul
) /
1014 vdev
->intr_coal_timer_info
.div
;
1017 u32
vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev
*vdev
, u32 hw_cycles
)
1019 return (hw_cycles
* vdev
->intr_coal_timer_info
.div
) /
1020 vdev
->intr_coal_timer_info
.mul
;
1023 u32
vnic_dev_get_intr_coal_timer_max(struct vnic_dev
*vdev
)
1025 return vdev
->intr_coal_timer_info
.max_usec
;
1028 void vnic_dev_unregister(struct vnic_dev
*vdev
)
1032 dma_free_coherent(&vdev
->pdev
->dev
,
1033 sizeof(struct vnic_devcmd_notify
),
1034 vdev
->notify
, vdev
->notify_pa
);
1036 dma_free_coherent(&vdev
->pdev
->dev
,
1037 sizeof(struct vnic_stats
),
1038 vdev
->stats
, vdev
->stats_pa
);
1040 dma_free_coherent(&vdev
->pdev
->dev
,
1041 sizeof(struct vnic_devcmd_fw_info
),
1042 vdev
->fw_info
, vdev
->fw_info_pa
);
1044 vnic_dev_deinit_devcmd2(vdev
);
1049 EXPORT_SYMBOL(vnic_dev_unregister
);
1051 struct vnic_dev
*vnic_dev_register(struct vnic_dev
*vdev
,
1052 void *priv
, struct pci_dev
*pdev
, struct vnic_dev_bar
*bar
,
1053 unsigned int num_bars
)
1056 vdev
= kzalloc(sizeof(struct vnic_dev
), GFP_KERNEL
);
1064 if (vnic_dev_discover_res(vdev
, bar
, num_bars
))
1070 vnic_dev_unregister(vdev
);
1073 EXPORT_SYMBOL(vnic_dev_register
);
1075 struct pci_dev
*vnic_dev_get_pdev(struct vnic_dev
*vdev
)
1079 EXPORT_SYMBOL(vnic_dev_get_pdev
);
1081 int vnic_devcmd_init(struct vnic_dev
*vdev
)
1086 res
= vnic_dev_get_res(vdev
, RES_TYPE_DEVCMD2
, 0);
1088 err
= vnic_dev_init_devcmd2(vdev
);
1090 vdev_warn(vdev
, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
1095 vdev_warn(vdev
, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
1097 err
= vnic_dev_init_devcmd1(vdev
);
1099 vdev_err(vdev
, "DEVCMD1 initialization failed: %d\n", err
);
1104 int vnic_dev_init_prov2(struct vnic_dev
*vdev
, u8
*buf
, u32 len
)
1112 prov_buf
= dma_alloc_coherent(&vdev
->pdev
->dev
, len
, &prov_pa
, GFP_ATOMIC
);
1116 memcpy(prov_buf
, buf
, len
);
1120 ret
= vnic_dev_cmd(vdev
, CMD_INIT_PROV_INFO2
, &a0
, &a1
, wait
);
1122 dma_free_coherent(&vdev
->pdev
->dev
, len
, prov_buf
, prov_pa
);
1127 int vnic_dev_enable2(struct vnic_dev
*vdev
, int active
)
1132 a0
= (active
? CMD_ENABLE2_ACTIVE
: 0);
1134 return vnic_dev_cmd(vdev
, CMD_ENABLE2
, &a0
, &a1
, wait
);
1137 static int vnic_dev_cmd_status(struct vnic_dev
*vdev
, enum vnic_devcmd_cmd cmd
,
1140 u64 a0
= cmd
, a1
= 0;
1144 ret
= vnic_dev_cmd(vdev
, CMD_STATUS
, &a0
, &a1
, wait
);
1151 int vnic_dev_enable2_done(struct vnic_dev
*vdev
, int *status
)
1153 return vnic_dev_cmd_status(vdev
, CMD_ENABLE2
, status
);
1156 int vnic_dev_deinit_done(struct vnic_dev
*vdev
, int *status
)
1158 return vnic_dev_cmd_status(vdev
, CMD_DEINIT
, status
);
1161 int vnic_dev_set_mac_addr(struct vnic_dev
*vdev
, u8
*mac_addr
)
1167 for (i
= 0; i
< ETH_ALEN
; i
++)
1168 ((u8
*)&a0
)[i
] = mac_addr
[i
];
1170 return vnic_dev_cmd(vdev
, CMD_SET_MAC_ADDR
, &a0
, &a1
, wait
);
1173 /* vnic_dev_classifier: Add/Delete classifier entries
1174 * @vdev: vdev of the device
1175 * @cmd: CLSF_ADD for Add filter
1176 * CLSF_DEL for Delete filter
1177 * @entry: In case of ADD filter, the caller passes the RQ number in this
1180 * This function stores the filter_id returned by the firmware in the
1181 * same variable before return;
1183 * In case of DEL filter, the caller passes the RQ number. Return
1184 * value is irrelevant.
1185 * @data: filter data
1187 int vnic_dev_classifier(struct vnic_dev
*vdev
, u8 cmd
, u16
*entry
,
1188 struct filter
*data
)
1194 struct filter_tlv
*tlv
, *tlv_va
;
1195 struct filter_action
*action
;
1198 if (cmd
== CLSF_ADD
) {
1199 tlv_size
= sizeof(struct filter
) +
1200 sizeof(struct filter_action
) +
1201 2 * sizeof(struct filter_tlv
);
1202 tlv_va
= dma_alloc_coherent(&vdev
->pdev
->dev
, tlv_size
,
1203 &tlv_pa
, GFP_ATOMIC
);
1209 memset(tlv
, 0, tlv_size
);
1210 tlv
->type
= CLSF_TLV_FILTER
;
1211 tlv
->length
= sizeof(struct filter
);
1212 *(struct filter
*)&tlv
->val
= *data
;
1214 tlv
= (struct filter_tlv
*)((char *)tlv
+
1215 sizeof(struct filter_tlv
) +
1216 sizeof(struct filter
));
1218 tlv
->type
= CLSF_TLV_ACTION
;
1219 tlv
->length
= sizeof(struct filter_action
);
1220 action
= (struct filter_action
*)&tlv
->val
;
1221 action
->type
= FILTER_ACTION_RQ_STEERING
;
1222 action
->u
.rq_idx
= *entry
;
1224 ret
= vnic_dev_cmd(vdev
, CMD_ADD_FILTER
, &a0
, &a1
, wait
);
1226 dma_free_coherent(&vdev
->pdev
->dev
, tlv_size
, tlv_va
, tlv_pa
);
1227 } else if (cmd
== CLSF_DEL
) {
1229 ret
= vnic_dev_cmd(vdev
, CMD_DEL_FILTER
, &a0
, &a1
, wait
);
1235 int vnic_dev_overlay_offload_ctrl(struct vnic_dev
*vdev
, u8 overlay
, u8 config
)
1241 return vnic_dev_cmd(vdev
, CMD_OVERLAY_OFFLOAD_CTRL
, &a0
, &a1
, wait
);
1244 int vnic_dev_overlay_offload_cfg(struct vnic_dev
*vdev
, u8 overlay
,
1245 u16 vxlan_udp_port_number
)
1247 u64 a1
= vxlan_udp_port_number
;
1251 return vnic_dev_cmd(vdev
, CMD_OVERLAY_OFFLOAD_CFG
, &a0
, &a1
, wait
);
1254 int vnic_dev_get_supported_feature_ver(struct vnic_dev
*vdev
, u8 feature
,
1255 u64
*supported_versions
, u64
*a1
)
1261 ret
= vnic_dev_cmd(vdev
, CMD_GET_SUPP_FEATURE_VER
, &a0
, a1
, wait
);
1263 *supported_versions
= a0
;
1268 int vnic_dev_capable_rss_hash_type(struct vnic_dev
*vdev
, u8
*rss_hash_type
)
1270 u64 a0
= CMD_NIC_CFG
, a1
= 0;
1274 err
= vnic_dev_cmd(vdev
, CMD_CAPABILITY
, &a0
, &a1
, wait
);
1275 /* rss_hash_type is valid only when a0 is 1. Adapter which does not
1276 * support CMD_CAPABILITY for rss_hash_type has a0 = 0
1278 if (err
|| (a0
!= 1))
1281 a1
= (a1
>> NIC_CFG_RSS_HASH_TYPE_SHIFT
) &
1282 NIC_CFG_RSS_HASH_TYPE_MASK_FIELD
;
1284 *rss_hash_type
= (u8
)a1
;