/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

#define VNIC_DEV_CAP_INIT	0x0001
#define VNIC_DEV_CAP_PERBI	0x0002

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	u32 cap_flags;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

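/* Walk the resource table that firmware places just past the resource
 * header at the start of BAR0.  Each entry gives a resource type, the
 * BAR it lives in, its offset into that BAR and its count, and the
 * table is terminated by RES_TYPE_EOL.  Entries pointing at unmapped
 * or out-of-range BARs are skipped; everything else is recorded in
 * vdev->res[].
 */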
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

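/* Accessors for the resources discovered above: count, mapped virtual
 * address and bus address.  Strided resources (WQ, RQ, CQ, interrupt
 * control) are indexed in VNIC_RES_STRIDE-byte steps.
 */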
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
	enum vnic_res_type type, unsigned int index)
{
	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return vdev->res[type].bus_addr +
			index * VNIC_RES_STRIDE;
	default:
		return vdev->res[type].bus_addr;
	}
}

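/* Worked example of the sizing rules below (illustrative values only):
 * desc_count = 100 and desc_size = 10 round up to 128 descriptors of
 * 16 bytes each, so ring->size = 2048 and ring->size_unaligned =
 * 2048 + 512 = 2560, leaving room to realign the ring base to a
 * 512-byte boundary inside the allocation.
 */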
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

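/* Allocate the descriptor ring as one oversized DMA-coherent buffer
 * and carve the 512-byte-aligned ring out of it.  The unaligned
 * pointer and bus address are kept so the buffer can be freed later.
 */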
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

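/* Issue a device command through the BAR-mapped devcmd region: make
 * sure the previous command is not still busy, write the two argument
 * registers, write the command code, then poll the status register for
 * up to 'wait' iterations.  For commands with a read direction the
 * argument registers are read back on completion.
 */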
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					printk(KERN_ERR "Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

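/* Probe whether firmware supports a given devcmd by passing its code
 * to CMD_CAPABILITY.  vnic_dev_cmd() suppresses the ERR_ECMDUNKNOWN
 * log for this probe, so older firmware simply reports the command as
 * unsupported.
 */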
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

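/* Fetch firmware version information into a DMA-coherent buffer.  The
 * buffer is allocated and filled on first use and cached in vdev for
 * subsequent callers.
 */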
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
{
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = vnic_dev_fw_info(vdev, &fw_info);
	if (err)
		return err;

	if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
		*hw_ver = VNIC_DEV_HW_VER_A1;
	else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
		*hw_ver = VNIC_DEV_HW_VER_A2;
	else
		*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;

	return 0;
}

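/* Read a field of the device's config space via CMD_DEV_SPEC: a0/a1
 * carry the offset and size, and the result is stored through 'value'
 * according to the requested width (1, 2, 4 or 8 bytes).
 */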
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

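/* The following are thin wrappers around vnic_dev_cmd() for simple
 * device lifecycle commands (close, enable, disable, open, soft reset,
 * hang notify).
 */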
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

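/* Query the device's MAC address.  The address comes back packed into
 * the low bytes of a0 and is copied out byte by byte into mac_addr[].
 */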
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}

void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
}

void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
}

int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
{
	u64 a0 = intr, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Failed to raise INTR[%d], err %d\n",
			intr, err);

	return err;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
		memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

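/* Snapshot the firmware-written notify buffer into notify_copy.  The
 * copy is retried until the checksum in word 0 matches the sum of the
 * remaining words, so a torn update is never handed to the callers
 * below.
 */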
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

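/* Initialize the device.  Firmware that advertises CMD_INIT is used
 * directly; otherwise fall back to the older CMD_INIT_v1, which never
 * saw the arg flags, and emulate CMD_INITF_DEFAULT_MAC by fetching the
 * MAC with CMD_MAC_ADDR and registering it with CMD_ADDR_ADD.
 */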
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vdev->cap_flags & VNIC_DEV_CAP_INIT)
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

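/* The getters below report link state, port speed, message level, MTU,
 * link-down count and notify status from the most recent coherent
 * snapshot of the notify buffer, returning 0 when no notify buffer has
 * been set up.  vnic_dev_link_status() prefers the dedicated
 * linkstatus word when one is mapped.
 */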
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

u32 vnic_dev_notify_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.status;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

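/* Register a vNIC device: allocate the vnic_dev if the caller did not
 * supply one, discover the BAR resources, locate the devcmd region and
 * record which capabilities (e.g. CMD_INIT) the firmware supports.
 * Any failure tears everything down via vnic_dev_unregister().
 */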
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	vdev->cap_flags = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		vdev->cap_flags |= VNIC_DEV_CAP_INIT;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}