/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"
#define VNIC_DVCMD_TMO		10000	/* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK	0x0000ffff00000000ULL
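
/*
 * Per-device state for the devcmd2 command path: host-to-firmware commands
 * are written into cmd_ring and posted through the work queue, and firmware
 * reports completions in results_ring (see _svnic_dev_cmd2() below).
 */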
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;

	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
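
/*
 * BAR0 starts with a vnic_resource_header (magic/version) followed by an
 * array of vnic_resource entries terminated by RES_TYPE_EOL. Per-index
 * register blocks of a resource are laid out VNIC_RES_STRIDE bytes apart,
 * which is how svnic_dev_get_res() computes an indexed address below.
 */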
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");

		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");

		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));

		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);

				return -EINVAL;
			}
			break;

		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;

		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}

unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
				     enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
				unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
					index * VNIC_RES_STRIDE;

	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
				      unsigned int desc_count,
				      unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
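
	/*
	 * Example: desc_count = 40 and desc_size = 12 round up to 64
	 * descriptors of 16 bytes each, so size = 1024 bytes and
	 * size_unaligned = 1024 + 512 = 1536 bytes.
	 */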

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
			      unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
			ring->size_unaligned,
			&ring->base_addr_unaligned);
	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);

		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
				ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
			(ring->base_addr - ring->base_addr_unaligned);

	svnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
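
/*
 * Note: pci_alloc_consistent() is the legacy PCI wrapper around the coherent
 * DMA API (dma_alloc_coherent() with GFP_ATOMIC). The extra base_align bytes
 * allocated above guarantee that a 512-byte-aligned base address can always
 * be carved out of the unaligned buffer.
 */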

void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			   int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = NULL;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 fetch_idx;
	u32 new_posted;
	u8 color;

	fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) {	/* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	if (posted == 0xFFFFFFFF) {	/* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	if (new_posted == fetch_idx) {
		pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);

		return -EBUSY;
	}
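
	/*
	 * Treating (posted + 1) % DEVCMD2_RING_SIZE == fetch_index as full
	 * deliberately leaves one descriptor slot unused; otherwise a
	 * completely full ring would be indistinguishable from an empty one.
	 */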

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	/*
	 * Increment next_result, after posting the devcmd, irrespective of
	 * devcmd result, and it should be done only once.
	 */
	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}
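
	/*
	 * The color bit distinguishes fresh results from stale ones: each
	 * result entry is written with the current color, and the expected
	 * color flips every time next_result wraps, so an entry left over
	 * from the previous pass around the ring never matches.
	 */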

	for (delay = 0; delay < wait; delay++) {
		udelay(100);

		if (result->color == color) {
			if (result->error) {
				err = (int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));

				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;

	if (vdev->devcmd2)
		return 0;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;

	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;

	vdev->devcmd2 = dc2c;

	dc2c->color = 1;
	dc2c->result_size = DEVCMD2_RING_SIZE;

	ret = vnic_wq_devcmd2_alloc(vdev,
				    &dc2c->wq,
				    DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;

	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) {	/* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_idx = 0;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);

	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;

	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;
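
	/*
	 * args[0]/args[1] are consumed by CMD_INITIALIZE_DEVCMD2 below: the
	 * DMA base address of the results ring (tagged with VNIC_PADDR_TARGET)
	 * and its entry count, which tell firmware where to post command
	 * results from now on.
	 */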
	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");

	return ret;

err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;

	return ret;
} /* end of svnic_dev_init_devcmd2 */

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;

	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;

	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
}

int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		  u64 *a0, u64 *a1, int wait)
{
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}
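
/*
 * All the svnic_dev_*() wrappers below funnel through svnic_dev_cmd():
 * a0/a1 carry the two devcmd arguments in and the two result words out,
 * e.g. svnic_dev_open() passes its argument in a0 and ignores the results.
 */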
int svnic_dev_fw_info(struct vnic_dev *vdev,
		      struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
		   unsigned int size, void *value)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;
	int err;

	a0 = offset;
	a1 = size;

	err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int svnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
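
/*
 * CMD_ENABLE_WAIT is preferred; firmware that does not recognize it returns
 * ERR_ECMDUNKNOWN, in which case the legacy CMD_ENABLE is issued instead.
 */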
int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

	return err;
}

int svnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	*done = 0;

	err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
	a1 += sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
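
/*
 * For CMD_NOTIFY, a1 packs two values: the interrupt index in bits 32-47
 * (VNIC_NOTIFY_INTR_MASK) and the size of the notify buffer in the low
 * bits; unset writes an all-ones interrupt field to mean "no interrupt".
 */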
void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = VNIC_NOTIFY_INTR_MASK;	/* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
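
/*
 * The notify buffer is updated asynchronously by the device, with the sum
 * of words 1..n stored in word 0. Re-reading until the checksum matches
 * yields a consistent snapshot in notify_copy without any locking.
 */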
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
		       sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int svnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
			     enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}

struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	svnic_dev_unregister(vdev);

	return NULL;
} /* end of svnic_dev_alloc_discover */

/*
 * fallback option is left to keep the interface common for other vnics.
 */
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
	int err = -ENODEV;
	void __iomem *p;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p)
		err = svnic_dev_init_devcmd2(vdev);
	else
		pr_err("DEVCMD2 resource not found.\n");

	return err;
} /* end of svnic_dev_cmd_init */