/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates the required memory for the channel and its
 * various arrays.
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);
exit0:
	return NULL;
}
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates the memory of the channel and its various arrays.
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}
/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references.
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references.
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in the PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}
/*
 * __vxge_hw_device_register_poll
 * Polls the given register for up to the specified amount of time,
 * returning success as soon as the masked bits read back as zero.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads zero.
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper, reads the toc pointer and returns the
 * memory mapped address of the toc.
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
	return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
/*
 * __vxge_hw_device_id_get
 * This routine sets the device id and revision numbers in the device
 * structure.
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = readq(&hldev->common_reg->titan_asic_id);
	hldev->device_id =
		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

	hldev->major_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

	hldev->minor_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver.
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
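
/*
 * Example (illustrative sketch, not part of the driver): the mapping above
 * means a privileged function gets the management register spaces in
 * addition to its own vpath space, e.g.
 *
 *     u32 rights = __vxge_hw_device_access_rights_get(
 *                      VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION, 0);
 *     // rights == VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH |
 *     //           VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
 *     //           VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM
 *
 * while a plain virtual function is left with only
 * VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH.
 */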
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments.
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	  (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	default:
		break;
	}

	return VXGE_HW_OK;
}
enum vxge_hw_status
__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
{
	if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
	     hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
	     hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
	    (hldev->func_id == 0))
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars.
 * Rebalance the RX_WRR and KDFC_WRR calendars.
 */
enum vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
	u32 i, j, how_often = 1;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	/* Reset the priorities assigned to the WRR arbitration
	 * phases for the receive traffic */
	for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
		writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));

	/* Reset the transmit FIFO servicing calendar for FIFOs */
	for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
	}

	/* Assign WRR priority 0 for all FIFOs */
	for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
			((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Reset to service non-offload doorbells */
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	/* Set priority 0 to all receive queues */
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the Fifo service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;
		if (how_often) {
			for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each fifo is serviced
					 * at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	/* Assign WRR priority number for FIFOs */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
			((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Modify the servicing algorithm applied to the 3 types of doorbells,
	 * i.e. non-offload, message and offload */
	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
		&hldev->mrpcim_reg->kdfc_entry_type_sel_0);

	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
		&hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {

		val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);

		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
	}

	/* Set up the priorities assigned to receive queues */
	writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
		&hldev->mrpcim_reg->rx_queue_priority_0);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
		&hldev->mrpcim_reg->rx_queue_priority_1);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
		&hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the Ring service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;

		if (how_often) {
			for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each ring is
					 * serviced at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
		val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
				wrr_states[j++]);

		writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
	}
exit:
	return status;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;

		vxge_hw_wrr_rebalance(hldev);
	}
exit:
	return status;
}
/*
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath.
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	  (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
				hw_info->func_id) &
				VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(i, vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @devh. Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vmalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	memset(hldev, 0, sizeof(struct __vxge_hw_device));
	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK)
		goto exit;
	__vxge_hw_device_id_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);

	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
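
/*
 * Example (illustrative sketch): the expected call sequence is to fill in
 * struct vxge_hw_device_attr with the mapped BAR and pci_dev, obtain a
 * (possibly tuned) configuration and then create the HW device, e.g.
 *
 *     struct vxge_hw_device_attr attr = {0};
 *     struct vxge_hw_device_config config;
 *     struct __vxge_hw_device *hldev;
 *
 *     vxge_hw_device_config_default_get(&config);
 *     attr.bar0 = bar0;
 *     attr.pdev = pdev;
 *     attr.uld_callbacks.link_up = my_link_up;     // hypothetical callbacks
 *     attr.uld_callbacks.link_down = my_link_down;
 *     attr.uld_callbacks.crit_err = my_crit_err;
 *
 *     if (vxge_hw_device_initialize(&hldev, &attr, &config) != VXGE_HW_OK)
 *         goto fail;
 *     ...
 *     vxge_hw_device_terminate(hldev);
 */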
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			 struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port.
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port.
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;

	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics.
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output.
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set.
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	return hldev->level_err;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set.
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set.
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
	return hldev->debug_module_mask;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;

	return link_width;
}
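
/*
 * Example (illustrative): with a Link Status value of 0x0081 the link speed
 * field (bits 3:0) is 1 and the negotiated width field (bits 9:4) is 0x8,
 * so vxge_hw_device_link_width_get() returns 8, i.e. a x8 link.
 */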
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of the memory block.
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}
/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets the index of a memory block.
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}
/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * Sets the next block pointer in an RxD block.
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block.
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}
/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item.
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links two RxD blocks by writing the DMA address of the "to"
 * block into the next-block pointer of the "from" block.
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD block
 * This function is the callback passed to __vxge_hw_mempool_create to create
 * the memory pool for the RxD blocks.
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}
/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array.
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
	void *rxd;
	int i = 0;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
		if (min_flag) {
			i++;
			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
				break;
		}
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a Ring and initializes it.
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);

	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);

	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}
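
/*
 * Example (illustrative): the channel length above is derived from the ring
 * configuration, e.g. with the one-buffer RxD format and ring_blocks = 2
 *
 *     ring_length = 2 * vxge_hw_ring_rxds_per_block_get(1)
 *
 * descriptors are tracked in the work/free/reserve arrays allocated by
 * __vxge_hw_channel_allocate().
 */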
/*
 * __vxge_hw_ring_abort - Returns the RxDs
 * This function terminates the RxDs of the ring.
 */
enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during the vpath reset operation.
 */
enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring.
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_mempool_grow
 * Will resize mempool up to %num_allocate value.
 */
enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
				vmalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		memset(mempool->memblocks_priv_arr[i], 0,
			mempool->items_priv_size * n_items);

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}
/*
 * __vxge_hw_mempool_create
 * This function creates a memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 */
struct vxge_hw_mempool *
__vxge_hw_mempool_create(
	struct __vxge_hw_device *devh,
	u32 memblock_size,
	u32 item_size,
	u32 items_priv_size,
	u32 items_initial,
	u32 items_max,
	struct vxge_hw_mempool_cbs *mp_callback,
	void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = (struct vxge_hw_mempool *)
			vmalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}
	memset(mempool, 0, sizeof(struct vxge_hw_mempool));

	mempool->devh = devh;
	mempool->memblock_size = memblock_size;
	mempool->items_max = items_max;
	mempool->items_initial = items_initial;
	mempool->item_size = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = mp_callback->item_func_alloc;
	mempool->userdata = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_priv_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
		vmalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_dma_arr, 0,
		sizeof(struct vxge_hw_mempool_dma) *
		mempool->memblocks_max);

	/* allocate hash array of items */
	mempool->items_arr =
		(void **) vmalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
exit:
	return mempool;
}
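
/*
 * Example (illustrative): the number of memory blocks is derived by rounding
 * the item count up to whole blocks. With memblock_size = 8192,
 * item_size = 2048 and items_max = 100:
 *
 *     items_per_memblock = 8192 / 2048 = 4
 *     memblocks_max      = (100 + 4 - 1) / 4 = 25
 *
 * and __vxge_hw_mempool_grow() is invoked with
 * (items_initial + 3) / 4 blocks to pre-allocate the pool.
 */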
/*
 * __vxge_hw_mempool_destroy
 */
void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i, j;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			u32 index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (index >= mempool->items_current)
				break;
		}

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);

	vfree(mempool->memblocks_dma_arr);

	vfree(mempool->memblocks_priv_arr);

	vfree(mempool->memblocks_arr);

	vfree(mempool);
}
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration.
 */
enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration.
 */
enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
	    (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
	    ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
	     (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
	    (vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration.
 */
enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
		device_config->vp_config[i].ring.ring_blocks =
				VXGE_HW_DEF_RING_BLOCKS;
		device_config->vp_config[i].ring.buffer_mode =
				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.fifo_blocks =
				VXGE_HW_MIN_FIFO_BLOCKS;
		device_config->vp_config[i].fifo.max_frags =
				VXGE_HW_MAX_FIFO_FRAGS;
		device_config->vp_config[i].fifo.memblock_size =
				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
		device_config->vp_config[i].fifo.alignment_size =
				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
		device_config->vp_config[i].fifo.no_snoop_bits =
				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;

		device_config->vp_config[i].tti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;
		device_config->vp_config[i].tti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].tti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;
		device_config->vp_config[i].rti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;
		device_config->vp_config[i].rti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
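
/*
 * Example (illustrative sketch): callers normally start from these defaults
 * and override only what they need before handing the structure to
 * vxge_hw_device_initialize(), e.g.
 *
 *     struct vxge_hw_device_config config;
 *
 *     vxge_hw_device_config_default_get(&config);
 *     config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *     config.vp_config[0].mtu = VXGE_HW_VPATH_MAX_INITIAL_MTU;
 *     // __vxge_hw_device_config_check() validates the result when
 *     // vxge_hw_device_initialize() is called.
 */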
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = readq(&legacy_reg->toc_swapper_fb);

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
#endif
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the kdfc.
 */
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
	struct vxge_hw_legacy_reg __iomem *legacy_reg,
	struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_device_config - Retrieve device configuration.
 * Get device configuration. Permits to retrieve at run-time configuration
 * values that were used to initialize and configure the device.
 */
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
			   struct vxge_hw_device_config *dev_config, int size)
{
	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
		return VXGE_HW_ERR_INVALID_DEVICE;

	if (size != sizeof(struct vxge_hw_device_config))
		return VXGE_HW_ERR_VERSION_CONFLICT;

	memcpy(dev_config, &hldev->config,
	       sizeof(struct vxge_hw_device_config));

	return VXGE_HW_OK;
}
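
/*
 * Illustrative usage sketch (editorial): the size argument doubles as a
 * simple version check, so callers pass the size of the structure they were
 * compiled against.  "hldev" below is assumed to be a device handle
 * obtained from the driver's device-initialization path.
 *
 *	struct vxge_hw_device_config cfg;
 *
 *	if (vxge_hw_mgmt_device_config(hldev, &cfg, sizeof(cfg)) ==
 *	    VXGE_HW_OK)
 *		pr_info("device_poll_millis = %u\n", cfg.device_poll_millis);
 */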
/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
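
/*
 * Illustrative usage sketch (editorial): reading one 64-bit Titan register
 * through the management interface.  The 0x20 offset is hypothetical;
 * real callers derive offsets from the corresponding vxge_hw_*_reg layout.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *				       0, 0x20, &val);
 *	if (status == VXGE_HW_OK)
 *		pr_debug("vpath reg @0x20 = 0x%llx\n",
 *			 (unsigned long long)val);
 */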
/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		       enum vxge_hw_mgmt_reg_type type,
		       u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
			offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * list callback
 * This function is the callback passed to __vxge_hw_mempool_create to
 * initialize the memory allocated for the TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
		(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	txdp->host_control = (u64) (size_t)
	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock   = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}
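
/*
 * Editorial note: each mempool memory block holds several TxD lists, and
 * the per-TxDL private area records where its list sits inside the block so
 * the DMA address can be recomputed as block base + offset.  A small worked
 * example, assuming a hypothetical 8 KB memblock, 5 fragments per TxDL and
 * the four-quadword (32-byte) TxD layout:
 *
 *	txdl_size         = 5 * sizeof(struct vxge_hw_fifo_txd)  = 160 bytes
 *	txdl_per_memblock = 8192 / 160                            = 51 TxDLs
 *	dma_addr(txdp)    = dma_object->addr +
 *			    ((char *)txdp - (char *)memblock)
 */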
2501 * __vxge_hw_fifo_create - Create a FIFO
2502 * This function creates FIFO and initializes it.
2505 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle
*vp
,
2506 struct vxge_hw_fifo_attr
*attr
)
2508 enum vxge_hw_status status
= VXGE_HW_OK
;
2509 struct __vxge_hw_fifo
*fifo
;
2510 struct vxge_hw_fifo_config
*config
;
2511 u32 txdl_size
, txdl_per_memblock
;
2512 struct vxge_hw_mempool_cbs fifo_mp_callback
;
2513 struct __vxge_hw_virtualpath
*vpath
;
2515 if ((vp
== NULL
) || (attr
== NULL
)) {
2516 status
= VXGE_HW_ERR_INVALID_HANDLE
;
2520 config
= &vpath
->hldev
->config
.vp_config
[vpath
->vp_id
].fifo
;
2522 txdl_size
= config
->max_frags
* sizeof(struct vxge_hw_fifo_txd
);
2524 txdl_per_memblock
= config
->memblock_size
/ txdl_size
;
2526 fifo
= (struct __vxge_hw_fifo
*)__vxge_hw_channel_allocate(vp
,
2527 VXGE_HW_CHANNEL_TYPE_FIFO
,
2528 config
->fifo_blocks
* txdl_per_memblock
,
2529 attr
->per_txdl_space
, attr
->userdata
);
2532 status
= VXGE_HW_ERR_OUT_OF_MEMORY
;
2536 vpath
->fifoh
= fifo
;
2537 fifo
->nofl_db
= vpath
->nofl_db
;
2539 fifo
->vp_id
= vpath
->vp_id
;
2540 fifo
->vp_reg
= vpath
->vp_reg
;
2541 fifo
->stats
= &vpath
->sw_stats
->fifo_stats
;
2543 fifo
->config
= config
;
2545 /* apply "interrupts per txdl" attribute */
2546 fifo
->interrupt_type
= VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ
;
2548 if (fifo
->config
->intr
)
2549 fifo
->interrupt_type
= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST
;
2551 fifo
->no_snoop_bits
= config
->no_snoop_bits
;
2554 * FIFO memory management strategy:
2556 * TxDL split into three independent parts:
2558 * - TxD HW private part
2559 * - driver private part
2561 * Adaptative memory allocation used. i.e. Memory allocated on
2562 * demand with the size which will fit into one memory block.
2563 * One memory block may contain more than one TxDL.
2565 * During "reserve" operations more memory can be allocated on demand
2566 * for example due to FIFO full condition.
2568 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
2569 * routine which will essentially stop the channel and free resources.
2572 /* TxDL common private size == TxDL private + driver private */
2574 sizeof(struct __vxge_hw_fifo_txdl_priv
) + attr
->per_txdl_space
;
2575 fifo
->priv_size
= ((fifo
->priv_size
+ VXGE_CACHE_LINE_SIZE
- 1) /
2576 VXGE_CACHE_LINE_SIZE
) * VXGE_CACHE_LINE_SIZE
;
2578 fifo
->per_txdl_space
= attr
->per_txdl_space
;
2580 /* recompute txdl size to be cacheline aligned */
2581 fifo
->txdl_size
= txdl_size
;
2582 fifo
->txdl_per_memblock
= txdl_per_memblock
;
2584 fifo
->txdl_term
= attr
->txdl_term
;
2585 fifo
->callback
= attr
->callback
;
2587 if (fifo
->txdl_per_memblock
== 0) {
2588 __vxge_hw_fifo_delete(vp
);
2589 status
= VXGE_HW_ERR_INVALID_BLOCK_SIZE
;
2593 fifo_mp_callback
.item_func_alloc
= __vxge_hw_fifo_mempool_item_alloc
;
2596 __vxge_hw_mempool_create(vpath
->hldev
,
2597 fifo
->config
->memblock_size
,
2600 (fifo
->config
->fifo_blocks
* fifo
->txdl_per_memblock
),
2601 (fifo
->config
->fifo_blocks
* fifo
->txdl_per_memblock
),
2605 if (fifo
->mempool
== NULL
) {
2606 __vxge_hw_fifo_delete(vp
);
2607 status
= VXGE_HW_ERR_OUT_OF_MEMORY
;
2611 status
= __vxge_hw_channel_initialize(&fifo
->channel
);
2612 if (status
!= VXGE_HW_OK
) {
2613 __vxge_hw_fifo_delete(vp
);
2617 vxge_assert(fifo
->channel
.reserve_ptr
);
/*
 * __vxge_hw_fifo_abort - Returns the TxDs
 * This function terminates the outstanding TxDs of the fifo
 */
enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
			VXGE_HW_TXDL_STATE_POSTED,
			fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool != NULL)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;

	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}
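
/*
 * Illustrative usage sketch (editorial): __vxge_hw_vpath_initialize() later
 * in this file uses this helper to fetch the PCIe Device Control word
 * (config offset 0x78 on this adapter) and derive the max read request
 * size:
 *
 *	u32 val32;
 *
 *	if (__vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32) == VXGE_HW_OK)
 *		mrrs = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
 */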
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static inline void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
		u64 dta_struct_sel)
{
	writeq(0, &vpath_reg->rts_access_steer_ctrl);
	wmb();
	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
	writeq(0, &vpath_reg->rts_access_steer_data1);
	wmb();
}
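
/*
 * Editorial note: the firmware "memo" reads that follow all share one
 * access pattern -- select the item with __vxge_hw_read_rts_ds(), kick the
 * command with the STROBE bit, poll until the strobe clears, then check
 * RMACJ_STATUS before trusting rts_access_steer_data0/1.  A condensed
 * sketch of that sequence, using the same constants as the code below:
 *
 *	__vxge_hw_read_rts_ds(vpath_reg,
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
 *	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vpath_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *	if (status == VXGE_HW_OK &&
 *	    (readq(&vpath_reg->rts_access_steer_ctrl) &
 *	     VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS))
 *		data = readq(&vpath_reg->rts_access_steer_data0);
 */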
2759 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2760 * part number and product description.
2763 __vxge_hw_vpath_card_info_get(
2765 struct vxge_hw_vpath_reg __iomem
*vpath_reg
,
2766 struct vxge_hw_device_hw_info
*hw_info
)
2772 enum vxge_hw_status status
= VXGE_HW_OK
;
2773 u8
*serial_number
= hw_info
->serial_number
;
2774 u8
*part_number
= hw_info
->part_number
;
2775 u8
*product_desc
= hw_info
->product_desc
;
2777 __vxge_hw_read_rts_ds(vpath_reg
,
2778 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER
);
2780 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2781 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY
) |
2782 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2783 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
2784 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
2785 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2787 status
= __vxge_hw_pio_mem_write64(val64
,
2788 &vpath_reg
->rts_access_steer_ctrl
,
2789 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
2790 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
2792 if (status
!= VXGE_HW_OK
)
2795 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
2797 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
2798 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
2799 ((u64
*)serial_number
)[0] = be64_to_cpu(data1
);
2801 data2
= readq(&vpath_reg
->rts_access_steer_data1
);
2802 ((u64
*)serial_number
)[1] = be64_to_cpu(data2
);
2803 status
= VXGE_HW_OK
;
2807 __vxge_hw_read_rts_ds(vpath_reg
,
2808 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER
);
2810 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2811 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY
) |
2812 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2813 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
2814 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
2815 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2817 status
= __vxge_hw_pio_mem_write64(val64
,
2818 &vpath_reg
->rts_access_steer_ctrl
,
2819 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
2820 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
2822 if (status
!= VXGE_HW_OK
)
2825 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
2827 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
2829 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
2830 ((u64
*)part_number
)[0] = be64_to_cpu(data1
);
2832 data2
= readq(&vpath_reg
->rts_access_steer_data1
);
2833 ((u64
*)part_number
)[1] = be64_to_cpu(data2
);
2835 status
= VXGE_HW_OK
;
2842 for (i
= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0
;
2843 i
<= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3
; i
++) {
2845 __vxge_hw_read_rts_ds(vpath_reg
, i
);
2847 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2848 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY
) |
2849 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2850 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
2851 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
2852 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2854 status
= __vxge_hw_pio_mem_write64(val64
,
2855 &vpath_reg
->rts_access_steer_ctrl
,
2856 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
2857 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
2859 if (status
!= VXGE_HW_OK
)
2862 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
2864 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
2866 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
2867 ((u64
*)product_desc
)[j
++] = be64_to_cpu(data1
);
2869 data2
= readq(&vpath_reg
->rts_access_steer_data1
);
2870 ((u64
*)product_desc
)[j
++] = be64_to_cpu(data2
);
2872 status
= VXGE_HW_OK
;
2881 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2882 * Returns FW Version
2885 __vxge_hw_vpath_fw_ver_get(
2887 struct vxge_hw_vpath_reg __iomem
*vpath_reg
,
2888 struct vxge_hw_device_hw_info
*hw_info
)
2893 struct vxge_hw_device_version
*fw_version
= &hw_info
->fw_version
;
2894 struct vxge_hw_device_date
*fw_date
= &hw_info
->fw_date
;
2895 struct vxge_hw_device_version
*flash_version
= &hw_info
->flash_version
;
2896 struct vxge_hw_device_date
*flash_date
= &hw_info
->flash_date
;
2897 enum vxge_hw_status status
= VXGE_HW_OK
;
2899 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2900 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY
) |
2901 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2902 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
2903 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
2904 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2906 status
= __vxge_hw_pio_mem_write64(val64
,
2907 &vpath_reg
->rts_access_steer_ctrl
,
2908 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
2909 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
2911 if (status
!= VXGE_HW_OK
)
2914 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
2916 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
2918 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
2919 data2
= readq(&vpath_reg
->rts_access_steer_data1
);
2922 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2925 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2928 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2931 snprintf(fw_date
->date
, VXGE_HW_FW_STRLEN
, "%2.2d/%2.2d/%4.4d",
2932 fw_date
->month
, fw_date
->day
, fw_date
->year
);
2935 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1
);
2937 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1
);
2939 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1
);
2941 snprintf(fw_version
->version
, VXGE_HW_FW_STRLEN
, "%d.%d.%d",
2942 fw_version
->major
, fw_version
->minor
, fw_version
->build
);
2945 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2
);
2947 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2
);
2949 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2
);
2951 snprintf(flash_date
->date
, VXGE_HW_FW_STRLEN
,
2952 "%2.2d/%2.2d/%4.4d",
2953 flash_date
->month
, flash_date
->day
, flash_date
->year
);
2955 flash_version
->major
=
2956 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2
);
2957 flash_version
->minor
=
2958 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2
);
2959 flash_version
->build
=
2960 (u32
)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2
);
2962 snprintf(flash_version
->version
, VXGE_HW_FW_STRLEN
, "%d.%d.%d",
2963 flash_version
->major
, flash_version
->minor
,
2964 flash_version
->build
);
2966 status
= VXGE_HW_OK
;
2969 status
= VXGE_HW_FAIL
;
2975 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2976 * Returns pci function mode
2979 __vxge_hw_vpath_pci_func_mode_get(
2981 struct vxge_hw_vpath_reg __iomem
*vpath_reg
)
2985 enum vxge_hw_status status
= VXGE_HW_OK
;
2987 __vxge_hw_read_rts_ds(vpath_reg
,
2988 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE
);
2990 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2991 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY
) |
2992 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2993 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
2994 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
2995 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2997 status
= __vxge_hw_pio_mem_write64(val64
,
2998 &vpath_reg
->rts_access_steer_ctrl
,
2999 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
3000 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
3002 if (status
!= VXGE_HW_OK
)
3005 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
3007 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
3008 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
3009 status
= VXGE_HW_OK
;
3012 status
= VXGE_HW_FAIL
;
3019 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3020 * @hldev: HW device.
3021 * @on_off: TRUE if flickering to be on, FALSE to be off
3023 * Flicker the link LED.
3026 vxge_hw_device_flick_link_led(struct __vxge_hw_device
*hldev
,
3030 enum vxge_hw_status status
= VXGE_HW_OK
;
3031 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3033 if (hldev
== NULL
) {
3034 status
= VXGE_HW_ERR_INVALID_DEVICE
;
3038 vp_reg
= hldev
->vpath_reg
[hldev
->first_vp_id
];
3040 writeq(0, &vp_reg
->rts_access_steer_ctrl
);
3042 writeq(on_off
, &vp_reg
->rts_access_steer_data0
);
3043 writeq(0, &vp_reg
->rts_access_steer_data1
);
3046 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3047 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL
) |
3048 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3049 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
) |
3050 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
3051 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3053 status
= __vxge_hw_pio_mem_write64(val64
,
3054 &vp_reg
->rts_access_steer_ctrl
,
3055 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
3056 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
3062 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3065 __vxge_hw_vpath_rts_table_get(
3066 struct __vxge_hw_vpath_handle
*vp
,
3067 u32 action
, u32 rts_table
, u32 offset
, u64
*data1
, u64
*data2
)
3070 struct __vxge_hw_virtualpath
*vpath
;
3071 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3073 enum vxge_hw_status status
= VXGE_HW_OK
;
3076 status
= VXGE_HW_ERR_INVALID_HANDLE
;
3081 vp_reg
= vpath
->vp_reg
;
3083 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action
) |
3084 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table
) |
3085 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
3086 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset
);
3089 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT
) ||
3091 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT
) ||
3093 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK
) ||
3095 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY
)) {
3096 val64
= val64
| VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL
;
3099 status
= __vxge_hw_pio_mem_write64(val64
,
3100 &vp_reg
->rts_access_steer_ctrl
,
3101 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
3102 vpath
->hldev
->config
.device_poll_millis
);
3104 if (status
!= VXGE_HW_OK
)
3107 val64
= readq(&vp_reg
->rts_access_steer_ctrl
);
3109 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
3111 *data1
= readq(&vp_reg
->rts_access_steer_data0
);
3114 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA
) ||
3116 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT
)) {
3117 *data2
= readq(&vp_reg
->rts_access_steer_data1
);
3119 status
= VXGE_HW_OK
;
3121 status
= VXGE_HW_FAIL
;
3127 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3130 __vxge_hw_vpath_rts_table_set(
3131 struct __vxge_hw_vpath_handle
*vp
, u32 action
, u32 rts_table
,
3132 u32 offset
, u64 data1
, u64 data2
)
3135 struct __vxge_hw_virtualpath
*vpath
;
3136 enum vxge_hw_status status
= VXGE_HW_OK
;
3137 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3140 status
= VXGE_HW_ERR_INVALID_HANDLE
;
3145 vp_reg
= vpath
->vp_reg
;
3147 writeq(data1
, &vp_reg
->rts_access_steer_data0
);
3150 if ((rts_table
== VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA
) ||
3152 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT
)) {
3153 writeq(data2
, &vp_reg
->rts_access_steer_data1
);
3157 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action
) |
3158 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table
) |
3159 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
3160 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset
);
3162 status
= __vxge_hw_pio_mem_write64(val64
,
3163 &vp_reg
->rts_access_steer_ctrl
,
3164 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
3165 vpath
->hldev
->config
.device_poll_millis
);
3167 if (status
!= VXGE_HW_OK
)
3170 val64
= readq(&vp_reg
->rts_access_steer_ctrl
);
3172 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
)
3173 status
= VXGE_HW_OK
;
3175 status
= VXGE_HW_FAIL
;
3181 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3182 * from MAC address table.
3185 __vxge_hw_vpath_addr_get(
3186 u32 vp_id
, struct vxge_hw_vpath_reg __iomem
*vpath_reg
,
3187 u8 (macaddr
)[ETH_ALEN
], u8 (macaddr_mask
)[ETH_ALEN
])
3193 enum vxge_hw_status status
= VXGE_HW_OK
;
3195 val64
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3196 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY
) |
3197 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3198 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA
) |
3199 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
|
3200 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3202 status
= __vxge_hw_pio_mem_write64(val64
,
3203 &vpath_reg
->rts_access_steer_ctrl
,
3204 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE
,
3205 VXGE_HW_DEF_DEVICE_POLL_MILLIS
);
3207 if (status
!= VXGE_HW_OK
)
3210 val64
= readq(&vpath_reg
->rts_access_steer_ctrl
);
3212 if (val64
& VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS
) {
3214 data1
= readq(&vpath_reg
->rts_access_steer_data0
);
3215 data2
= readq(&vpath_reg
->rts_access_steer_data1
);
3217 data1
= VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1
);
3218 data2
= VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3221 for (i
= ETH_ALEN
; i
> 0; i
--) {
3222 macaddr
[i
-1] = (u8
)(data1
& 0xFF);
3225 macaddr_mask
[i
-1] = (u8
)(data2
& 0xFF);
3228 status
= VXGE_HW_OK
;
3230 status
= VXGE_HW_FAIL
;
3236 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3238 enum vxge_hw_status
vxge_hw_vpath_rts_rth_set(
3239 struct __vxge_hw_vpath_handle
*vp
,
3240 enum vxge_hw_rth_algoritms algorithm
,
3241 struct vxge_hw_rth_hash_types
*hash_type
,
3245 enum vxge_hw_status status
= VXGE_HW_OK
;
3248 status
= VXGE_HW_ERR_INVALID_HANDLE
;
3252 status
= __vxge_hw_vpath_rts_table_get(vp
,
3253 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY
,
3254 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG
,
3257 data0
&= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3258 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3260 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN
|
3261 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size
) |
3262 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm
);
3264 if (hash_type
->hash_type_tcpipv4_en
)
3265 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN
;
3267 if (hash_type
->hash_type_ipv4_en
)
3268 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN
;
3270 if (hash_type
->hash_type_tcpipv6_en
)
3271 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN
;
3273 if (hash_type
->hash_type_ipv6_en
)
3274 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN
;
3276 if (hash_type
->hash_type_tcpipv6ex_en
)
3278 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN
;
3280 if (hash_type
->hash_type_ipv6ex_en
)
3281 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN
;
3283 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0
))
3284 data0
&= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE
;
3286 data0
|= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE
;
3288 status
= __vxge_hw_vpath_rts_table_set(vp
,
3289 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY
,
3290 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG
,
3297 vxge_hw_rts_rth_data0_data1_get(u32 j
, u64
*data0
, u64
*data1
,
3298 u16 flag
, u8
*itable
)
3302 *data0
= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j
)|
3303 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN
|
3304 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3308 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j
)|
3309 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN
|
3310 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3313 *data1
= VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j
)|
3314 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN
|
3315 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3319 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j
)|
3320 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN
|
3321 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3328 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3330 enum vxge_hw_status
vxge_hw_vpath_rts_rth_itable_set(
3331 struct __vxge_hw_vpath_handle
**vpath_handles
,
3337 u32 i
, j
, action
, rts_table
;
3341 enum vxge_hw_status status
= VXGE_HW_OK
;
3342 struct __vxge_hw_vpath_handle
*vp
= vpath_handles
[0];
3345 status
= VXGE_HW_ERR_INVALID_HANDLE
;
3349 max_entries
= (((u32
)1) << itable_size
);
3351 if (vp
->vpath
->hldev
->config
.rth_it_type
3352 == VXGE_HW_RTH_IT_TYPE_SOLO_IT
) {
3353 action
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY
;
3355 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT
;
3357 for (j
= 0; j
< max_entries
; j
++) {
3362 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3365 status
= __vxge_hw_vpath_rts_table_set(vpath_handles
[0],
3366 action
, rts_table
, j
, data0
, data1
);
3368 if (status
!= VXGE_HW_OK
)
3372 for (j
= 0; j
< max_entries
; j
++) {
3377 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN
|
3378 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3381 status
= __vxge_hw_vpath_rts_table_set(
3382 vpath_handles
[mtable
[itable
[j
]]], action
,
3383 rts_table
, j
, data0
, data1
);
3385 if (status
!= VXGE_HW_OK
)
3389 action
= VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY
;
3391 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT
;
3392 for (i
= 0; i
< vpath_count
; i
++) {
3394 for (j
= 0; j
< max_entries
;) {
3399 while (j
< max_entries
) {
3400 if (mtable
[itable
[j
]] != i
) {
3404 vxge_hw_rts_rth_data0_data1_get(j
,
3405 &data0
, &data1
, 1, itable
);
3410 while (j
< max_entries
) {
3411 if (mtable
[itable
[j
]] != i
) {
3415 vxge_hw_rts_rth_data0_data1_get(j
,
3416 &data0
, &data1
, 2, itable
);
3421 while (j
< max_entries
) {
3422 if (mtable
[itable
[j
]] != i
) {
3426 vxge_hw_rts_rth_data0_data1_get(j
,
3427 &data0
, &data1
, 3, itable
);
3432 while (j
< max_entries
) {
3433 if (mtable
[itable
[j
]] != i
) {
3437 vxge_hw_rts_rth_data0_data1_get(j
,
3438 &data0
, &data1
, 4, itable
);
3444 status
= __vxge_hw_vpath_rts_table_set(
3449 if (status
!= VXGE_HW_OK
)
/*
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ring: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 rxd_new_count, rxd_spat;

	if (ring == NULL)
		return status;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

	if (rxd_new_count >= rxd_spat)
		status = VXGE_HW_FAIL;

	return status;
}
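
/*
 * Illustrative usage sketch (editorial): callers typically run this check
 * after posting RxDs and before ringing the doorbell again, e.g.
 *
 *	if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
 *		record the event in the driver's stats/logging of choice
 *
 * The comparison works because NEW_QW_CNT counts quad-words posted but not
 * yet consumed, while RXD_SPAT is the threshold programmed in PRC_CFG6.
 */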
3488 * __vxge_hw_vpath_mgmt_read
3489 * This routine reads the vpath_mgmt registers
3491 static enum vxge_hw_status
3492 __vxge_hw_vpath_mgmt_read(
3493 struct __vxge_hw_device
*hldev
,
3494 struct __vxge_hw_virtualpath
*vpath
)
3496 u32 i
, mtu
= 0, max_pyld
= 0;
3498 enum vxge_hw_status status
= VXGE_HW_OK
;
3500 for (i
= 0; i
< VXGE_HW_MAC_MAX_MAC_PORT_ID
; i
++) {
3502 val64
= readq(&vpath
->vpmgmt_reg
->
3503 rxmac_cfg0_port_vpmgmt_clone
[i
]);
3506 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3512 vpath
->max_mtu
= mtu
+ VXGE_HW_MAC_HEADER_MAX_SIZE
;
3514 val64
= readq(&vpath
->vpmgmt_reg
->xmac_vsport_choices_vp
);
3516 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
3517 if (val64
& vxge_mBIT(i
))
3518 vpath
->vsport_number
= i
;
3521 val64
= readq(&vpath
->vpmgmt_reg
->xgmac_gen_status_vpmgmt_clone
);
3523 if (val64
& VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK
)
3524 VXGE_HW_DEVICE_LINK_STATE_SET(vpath
->hldev
, VXGE_HW_LINK_UP
);
3526 VXGE_HW_DEVICE_LINK_STATE_SET(vpath
->hldev
, VXGE_HW_LINK_DOWN
);
/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter completed the reset process for the vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}
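
/*
 * Illustrative sketch (editorial): a full vpath reset is the write above
 * followed by polling for completion, which is exactly how
 * __vxge_hw_vp_initialize() later in this file drives it:
 *
 *	__vxge_hw_vpath_reset(hldev, vp_id);
 *	status = __vxge_hw_vpath_reset_check(&hldev->virtual_paths[vp_id]);
 *	if (status != VXGE_HW_OK)
 *		bail out -- the adapter never cleared vpath_rst_in_prog
 */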
3569 * __vxge_hw_vpath_sw_reset
3570 * This routine resets the vpath structures
3573 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device
*hldev
, u32 vp_id
)
3575 enum vxge_hw_status status
= VXGE_HW_OK
;
3576 struct __vxge_hw_virtualpath
*vpath
;
3578 vpath
= (struct __vxge_hw_virtualpath
*)&hldev
->virtual_paths
[vp_id
];
3581 status
= __vxge_hw_ring_reset(vpath
->ringh
);
3582 if (status
!= VXGE_HW_OK
)
3587 status
= __vxge_hw_fifo_reset(vpath
->fifoh
);
3593 * __vxge_hw_vpath_prc_configure
3594 * This routine configures the prc registers of virtual path using the config
3598 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device
*hldev
, u32 vp_id
)
3601 struct __vxge_hw_virtualpath
*vpath
;
3602 struct vxge_hw_vp_config
*vp_config
;
3603 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3605 vpath
= &hldev
->virtual_paths
[vp_id
];
3606 vp_reg
= vpath
->vp_reg
;
3607 vp_config
= vpath
->vp_config
;
3609 if (vp_config
->ring
.enable
== VXGE_HW_RING_DISABLE
)
3612 val64
= readq(&vp_reg
->prc_cfg1
);
3613 val64
|= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE
;
3614 writeq(val64
, &vp_reg
->prc_cfg1
);
3616 val64
= readq(&vpath
->vp_reg
->prc_cfg6
);
3617 val64
|= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN
;
3618 writeq(val64
, &vpath
->vp_reg
->prc_cfg6
);
3620 val64
= readq(&vp_reg
->prc_cfg7
);
3622 if (vpath
->vp_config
->ring
.scatter_mode
!=
3623 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT
) {
3625 val64
&= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3627 switch (vpath
->vp_config
->ring
.scatter_mode
) {
3628 case VXGE_HW_RING_SCATTER_MODE_A
:
3629 val64
|= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3630 VXGE_HW_PRC_CFG7_SCATTER_MODE_A
);
3632 case VXGE_HW_RING_SCATTER_MODE_B
:
3633 val64
|= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3634 VXGE_HW_PRC_CFG7_SCATTER_MODE_B
);
3636 case VXGE_HW_RING_SCATTER_MODE_C
:
3637 val64
|= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3638 VXGE_HW_PRC_CFG7_SCATTER_MODE_C
);
3643 writeq(val64
, &vp_reg
->prc_cfg7
);
3645 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3646 __vxge_hw_ring_first_block_address_get(
3647 vpath
->ringh
) >> 3), &vp_reg
->prc_cfg5
);
3649 val64
= readq(&vp_reg
->prc_cfg4
);
3650 val64
|= VXGE_HW_PRC_CFG4_IN_SVC
;
3651 val64
&= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3653 val64
|= VXGE_HW_PRC_CFG4_RING_MODE(
3654 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER
);
3656 if (hldev
->config
.rth_en
== VXGE_HW_RTH_DISABLE
)
3657 val64
|= VXGE_HW_PRC_CFG4_RTH_DISABLE
;
3659 val64
&= ~VXGE_HW_PRC_CFG4_RTH_DISABLE
;
3661 writeq(val64
, &vp_reg
->prc_cfg4
);
3666 * __vxge_hw_vpath_kdfc_configure
3667 * This routine configures the kdfc registers of virtual path using the
3671 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device
*hldev
, u32 vp_id
)
3675 enum vxge_hw_status status
= VXGE_HW_OK
;
3676 struct __vxge_hw_virtualpath
*vpath
;
3677 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3679 vpath
= &hldev
->virtual_paths
[vp_id
];
3680 vp_reg
= vpath
->vp_reg
;
3681 status
= __vxge_hw_kdfc_swapper_set(hldev
->legacy_reg
, vp_reg
);
3683 if (status
!= VXGE_HW_OK
)
3686 val64
= readq(&vp_reg
->kdfc_drbl_triplet_total
);
3688 vpath
->max_kdfc_db
=
3689 (u32
)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3692 if (vpath
->vp_config
->fifo
.enable
== VXGE_HW_FIFO_ENABLE
) {
3694 vpath
->max_nofl_db
= vpath
->max_kdfc_db
;
3696 if (vpath
->max_nofl_db
<
3697 ((vpath
->vp_config
->fifo
.memblock_size
/
3698 (vpath
->vp_config
->fifo
.max_frags
*
3699 sizeof(struct vxge_hw_fifo_txd
))) *
3700 vpath
->vp_config
->fifo
.fifo_blocks
)) {
3702 return VXGE_HW_BADCFG_FIFO_BLOCKS
;
3704 val64
= VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3705 (vpath
->max_nofl_db
*2)-1);
3708 writeq(val64
, &vp_reg
->kdfc_fifo_trpl_partition
);
3710 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE
,
3711 &vp_reg
->kdfc_fifo_trpl_ctrl
);
3713 val64
= readq(&vp_reg
->kdfc_trpl_fifo_0_ctrl
);
3715 val64
&= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3716 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3718 val64
|= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3719 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY
) |
3720 #ifndef __BIG_ENDIAN
3721 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN
|
3723 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3725 writeq(val64
, &vp_reg
->kdfc_trpl_fifo_0_ctrl
);
3726 writeq((u64
)0, &vp_reg
->kdfc_trpl_fifo_0_wb_address
);
3728 vpath_stride
= readq(&hldev
->toc_reg
->toc_kdfc_vpath_stride
);
3731 (struct __vxge_hw_non_offload_db_wrapper __iomem
*)
3732 (hldev
->kdfc
+ (vp_id
*
3733 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3740 * __vxge_hw_vpath_mac_configure
3741 * This routine configures the mac of virtual path using the config passed
3744 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device
*hldev
, u32 vp_id
)
3747 enum vxge_hw_status status
= VXGE_HW_OK
;
3748 struct __vxge_hw_virtualpath
*vpath
;
3749 struct vxge_hw_vp_config
*vp_config
;
3750 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3752 vpath
= &hldev
->virtual_paths
[vp_id
];
3753 vp_reg
= vpath
->vp_reg
;
3754 vp_config
= vpath
->vp_config
;
3756 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3757 vpath
->vsport_number
), &vp_reg
->xmac_vsport_choice
);
3759 if (vp_config
->ring
.enable
== VXGE_HW_RING_ENABLE
) {
3761 val64
= readq(&vp_reg
->xmac_rpa_vcfg
);
3763 if (vp_config
->rpa_strip_vlan_tag
!=
3764 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT
) {
3765 if (vp_config
->rpa_strip_vlan_tag
)
3766 val64
|= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG
;
3768 val64
&= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG
;
3771 writeq(val64
, &vp_reg
->xmac_rpa_vcfg
);
3772 val64
= readq(&vp_reg
->rxmac_vcfg0
);
3774 if (vp_config
->mtu
!=
3775 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU
) {
3776 val64
&= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3777 if ((vp_config
->mtu
+
3778 VXGE_HW_MAC_HEADER_MAX_SIZE
) < vpath
->max_mtu
)
3779 val64
|= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3781 VXGE_HW_MAC_HEADER_MAX_SIZE
);
3783 val64
|= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3787 writeq(val64
, &vp_reg
->rxmac_vcfg0
);
3789 val64
= readq(&vp_reg
->rxmac_vcfg1
);
3791 val64
&= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3792 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE
);
3794 if (hldev
->config
.rth_it_type
==
3795 VXGE_HW_RTH_IT_TYPE_MULTI_IT
) {
3796 val64
|= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3798 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE
;
3801 writeq(val64
, &vp_reg
->rxmac_vcfg1
);
3807 * __vxge_hw_vpath_tim_configure
3808 * This routine configures the tim registers of virtual path using the config
3812 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device
*hldev
, u32 vp_id
)
3815 enum vxge_hw_status status
= VXGE_HW_OK
;
3816 struct __vxge_hw_virtualpath
*vpath
;
3817 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
3818 struct vxge_hw_vp_config
*config
;
3820 vpath
= &hldev
->virtual_paths
[vp_id
];
3821 vp_reg
= vpath
->vp_reg
;
3822 config
= vpath
->vp_config
;
3824 writeq((u64
)0, &vp_reg
->tim_dest_addr
);
3825 writeq((u64
)0, &vp_reg
->tim_vpath_map
);
3826 writeq((u64
)0, &vp_reg
->tim_bitmap
);
3827 writeq((u64
)0, &vp_reg
->tim_remap
);
3829 if (config
->ring
.enable
== VXGE_HW_RING_ENABLE
)
3830 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3831 (vp_id
* VXGE_HW_MAX_INTR_PER_VP
) +
3832 VXGE_HW_VPATH_INTR_RX
), &vp_reg
->tim_ring_assn
);
3834 val64
= readq(&vp_reg
->tim_pci_cfg
);
3835 val64
|= VXGE_HW_TIM_PCI_CFG_ADD_PAD
;
3836 writeq(val64
, &vp_reg
->tim_pci_cfg
);
3838 if (config
->fifo
.enable
== VXGE_HW_FIFO_ENABLE
) {
3840 val64
= readq(&vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3842 if (config
->tti
.btimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3843 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3845 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3846 config
->tti
.btimer_val
);
3849 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN
;
3851 if (config
->tti
.timer_ac_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3852 if (config
->tti
.timer_ac_en
)
3853 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC
;
3855 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC
;
3858 if (config
->tti
.timer_ci_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3859 if (config
->tti
.timer_ci_en
)
3860 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI
;
3862 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI
;
3865 if (config
->tti
.urange_a
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3866 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3867 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3868 config
->tti
.urange_a
);
3871 if (config
->tti
.urange_b
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3872 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3873 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3874 config
->tti
.urange_b
);
3877 if (config
->tti
.urange_c
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3878 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3879 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3880 config
->tti
.urange_c
);
3883 writeq(val64
, &vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3884 val64
= readq(&vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3886 if (config
->tti
.uec_a
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3887 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3888 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3892 if (config
->tti
.uec_b
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3893 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3894 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3898 if (config
->tti
.uec_c
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3899 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3900 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3904 if (config
->tti
.uec_d
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3905 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3906 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3910 writeq(val64
, &vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3911 val64
= readq(&vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3913 if (config
->tti
.timer_ri_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3914 if (config
->tti
.timer_ri_en
)
3915 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI
;
3917 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI
;
3920 if (config
->tti
.rtimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3921 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3923 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3924 config
->tti
.rtimer_val
);
3927 if (config
->tti
.util_sel
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3928 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3929 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3930 config
->tti
.util_sel
);
3933 if (config
->tti
.ltimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3934 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3936 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3937 config
->tti
.ltimer_val
);
3940 writeq(val64
, &vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_TX
]);
3943 if (config
->ring
.enable
== VXGE_HW_RING_ENABLE
) {
3945 val64
= readq(&vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_RX
]);
3947 if (config
->rti
.btimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3948 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3950 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3951 config
->rti
.btimer_val
);
3954 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN
;
3956 if (config
->rti
.timer_ac_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3957 if (config
->rti
.timer_ac_en
)
3958 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC
;
3960 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC
;
3963 if (config
->rti
.timer_ci_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3964 if (config
->rti
.timer_ci_en
)
3965 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI
;
3967 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI
;
3970 if (config
->rti
.urange_a
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3971 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3972 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3973 config
->rti
.urange_a
);
3976 if (config
->rti
.urange_b
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3977 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3978 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3979 config
->rti
.urange_b
);
3982 if (config
->rti
.urange_c
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3983 val64
&= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3984 val64
|= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3985 config
->rti
.urange_c
);
3988 writeq(val64
, &vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_RX
]);
3989 val64
= readq(&vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_RX
]);
3991 if (config
->rti
.uec_a
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3992 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3993 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3997 if (config
->rti
.uec_b
!= VXGE_HW_USE_FLASH_DEFAULT
) {
3998 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3999 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4003 if (config
->rti
.uec_c
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4004 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4005 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4009 if (config
->rti
.uec_d
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4010 val64
&= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4011 val64
|= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4015 writeq(val64
, &vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_RX
]);
4016 val64
= readq(&vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_RX
]);
4018 if (config
->rti
.timer_ri_en
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4019 if (config
->rti
.timer_ri_en
)
4020 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI
;
4022 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI
;
4025 if (config
->rti
.rtimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4026 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4028 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4029 config
->rti
.rtimer_val
);
4032 if (config
->rti
.util_sel
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4033 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4034 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4035 config
->rti
.util_sel
);
4038 if (config
->rti
.ltimer_val
!= VXGE_HW_USE_FLASH_DEFAULT
) {
4039 val64
&= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4041 val64
|= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4042 config
->rti
.ltimer_val
);
4045 writeq(val64
, &vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_RX
]);
4049 writeq(val64
, &vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_EINTA
]);
4050 writeq(val64
, &vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_EINTA
]);
4051 writeq(val64
, &vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_EINTA
]);
4052 writeq(val64
, &vp_reg
->tim_cfg1_int_num
[VXGE_HW_VPATH_INTR_BMAP
]);
4053 writeq(val64
, &vp_reg
->tim_cfg2_int_num
[VXGE_HW_VPATH_INTR_BMAP
]);
4054 writeq(val64
, &vp_reg
->tim_cfg3_int_num
[VXGE_HW_VPATH_INTR_BMAP
]);
4060 * __vxge_hw_vpath_initialize
4061 * This routine is the final phase of init which initializes the
4062 * registers of the vpath using the configuration passed.
4065 __vxge_hw_vpath_initialize(struct __vxge_hw_device
*hldev
, u32 vp_id
)
4069 enum vxge_hw_status status
= VXGE_HW_OK
;
4070 struct __vxge_hw_virtualpath
*vpath
;
4071 struct vxge_hw_vpath_reg __iomem
*vp_reg
;
4073 vpath
= &hldev
->virtual_paths
[vp_id
];
4075 if (!(hldev
->vpath_assignments
& vxge_mBIT(vp_id
))) {
4076 status
= VXGE_HW_ERR_VPATH_NOT_AVAILABLE
;
4079 vp_reg
= vpath
->vp_reg
;
4081 status
= __vxge_hw_vpath_swapper_set(vpath
->vp_reg
);
4083 if (status
!= VXGE_HW_OK
)
4086 status
= __vxge_hw_vpath_mac_configure(hldev
, vp_id
);
4088 if (status
!= VXGE_HW_OK
)
4091 status
= __vxge_hw_vpath_kdfc_configure(hldev
, vp_id
);
4093 if (status
!= VXGE_HW_OK
)
4096 status
= __vxge_hw_vpath_tim_configure(hldev
, vp_id
);
4098 if (status
!= VXGE_HW_OK
)
4101 writeq(0, &vp_reg
->gendma_int
);
4103 val64
= readq(&vp_reg
->rtdma_rd_optimization_ctrl
);
4105 /* Get MRRS value from device control */
4106 status
= __vxge_hw_vpath_pci_read(vpath
, 1, 0x78, &val32
);
4108 if (status
== VXGE_HW_OK
) {
4109 val32
= (val32
& VXGE_HW_PCI_EXP_DEVCTL_READRQ
) >> 12;
4111 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4113 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32
);
4115 val64
|= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE
;
4118 val64
&= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4120 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4121 VXGE_HW_MAX_PAYLOAD_SIZE_512
);
4123 val64
|= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN
;
4124 writeq(val64
, &vp_reg
->rtdma_rd_optimization_ctrl
);
4131 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4132 * This routine is the initial phase of init which resets the vpath and
4133 * initializes the software support structures.
4136 __vxge_hw_vp_initialize(struct __vxge_hw_device
*hldev
, u32 vp_id
,
4137 struct vxge_hw_vp_config
*config
)
4139 struct __vxge_hw_virtualpath
*vpath
;
4140 enum vxge_hw_status status
= VXGE_HW_OK
;
4142 if (!(hldev
->vpath_assignments
& vxge_mBIT(vp_id
))) {
4143 status
= VXGE_HW_ERR_VPATH_NOT_AVAILABLE
;
4147 vpath
= &hldev
->virtual_paths
[vp_id
];
4149 vpath
->vp_id
= vp_id
;
4150 vpath
->vp_open
= VXGE_HW_VP_OPEN
;
4151 vpath
->hldev
= hldev
;
4152 vpath
->vp_config
= config
;
4153 vpath
->vp_reg
= hldev
->vpath_reg
[vp_id
];
4154 vpath
->vpmgmt_reg
= hldev
->vpmgmt_reg
[vp_id
];
4156 __vxge_hw_vpath_reset(hldev
, vp_id
);
4158 status
= __vxge_hw_vpath_reset_check(vpath
);
4160 if (status
!= VXGE_HW_OK
) {
4161 memset(vpath
, 0, sizeof(struct __vxge_hw_virtualpath
));
4165 status
= __vxge_hw_vpath_mgmt_read(hldev
, vpath
);
4167 if (status
!= VXGE_HW_OK
) {
4168 memset(vpath
, 0, sizeof(struct __vxge_hw_virtualpath
));
4172 INIT_LIST_HEAD(&vpath
->vpath_handles
);
4174 vpath
->sw_stats
= &hldev
->stats
.sw_dev_info_stats
.vpath_info
[vp_id
];
4176 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev
->tim_int_mask0
,
4177 hldev
->tim_int_mask1
, vp_id
);
4179 status
= __vxge_hw_vpath_initialize(hldev
, vp_id
);
4181 if (status
!= VXGE_HW_OK
)
4182 __vxge_hw_vp_terminate(hldev
, vp_id
);
4188 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4189 * This routine closes all channels it opened and freeup memory
4192 __vxge_hw_vp_terminate(struct __vxge_hw_device
*hldev
, u32 vp_id
)
4194 struct __vxge_hw_virtualpath
*vpath
;
4196 vpath
= &hldev
->virtual_paths
[vp_id
];
4198 if (vpath
->vp_open
== VXGE_HW_VP_NOT_OPEN
)
4201 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath
->hldev
->tim_int_mask0
,
4202 vpath
->hldev
->tim_int_mask1
, vpath
->vp_id
);
4203 hldev
->stats
.hw_dev_info_stats
.vpath_info
[vpath
->vp_id
] = NULL
;
4205 memset(vpath
, 0, sizeof(struct __vxge_hw_virtualpath
));
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
exit:
	return status;
}
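
/*
 * Illustrative usage sketch (editorial), following the jumbo-frame example
 * in the comment above; "vp" is a handle returned by vxge_hw_vpath_open():
 *
 *	if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
 *		fall back to the standard 1500-byte MTU
 *
 * Note that the helper internally adds VXGE_HW_MAC_HEADER_MAX_SIZE before
 * range-checking against vpath->max_mtu, so callers pass the L3 MTU only.
 */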
4247 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4248 * This function is used to open access to virtual path of an
4249 * adapter for offload, GRO operations. This function returns
4253 vxge_hw_vpath_open(struct __vxge_hw_device
*hldev
,
4254 struct vxge_hw_vpath_attr
*attr
,
4255 struct __vxge_hw_vpath_handle
**vpath_handle
)
4257 struct __vxge_hw_virtualpath
*vpath
;
4258 struct __vxge_hw_vpath_handle
*vp
;
4259 enum vxge_hw_status status
;
4261 vpath
= &hldev
->virtual_paths
[attr
->vp_id
];
4263 if (vpath
->vp_open
== VXGE_HW_VP_OPEN
) {
4264 status
= VXGE_HW_ERR_INVALID_STATE
;
4265 goto vpath_open_exit1
;
4268 status
= __vxge_hw_vp_initialize(hldev
, attr
->vp_id
,
4269 &hldev
->config
.vp_config
[attr
->vp_id
]);
4271 if (status
!= VXGE_HW_OK
)
4272 goto vpath_open_exit1
;
4274 vp
= (struct __vxge_hw_vpath_handle
*)
4275 vmalloc(sizeof(struct __vxge_hw_vpath_handle
));
4277 status
= VXGE_HW_ERR_OUT_OF_MEMORY
;
4278 goto vpath_open_exit2
;
4281 memset(vp
, 0, sizeof(struct __vxge_hw_vpath_handle
));
4285 if (vpath
->vp_config
->fifo
.enable
== VXGE_HW_FIFO_ENABLE
) {
4286 status
= __vxge_hw_fifo_create(vp
, &attr
->fifo_attr
);
4287 if (status
!= VXGE_HW_OK
)
4288 goto vpath_open_exit6
;
4291 if (vpath
->vp_config
->ring
.enable
== VXGE_HW_RING_ENABLE
) {
4292 status
= __vxge_hw_ring_create(vp
, &attr
->ring_attr
);
4293 if (status
!= VXGE_HW_OK
)
4294 goto vpath_open_exit7
;
4296 __vxge_hw_vpath_prc_configure(hldev
, attr
->vp_id
);
4299 vpath
->fifoh
->tx_intr_num
=
4300 (attr
->vp_id
* VXGE_HW_MAX_INTR_PER_VP
) +
4301 VXGE_HW_VPATH_INTR_TX
;
4303 vpath
->stats_block
= __vxge_hw_blockpool_block_allocate(hldev
,
4304 VXGE_HW_BLOCK_SIZE
);
4306 if (vpath
->stats_block
== NULL
) {
4307 status
= VXGE_HW_ERR_OUT_OF_MEMORY
;
4308 goto vpath_open_exit8
;
4311 vpath
->hw_stats
= (struct vxge_hw_vpath_stats_hw_info
*)vpath
->
4312 stats_block
->memblock
;
4313 memset(vpath
->hw_stats
, 0,
4314 sizeof(struct vxge_hw_vpath_stats_hw_info
));
4316 hldev
->stats
.hw_dev_info_stats
.vpath_info
[attr
->vp_id
] =
4319 vpath
->hw_stats_sav
=
4320 &hldev
->stats
.hw_dev_info_stats
.vpath_info_sav
[attr
->vp_id
];
4321 memset(vpath
->hw_stats_sav
, 0,
4322 sizeof(struct vxge_hw_vpath_stats_hw_info
));
4324 writeq(vpath
->stats_block
->dma_addr
, &vpath
->vp_reg
->stats_cfg
);
4326 status
= vxge_hw_vpath_stats_enable(vp
);
4327 if (status
!= VXGE_HW_OK
)
4328 goto vpath_open_exit8
;
4330 list_add(&vp
->item
, &vpath
->vpath_handles
);
4332 hldev
->vpaths_deployed
|= vxge_mBIT(vpath
->vp_id
);
4336 attr
->fifo_attr
.userdata
= vpath
->fifoh
;
4337 attr
->ring_attr
.userdata
= vpath
->ringh
;
4342 if (vpath
->ringh
!= NULL
)
4343 __vxge_hw_ring_delete(vp
);
4345 if (vpath
->fifoh
!= NULL
)
4346 __vxge_hw_fifo_delete(vp
);
4350 __vxge_hw_vp_terminate(hldev
, attr
->vp_id
);
/*
 * vxge_hw_vpath_rx_doorbell_init - Replenish the RxD doorbell for a vpath
 * @vp: Handle got from previous vpath open
 *
 * This function re-arms the receive doorbell after a vpath reset and
 * recomputes the ring's rxds_limit from the RxD memory size and the
 * RXD_SPAT threshold.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	u64 new_count, val64, val164;
	struct __vxge_hw_ring *ring;

	vpath = vp->vpath;
	ring = vpath->ringh;

	new_count = readq(&vpath->vp_reg->rxdmem_size);
	new_count &= 0x1fff;
	val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val164 /= 2;
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}
/*
 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
 * This function is used to close access to virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);

	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;

vpath_close_exit:
	return status;
}
/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}
/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

exit:
	return status;
}
/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}
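
/*
 * Sketch of the reset flow a caller might use (illustrative only, not
 * compiled): request the reset, poll for completion and re-program the
 * vpath, then clear the reset so traffic can resume.  The helper name is
 * hypothetical; the three calls are the ones defined above.
 */
#if 0
static enum vxge_hw_status example_vpath_reset_flow(
				struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_reset(vp);
	if (status != VXGE_HW_OK)
		return status;

	status = vxge_hw_vpath_recover_from_reset(vp);
	if (status != VXGE_HW_OK)
		return status;

	vxge_hw_vpath_enable(vp);

	return VXGE_HW_OK;
}
#endif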
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory
 */
enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}
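
/*
 * Because vxge_hw_vpath_stats_enable() copies the previous counters into
 * hw_stats_sav before refreshing hw_stats, a caller can compute the traffic
 * seen since the last call as a plain difference.  Illustrative only (not
 * compiled); rx_frm_transferred is just one example field and the helper
 * name is hypothetical.
 */
#if 0
static u32 example_rx_frames_since_last_snapshot(
				struct __vxge_hw_virtualpath *vpath)
{
	return vpath->hw_stats->rx_frm_transferred -
	       vpath->hw_stats_sav->rx_frm_transferred;
}
#endif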
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 *                                and offset and perform an operation
 */
enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);

vpath_stats_access_exit:
	return status;
}
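
/*
 * Illustrative use of the command/strobe interface wrapped above (not
 * compiled): a single 64-bit XMAC counter is read by issuing a READ
 * operation at a given offset.  The offset and helper name used here are
 * hypothetical.
 */
#if 0
static enum vxge_hw_status example_read_one_xmac_stat(
				struct __vxge_hw_virtualpath *vpath,
				u64 *counter)
{
	return __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
					0 /* hypothetical offset */, counter);
}
#endif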
/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
			struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *) vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset++;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *) vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
enum vxge_hw_status __vxge_hw_vpath_stats_get(
			struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}
/*
 * __vxge_hw_blockpool_create - Create block pool
 */
enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {

		memblock = vxge_os_dma_malloc(
				hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);

		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(hldev->pdev,
				dma_addr))) {

			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =
			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
					GFP_KERNEL);
		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}
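
/*
 * Hypothetical initialisation of the pool created above (illustrative only,
 * not compiled): the pool starts with pool_size pre-mapped DMA blocks and
 * keeps at most pool_max resident once blocks start being returned (see
 * __vxge_hw_blockpool_blocks_remove()).  The sizes below are made up; the
 * real driver takes them from its device configuration.
 */
#if 0
static enum vxge_hw_status example_blockpool_setup(
				struct __vxge_hw_device *hldev)
{
	return __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
					16 /* pool_size, hypothetical */,
					8 /* pool_max, hypothetical */);
}
#endif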
/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL) {
		return;
	}

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *) p)->acc_handle);

		list_del(
			&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(
			&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
	}
}
/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}
/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
void vxge_hw_blockpool_block_add(
			struct __vxge_hw_device *devh,
			void *block_addr,
			u32 length,
			struct pci_dev *dma_h,
			struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 req_out;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {

		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = (struct __vxge_hw_blockpool_entry *)
			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;

	req_out = blockpool->req_out;
exit:
	return;
}
/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
				struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			memblock = NULL;
			goto exit;
		}

	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}
/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *				__vxge_hw_blockpool_malloc
 */
void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
			void *memblock, u32 size,
			struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = (struct __vxge_hw_blockpool_entry *)
				vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}
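
/*
 * Sketch of the intended pairing (illustrative only, not compiled): a DMA
 * buffer obtained from __vxge_hw_blockpool_malloc() is returned with the
 * same size and the same vxge_hw_mempool_dma descriptor through
 * __vxge_hw_blockpool_free().  The helper name is hypothetical.
 */
#if 0
static void example_blockpool_roundtrip(struct __vxge_hw_device *devh)
{
	struct vxge_hw_mempool_dma dma;
	void *buf;

	buf = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE, &dma);
	if (buf == NULL)
		return;

	/* ... use buf (CPU address) and dma.addr (bus address) ... */

	__vxge_hw_blockpool_free(devh, buf, VXGE_HW_BLOCK_SIZE, &dma);
}
#endif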
/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh,
					u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}
/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}
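
/*
 * The stats block taken in vxge_hw_vpath_open() above follows this pattern
 * (illustrative only, not compiled): borrow a VXGE_HW_BLOCK_SIZE block from
 * the pool, use entry->memblock / entry->dma_addr, and hand the entry back
 * when the vpath is closed.  The helper name is hypothetical.
 */
#if 0
static void example_stats_block_lifetime(struct __vxge_hw_device *devh)
{
	struct __vxge_hw_blockpool_entry *blk;

	blk = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
	if (blk == NULL)
		return;

	/* blk->memblock is the CPU-visible buffer, blk->dma_addr the
	 * device-visible address */

	__vxge_hw_blockpool_block_free(devh, blk);
}
#endif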