/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);
exit0:
	return NULL;
}
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}
/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}
/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Will poll until masked bit is not cleared.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
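/*
 * Illustrative sketch (not part of the original driver): how a caller would
 * typically use __vxge_hw_device_register_poll() to wait for a self-clearing
 * strobe bit. The register and mask names below are placeholders, not real
 * offsets from this adapter.
 *
 *	enum vxge_hw_status status;
 *
 *	status = __vxge_hw_device_register_poll(
 *			(void __iomem *)&some_reg_block->some_cmd_reg,
 *			SOME_CMD_STROBE_MASK,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *	if (status != VXGE_HW_OK)
 *		return status;	 // bit never cleared within the poll budget
 */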
/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks that the vpath reset-in-progress register has
 * turned zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
	return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
/*
 * __vxge_hw_device_id_get
 * This routine sets the device id and revision numbers in the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = readq(&hldev->common_reg->titan_asic_id);
	hldev->device_id =
		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

	hldev->major_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

	hldev->minor_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
			func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
				hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(i, vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
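/*
 * Illustrative sketch (assumption, not from this file): a PCI probe routine
 * would typically call vxge_hw_device_hw_info_get() on the mapped BAR0 before
 * vxge_hw_device_initialize(), to learn the vpath mask and MAC addresses.
 * "bar0" below stands for whatever the caller obtained from pci_ioremap_bar().
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	if (vxge_hw_device_hw_info_get(bar0, &hw_info) != VXGE_HW_OK)
 *		goto fail;
 *	// hw_info.vpath_mask and hw_info.mac_addrs[] now describe the adapter
 */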
/**
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vmalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	memset(hldev, 0, sizeof(struct __vxge_hw_device));
	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK)
		goto exit;
	__vxge_hw_device_id_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);

	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
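/*
 * Illustrative sketch (assumption): the upper-layer driver fills a
 * vxge_hw_device_attr with the mapped BAR and callbacks, takes the defaults
 * from vxge_hw_device_config_default_get(), and then creates the HW device.
 * "bar0" and "pdev" are the caller's own probe-time handles.
 *
 *	struct vxge_hw_device_attr attr = { .bar0 = bar0, .pdev = pdev };
 *	struct vxge_hw_device_config config;
 *	struct __vxge_hw_device *hldev;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	status = vxge_hw_device_initialize(&hldev, &attr, &config);
 *	...
 *	vxge_hw_device_terminate(hldev);	// undo on teardown
 */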
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			 struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *			  and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;

	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
				1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return 0;
	return hldev->debug_module_mask;
#else
	return 0;
#endif
}
/*
 * vxge_hw_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
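/*
 * Illustrative sketch (assumption): an ethtool-style pause handler would pair
 * the two calls above, reading the current state and then writing it back:
 *
 *	u32 tx = 0, rx = 0;
 *
 *	vxge_hw_device_getpause_data(hldev, port, &tx, &rx);
 *	// ... adjust tx/rx according to the user's request ...
 *	vxge_hw_device_setpause_data(hldev, port, tx, rx);
 */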
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;

	return link_width;
}
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}
/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links an RxD block with the previous one by writing the
 * previous block's DMA address into its next-block pointer
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * This function is the callback passed to __vxge_hw_mempool_create to create
 * the memory pool for RxD blocks
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}
/*
 * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
	void *rxd;
	int i = 0;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
		if (min_flag) {
			i++;
			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
				break;
		}
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
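/*
 * Illustrative note (summary, not original code): each channel keeps three
 * descriptor arrays. vxge_hw_ring_rxd_reserve() hands out entries from
 * reserve_arr, vxge_hw_ring_rxd_post() hands them to the hardware via
 * work_arr, and completed or aborted descriptors come back through free_arr.
 * A single iteration of the replenish loop therefore looks roughly like:
 *
 *	void *rxd;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxd) == VXGE_HW_OK) {
 *		// driver-private initialization of the buffer behind rxd ...
 *		vxge_hw_ring_rxd_post(ring, rxd);
 *	}
 */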
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);

	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);

	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}
/*
 * __vxge_hw_ring_abort - Returns the RxD
 * This function terminates the RxDs of ring
 */
enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_mempool_grow
 * Will resize mempool up to %num_allocate value.
 */
enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
				vmalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		memset(mempool->memblocks_priv_arr[i], 0,
			     mempool->items_priv_size * n_items);

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}
/*
 * vxge_hw_mempool_create
 * This function will create memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 */
struct vxge_hw_mempool *
__vxge_hw_mempool_create(
	struct __vxge_hw_device *devh,
	u32 memblock_size,
	u32 item_size,
	u32 items_priv_size,
	u32 items_initial,
	u32 items_max,
	struct vxge_hw_mempool_cbs *mp_callback,
	void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = (struct vxge_hw_mempool *)
			vmalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}
	memset(mempool, 0, sizeof(struct vxge_hw_mempool));

	mempool->devh			= devh;
	mempool->memblock_size		= memblock_size;
	mempool->items_max		= items_max;
	mempool->items_initial		= items_initial;
	mempool->item_size		= item_size;
	mempool->items_priv_size	= items_priv_size;
	mempool->item_func_alloc	= mp_callback->item_func_alloc;
	mempool->userdata		= userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	memset(mempool->memblocks_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	memset(mempool->memblocks_priv_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
		vmalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	memset(mempool->memblocks_dma_arr, 0,
			sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);

	/* allocate hash array of items */
	mempool->items_arr =
		(void **) vmalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
						mempool->items_per_memblock;

	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
exit:
	return mempool;
}
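/*
 * Illustrative sketch (assumption): a caller supplies the per-item callback
 * through vxge_hw_mempool_cbs and lets the pool pre-allocate items_initial
 * items, in the same way __vxge_hw_ring_create() above does for RxD blocks:
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *	struct vxge_hw_mempool *pool;
 *
 *	cbs.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
 *	pool = __vxge_hw_mempool_create(hldev, memblock_size, item_size,
 *					item_priv_size, items_initial,
 *					items_max, &cbs, ring);
 *	if (pool == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 */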
/*
 * vxge_hw_mempool_destroy
 */
void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i, j;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			u32 index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (index >= mempool->items_current)
				break;
		}

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);

	vfree(mempool->memblocks_dma_arr);

	vfree(mempool->memblocks_priv_arr);

	vfree(mempool->memblocks_arr);

	vfree(mempool);
}
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
		(vp_config->min_bandwidth >
					VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	   (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;

		device_config->vp_config[i].ring.ring_blocks =
				VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;

		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;

		device_config->vp_config[i].fifo.fifo_blocks =
				VXGE_HW_MIN_FIFO_BLOCKS;

		device_config->vp_config[i].fifo.max_frags =
				VXGE_HW_MAX_FIFO_FRAGS;

		device_config->vp_config[i].fifo.memblock_size =
				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;

		device_config->vp_config[i].fifo.alignment_size =
				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;

		device_config->vp_config[i].fifo.no_snoop_bits =
				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
		device_config->vp_config[i].tti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].tti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].rti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
/*
 * _hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = readq(&legacy_reg->toc_swapper_fb);

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
#endif
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
	struct vxge_hw_legacy_reg __iomem *legacy_reg,
	struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_device_config - Retrieve device configuration.
 * Get device configuration. Permits to retrieve at run-time configuration
 * values that were used to initialize and configure the device.
 */
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
			   struct vxge_hw_device_config *dev_config, int size)
{
	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
		return VXGE_HW_ERR_INVALID_DEVICE;

	if (size != sizeof(struct vxge_hw_device_config))
		return VXGE_HW_ERR_VERSION_CONFLICT;

	memcpy(dev_config, &hldev->config,
		sizeof(struct vxge_hw_device_config));

	return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}
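/*
 * Illustrative sketch (assumption): reading a 64-bit word out of a vpath
 * register space through the generic management interface above. The offset
 * used here is a placeholder, not a real register offset from this adapter.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *					vp_id, SOME_REG_OFFSET, &val);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */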
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}
/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
              enum vxge_hw_mgmt_reg_type type,
              u32 index, u32 offset, u64 value)
{
    enum vxge_hw_status status = VXGE_HW_OK;

    if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
        status = VXGE_HW_ERR_INVALID_DEVICE;
        goto exit;
    }

    switch (type) {
    case vxge_hw_mgmt_reg_type_legacy:
        if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->legacy_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_toc:
        if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->toc_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_common:
        if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->common_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_mrpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_srpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
            offset);
        break;
    case vxge_hw_mgmt_reg_type_vpmgmt:
        if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
            offset);
        break;
    case vxge_hw_mgmt_reg_type_vpath:
        if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->vpath_reg[index] +
            offset);
        break;
    default:
        status = VXGE_HW_ERR_INVALID_TYPE;
        break;
    }
exit:
    return status;
}
/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD list
 * callback
 * This function is callback passed to __vxge_hw_mempool_create to create
 * memory pool for TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
    struct vxge_hw_mempool *mempoolh,
    u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
    u32 index, u32 is_last)
{
    u32 memblock_item_idx;
    struct __vxge_hw_fifo_txdl_priv *txdl_priv;
    struct vxge_hw_fifo_txd *txdp =
        (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
    struct __vxge_hw_fifo *fifo =
        (struct __vxge_hw_fifo *)mempoolh->userdata;
    void *memblock = mempoolh->memblocks_arr[memblock_index];

    vxge_assert(txdp);

    txdp->host_control = (u64) (size_t)
        __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
                    &memblock_item_idx);

    txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

    vxge_assert(txdl_priv);

    fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

    /* pre-format HW's TxDL's private */
    txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
    txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
    txdl_priv->dma_handle = dma_object->handle;
    txdl_priv->memblock = memblock;
    txdl_priv->first_txdp = txdp;
    txdl_priv->next_txdl_priv = NULL;
    txdl_priv->alloc_frags = 0;

    return;
}
/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates FIFO and initializes it.
 */
enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
              struct vxge_hw_fifo_attr *attr)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_fifo *fifo;
    struct vxge_hw_fifo_config *config;
    u32 txdl_size, txdl_per_memblock;
    struct vxge_hw_mempool_cbs fifo_mp_callback;
    struct __vxge_hw_virtualpath *vpath;

    if ((vp == NULL) || (attr == NULL)) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }
    vpath = vp->vpath;
    config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

    txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

    txdl_per_memblock = config->memblock_size / txdl_size;

    fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
                    VXGE_HW_CHANNEL_TYPE_FIFO,
                    config->fifo_blocks * txdl_per_memblock,
                    attr->per_txdl_space, attr->userdata);

    if (fifo == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto exit;
    }

    vpath->fifoh = fifo;
    fifo->nofl_db = vpath->nofl_db;

    fifo->vp_id = vpath->vp_id;
    fifo->vp_reg = vpath->vp_reg;
    fifo->stats = &vpath->sw_stats->fifo_stats;

    fifo->config = config;

    /* apply "interrupts per txdl" attribute */
    fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;

    if (fifo->config->intr)
        fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

    fifo->no_snoop_bits = config->no_snoop_bits;

    /*
     * FIFO memory management strategy:
     *
     * TxDL split into three independent parts:
     *  - set of TxD's
     *  - TxD HW private part
     *  - driver private part
     *
     * Adaptive memory allocation is used, i.e. memory is allocated on
     * demand with the size which will fit into one memory block.
     * One memory block may contain more than one TxDL.
     *
     * During "reserve" operations more memory can be allocated on demand,
     * for example due to a FIFO full condition.
     *
     * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
     * routine which will essentially stop the channel and free resources.
     */

    /* TxDL common private size == TxDL private + driver private */
    fifo->priv_size =
        sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
    fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
            VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

    fifo->per_txdl_space = attr->per_txdl_space;

    /* recompute txdl size to be cacheline aligned */
    fifo->txdl_size = txdl_size;
    fifo->txdl_per_memblock = txdl_per_memblock;

    fifo->txdl_term = attr->txdl_term;
    fifo->callback = attr->callback;

    if (fifo->txdl_per_memblock == 0) {
        __vxge_hw_fifo_delete(vp);
        status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
        goto exit;
    }

    fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

    fifo->mempool =
        __vxge_hw_mempool_create(vpath->hldev,
            fifo->config->memblock_size,
            fifo->config->memblock_size,
            fifo->priv_size,
            (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
            (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
            &fifo_mp_callback,
            fifo);

    if (fifo->mempool == NULL) {
        __vxge_hw_fifo_delete(vp);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto exit;
    }

    status = __vxge_hw_channel_initialize(&fifo->channel);
    if (status != VXGE_HW_OK) {
        __vxge_hw_fifo_delete(vp);
        goto exit;
    }

    vxge_assert(fifo->channel.reserve_ptr);
exit:
    return status;
}
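/*
 * Sizing sketch for the FIFO created above, with example numbers that are
 * not taken from any particular configuration: assuming a 32-byte
 * struct vxge_hw_fifo_txd and max_frags = 48, txdl_size is 48 * 32 = 1536
 * bytes, so a 8192-byte memblock yields txdl_per_memblock = 8192 / 1536 = 5
 * TxDLs. priv_size is then rounded up to a whole number of
 * VXGE_CACHE_LINE_SIZE units by the ((x + line - 1) / line) * line step.
 */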
/*
 * __vxge_hw_fifo_abort - Returns the TxD
 * This function terminates the TxDs of fifo
 */
enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
    void *txdlh;

    for (;;) {
        vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

        if (txdlh == NULL)
            break;

        vxge_hw_channel_dtr_complete(&fifo->channel);

        if (fifo->txdl_term) {
            fifo->txdl_term(txdlh,
            VXGE_HW_TXDL_STATE_POSTED,
            fifo->channel.userdata);
        }

        vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
    }

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
    enum vxge_hw_status status = VXGE_HW_OK;

    __vxge_hw_fifo_abort(fifo);
    status = __vxge_hw_channel_reset(&fifo->channel);

    return status;
}
/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

    __vxge_hw_fifo_abort(fifo);

    if (fifo->mempool)
        __vxge_hw_mempool_destroy(fifo->mempool);

    vp->vpath->fifoh = NULL;

    __vxge_hw_channel_free(&fifo->channel);

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
             u32 phy_func_0, u32 offset, u32 *val)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

    val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

    if (phy_func_0)
        val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

    writeq(val64, &vp_reg->pci_config_access_cfg1);
    wmb();
    writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
            &vp_reg->pci_config_access_cfg2);
    wmb();

    status = __vxge_hw_device_register_poll(
            &vp_reg->pci_config_access_cfg2,
            VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->pci_config_access_status);

    if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
        status = VXGE_HW_FAIL;
        *val = 0;
    } else
        *val = (u32)vxge_bVALn(val64, 32, 32);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
    struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
    u64 val64;

    val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

    return
     (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}
/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static inline void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
              u64 dta_struct_sel)
{
    writeq(0, &vpath_reg->rts_access_steer_ctrl);
    wmb();
    writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
    writeq(0, &vpath_reg->rts_access_steer_data1);
    wmb();
    return;
}
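/*
 * The firmware "memo" reads that follow all use the same handshake:
 * __vxge_hw_read_rts_ds() primes rts_access_steer_data0 with the item
 * selector, the caller then writes an ACTION / DATA_STRUCT_SEL / STROBE word
 * to rts_access_steer_ctrl through __vxge_hw_pio_mem_write64() and, once
 * RMACJ_STATUS is set, reads the result back from
 * rts_access_steer_data0/data1.
 */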
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
    u32 vp_id,
    struct vxge_hw_vpath_reg __iomem *vpath_reg,
    struct vxge_hw_device_hw_info *hw_info)
{
    u32 i, j;
    u64 val64;
    u64 data1 = 0ULL;
    u64 data2 = 0ULL;
    enum vxge_hw_status status = VXGE_HW_OK;
    u8 *serial_number = hw_info->serial_number;
    u8 *part_number = hw_info->part_number;
    u8 *product_desc = hw_info->product_desc;

    __vxge_hw_read_rts_ds(vpath_reg,
        VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        return status;

    val64 = readq(&vpath_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
        data1 = readq(&vpath_reg->rts_access_steer_data0);
        ((u64 *)serial_number)[0] = be64_to_cpu(data1);

        data2 = readq(&vpath_reg->rts_access_steer_data1);
        ((u64 *)serial_number)[1] = be64_to_cpu(data2);
        status = VXGE_HW_OK;
    } else
        *serial_number = 0;

    __vxge_hw_read_rts_ds(vpath_reg,
            VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        return status;

    val64 = readq(&vpath_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

        data1 = readq(&vpath_reg->rts_access_steer_data0);
        ((u64 *)part_number)[0] = be64_to_cpu(data1);

        data2 = readq(&vpath_reg->rts_access_steer_data1);
        ((u64 *)part_number)[1] = be64_to_cpu(data2);

        status = VXGE_HW_OK;
    } else
        *part_number = 0;

    j = 0;

    for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
         i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {

        __vxge_hw_read_rts_ds(vpath_reg, i);

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
            VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
            VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

        status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

        if (status != VXGE_HW_OK)
            return status;

        val64 = readq(&vpath_reg->rts_access_steer_ctrl);

        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

            data1 = readq(&vpath_reg->rts_access_steer_data0);
            ((u64 *)product_desc)[j++] = be64_to_cpu(data1);

            data2 = readq(&vpath_reg->rts_access_steer_data1);
            ((u64 *)product_desc)[j++] = be64_to_cpu(data2);

            status = VXGE_HW_OK;
        } else
            *product_desc = 0;
    }

    return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
    u32 vp_id,
    struct vxge_hw_vpath_reg __iomem *vpath_reg,
    struct vxge_hw_device_hw_info *hw_info)
{
    u64 val64;
    u64 data1 = 0ULL;
    u64 data2 = 0ULL;
    struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
    struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
    struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
    struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vpath_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

        data1 = readq(&vpath_reg->rts_access_steer_data0);
        data2 = readq(&vpath_reg->rts_access_steer_data1);

        fw_date->day =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
                        data1);
        fw_date->month =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
                        data1);
        fw_date->year =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
                        data1);

        snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
            fw_date->month, fw_date->day, fw_date->year);

        fw_version->major =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
        fw_version->minor =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
        fw_version->build =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);

        snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
            fw_version->major, fw_version->minor, fw_version->build);

        flash_date->day =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
        flash_date->month =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
        flash_date->year =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);

        snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
            "%2.2d/%2.2d/%4.4d",
            flash_date->month, flash_date->day, flash_date->year);

        flash_version->major =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
        flash_version->minor =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
        flash_version->build =
            (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);

        snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
            flash_version->major, flash_version->minor,
            flash_version->build);

        status = VXGE_HW_OK;
    } else
        status = VXGE_HW_FAIL;
exit:
    return status;
}
/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
u64
__vxge_hw_vpath_pci_func_mode_get(
    u32 vp_id,
    struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
    u64 val64;
    u64 data1 = 0ULL;
    enum vxge_hw_status status = VXGE_HW_OK;

    __vxge_hw_read_rts_ds(vpath_reg,
        VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vpath_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
        data1 = readq(&vpath_reg->rts_access_steer_data0);
        status = VXGE_HW_OK;
    } else {
        data1 = 0;
        status = VXGE_HW_FAIL;
    }
exit:
    return data1;
}
/**
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE if flickering to be on, FALSE to be off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
                   u64 on_off)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    if (hldev == NULL) {
        status = VXGE_HW_ERR_INVALID_DEVICE;
        goto exit;
    }

    vp_reg = hldev->vpath_reg[hldev->first_vp_id];

    writeq(0, &vp_reg->rts_access_steer_ctrl);
    wmb();
    writeq(on_off, &vp_reg->rts_access_steer_data0);
    writeq(0, &vp_reg->rts_access_steer_data1);
    wmb();

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
            &vp_reg->rts_access_steer_ctrl,
            VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
            VXGE_HW_DEF_DEVICE_POLL_MILLIS);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(
    struct __vxge_hw_vpath_handle *vp,
    u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
{
    u64 val64;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    enum vxge_hw_status status = VXGE_HW_OK;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    vpath = vp->vpath;
    vp_reg = vpath->vp_reg;

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);

    if ((rts_table ==
        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
        (rts_table ==
        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
        (rts_table ==
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
        (rts_table ==
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
        val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
    }

    status = __vxge_hw_pio_mem_write64(val64,
                &vp_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                vpath->hldev->config.device_poll_millis);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

        *data1 = readq(&vp_reg->rts_access_steer_data0);

        if ((rts_table ==
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
        (rts_table ==
        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
            *data2 = readq(&vp_reg->rts_access_steer_data1);
        }
        status = VXGE_HW_OK;
    } else
        status = VXGE_HW_FAIL;
exit:
    return status;
}
/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(
    struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
    u32 offset, u64 data1, u64 data2)
{
    u64 val64;
    struct __vxge_hw_virtualpath *vpath;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    vpath = vp->vpath;
    vp_reg = vpath->vp_reg;

    writeq(data1, &vp_reg->rts_access_steer_data0);
    wmb();

    if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
        (rts_table ==
        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
        writeq(data2, &vp_reg->rts_access_steer_data1);
        wmb();
    }

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);

    status = __vxge_hw_pio_mem_write64(val64,
                &vp_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                vpath->hldev->config.device_poll_millis);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
        status = VXGE_HW_OK;
    else
        status = VXGE_HW_FAIL;
exit:
    return status;
}
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *                            from MAC address table.
 */
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
    u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
    u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
{
    u32 i;
    u64 val64;
    u64 data1 = 0ULL;
    u64 data2 = 0ULL;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

    status = __vxge_hw_pio_mem_write64(val64,
                &vpath_reg->rts_access_steer_ctrl,
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vpath_reg->rts_access_steer_ctrl);

    if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

        data1 = readq(&vpath_reg->rts_access_steer_data0);
        data2 = readq(&vpath_reg->rts_access_steer_data1);

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
                            data2);

        for (i = ETH_ALEN; i > 0; i--) {
            macaddr[i-1] = (u8)(data1 & 0xFF);
            data1 >>= 8;

            macaddr_mask[i-1] = (u8)(data2 & 0xFF);
            data2 >>= 8;
        }
        status = VXGE_HW_OK;
    } else
        status = VXGE_HW_FAIL;
exit:
    return status;
}
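/*
 * The copy loop above walks the returned DA_MAC_ADDR value from the least
 * significant byte upwards, so macaddr[ETH_ALEN - 1] receives bits 7:0 and
 * macaddr[0] ends up with the most significant byte; the address mask bytes
 * are unpacked from data2 in exactly the same way.
 */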
/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
            struct __vxge_hw_vpath_handle *vp,
            enum vxge_hw_rth_algoritms algorithm,
            struct vxge_hw_rth_hash_types *hash_type,
            u16 bucket_size)
{
    u64 data0, data1;
    enum vxge_hw_status status = VXGE_HW_OK;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    status = __vxge_hw_vpath_rts_table_get(vp,
             VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
             VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
            0, &data0, &data1);

    data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

    data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
    VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
    VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

    if (hash_type->hash_type_tcpipv4_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

    if (hash_type->hash_type_ipv4_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

    if (hash_type->hash_type_tcpipv6_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

    if (hash_type->hash_type_ipv6_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

    if (hash_type->hash_type_tcpipv6ex_en)
        data0 |=
        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

    if (hash_type->hash_type_ipv6ex_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

    if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
        data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
    else
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

    status = __vxge_hw_vpath_rts_table_set(vp,
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
        0, data0, 0);
exit:
    return status;
}
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
                u16 flag, u8 *itable)
{
    switch (flag) {
    case 1:
        *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
            itable[j]);
    case 2:
        *data0 |=
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
            itable[j]);
    case 3:
        *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
            itable[j]);
    case 4:
        *data1 |=
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
            itable[j]);
    default:
        break;
    }
}
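/*
 * Each 64-bit data word carries two indirection-table entries (ITEM0 and
 * ITEM1), so the four "flag" values above let the caller accumulate up to
 * four consecutive buckets (two in *data0, two in *data1) before issuing a
 * single RTS table write.
 */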
/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
            struct __vxge_hw_vpath_handle **vpath_handles,
            u32 vpath_count,
            u8 *mtable,
            u8 *itable,
            u32 itable_size)
{
    u32 i, j, action, rts_table;
    u64 data0;
    u64 data1;
    u32 max_entries;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    max_entries = (((u32)1) << itable_size);

    if (vp->vpath->hldev->config.rth_it_type
                == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
        action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
        rts_table =
            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

        for (j = 0; j < max_entries; j++) {

            data1 = 0;

            data0 =
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                itable[j]);

            status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
                action, rts_table, j, data0, data1);

            if (status != VXGE_HW_OK)
                goto exit;
        }

        for (j = 0; j < max_entries; j++) {

            data1 = 0;

            data0 =
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                itable[j]);

            status = __vxge_hw_vpath_rts_table_set(
                vpath_handles[mtable[itable[j]]], action,
                rts_table, j, data0, data1);

            if (status != VXGE_HW_OK)
                goto exit;
        }
    } else {
        action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
        rts_table =
            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
        for (i = 0; i < vpath_count; i++) {

            for (j = 0; j < max_entries;) {

                data0 = 0;
                data1 = 0;

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 1, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 2, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 3, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 4, itable);
                    j++;
                    break;
                }

                if (data0 != 0) {
                    status = __vxge_hw_vpath_rts_table_set(
                            vpath_handles[0],
                            action, rts_table,
                            0, data0, data1);

                    if (status != VXGE_HW_OK)
                        goto exit;
                }
            }
        }
    }
exit:
    return status;
}
/*
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ringh: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    u64 rxd_new_count, rxd_spat;

    if (ring == NULL)
        return status;

    rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
    rxd_spat = readq(&ring->vp_reg->prc_cfg6);
    rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

    if (rxd_new_count >= rxd_spat)
        status = VXGE_HW_FAIL;

    return status;
}
/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
    struct __vxge_hw_device *hldev,
    struct __vxge_hw_virtualpath *vpath)
{
    u32 i, mtu = 0, max_pyld = 0;
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;

    for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

        val64 = readq(&vpath->vpmgmt_reg->
                rxmac_cfg0_port_vpmgmt_clone[i]);
        max_pyld =
            (u32)
            VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
            (val64);
        if (mtu < max_pyld)
            mtu = max_pyld;
    }

    vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

    val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (val64 & vxge_mBIT(i))
            vpath->vsport_number = i;
    }

    val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

    if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
        VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
    else
        VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

    return status;
}
/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if
 * adapter completed the reset process for the vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
    enum vxge_hw_status status;

    status = __vxge_hw_device_register_poll(
            &vpath->hldev->common_reg->vpath_rst_in_prog,
            VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
                1 << (16 - vpath->vp_id)),
            vpath->hldev->config.device_poll_millis);

    return status;
}
/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

    __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                &hldev->common_reg->cmn_rsthdlr_cfg0);

    return status;
}
/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;

    vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];

    if (vpath->ringh) {
        status = __vxge_hw_ring_reset(vpath->ringh);
        if (status != VXGE_HW_OK)
            goto exit;
    }

    if (vpath->fifoh)
        status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vp_config *vp_config;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    vp_config = vpath->vp_config;

    if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
        return;

    val64 = readq(&vp_reg->prc_cfg1);
    val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
    writeq(val64, &vp_reg->prc_cfg1);

    val64 = readq(&vpath->vp_reg->prc_cfg6);
    val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
    writeq(val64, &vpath->vp_reg->prc_cfg6);

    val64 = readq(&vp_reg->prc_cfg7);

    if (vpath->vp_config->ring.scatter_mode !=
        VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

        val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

        switch (vpath->vp_config->ring.scatter_mode) {
        case VXGE_HW_RING_SCATTER_MODE_A:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
            break;
        case VXGE_HW_RING_SCATTER_MODE_B:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
            break;
        case VXGE_HW_RING_SCATTER_MODE_C:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
            break;
        }
    }

    writeq(val64, &vp_reg->prc_cfg7);

    writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
        __vxge_hw_ring_first_block_address_get(
            vpath->ringh) >> 3), &vp_reg->prc_cfg5);

    val64 = readq(&vp_reg->prc_cfg4);
    val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
    val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

    val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
            VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

    if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
        val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
    else
        val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

    writeq(val64, &vp_reg->prc_cfg4);
    return;
}
/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    u64 vpath_stride;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

    vpath->max_kdfc_db =
        (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
            val64+1)/2;

    if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

        vpath->max_nofl_db = vpath->max_kdfc_db;

        if (vpath->max_nofl_db <
            ((vpath->vp_config->fifo.memblock_size /
            (vpath->vp_config->fifo.max_frags *
            sizeof(struct vxge_hw_fifo_txd))) *
            vpath->vp_config->fifo.fifo_blocks)) {

            return VXGE_HW_BADCFG_FIFO_BLOCKS;
        }
        val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
                (vpath->max_nofl_db*2)-1);
    }

    writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

    writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
        &vp_reg->kdfc_fifo_trpl_ctrl);

    val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

    val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
           VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

    val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

    writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
    writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
    wmb();
    vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

    vpath->nofl_db =
        (struct __vxge_hw_non_offload_db_wrapper __iomem *)
        (hldev->kdfc + (vp_id *
        VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
                    vpath_stride)));
exit:
    return status;
}
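/*
 * The max_nofl_db check above guards against a FIFO configuration that needs
 * more non-offload doorbells than the vpath owns: the required count is
 * (memblock_size / (max_frags * sizeof(struct vxge_hw_fifo_txd))) TxDLs per
 * memory block times fifo_blocks, the same TxDL count that
 * __vxge_hw_fifo_create() derives for the channel length.
 */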
/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vp_config *vp_config;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    vp_config = vpath->vp_config;

    writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
            vpath->vsport_number), &vp_reg->xmac_vsport_choice);

    if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

        val64 = readq(&vp_reg->xmac_rpa_vcfg);

        if (vp_config->rpa_strip_vlan_tag !=
            VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
            if (vp_config->rpa_strip_vlan_tag)
                val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
            else
                val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
        }

        writeq(val64, &vp_reg->xmac_rpa_vcfg);
        val64 = readq(&vp_reg->rxmac_vcfg0);

        if (vp_config->mtu !=
                VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
            val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
            if ((vp_config->mtu +
                VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vp_config->mtu +
                    VXGE_HW_MAC_HEADER_MAX_SIZE);
            else
                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vpath->max_mtu);
        }

        writeq(val64, &vp_reg->rxmac_vcfg0);

        val64 = readq(&vp_reg->rxmac_vcfg1);

        val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
            VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

        if (hldev->config.rth_it_type ==
                VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
            val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
                0x2) |
                VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
        }

        writeq(val64, &vp_reg->rxmac_vcfg1);
    }
    return status;
}
/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of virtual path using the config
 * passed
 */
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;
    struct vxge_hw_vp_config *config;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    config = vpath->vp_config;

    writeq((u64)0, &vp_reg->tim_dest_addr);
    writeq((u64)0, &vp_reg->tim_vpath_map);
    writeq((u64)0, &vp_reg->tim_bitmap);
    writeq((u64)0, &vp_reg->tim_remap);

    if (config->ring.enable == VXGE_HW_RING_ENABLE)
        writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
            (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
            VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

    val64 = readq(&vp_reg->tim_pci_cfg);
    val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
    writeq(val64, &vp_reg->tim_pci_cfg);

    if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                config->tti.btimer_val);
        }

        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

        if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ac_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
        }

        if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ci_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
        }

        if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                config->tti.urange_a);
        }

        if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                config->tti.urange_b);
        }

        if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                config->tti.urange_c);
        }

        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                config->tti.uec_a);
        }

        if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                config->tti.uec_b);
        }

        if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                config->tti.uec_c);
        }

        if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                config->tti.uec_d);
        }

        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
        val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ri_en)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
            else
                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
        }

        if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                config->tti.rtimer_val);
        }

        if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                config->tti.util_sel);
        }

        if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                config->tti.ltimer_val);
        }

        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
    }

    if (config->ring.enable == VXGE_HW_RING_ENABLE) {

        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                config->rti.btimer_val);
        }

        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

        if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ac_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
        }

        if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ci_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
        }

        if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                config->rti.urange_a);
        }

        if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                config->rti.urange_b);
        }

        if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                config->rti.urange_c);
        }

        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
        val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                config->rti.uec_a);
        }

        if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                config->rti.uec_b);
        }

        if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                config->rti.uec_c);
        }

        if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                config->rti.uec_d);
        }

        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
        val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ri_en)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
            else
                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
        }

        if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                config->rti.rtimer_val);
        }

        if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                config->rti.util_sel);
        }

        if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                config->rti.ltimer_val);
        }

        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
    }

    val64 = 0;
    writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
    writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
    writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

    return status;
}
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
{
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;
    struct vxge_hw_vp_config *config;
    u64 val64;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    config = vpath->vp_config;

    if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
            config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            writeq(val64,
            &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        }
    }
    return;
}
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    u32 val32;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];

    if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
        status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
        goto exit;
    }
    vp_reg = vpath->vp_reg;

    status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);

    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_mac_configure(hldev, vp_id);

    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);

    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_tim_configure(hldev, vp_id);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

    /* Get MRRS value from device control */
    status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);

    if (status == VXGE_HW_OK) {
        val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
        val64 &=
            ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
        val64 |=
            VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

        val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
    }

    val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
    val64 |=
        VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
            VXGE_HW_MAX_PAYLOAD_SIZE_512);

    val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
    writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
exit:
    return status;
}
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
            struct vxge_hw_vp_config *config)
{
    struct __vxge_hw_virtualpath *vpath;
    enum vxge_hw_status status = VXGE_HW_OK;

    if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
        status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
        goto exit;
    }

    vpath = &hldev->virtual_paths[vp_id];

    vpath->vp_id = vp_id;
    vpath->vp_open = VXGE_HW_VP_OPEN;
    vpath->hldev = hldev;
    vpath->vp_config = config;
    vpath->vp_reg = hldev->vpath_reg[vp_id];
    vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

    __vxge_hw_vpath_reset(hldev, vp_id);

    status = __vxge_hw_vpath_reset_check(vpath);

    if (status != VXGE_HW_OK) {
        memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
        goto exit;
    }

    status = __vxge_hw_vpath_mgmt_read(hldev, vpath);

    if (status != VXGE_HW_OK) {
        memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
        goto exit;
    }

    INIT_LIST_HEAD(&vpath->vpath_handles);

    vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

    VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
        hldev->tim_int_mask1, vp_id);

    status = __vxge_hw_vpath_initialize(hldev, vp_id);

    if (status != VXGE_HW_OK)
        __vxge_hw_vp_terminate(hldev, vp_id);
exit:
    return status;
}
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
 */
void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
    struct __vxge_hw_virtualpath *vpath;

    vpath = &hldev->virtual_paths[vp_id];

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
        goto exit;

    VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
        vpath->hldev->tim_int_mask1, vpath->vp_id);
    hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

    memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
exit:
    return;
}
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }
    vpath = vp->vpath;

    new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

    if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
        status = VXGE_HW_ERR_INVALID_MTU_SIZE;

    val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

    val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
    val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

    writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

    vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
exit:
    return status;
}
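/*
 * Typical use, with "vph" standing in for a handle returned by
 * vxge_hw_vpath_open() (illustrative only, not part of the driver):
 *
 *    if (vxge_hw_vpath_mtu_set(vph, 9000) != VXGE_HW_OK)
 *        the requested MTU was below VXGE_HW_MIN_MTU or above max_mtu;
 *
 * the frame length programmed into RTS_MAX_FRM_LEN is the caller's MTU plus
 * VXGE_HW_MAC_HEADER_MAX_SIZE, while vp_config->mtu keeps the caller's value.
 */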
/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
           struct vxge_hw_vpath_attr *attr,
           struct __vxge_hw_vpath_handle **vpath_handle)
{
    struct __vxge_hw_virtualpath *vpath;
    struct __vxge_hw_vpath_handle *vp;
    enum vxge_hw_status status;

    vpath = &hldev->virtual_paths[attr->vp_id];

    if (vpath->vp_open == VXGE_HW_VP_OPEN) {
        status = VXGE_HW_ERR_INVALID_STATE;
        goto vpath_open_exit1;
    }

    status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
            &hldev->config.vp_config[attr->vp_id]);

    if (status != VXGE_HW_OK)
        goto vpath_open_exit1;

    vp = (struct __vxge_hw_vpath_handle *)
        vmalloc(sizeof(struct __vxge_hw_vpath_handle));
    if (vp == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto vpath_open_exit2;
    }

    memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));

    vp->vpath = vpath;

    if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
        status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
        if (status != VXGE_HW_OK)
            goto vpath_open_exit6;
    }

    if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
        status = __vxge_hw_ring_create(vp, &attr->ring_attr);
        if (status != VXGE_HW_OK)
            goto vpath_open_exit7;

        __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
    }

    vpath->fifoh->tx_intr_num =
        (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
            VXGE_HW_VPATH_INTR_TX;

    vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
                VXGE_HW_BLOCK_SIZE);

    if (vpath->stats_block == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto vpath_open_exit8;
    }

    vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
            stats_block->memblock;
    memset(vpath->hw_stats, 0,
        sizeof(struct vxge_hw_vpath_stats_hw_info));

    hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
                        vpath->hw_stats;

    vpath->hw_stats_sav =
        &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
    memset(vpath->hw_stats_sav, 0,
            sizeof(struct vxge_hw_vpath_stats_hw_info));

    writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

    status = vxge_hw_vpath_stats_enable(vp);
    if (status != VXGE_HW_OK)
        goto vpath_open_exit8;

    list_add(&vp->item, &vpath->vpath_handles);

    hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

    *vpath_handle = vp;

    attr->fifo_attr.userdata = vpath->fifoh;
    attr->ring_attr.userdata = vpath->ringh;

    return VXGE_HW_OK;

vpath_open_exit8:
    if (vpath->ringh != NULL)
        __vxge_hw_ring_delete(vp);
vpath_open_exit7:
    if (vpath->fifoh != NULL)
        __vxge_hw_fifo_delete(vp);
vpath_open_exit6:
    vfree(vp);
vpath_open_exit2:
    __vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:

    return status;
}
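/*
 * The error labels above unwind in reverse order of construction: exit8
 * drops the ring, exit7 the fifo, exit6 the handle memory, and exit2 tears
 * the vpath back down with __vxge_hw_vp_terminate() before exit1 returns the
 * failure status to the caller.
 */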
/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
 * @vp: Handle got from previous vpath open
 *
 * This function programs the initial new-quadword count for the vpath's
 * receive ring and trims the ring's rxds_limit accordingly.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_virtualpath *vpath = NULL;
    u64 new_count, val64, val164;
    struct __vxge_hw_ring *ring;

    vpath = vp->vpath;
    ring = vpath->ringh;

    new_count = readq(&vpath->vp_reg->rxdmem_size);
    new_count &= 0x1fff;
    val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));

    writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
        &vpath->vp_reg->prc_rxd_doorbell);
    readl(&vpath->vp_reg->prc_rxd_doorbell);
    wmb();
    val64 = readq(&vpath->vp_reg->prc_cfg6);
    val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
    wmb();

    /*
     * Each RxD is of 4 qwords
     */
    new_count -= (val64 + 1);
    val64 = min(val164, new_count) / 4;

    ring->rxds_limit = min(ring->rxds_limit, val64);
    if (ring->rxds_limit < 4)
        ring->rxds_limit = 4;
}
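/*
 * rxds_limit above follows directly from the doorbell arithmetic: the RxD
 * memory size in quadwords minus the RXD_SPAT threshold, divided by 4
 * because each RxD occupies four quadwords, and never allowed to fall below
 * 4 descriptors.
 */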
/*
 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
 * This function is used to close access to virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_virtualpath *vpath = NULL;
    struct __vxge_hw_device *devh = NULL;
    u32 vp_id = vp->vpath->vp_id;
    u32 is_empty = TRUE;
    enum vxge_hw_status status = VXGE_HW_OK;

    vpath = vp->vpath;
    devh = vpath->hldev;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto vpath_close_exit;
    }

    list_del(&vp->item);

    if (!list_empty(&vpath->vpath_handles)) {
        list_add(&vp->item, &vpath->vpath_handles);
        is_empty = FALSE;
    }

    if (!is_empty) {
        status = VXGE_HW_FAIL;
        goto vpath_close_exit;
    }

    devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

    if (vpath->ringh != NULL)
        __vxge_hw_ring_delete(vp);

    if (vpath->fifoh != NULL)
        __vxge_hw_fifo_delete(vp);

    if (vpath->stats_block != NULL)
        __vxge_hw_blockpool_block_free(devh, vpath->stats_block);

    vfree(vp);

    __vxge_hw_vp_terminate(devh, vp_id);

    vpath->vp_open = VXGE_HW_VP_NOT_OPEN;

vpath_close_exit:
    return status;
}
/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
    enum vxge_hw_status status;
    u32 vp_id;
    struct __vxge_hw_virtualpath *vpath = vp->vpath;

    vp_id = vpath->vp_id;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto exit;
    }

    status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
    if (status == VXGE_HW_OK)
        vpath->sw_stats->soft_reset_cnt++;
exit:
    return status;
}
/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_virtualpath *vpath = NULL;
    enum vxge_hw_status status;
    struct __vxge_hw_device *hldev;
    u32 vp_id;

    vp_id = vp->vpath->vp_id;
    vpath = vp->vpath;
    hldev = vpath->hldev;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto exit;
    }

    status = __vxge_hw_vpath_reset_check(vpath);
    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_initialize(hldev, vp_id);
    if (status != VXGE_HW_OK)
        goto exit;

    if (vpath->ringh != NULL)
        __vxge_hw_vpath_prc_configure(hldev, vp_id);

    memset(vpath->hw_stats, 0,
        sizeof(struct vxge_hw_vpath_stats_hw_info));

    memset(vpath->hw_stats_sav, 0,
        sizeof(struct vxge_hw_vpath_stats_hw_info));

    writeq(vpath->stats_block->dma_addr,
        &vpath->vp_reg->stats_cfg);

    status = vxge_hw_vpath_stats_enable(vp);
exit:
    return status;
}
/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_device *hldev;
    u64 val64;

    hldev = vp->vpath->hldev;

    val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
        1 << (16 - vp->vpath->vp_id));

    __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
        &hldev->common_reg->cmn_rsthdlr_cfg1);
}
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory
 */
enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;

    vpath = vp->vpath;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto exit;
    }

    memcpy(vpath->hw_stats_sav, vpath->hw_stats,
            sizeof(struct vxge_hw_vpath_stats_hw_info));

    status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 *                                and offset and perform an operation
 */
enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                 u32 operation, u32 offset, u64 *stat)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto vpath_stats_access_exit;
    }

    vp_reg = vpath->vp_reg;

    val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
        VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
        VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

    status = __vxge_hw_pio_mem_write64(val64,
                &vp_reg->xmac_stats_access_cmd,
                VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
                vpath->hldev->config.device_poll_millis);

    if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
        *stat = readq(&vp_reg->xmac_stats_access_data);
    else
        *stat = 0;

vpath_stats_access_exit:
    return status;
}
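/*
 * All of the per-vpath XMAC statistics readers below funnel through the
 * helper above: they pass VXGE_HW_STATS_OP_READ together with a
 * block-relative offset and pick the result out of xmac_stats_access_data
 * one 64-bit counter at a time.
 */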
/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
    struct __vxge_hw_virtualpath *vpath,
    struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
    u64 *val64;
    int i;
    u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = (u64 *) vpath_tx_stats;

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
        status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto exit;
    }

    for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
        status = __vxge_hw_vpath_stats_access(vpath,
                    VXGE_HW_STATS_OP_READ,
                    offset, val64);
        if (status != VXGE_HW_OK)
            goto exit;
        offset++;
        val64++;
    }
exit:
    return status;
}
/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *) vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
enum vxge_hw_status __vxge_hw_vpath_stats_get(
			struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}

/*
 * __vxge_hw_blockpool_create - Create block pool
 */
enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {

		memblock = vxge_os_dma_malloc(
				hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);

		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(hldev->pdev, dma_addr))) {
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =
			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				    GFP_KERNEL);

		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}

/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL)
		return;

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
	}
}

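/*
 * Illustrative sketch only, not part of the driver: creating and tearing
 * down a block pool with the two functions above.  The example function
 * name and the pool_max literal are hypothetical; the driver derives the
 * real pool sizes from its device configuration.
 */
static inline enum vxge_hw_status
vxge_example_blockpool_setup(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status;

	/* Pre-allocate a pool of DMA-mapped VXGE_HW_BLOCK_SIZE blocks. */
	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
				VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE, 256);
	if (status != VXGE_HW_OK)
		return status;

	/* ... blocks are then served via __vxge_hw_blockpool_malloc()
	 * and returned via __vxge_hw_blockpool_free() ... */

	__vxge_hw_blockpool_destroy(&hldev->block_pool);

	return VXGE_HW_OK;
}
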
/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}

/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}

/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
void vxge_hw_blockpool_block_add(
			struct __vxge_hw_device *devh,
			void *block_addr,
			u32 length,
			struct pci_dev *dma_h,
			struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 req_out;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = (struct __vxge_hw_blockpool_entry *)
			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;

	req_out = blockpool->req_out;
exit:
	return;
}

/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
				struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			memblock = NULL;
			goto exit;
		}

	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}

/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *				__vxge_hw_blockpool_malloc
 */
void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
			void *memblock, u32 size,
			struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock,
			&dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = (struct __vxge_hw_blockpool_entry *)
				vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}

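/*
 * Illustrative sketch only, not part of the driver: pairing
 * __vxge_hw_blockpool_malloc() with __vxge_hw_blockpool_free().  A request
 * of exactly VXGE_HW_BLOCK_SIZE is served from the pool; any other size
 * falls back to vxge_os_dma_malloc() plus a PCI mapping.  The example
 * function names are hypothetical.
 */
static inline void *
vxge_example_block_get(struct __vxge_hw_device *devh,
		struct vxge_hw_mempool_dma *dma_object)
{
	/* dma_object receives the DMA address and handles for the block. */
	return __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
					dma_object);
}

static inline void
vxge_example_block_put(struct __vxge_hw_device *devh, void *memblock,
		struct vxge_hw_mempool_dma *dma_object)
{
	__vxge_hw_blockpool_free(devh, memblock, VXGE_HW_BLOCK_SIZE,
					dma_object);
}
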
/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}

/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}

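/*
 * Illustrative sketch only, not part of the driver: borrowing a pre-mapped
 * entry with __vxge_hw_blockpool_block_allocate() and handing it back with
 * __vxge_hw_blockpool_block_free().  The example function name is
 * hypothetical.
 */
static inline void
vxge_example_block_borrow(struct __vxge_hw_device *devh)
{
	struct __vxge_hw_blockpool_entry *entry;

	entry = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
	if (entry == NULL)
		return;

	/* entry->memblock and entry->dma_addr are ready for descriptor use. */

	__vxge_hw_blockpool_block_free(devh, entry);
}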