/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}
/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}
/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Will poll until masked bit is not cleared.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks whether the vpath reset-in-progress register has
 * cleared to zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
	return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
/*
 * __vxge_hw_device_id_get
 * This routine sets the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = readq(&hldev->common_reg->titan_asic_id);
	hldev->device_id =
		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

	hldev->major_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

	hldev->minor_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);

	return;
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
		func_id) &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}

	return;
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
				hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}

exit:
	return status;
}
/*
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(i, vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vmalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	memset(hldev, 0, sizeof(struct __vxge_hw_device));
	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK)
		goto exit;
	__vxge_hw_device_id_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);

	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
			sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	u32 i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;
	u32 offset = 0x0;

	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return 0;
	return hldev->debug_module_mask;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
/*
 * vxge_hw_device_link_width_get - Get the negotiated PCIe link width
 */
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}
/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}
/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}
/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
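
/*
 * Illustrative sketch (editorial addition, hypothetical numbers): if a
 * memblock's DMA base address is 0x10000000 and an RxD block begins 0x800
 * bytes into that memblock, __vxge_hw_ring_item_dma_addr() returns
 * 0x10000000 + 0x800 = 0x10000800, i.e. the item's offset within the host
 * virtual memblock is reused unchanged as its offset within the DMA mapping.
 */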
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
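
/*
 * Editorial note on how the linking above is used: when the item_alloc
 * callback below runs for RxD blocks 0, 1, ..., N-1, each previously
 * allocated block gets its "next" pointer set to the DMA address of the
 * block that follows it, and the last block is linked back to block 0.
 * The RxD blocks therefore form a closed chain that the hardware can walk
 * continuously. This reading is inferred from the calls below; it adds no
 * driver logic of its own.
 */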
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create
 * memory pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}

	return;
}
/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
	void *rxd;
	u32 i = 0;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
		if (min_flag) {
			i++;
			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
				break;
		}
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);

	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	/* round the per-RxD private area up to a cache line boundary */
	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);

	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}
/*
 * __vxge_hw_ring_abort - Returns the RxD
 * This function terminates the RxDs of ring
 */
enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring, 1);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_mempool_grow
 * Will resize mempool up to %num_allocate value.
 */
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
				vmalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		memset(mempool->memblocks_priv_arr[i], 0,
			     mempool->items_priv_size * n_items);

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}
/*
 * vxge_hw_mempool_create
 * This function will create memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 */
struct vxge_hw_mempool *
__vxge_hw_mempool_create(
	struct __vxge_hw_device *devh,
	u32 memblock_size,
	u32 item_size,
	u32 items_priv_size,
	u32 items_initial,
	u32 items_max,
	struct vxge_hw_mempool_cbs *mp_callback,
	void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = (struct vxge_hw_mempool *)
			vmalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}
	memset(mempool, 0, sizeof(struct vxge_hw_mempool));

	mempool->devh = devh;
	mempool->memblock_size = memblock_size;
	mempool->items_max = items_max;
	mempool->items_initial = items_initial;
	mempool->item_size = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = mp_callback->item_func_alloc;
	mempool->userdata = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_priv_arr, 0,
		sizeof(void *) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
		vmalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->memblocks_dma_arr, 0,
			sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);

	/* allocate hash array of items */
	mempool->items_arr =
		(void **) vmalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}
	memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
				 mempool->items_per_memblock;
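
	/*
	 * Example of the calculation above (hypothetical numbers): with
	 * items_initial == 100 and items_per_memblock == 16, this gives
	 * (100 + 15) / 16 = 7 memblocks, the smallest count able to hold
	 * all of the initially requested items.
	 */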
	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
	}
exit:
	return mempool;
}
/*
 * vxge_hw_mempool_destroy
 */
void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i, j;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			u32 index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (index >= mempool->items_current)
				break;
		}

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);

	vfree(mempool->memblocks_dma_arr);

	vfree(mempool->memblocks_priv_arr);

	vfree(mempool->memblocks_arr);

	vfree(mempool);

	return;
}
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
		(vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	   (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;

		device_config->vp_config[i].ring.ring_blocks =
				VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;

		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;

		device_config->vp_config[i].fifo.fifo_blocks =
				VXGE_HW_MIN_FIFO_BLOCKS;

		device_config->vp_config[i].fifo.max_frags =
				VXGE_HW_MAX_FIFO_FRAGS;

		device_config->vp_config[i].fifo.memblock_size =
				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;

		device_config->vp_config[i].fifo.alignment_size =
				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;

		device_config->vp_config[i].fifo.no_snoop_bits =
				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;

		device_config->vp_config[i].tti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].tti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].rti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = readq(&legacy_reg->toc_swapper_fb);

	wmb();

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
	struct vxge_hw_legacy_reg __iomem *legacy_reg,
	struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);
		wmb();

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
		wmb();
	}

	return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_device_config - Retrieve device configuration.
 * Get device configuration. Permits to retrieve at run-time configuration
 * values that were used to initialize and configure the device.
 */
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
			   struct vxge_hw_device_config *dev_config, int size)
{
	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
		return VXGE_HW_ERR_INVALID_DEVICE;

	if (size != sizeof(struct vxge_hw_device_config))
		return VXGE_HW_ERR_VERSION_CONFLICT;

	memcpy(dev_config, &hldev->config,
		sizeof(struct vxge_hw_device_config));

	return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}
/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		       enum vxge_hw_mgmt_reg_type type,
		       u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
			offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}
/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * This function is a callback passed to __vxge_hw_mempool_create to create
 * the memory pool for the TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
		(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	txdp->host_control = (u64) (size_t)
		__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}
/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates FIFO and initializes it.
 */
enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_fifo_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_fifo *fifo;
	struct vxge_hw_fifo_config *config;
	u32 txdl_size, txdl_per_memblock;
	struct vxge_hw_mempool_cbs fifo_mp_callback;
	struct __vxge_hw_virtualpath *vpath;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;
	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

	txdl_per_memblock = config->memblock_size / txdl_size;

	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
					VXGE_HW_CHANNEL_TYPE_FIFO,
					config->fifo_blocks * txdl_per_memblock,
					attr->per_txdl_space, attr->userdata);

	if (fifo == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vpath->fifoh = fifo;
	fifo->nofl_db = vpath->nofl_db;

	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->stats = &vpath->sw_stats->fifo_stats;

	fifo->config = config;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;

	if (fifo->config->intr)
		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * TxDL split into three independent parts:
	 *	- set of TxDs
	 *	- TxD HW private part
	 *	- driver private part
	 *
	 * Adaptive memory allocation used, i.e. memory allocated on
	 * demand with the size which will fit into one memory block.
	 * One memory block may contain more than one TxDL.
	 *
	 * During "reserve" operations more memory can be allocated on demand
	 * for example due to FIFO full condition.
	 *
	 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
	 * routine which will essentially stop the channel and free resources.
	 */

	/* TxDL common private size == TxDL private + driver private */
	fifo->priv_size =
		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
	fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	if (fifo->txdl_per_memblock == 0) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
		goto exit;
	}

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

	fifo->mempool =
		__vxge_hw_mempool_create(vpath->hldev,
			fifo->config->memblock_size,
			fifo->txdl_size,
			fifo->priv_size,
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			&fifo_mp_callback,
			fifo);

	if (fifo->mempool == NULL) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_channel_initialize(&fifo->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_fifo_delete(vp);
		goto exit;
	}

	vxge_assert(fifo->channel.reserve_ptr);
exit:
	return status;
}
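/*
 * Worked example of the sizing math above (illustrative numbers only, not
 * driver defaults): assuming a 32-byte struct vxge_hw_fifo_txd and
 * max_frags = 7, one TxDL is 7 * 32 = 224 bytes, so a 4096-byte memblock
 * holds 4096 / 224 = 18 TxDLs. Likewise, with VXGE_CACHE_LINE_SIZE = 64 a
 * raw priv_size of 200 bytes is rounded up to
 * ((200 + 64 - 1) / 64) * 64 = 256 bytes, keeping each TxDL's private area
 * cache-line aligned.
 */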
/*
 * __vxge_hw_fifo_abort - Returns the TxD
 * This function terminates the TxDs of the fifo
 */
enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
				VXGE_HW_TXDL_STATE_POSTED,
				fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool != NULL)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;

	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);

	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
		      u64 dta_struct_sel)
{
	writeq(0, &vpath_reg->rts_access_steer_ctrl);

	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
	writeq(0, &vpath_reg->rts_access_steer_data1);
}
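/*
 * The firmware "memo" reads that follow all use the same sequence, sketched
 * here for reference (not an additional code path): program the item selector
 * via __vxge_hw_read_rts_ds(), build a rts_access_steer_ctrl word with the
 * READ_MEMO_ENTRY action, the FW_MEMO data-structure selector and the STROBE
 * bit, post it with __vxge_hw_pio_mem_write64() (which polls for STROBE to
 * clear), then check RMACJ_STATUS and, on success, pick up the result from
 * rts_access_steer_data0/data1.
 */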
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info)
{
	u32 i, j = 0;
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;

	__vxge_hw_read_rts_ds(vpath_reg,
		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vpath_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		data1 = readq(&vpath_reg->rts_access_steer_data0);
		((u64 *)serial_number)[0] = be64_to_cpu(data1);

		data2 = readq(&vpath_reg->rts_access_steer_data1);
		((u64 *)serial_number)[1] = be64_to_cpu(data2);
		status = VXGE_HW_OK;
	}

	__vxge_hw_read_rts_ds(vpath_reg,
		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vpath_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		data1 = readq(&vpath_reg->rts_access_steer_data0);
		((u64 *)part_number)[0] = be64_to_cpu(data1);

		data2 = readq(&vpath_reg->rts_access_steer_data1);
		((u64 *)part_number)[1] = be64_to_cpu(data2);

		status = VXGE_HW_OK;
	}

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {

		__vxge_hw_read_rts_ds(vpath_reg, i);

		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

		status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

		if (status != VXGE_HW_OK)
			return status;

		val64 = readq(&vpath_reg->rts_access_steer_ctrl);

		if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

			data1 = readq(&vpath_reg->rts_access_steer_data0);
			((u64 *)product_desc)[j++] = be64_to_cpu(data1);

			data2 = readq(&vpath_reg->rts_access_steer_data1);
			((u64 *)product_desc)[j++] = be64_to_cpu(data2);

			status = VXGE_HW_OK;
		}
	}

	return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info)
{
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vpath_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		data1 = readq(&vpath_reg->rts_access_steer_data0);
		data2 = readq(&vpath_reg->rts_access_steer_data1);

		fw_date->day =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
						data1);
		fw_date->month =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
						data1);
		fw_date->year =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
						data1);

		snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
			fw_date->month, fw_date->day, fw_date->year);

		fw_version->major =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
		fw_version->minor =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
		fw_version->build =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);

		snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		    fw_version->major, fw_version->minor, fw_version->build);

		flash_date->day =
		  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
		flash_date->month =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
		flash_date->year =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);

		snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
			"%2.2d/%2.2d/%4.4d",
			flash_date->month, flash_date->day, flash_date->year);

		flash_version->major =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
		flash_version->minor =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
		flash_version->build =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);

		snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
			flash_version->major, flash_version->minor,
			flash_version->build);

		status = VXGE_HW_OK;

	} else
		status = VXGE_HW_FAIL;
exit:
	return status;
}
/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
u64
__vxge_hw_vpath_pci_func_mode_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;
	u64 data1 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_read_rts_ds(vpath_reg,
		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vpath_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		data1 = readq(&vpath_reg->rts_access_steer_data0);
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_FAIL;
exit:
	return data1;
}
/**
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE if flickering to be on, FALSE to be off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
			      u64 on_off)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (hldev == NULL) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	vp_reg = hldev->vpath_reg[hldev->first_vp_id];

	writeq(0, &vp_reg->rts_access_steer_ctrl);

	writeq(on_off, &vp_reg->rts_access_steer_data0);
	writeq(0, &vp_reg->rts_access_steer_data1);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vp_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
exit:
	return status;
}
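/*
 * Typical (illustrative) use from an ethtool "identify" style hook, assuming
 * my_hldev is the __vxge_hw_device obtained at device initialization:
 *
 *	vxge_hw_device_flick_link_led(my_hldev, 1);	// start blinking
 *	...
 *	vxge_hw_device_flick_link_led(my_hldev, 0);	// stop blinking
 *
 * The exact on/off encoding is whatever the firmware LED_CONTROL action
 * expects; the literal 1/0 above is only an example.
 */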
/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(
	struct __vxge_hw_vpath_handle *vp,
	u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;
	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);

	if ((rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
		val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
	}

	status = __vxge_hw_pio_mem_write64(val64,
			&vp_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			vpath->hldev->config.device_poll_millis);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		*data1 = readq(&vp_reg->rts_access_steer_data0);

		if ((rts_table ==
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
		    (rts_table ==
		     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
			*data2 = readq(&vp_reg->rts_access_steer_data1);
		}
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_FAIL;
exit:
	return status;
}
/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(
	struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
	u32 offset, u64 data1, u64 data2)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;
	vp_reg = vpath->vp_reg;

	writeq(data1, &vp_reg->rts_access_steer_data0);

	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
		writeq(data2, &vp_reg->rts_access_steer_data1);
	}

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);

	status = __vxge_hw_pio_mem_write64(val64,
			&vp_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			vpath->hldev->config.device_poll_millis);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
		status = VXGE_HW_OK;
	else
		status = VXGE_HW_FAIL;
exit:
	return status;
}
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *               from MAC address table.
 */
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
	u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
	u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
			&vpath_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		data1 = readq(&vpath_reg->rts_access_steer_data0);
		data2 = readq(&vpath_reg->rts_access_steer_data1);

		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
		data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
							data2);

		for (i = ETH_ALEN; i > 0; i--) {
			macaddr[i-1] = (u8)(data1 & 0xFF);
			data1 >>= 8;

			macaddr_mask[i-1] = (u8)(data2 & 0xFF);
			data2 >>= 8;
		}
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_FAIL;
exit:
	return status;
}
/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
			struct __vxge_hw_vpath_handle *vp,
			enum vxge_hw_rth_algoritms algorithm,
			struct vxge_hw_rth_hash_types *hash_type,
			u16 bucket_size)
{
	u64 data0, data1;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		0, &data0, &data1);

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

	if (hash_type->hash_type_tcpipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

	if (hash_type->hash_type_ipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

	if (hash_type->hash_type_tcpipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

	if (hash_type->hash_type_ipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

	if (hash_type->hash_type_tcpipv6ex_en)
		data0 |=
		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

	if (hash_type->hash_type_ipv6ex_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
	else
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

	status = __vxge_hw_vpath_rts_table_set(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		0, data0, 0);
exit:
	return status;
}
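/*
 * Illustrative call only (parameter values are examples, not driver
 * defaults): enable receive-side hashing over IPv4 and TCP/IPv4 on an open
 * vpath handle vp_handle:
 *
 *	struct vxge_hw_rth_hash_types types = {0};
 *	types.hash_type_tcpipv4_en = 1;
 *	types.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp_handle, RTH_ALG_JENKINS,
 *					   &types, 8);
 *
 * The algorithm name and bucket_size above are placeholders taken for the
 * sake of the example.
 */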
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
				u16 flag, u8 *itable)
{
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
	default:
		return;
	}
}
/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
			struct __vxge_hw_vpath_handle **vpath_handles,
			u32 vpath_count,
			u8 *mtable,
			u8 *itable,
			u32 itable_size)
{
	u32 i, j, action, rts_table;
	u64 data0;
	u64 data1;
	u32 max_entries;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	max_entries = (((u32)1) << itable_size);

	if (vp->vpath->hldev->config.rth_it_type
				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
				action, rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(
				vpath_handles[mtable[itable[j]]], action,
				rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}
	} else {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
		for (i = 0; i < vpath_count; i++) {

			for (j = 0; j < max_entries;) {

				data0 = 0;
				data1 = 0;

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 1, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 2, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 3, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 4, itable);
					j++;
					break;
				}

				if (data0 != 0) {
					status = __vxge_hw_vpath_rts_table_set(
							vpath_handles[i],
							action, rts_table,
							0, data0, data1);

					if (status != VXGE_HW_OK)
						goto exit;
				}
			}
		}
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ringh: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 rxd_new_count, rxd_spat;

	if (ring == NULL)
		return status;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

	if (rxd_new_count >= rxd_spat)
		status = VXGE_HW_FAIL;

	return status;
}
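/*
 * Example of the check above with made-up register values: if the doorbell
 * reports NEW_QW_CNT = 1024 queued qwords while PRC_CFG6 advertises an
 * RXD_SPAT of 256, then 1024 >= 256 and the function reports VXGE_HW_FAIL,
 * i.e. posted RxD space is being consumed faster than the PRC drains it.
 */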
/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
	struct __vxge_hw_device *hldev,
	struct __vxge_hw_virtualpath *vpath)
{
	u32 i, mtu = 0, max_pyld = 0;
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		val64 = readq(&vpath->vpmgmt_reg->
				rxmac_cfg0_port_vpmgmt_clone[i]);
		max_pyld = (u32)
			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
			(val64);
		if (mtu < max_pyld)
			mtu = max_pyld;
	}

	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
	else
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

	return status;
}
/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter completed the reset process for the vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}
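/*
 * The reset encoding above, spelled out: each vpath owns bit (16 - vp_id) of
 * the SW_RESET_VPATH field, so for vp_id = 3 the value handed to the macro is
 * 1 << 13 = 0x2000. __vxge_hw_vpath_reset_check() polls the same
 * 1 << (16 - vp_id) bit in vpath_rst_in_prog to decide when the adapter has
 * finished the reset.
 */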
/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];

	if (vpath->ringh) {
		status = __vxge_hw_ring_reset(vpath->ringh);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	if (vpath->fifoh)
		status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
		return;

	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	val64 = readq(&vp_reg->prc_cfg7);

	if (vpath->vp_config->ring.scatter_mode !=
		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

		switch (vpath->vp_config->ring.scatter_mode) {
		case VXGE_HW_RING_SCATTER_MODE_A:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
			break;
		case VXGE_HW_RING_SCATTER_MODE_B:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
			break;
		case VXGE_HW_RING_SCATTER_MODE_C:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
			break;
		}
	}

	writeq(val64, &vp_reg->prc_cfg7);

	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
		__vxge_hw_ring_first_block_address_get(
			vpath->ringh) >> 3), &vp_reg->prc_cfg5);

	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
	else
		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
}
/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64+1)/2;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		vpath->max_nofl_db = vpath->max_kdfc_db;

		if (vpath->max_nofl_db <
			((vpath->vp_config->fifo.memblock_size /
			(vpath->vp_config->fifo.max_frags *
			sizeof(struct vxge_hw_fifo_txd))) *
			vpath->vp_config->fifo.fifo_blocks)) {

			return VXGE_HW_BADCFG_FIFO_BLOCKS;
		}
		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db*2)-1);
	}

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
			&vp_reg->kdfc_fifo_trpl_ctrl);

	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
			VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
		VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);

	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}
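/*
 * Illustrative numbers for the fifo-blocks check above (not defaults): with
 * a 4096-byte memblock, max_frags = 7 and, for the sake of the example, a
 * 32-byte TxD, each memblock holds 4096 / (7 * 32) = 18 TxDLs, so
 * fifo_blocks = 8 needs 144 non-offload doorbells; if the hardware reports a
 * smaller max_nofl_db the function bails out with
 * VXGE_HW_BADCFG_FIFO_BLOCKS.
 */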
/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
			vpath->vsport_number), &vp_reg->xmac_vsport_choice);

	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->xmac_rpa_vcfg);

		if (vp_config->rpa_strip_vlan_tag !=
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
			if (vp_config->rpa_strip_vlan_tag)
				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
			else
				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
		}

		writeq(val64, &vp_reg->xmac_rpa_vcfg);
		val64 = readq(&vp_reg->rxmac_vcfg0);

		if (vp_config->mtu !=
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
			if ((vp_config->mtu +
				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vp_config->mtu +
					VXGE_HW_MAC_HEADER_MAX_SIZE);
			else
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vpath->max_mtu);
		}

		writeq(val64, &vp_reg->rxmac_vcfg0);

		val64 = readq(&vp_reg->rxmac_vcfg1);

		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

		if (hldev->config.rth_it_type ==
				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
				0x2) |
				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
		}

		writeq(val64, &vp_reg->rxmac_vcfg1);
	}
	return status;
}
/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of virtual path using the config
 * passed
 */
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	config = vpath->vp_config;

	writeq((u64)0, &vp_reg->tim_dest_addr);
	writeq((u64)0, &vp_reg->tim_vpath_map);
	writeq((u64)0, &vp_reg->tim_bitmap);
	writeq((u64)0, &vp_reg->tim_remap);

	if (config->ring.enable == VXGE_HW_RING_ENABLE)
		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

	val64 = readq(&vp_reg->tim_pci_cfg);
	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
	writeq(val64, &vp_reg->tim_pci_cfg);

	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->tti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->tti.urange_a);
		}

		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->tti.urange_b);
		}

		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->tti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
					config->tti.uec_a);
		}

		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
					config->tti.uec_b);
		}

		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
					config->tti.uec_c);
		}

		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
					config->tti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->tti.rtimer_val);
		}

		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
					config->tti.util_sel);
		}

		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->tti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	}

	if (config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->rti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->rti.urange_a);
		}

		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->rti.urange_b);
		}

		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->rti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
					config->rti.uec_a);
		}

		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
					config->rti.uec_b);
		}

		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
					config->rti.uec_c);
		}

		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
					config->rti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->rti.rtimer_val);
		}

		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
					config->rti.util_sel);
		}

		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->rti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	}

	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	return status;
}
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	config = vpath->vp_config;

	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
			config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			writeq(val64,
			&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		}
	}
}
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_mac_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);

	if (status == VXGE_HW_OK) {
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}
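/*
 * Note on the MRRS read above: config-space offset 0x78 holds the PCIe Device
 * Control word on this adapter, and (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ)
 * >> 12 extracts the Max_Read_Request_Size encoding (0 = 128B up to
 * 5 = 4096B), which is then programmed as the FB_FILL_THRESH read-DMA
 * threshold.
 */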
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct vxge_hw_vp_config *config)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[vp_id];

	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_config = config;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);

	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);

	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	INIT_LIST_HEAD(&vpath->vpath_handles);

	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);

	if (status != VXGE_HW_OK)
		__vxge_hw_vp_terminate(hldev, vp_id);
exit:
	return status;
}
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
 */
void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
		goto exit;

	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
		vpath->hldev->tim_int_mask1, vpath->vp_id);
	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
exit:
	return;
}
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
	return status;
}
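/*
 * Illustrative use (mirroring the example in the comment above), assuming
 * vp_handle came from vxge_hw_vpath_open(): the caller passes the L2 payload
 * MTU and the helper adds VXGE_HW_MAC_HEADER_MAX_SIZE internally before
 * programming RTS_MAX_FRM_LEN:
 *
 *	if (vxge_hw_vpath_mtu_set(vp_handle, 9000) != VXGE_HW_OK)
 *		return -EINVAL;
 */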
/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
		   struct vxge_hw_vpath_attr *attr,
		   struct __vxge_hw_vpath_handle **vpath_handle)
{
	struct __vxge_hw_virtualpath *vpath;
	struct __vxge_hw_vpath_handle *vp;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[attr->vp_id];

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
			&hldev->config.vp_config[attr->vp_id]);

	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	vp = (struct __vxge_hw_vpath_handle *)
		vmalloc(sizeof(struct __vxge_hw_vpath_handle));
	if (vp == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit2;
	}

	memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));

	vp->vpath = vpath;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit6;
	}

	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit7;

		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
	}

	vpath->fifoh->tx_intr_num =
		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_TX;

	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
				VXGE_HW_BLOCK_SIZE);

	if (vpath->stats_block == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit8;
	}

	vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
			stats_block->memblock;
	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
						vpath->hw_stats;

	vpath->hw_stats_sav =
		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
	memset(vpath->hw_stats_sav, 0,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit8;

	list_add(&vp->item, &vpath->vpath_handles);

	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

	*vpath_handle = vp;

	attr->fifo_attr.userdata = vpath->fifoh;
	attr->ring_attr.userdata = vpath->ringh;

	return VXGE_HW_OK;

vpath_open_exit8:
	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);
vpath_open_exit7:
	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);
vpath_open_exit6:
	vfree(vp);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:
	return status;
}
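/*
 * Sketch of a typical open sequence (the caller-side names are illustrative,
 * not exports of this file): fill a struct vxge_hw_vpath_attr with the vp_id
 * plus fifo/ring attributes and callbacks, then:
 *
 *	struct __vxge_hw_vpath_handle *vp_handle;
 *	status = vxge_hw_vpath_open(my_hldev, &attr, &vp_handle);
 *	if (status != VXGE_HW_OK)
 *		goto fail;
 *
 * On success attr.fifo_attr.userdata / attr.ring_attr.userdata are filled in
 * with the fifo and ring handles, as set at the end of the function above.
 */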
/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell
 * @vp: Handle got from previous vpath open
 *
 * This function posts the count of RxDs currently available in the vpath's
 * RxD memory to the PRC doorbell and adjusts the ring's replenish threshold
 * accordingly.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	u64 new_count, val64, val164;
	struct __vxge_hw_ring *ring;

	vpath = vp->vpath;
	ring = vpath->ringh;

	new_count = readq(&vpath->vp_reg->rxdmem_size);
	new_count &= 0x1fff;
	val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}
/*
 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
 * This function is used to close access to virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);

	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;

vpath_close_exit:
	return status;
}
/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}
/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

exit:
	return status;
}
/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory
 */
enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 *                                and offset and perform an operation
 */
enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);

vpath_stats_access_exit:
	return status;
}

/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
			struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *) vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset++;
		val64++;
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *) vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
enum vxge_hw_status __vxge_hw_vpath_stats_get(
			struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
		(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
		(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
		(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
		(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}
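
/*
 * The routine above aggregates one full vpath snapshot: the PCI/debug
 * counters from the vpath_debug_stats* registers, the general-purpose
 * event counters, the XMAC TX/RX blocks via the helpers above, and a few
 * frame discard/transfer counters. A minimal sketch of a caller that wants
 * a one-off snapshot into a local structure (local names are illustrative):
 *
 *	struct vxge_hw_vpath_stats_hw_info snap;
 *	enum vxge_hw_status rc;
 *
 *	rc = __vxge_hw_vpath_stats_get(vpath, &snap);
 *	if (rc == VXGE_HW_OK)
 *		pr_info("vpath rx frames transferred: %u\n",
 *			snap.rx_frm_transferred);
 */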

/*
 * __vxge_hw_blockpool_create - Create block pool
 */
enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {

		memblock = vxge_os_dma_malloc(hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);

		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(hldev->pdev, dma_addr))) {
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = kzalloc(
				sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);

		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}
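
/*
 * Usage sketch (illustrative only): the pool is normally created once per
 * device during initialization, with pool_size and pool_max taken from the
 * device configuration; the variable names below are placeholders:
 *
 *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *					    pool_size, pool_max);
 *	if (status != VXGE_HW_OK)
 *		goto init_failed;
 */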

/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL)
		return;

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
	}
}

/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}

/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}

/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
void vxge_hw_blockpool_block_add(
			struct __vxge_hw_device *devh,
			void *block_addr,
			u32 length,
			struct pci_dev *dma_h,
			struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 req_out;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = (struct __vxge_hw_blockpool_entry *)
			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;

	req_out = blockpool->req_out;
exit:
	return;
}
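
/*
 * Flow sketch (illustrative only): this callback completes the asynchronous
 * requests issued by __vxge_hw_blockpool_blocks_add() above. Each request is
 * expected to end in exactly one call here, so req_out incremented at request
 * time is balanced by the decrement in this callback, including on the
 * NULL-block and DMA-mapping failure paths:
 *
 *	vxge_os_dma_malloc_async(devh->pdev, devh, VXGE_HW_BLOCK_SIZE);
 *	... later, from the allocation context ...
 *	vxge_hw_blockpool_block_add(devh, block_addr, VXGE_HW_BLOCK_SIZE,
 *				    dma_h, acc_handle);
 */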

/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
				struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			memblock = NULL;
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}
	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}
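
/*
 * Usage sketch (illustrative only): callers provide a vxge_hw_mempool_dma
 * descriptor that receives the DMA address and handles, whether the block
 * came from the pool or from a fresh vxge_os_dma_malloc():
 *
 *	struct vxge_hw_mempool_dma dma_object;
 *	void *block;
 *
 *	block = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
 *					   &dma_object);
 *	if (block == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 */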

/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 * __vxge_hw_blockpool_malloc
 */
void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
			void *memblock, u32 size,
			struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock,
			&dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = (struct __vxge_hw_blockpool_entry *)
				vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}
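
/*
 * Usage sketch (illustrative only): a block obtained from
 * __vxge_hw_blockpool_malloc() is returned with the same size and DMA
 * descriptor; pool-sized blocks are recycled into the free list while
 * odd-sized ones are unmapped and handed back to the OS layer:
 *
 *	__vxge_hw_blockpool_free(devh, block, VXGE_HW_BLOCK_SIZE,
 *				 &dma_object);
 */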

/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}
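
/*
 * Usage sketch (illustrative only): unlike __vxge_hw_blockpool_malloc(),
 * this interface hands out the pool entry itself, which the caller later
 * returns with __vxge_hw_blockpool_block_free() below:
 *
 *	struct __vxge_hw_blockpool_entry *block_entry;
 *
 *	block_entry = __vxge_hw_blockpool_block_allocate(devh,
 *						VXGE_HW_BLOCK_SIZE);
 *	if (block_entry != NULL) {
 *		... use block_entry->memblock and block_entry->dma_addr ...
 *		__vxge_hw_blockpool_block_free(devh, block_entry);
 *	}
 */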

/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}