/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0,
                &vp_reg->vpath_general_int_mask);
exit:
        return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}
/*
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * Returns: 0
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id%4]);
}

/*
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * Returns: 0
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *              with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
           (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}
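
/*
 * Illustrative sketch (not part of the driver): choosing an interrupt mode at
 * probe time with the helper above.  "my_want_msix" stands in for a module
 * parameter or the outcome of MSI-X vector allocation; it is assumed here.
 *
 *      u32 mode = my_want_msix ? VXGE_HW_INTR_MODE_MSIX
 *                              : VXGE_HW_INTR_MODE_IRQLINE;
 *
 *      mode = vxge_hw_device_set_intr_type(hldev, mode);
 *      // any unsupported value is coerced back to VXGE_HW_INTR_MODE_IRQLINE
 */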
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
 *      the type(s) of interrupts to enable.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
 *      the type(s) of interrupts to disable.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }
}
/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                                &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}
/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *      general_int_status register.
 *
 * The function performs two actions. It first checks whether (shared IRQ) the
 * interrupt was raised by the device. Next, it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}
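
/*
 * Illustrative sketch (not part of the driver): a minimal INTA interrupt
 * handler built on the API above.  The "vxgedev" structure, its devh/napi
 * members and "my_isr" are assumptions for the example; the real handler
 * lives in vxge-main.c.
 *
 *      static irqreturn_t my_isr(int irq, void *dev_id)
 *      {
 *              struct vxgedev *vdev = dev_id;
 *              u64 reason;
 *
 *              if (vxge_hw_device_begin_irq(vdev->devh, 0, &reason) !=
 *                                                              VXGE_HW_OK)
 *                      return IRQ_NONE;        // shared IRQ, not ours
 *
 *              vxge_hw_device_mask_all(vdev->devh);
 *              napi_schedule(&vdev->napi);     // defer Rx/Tx work
 *              vxge_hw_device_clear_tx_rx(vdev->devh);
 *              return IRQ_HANDLED;
 *      }
 */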
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already up, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already down, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
                enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks.crit_err)
                hldev->uld_callbacks.crit_err(
                        (struct __vxge_hw_device *)hldev,
                        type, vp_id);
out:
        return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and RX interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status1);
        }
}
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* the idea behind such a design is that by having free and reserved
         * arrays separated we basically separated irq and non-irq parts.
         * i.e. no additional lock need to be done when we free a resource */

        if (channel->length - channel->free_ptr > 0) {
                void **tmp_arr;

                tmp_arr = channel->reserve_arr;
                channel->reserve_arr = channel->free_arr;
                channel->free_arr = tmp_arr;
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
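
/*
 * Worked example (illustrative only): the two-array scheme above in
 * miniature, assuming a channel of length 4.  Descriptors are handed out
 * from reserve_arr without a lock and returned to free_arr from completion
 * context; a swap only happens when the reserve side runs dry.
 *
 *      initial state:           reserve_ptr = 4, reserve_top = 0, free_ptr = 4
 *      4 x dtr_alloc():         reserve_ptr = 0          (reserve side empty)
 *      2 x dtr_free():          free_ptr = 2             (two dtrs parked)
 *      next dtr_alloc() swaps:  reserve_ptr = 4, reserve_top = 2, free_ptr = 4
 *
 * After the swap, reserve_ptr - reserve_top == 2, i.e. exactly the two
 * descriptors that were freed are allocatable again.
 */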
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve number of DTRs available. This function can not be called
 * from data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}
/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
        void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;
        u64 control_0, own;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        control_0 = rxdp->control_0;
        own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
        *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

        /* check whether it is not the end */
        if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {

                vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
                                0);

                vxge_hw_channel_dtr_complete(channel);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it. since we don't want to return
         * garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* If the t_code is not supported and if the
         * t_code is other than 0x5 (unparseable packet
         * such as unknown IPv6 header), Drop it !!!
         */
        if (t_code == VXGE_HW_RING_T_CODE_OK ||
                t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}
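
/*
 * Illustrative sketch (not part of the driver): a typical Rx completion loop
 * built on the ring APIs above.  "my_consume_skb" stands in for whatever the
 * caller does with a completed buffer; replenishing the ring is omitted for
 * brevity, and error handling is reduced to the t_code check.
 *
 *      void my_poll_ring(struct __vxge_hw_ring *ring)
 *      {
 *              void *rxdh;
 *              u8 t_code;
 *
 *              while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *                                                              VXGE_HW_OK) {
 *                      if (t_code != VXGE_HW_RING_T_CODE_OK)
 *                              vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *                      my_consume_skb(rxdh);
 *                      vxge_hw_ring_rxd_free(ring, rxdh);
 *              }
 *      }
 */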
/*
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

        mmiowb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *         with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by driver)
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                                  void *txdlh, u32 frag_idx,
                                  dma_addr_t dma_pointer, u32 size)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp, *txdp_last;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

        if (frag_idx != 0)
                txdp->control_0 = txdp->control_1 = 0;
        else {
                txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
                txdp->control_1 |= fifo->interrupt_type;
                txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
                        fifo->tx_intr_num);
                if (txdl_priv->frags) {
                        txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
                                (txdl_priv->frags - 1);
                        txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
                }
        }

        vxge_assert(frag_idx < txdl_priv->alloc_frags);

        txdp->buffer_pointer = (u64)dma_pointer;
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);

        fifo->stats->total_buffers++;
        txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 * @frags: Number of contiguous buffers that are part of a single
 *         transmit operation.
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp_last;
        struct vxge_hw_fifo_txd *txdp_first;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

        txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
        txdp_last->control_0 |=
              VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

        __vxge_hw_non_offload_db_post(fifo,
                (u64)txdl_priv->dma_addr,
                txdl_priv->frags - 1,
                fifo->no_snoop_bits);

        fifo->stats->total_posts++;
        fifo->stats->common_stats.usage_cnt++;
        if (fifo->stats->common_stats.usage_max <
                fifo->stats->common_stats.usage_cnt)
                fifo->stats->common_stats.usage_max =
                        fifo->stats->common_stats.usage_cnt;
}
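
/*
 * Illustrative sketch (not part of the driver): the transmit flow built from
 * the three fifo calls above for a single, already DMA-mapped buffer.
 * "my_dma" and "my_len" are assumed to come from the caller's mapping code.
 *
 *      int my_xmit_one(struct __vxge_hw_fifo *fifo,
 *                      dma_addr_t my_dma, u32 my_len)
 *      {
 *              void *txdlh, *txdl_priv;
 *
 *              if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
 *                                                              VXGE_HW_OK)
 *                      return -EBUSY;          // ring full, stop the queue
 *
 *              vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, my_dma, my_len);
 *              vxge_hw_fifo_txdl_post(fifo, txdlh);    // rings the doorbell
 *              return 0;
 *      }
 */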
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
        struct __vxge_hw_fifo *fifo, void **txdlh,
        enum vxge_hw_fifo_tcode *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_fifo_txd *txdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        vxge_hw_channel_dtr_try_complete(channel, txdlh);

        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
        if (txdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether host owns it */
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

                vxge_assert(txdp->host_control != 0);

                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

                if (fifo->stats->common_stats.usage_cnt > 0)
                        fifo->stats->common_stats.usage_cnt--;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* no more completions */
        *txdlh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
                                              void *txdlh,
                                              enum vxge_hw_fifo_tcode t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        u32 max_frags;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
                        (struct vxge_hw_fifo_txd *)txdlh);

        max_frags = fifo->config->max_frags;

        vxge_hw_channel_dtr_free(channel, txdlh);
}
/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN],
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        switch (duplicate_mode) {
        case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
                i = 0;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
                i = 1;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
                i = 2;
                break;
        default:
                i = 0;
                break;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
        return status;
}
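
/*
 * Worked example (illustrative only): the loop above packs the six MAC
 * octets big-endian into the low 48 bits of data1, and the mask into data2.
 * For macaddr = 01:02:03:04:05:06 with an all-ones mask the loop yields:
 *
 *      data1 = 0x0000010203040506ULL;
 *      data2 = 0x0000ffffffffffffULL;
 *
 * which VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR()/..._MASK() then shift
 * into the RTS steering register layout before the table write.
 */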
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 * vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
        return status;
}
/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
                        0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
        return status;
}

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
        u64 data;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
                        0, vid, &data);

        *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
        return status;
}

/**
 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the next vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
        u64 data;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
                        0, vid, &data);

        *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
        return status;
}
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
                        0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
        return status;
}
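
/*
 * Illustrative sketch (not part of the driver): mirroring the stack's VLAN
 * filter into the vpath table with the calls above, e.g. from an
 * ndo_vlan_rx_add_vid()-style callback.  "my_vp" stands in for the vpath
 * handle the caller already holds.
 *
 *      vxge_hw_vpath_vid_add(my_vp, vid);      // when a VLAN id is added
 *      ...
 *      vxge_hw_vpath_vid_delete(my_vp, vid);   // when it is removed
 */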
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        /* Enable promiscuous mode for function 0 only */
        if (!(vpath->hldev->access_rights &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
                return VXGE_HW_OK;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

                val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
                         VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
                         VXGE_HW_RXMAC_VCFG0_BCAST_EN |
                         VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}
/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

                val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
                           VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
                           VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
                val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
                val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
                val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
                        struct __vxge_hw_virtualpath *vpath,
                        u32 skip_alarms)
{
        u64 val64;
        u64 alarm_status;
        u64 pic_status;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
        u64 mask64;
        struct vxge_hw_vpath_stats_sw_info *sw_stats;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vpath == NULL) {
                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
                        alarm_event);
                goto out2;
        }

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;
        alarm_status = readq(&vp_reg->vpath_general_int_status);

        if (alarm_status == VXGE_HW_ALL_FOXES) {
                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
                        alarm_event);
                goto out;
        }

        sw_stats = vpath->sw_stats;

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
                sw_stats->error_stats.unknown_alarms++;

                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
                        alarm_event);
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                             (!(val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                            ((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                             && (!(val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                        ))) {
                                sw_stats->error_stats.network_sustained_fault++;

                                writeq(
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                __vxge_hw_device_handle_link_down_ind(hldev);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_LINK_DOWN, alarm_event);
                        }

                        if (((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                             (!(val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                            ((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                             && (!(val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                        ))) {
                                sw_stats->error_stats.network_sustained_ok++;

                                writeq(
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                __vxge_hw_device_handle_link_up_ind(hldev);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_LINK_UP, alarm_event);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);

                        alarm_event = VXGE_HW_SET_LEVEL(
                                VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

                        if (skip_alarms)
                                return VXGE_HW_OK;
                }
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

                pic_status = readq(&vp_reg->vpath_ppif_int_status);

                if (pic_status &
                    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

                        val64 = readq(&vp_reg->general_errors_reg);
                        mask64 = readq(&vp_reg->general_errors_mask);

                        if ((val64 &
                                VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
                                ~mask64) {
                                sw_stats->error_stats.ini_serr_det++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_SERR, alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
                                ~mask64) {
                                sw_stats->error_stats.dblgen_fifo0_overflow++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR, alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
                                ~mask64)
                                sw_stats->error_stats.statsb_pif_chain_error++;

                        if ((val64 &
                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
                                ~mask64)
                                sw_stats->error_stats.statsb_drop_timeout++;

                        if ((val64 &
                                VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
                                ~mask64)
                                sw_stats->error_stats.target_illegal_access++;

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->general_errors_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_ALARM_CLEARED,
                                        alarm_event);
                        }
                }

                if (pic_status &
                    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

                        val64 = readq(&vp_reg->kdfcctl_errors_reg);
                        mask64 = readq(&vp_reg->kdfcctl_errors_mask);

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_poison++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->kdfcctl_errors_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_ALARM_CLEARED,
                                        alarm_event);
                        }
                }
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

                val64 = readq(&vp_reg->wrdma_alarm_status);

                if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

                        val64 = readq(&vp_reg->prc_alarm_reg);
                        mask64 = readq(&vp_reg->prc_alarm_mask);

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
                                ~mask64)
                                sw_stats->error_stats.prc_ring_bumps++;

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
                                ~mask64) {
                                sw_stats->error_stats.prc_rxdcm_sc_err++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_VPATH_ERR,
                                        alarm_event);
                        }

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
                                & ~mask64) {
                                sw_stats->error_stats.prc_rxdcm_sc_abort++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_VPATH_ERR,
                                        alarm_event);
                        }

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
                                & ~mask64) {
                                sw_stats->error_stats.prc_quanta_size_err++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_VPATH_ERR,
                                        alarm_event);
                        }

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->prc_alarm_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_ALARM_CLEARED,
                                        alarm_event);
                        }
                }
        }
out:
        hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
        if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
                (alarm_event == VXGE_HW_EVENT_UNKNOWN))
                return VXGE_HW_OK;

        __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

        if (alarm_event == VXGE_HW_EVENT_SERR)
                return VXGE_HW_ERR_CRITICAL;

        return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
                VXGE_HW_ERR_SLOT_FREEZE :
                (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
                VXGE_HW_ERR_VPATH;
}
/**
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
                        struct __vxge_hw_vpath_handle *vp,
                        u32 skip_alarms)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
        return status;
}
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
	}

	if (vpath->hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}
}
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
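/*
 * Note on the indexing above (a worked reading of the arithmetic in the
 * code, not a restatement of the Titan register specification):
 * set_msix_mask_vect[] is a four-entry register array. The entry is
 * selected by msix_id % 4 and the bit within it by msix_id >> 2, and the
 * upper 32 bits of that mask are written with
 * __vxge_hw_pio_mem_write32_upper(). For example, msix_id = 5 writes
 * vxge_mBIT(1) into set_msix_mask_vect[1].
 */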
/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->
				clr_msix_one_shot_vec[msix_id % 4]);
	} else {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->
				clear_msix_mask_vect[msix_id % 4]);
	}
}
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
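/*
 * Usage sketch (illustrative only, with assumed handler and argument
 * names): one possible per-vector interrupt handler masks its vector,
 * does the work, clears the vector (which re-arms a one-shot vector via
 * clr_msix_one_shot_vec, see vxge_hw_vpath_msix_clear() above) and
 * unmasks it before returning.
 *
 *	static irqreturn_t example_vpath_msix_handle(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_vpath_handle *vp = dev_id;
 *		int msix_id = 0;	/* assumed vector index */
 *
 *		vxge_hw_vpath_msix_mask(vp, msix_id);
 *		/* ... process completions for this vector ... */
 *		vxge_hw_vpath_msix_clear(vp, msix_id);
 *		vxge_hw_vpath_msix_unmask(vp, msix_id);
 *
 *		return IRQ_HANDLED;
 *	}
 */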
/**
 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
 * @vp: Virtual Path handle.
 *
 * The function masks all msix interrupts for the given vpath.
 */
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
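/*
 * Worked example for the doorbell arithmetic above (numbers are
 * illustrative; rxds_limit and rxds_per_block come from the ring
 * configuration): if 20 RxDs have completed since the last doorbell and
 * rxds_limit is 16, new_count becomes 20 * 4 = 80 qwords, since each RxD
 * is 4 qwords. If the running total_db_cnt crosses rxds_per_block, 4
 * more qwords are added (the per-block adjustment in the code) and the
 * counter wraps via the modulo before the qword count is written to
 * prc_rxd_doorbell.
 */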
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_rx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;