/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW |
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW |
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ |
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR |
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR |
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON |
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON |
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR |
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed the last in
 * the vpath termination sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
exit:
	return status;
}
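
/*
 * Illustrative sketch only (not part of the original driver): how a caller
 * would typically pair the two routines above around vpath bring-up and
 * tear-down. my_vpath_up() and my_vpath_down() are hypothetical wrappers.
 */
static enum vxge_hw_status my_vpath_up(struct __vxge_hw_vpath_handle *vp)
{
	/* ... program rings, fifos and TIM for this vpath first ... */
	return vxge_hw_vpath_intr_enable(vp);	/* last step of vpath init */
}

static void my_vpath_down(struct __vxge_hw_vpath_handle *vp)
{
	vxge_hw_vpath_intr_disable(vp);		/* first step of teardown */
	/* ... then quiesce and release rings and fifos ... */
}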
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}
void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}
void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
/*
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: msix id
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/*
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: msix id
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
/*
 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: msix id
 *
 * The function unmasks the msix interrupt for the given msix_id
 * if configured in MSIX oneshot mode
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}
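
/*
 * Illustrative sketch only: the addressing scheme shared by the three
 * channel MSIX helpers above. The register array slot is msix_id % 4 and
 * the bit written into the upper 32-bit word is msix_id >> 2.
 * msix_slot()/msix_bit() are hypothetical names, not driver API.
 */
static inline u32 msix_slot(int msix_id)
{
	return msix_id % 4;		/* which *_msix_*_vect[] register */
}

static inline u32 msix_bit(int msix_id)
{
	return (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32);
}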
/*
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
/*
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);
			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);
			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
/*
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
/*
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}
/*
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}
/*
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
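
/*
 * Illustrative sketch only: the typical "write, then flush" pattern the
 * helper above enables, so that a posted MMIO write is known to have
 * reached the adapter before the caller proceeds. my_kick_device() is a
 * hypothetical example, not driver API.
 */
static void my_kick_device(struct __vxge_hw_device *hldev, u64 val,
			   u64 __iomem *reg)
{
	writeq(val, reg);		/* posted write */
	vxge_hw_device_flush_io(hldev);	/* read back to flush it */
}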
/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks->crit_err)
		hldev->uld_callbacks->crit_err(hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks->link_down)
		hldev->uld_callbacks->link_down(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks->link_up)
		hldev->uld_callbacks->link_up(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		    VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
/*
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions, It first checks whether (shared IRQ) the
 * interrupt was raised by the device. Next, it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
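
/*
 * Illustrative ISR sketch (an assumption, not the driver's actual INTA
 * handler): the usual flow is begin_irq -> mask -> ack -> deferred
 * processing -> unmask. my_isr() is a hypothetical example.
 */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
		return IRQ_NONE;		/* shared IRQ, not ours */

	vxge_hw_device_mask_all(hldev);
	vxge_hw_device_clear_tx_rx(hldev);	/* ack the Tx/Rx condition */
	/* ... schedule NAPI or other deferred completion processing ... */
	vxge_hw_device_unmask_all(hldev);
	return IRQ_HANDLED;
}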
/*
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and RX interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{

	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by having free and reserved
	 * arrays separated we basically separated irq and non-irq parts.
	 * i.e. no additional lock need to be done when we free a resource */

	if (channel->length - channel->free_ptr > 0) {
		swap(channel->reserve_arr, channel->free_arr);
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
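
/*
 * Illustrative sketch only: the reserve/free two-array scheme used above,
 * reduced to a toy pool. Allocation touches only reserve_arr and frees touch
 * only free_arr, so the two sides need no shared lock until a swap happens.
 * All names below are hypothetical.
 */
struct toy_pool {
	void **reserve_arr, **free_arr;
	int reserve_ptr, reserve_top, free_ptr, length;
};

static void *toy_alloc(struct toy_pool *p)
{
	if (p->reserve_ptr - p->reserve_top > 0)
		return p->reserve_arr[--p->reserve_ptr];

	if (p->length - p->free_ptr > 0) {	/* swap arrays and retry once */
		swap(p->reserve_arr, p->free_arr);
		p->reserve_ptr = p->length;
		p->reserve_top = p->free_ptr;
		p->free_ptr = p->length;
		return p->reserve_arr[--p->reserve_ptr];
	}

	return NULL;				/* pool exhausted */
}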
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel handle
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from work array
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}
/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to free array
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve number of DTRs available. This function cannot be called
 * from data path. ring_initial_replenishi() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
/*
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
	void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}
/*
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
/*
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}
/*
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/*
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
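
/*
 * Illustrative sketch only: the reserve -> fill -> post cycle for one Rx
 * descriptor using the APIs above. my_refill_one() is hypothetical and
 * assumes the vxge_hw_ring_rxd_1b_set() buffer helper from vxge-traffic.h;
 * DMA mapping of the buffer is assumed to have been done by the caller.
 */
static enum vxge_hw_status my_refill_one(struct __vxge_hw_ring *ring,
					 dma_addr_t dma, u32 size)
{
	void *rxdh;

	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) != VXGE_HW_OK)
		return VXGE_HW_INF_OUT_OF_DESCRIPTORS;

	vxge_hw_ring_rxd_1b_set(rxdh, dma, size);	/* fill buffer info */
	vxge_hw_ring_rxd_post(ring, rxdh);		/* hand to adapter */
	return VXGE_HW_OK;
}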
/*
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/*
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = *rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert((rxdp)->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/*
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), Drop it !!!
	 */

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/*
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
}
/*
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/*
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by driver)
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
/*
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
/*
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 * @frags: Number of contiguous buffers that are part of a single
 *         transmit operation.
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
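
/*
 * Illustrative sketch only: the reserve -> buffer_set -> post flow for a
 * single-fragment transmit using the fifo APIs above. my_xmit_one() is
 * hypothetical and omits DMA mapping and completion handling.
 */
static enum vxge_hw_status my_xmit_one(struct __vxge_hw_fifo *fifo,
				       dma_addr_t dma, u32 len)
{
	void *txdlh, *txdl_priv;

	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
		return VXGE_HW_INF_OUT_OF_DESCRIPTORS;

	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifo, txdlh);	/* rings the doorbell */
	return VXGE_HW_OK;
}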
/*
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = *txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/*
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
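
/*
 * Illustrative sketch only: a typical Tx completion loop that combines
 * vxge_hw_fifo_txdl_next_completed() with vxge_hw_fifo_handle_tcode().
 * my_tx_clean() is hypothetical, omits skb unmapping/freeing, and assumes
 * the VXGE_HW_FIFO_T_CODE_OK value from vxge-traffic.h.
 */
static void my_tx_clean(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		vxge_hw_fifo_txdl_free(fifo, txdlh);	/* recycle the TxDL */
	}
}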
/*
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
/*
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
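
/*
 * Illustrative note: the loop above packs the six MAC octets MSB-first into
 * the low 48 bits of data1 (and the mask into data2), e.g. 00:50:56:aa:bb:cc
 * becomes 0x005056aabbcc. A hedged sketch of the packing, with a
 * hypothetical helper name:
 */
static u64 my_pack_mac(const u8 mac[ETH_ALEN])
{
	u64 v = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		v <<= 8;
		v |= mac[i];
	}
	return v;
}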
/*
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 * vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
/*
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/*
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/*
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled,
 *             the MSIX vector for that should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		 (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		 (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
				VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
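/*
 * Illustrative usage sketch: each vpath owns a block of four MSI-X vectors,
 * so a request_irq()-time helper might bind local vectors 0 and 1 to the Tx
 * and Rx TIM interrupts and a shared local vector to the alarm. The literal
 * alarm vector value 2 and this helper are assumptions for the example (the
 * driver keeps its own constant for this in vxge-main.h).
 */
static inline void example_bind_msix(struct __vxge_hw_vpath_handle *vp)
{
	/* [0] = Tx TIM vector, [1] = Rx TIM vector, remaining entries unused */
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};

	vxge_hw_vpath_msix_set(vp, tim_msix_id, /* alarm_msix_id */ 2);
}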
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
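/*
 * Illustrative usage sketch: a common MSI-X pattern is to mask the vector in
 * the hard IRQ handler, run the deferred work, then clear and unmask it once
 * processing is done. The pair of helpers below is hypothetical and only
 * shows the ordering of the mask/clear/unmask calls.
 */
static inline void example_msix_top_half(struct __vxge_hw_vpath_handle *vp,
					 int msix_id)
{
	vxge_hw_vpath_msix_mask(vp, msix_id);	/* stop further firing */
}

static inline void example_msix_bottom_half(struct __vxge_hw_vpath_handle *vp,
					    int msix_id)
{
	vxge_hw_vpath_msix_clear(vp, msix_id);	/* ack / re-arm one-shot */
	vxge_hw_vpath_msix_unmask(vp, msix_id);	/* allow the next interrupt */
}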
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
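/*
 * Illustrative usage sketch: in INTA mode the Tx/Rx TIM interrupts of a
 * vpath are masked while its poll context runs and unmasked when polling
 * completes. The helper below is hypothetical and compresses that window
 * into one function purely to show the pairing of the two calls.
 */
static inline void example_inta_poll_window(struct __vxge_hw_vpath_handle *vp)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);
	/* ... process Tx/Rx completions for this vpath here ... */
	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}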
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
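/*
 * Illustrative usage sketch: a NAPI poll routine typically just forwards to
 * vxge_hw_vpath_poll_rx() and lets the ring's completion callback do the
 * per-packet work. This helper is hypothetical; the vxge_ring fields it
 * touches (napi, budget, pkts_processed, handle) are assumed from the
 * definitions in vxge-main.h.
 */
static inline int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
	int pkts_processed;

	ring->budget = budget;
	ring->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(ring->handle);
	pkts_processed = ring->pkts_processed;

	if (pkts_processed < budget)
		napi_complete(napi);

	return pkts_processed;
}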
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: pointer to skb
 * @nr_skb: number of skbs
 * @more: more is coming
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
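/*
 * Illustrative usage sketch: Tx completion polling hands back the skbs to
 * free through skb_ptr, so a caller collects them into a local array and
 * releases them after each pass, looping while "more" stays set. This
 * helper, its batch size and the fifo->handle field are assumptions for
 * the example (the real caller lives in vxge-main.c).
 */
static inline void example_poll_tx(struct vxge_fifo *fifo)
{
	enum { EXAMPLE_NR_SKB = 16 };	/* small fixed-size completion batch */
	struct sk_buff *completed[EXAMPLE_NR_SKB];
	struct sk_buff **skb_ptr;
	struct sk_buff **temp;
	int more;

	do {
		more = 0;
		skb_ptr = completed;
		vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
				      EXAMPLE_NR_SKB, &more);
		/* everything between completed and skb_ptr is done with */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}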