1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/prefetch.h>
17 #include "vxge-traffic.h"
18 #include "vxge-config.h"
19 #include "vxge-main.h"
22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23 * @vp: Virtual Path handle.
25  * Enable vpath interrupts. The function is to be executed last in the
26  * vpath initialization sequence.
28 * See also: vxge_hw_vpath_intr_disable()
30 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
32 u64 val64;
34 struct __vxge_hw_virtualpath *vpath;
35 struct vxge_hw_vpath_reg __iomem *vp_reg;
36 enum vxge_hw_status status = VXGE_HW_OK;
37 if (vp == NULL) {
38 status = VXGE_HW_ERR_INVALID_HANDLE;
39 goto exit;
42 vpath = vp->vpath;
44 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46 goto exit;
49 vp_reg = vpath->vp_reg;
51 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
53 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54 &vp_reg->general_errors_reg);
56 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57 &vp_reg->pci_config_errors_reg);
59 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 &vp_reg->mrpcim_to_vpath_alarm_reg);
62 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 &vp_reg->srpcim_to_vpath_alarm_reg);
65 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 &vp_reg->vpath_ppif_int_status);
68 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 &vp_reg->srpcim_msg_to_vpath_reg);
71 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 &vp_reg->vpath_pcipif_int_status);
74 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 &vp_reg->prc_alarm_reg);
77 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 &vp_reg->wrdma_alarm_status);
80 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 &vp_reg->asic_ntwk_vp_err_reg);
83 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 &vp_reg->xgmac_vp_int_status);
86 val64 = readq(&vp_reg->vpath_general_int_status);
88 /* Mask unwanted interrupts */
90 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91 &vp_reg->vpath_pcipif_int_mask);
93 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94 &vp_reg->srpcim_msg_to_vpath_mask);
96 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 &vp_reg->srpcim_to_vpath_alarm_mask);
99 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 &vp_reg->mrpcim_to_vpath_alarm_mask);
102 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 &vp_reg->pci_config_errors_mask);
105 /* Unmask the individual interrupts */
107 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
111 &vp_reg->general_errors_mask);
113 __vxge_hw_pio_mem_write32_upper(
114 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
120 &vp_reg->kdfcctl_errors_mask);
122 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
124 __vxge_hw_pio_mem_write32_upper(
125 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
126 &vp_reg->prc_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
131 if (vpath->hldev->first_vp_id != vpath->vp_id)
132 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133 &vp_reg->asic_ntwk_vp_err_mask);
134 else
135 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
138 &vp_reg->asic_ntwk_vp_err_mask);
140 __vxge_hw_pio_mem_write32_upper(0,
141 &vp_reg->vpath_general_int_mask);
142 exit:
143 return status;
148 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149 * @vp: Virtual Path handle.
151  * Disable vpath interrupts.
154 * See also: vxge_hw_vpath_intr_enable()
156 enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 struct __vxge_hw_vpath_handle *vp)
159 u64 val64;
161 struct __vxge_hw_virtualpath *vpath;
162 enum vxge_hw_status status = VXGE_HW_OK;
163 struct vxge_hw_vpath_reg __iomem *vp_reg;
164 if (vp == NULL) {
165 status = VXGE_HW_ERR_INVALID_HANDLE;
166 goto exit;
169 vpath = vp->vpath;
171 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 goto exit;
175 vp_reg = vpath->vp_reg;
177 __vxge_hw_pio_mem_write32_upper(
178 (u32)VXGE_HW_INTR_MASK_ALL,
179 &vp_reg->vpath_general_int_mask);
181 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
183 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 &vp_reg->general_errors_mask);
188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 &vp_reg->pci_config_errors_mask);
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->mrpcim_to_vpath_alarm_mask);
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->srpcim_to_vpath_alarm_mask);
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->vpath_ppif_int_mask);
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->srpcim_msg_to_vpath_mask);
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->vpath_pcipif_int_mask);
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->wrdma_alarm_mask);
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->prc_alarm_mask);
212 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 &vp_reg->xgmac_vp_int_mask);
215 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 &vp_reg->asic_ntwk_vp_err_mask);
218 exit:
219 return status;
222 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
224 struct vxge_hw_vpath_reg __iomem *vp_reg;
225 struct vxge_hw_vp_config *config;
226 u64 val64;
228 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229 return;
231 vp_reg = fifo->vp_reg;
232 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
234 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238 fifo->tim_tti_cfg1_saved = val64;
239 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
245 u64 val64 = ring->tim_rti_cfg1_saved;
247 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 ring->tim_rti_cfg1_saved = val64;
249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
252 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
254 u64 val64 = fifo->tim_tti_cfg3_saved;
255 u64 timer = (fifo->rtimer * 1000) / 272;
257 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 if (timer)
259 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
262 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263 /* tti_cfg3_saved is not updated again because it is
264 * initialized at one place only - init time.
268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
270 u64 val64 = ring->tim_rti_cfg3_saved;
271 u64 timer = (ring->rtimer * 1000) / 272;
273 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 if (timer)
275 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279 /* rti_cfg3_saved is not updated again because it is
280 * initialized at one place only - init time.
285 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
286  * @channel: Channel for rx or tx handle
287 * @msix_id: MSIX ID
289 * The function masks the msix interrupt for the given msix_id
293 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
296 __vxge_hw_pio_mem_write32_upper(
297 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
302 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
303  * @channel: Channel for rx or tx handle
304  * @msix_id: MSIX ID
306 * The function unmasks the msix interrupt for the given msix_id
310 void
311 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
314 __vxge_hw_pio_mem_write32_upper(
315 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
320  * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
321  * @channel: Channel for rx or tx handle
322  * @msix_id: MSIX ID
324  * The function clears (re-arms) the msix interrupt for the given msix_id
325  * if the device is configured in MSIX one-shot mode
329 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
331 __vxge_hw_pio_mem_write32_upper(
332 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
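/*
 * Illustrative sketch (compiled out, not part of the driver): a per-channel
 * MSI-X handler that masks its own vector while deferring the work, using
 * the helpers above. The vector id and the decision to defer to a bottom
 * half are assumptions about the caller; the poll routine would later call
 * vxge_hw_channel_msix_unmask() (or _clear() in one-shot mode) to re-arm.
 */
#if 0
static irqreturn_t example_channel_msix_handler(int irq, void *data)
{
	struct __vxge_hw_channel *channel = data;
	int msix_id = 1;	/* vector assigned to this channel (assumed) */

	vxge_hw_channel_msix_mask(channel, msix_id);
	/* ...schedule the bottom half (e.g. NAPI) here... */
	return IRQ_HANDLED;
}
#endif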
337 * vxge_hw_device_set_intr_type - Updates the configuration
338 * with new interrupt type.
339 * @hldev: HW device handle.
340 * @intr_mode: New interrupt type
342 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
345 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
351 hldev->config.intr_mode = intr_mode;
352 return intr_mode;
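/*
 * Illustrative sketch (compiled out, not part of the driver): a probe-time
 * helper built on vxge_hw_device_set_intr_type(). Any value other than the
 * modes accepted above is coerced to VXGE_HW_INTR_MODE_IRQLINE, so the
 * caller only needs to compare the return value. "requested_mode" is a
 * hypothetical input, e.g. a module parameter.
 */
#if 0
static void example_pick_intr_mode(struct __vxge_hw_device *hldev,
				   u32 requested_mode)
{
	u32 mode = vxge_hw_device_set_intr_type(hldev, requested_mode);

	if (mode != requested_mode)
		pr_info("vxge: intr mode %u not supported, using INTA\n",
			requested_mode);
}
#endif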
356 * vxge_hw_device_intr_enable - Enable interrupts.
357 * @hldev: HW device handle.
361  * Enable Titan interrupts. The function is to be executed last in the
362  * Titan initialization sequence.
364 * See also: vxge_hw_device_intr_disable()
366 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
368 u32 i;
369 u64 val64;
370 u32 val32;
372 vxge_hw_device_mask_all(hldev);
374 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
376 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377 continue;
379 vxge_hw_vpath_intr_enable(
380 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
383 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
385 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
387 if (val64 != 0) {
388 writeq(val64, &hldev->common_reg->tim_int_status0);
390 writeq(~val64, &hldev->common_reg->tim_int_mask0);
393 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
394 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
396 if (val32 != 0) {
397 __vxge_hw_pio_mem_write32_upper(val32,
398 &hldev->common_reg->tim_int_status1);
400 __vxge_hw_pio_mem_write32_upper(~val32,
401 &hldev->common_reg->tim_int_mask1);
405 val64 = readq(&hldev->common_reg->titan_general_int_status);
407 vxge_hw_device_unmask_all(hldev);
411 * vxge_hw_device_intr_disable - Disable Titan interrupts.
412 * @hldev: HW device handle.
416 * Disable Titan interrupts.
418 * See also: vxge_hw_device_intr_enable()
420 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
422 u32 i;
424 vxge_hw_device_mask_all(hldev);
426 /* mask all the tim interrupts */
427 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429 &hldev->common_reg->tim_int_mask1);
431 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
433 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434 continue;
436 vxge_hw_vpath_intr_disable(
437 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
442 * vxge_hw_device_mask_all - Mask all device interrupts.
443 * @hldev: HW device handle.
445 * Mask all device interrupts.
447 * See also: vxge_hw_device_unmask_all()
449 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
451 u64 val64;
453 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
456 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 &hldev->common_reg->titan_mask_all_int);
461 * vxge_hw_device_unmask_all - Unmask all device interrupts.
462 * @hldev: HW device handle.
464 * Unmask all device interrupts.
466 * See also: vxge_hw_device_mask_all()
468 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
470 u64 val64 = 0;
472 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
475 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 &hldev->common_reg->titan_mask_all_int);
480 * vxge_hw_device_flush_io - Flush io writes.
481 * @hldev: HW device handle.
483 * The function performs a read operation to flush io writes.
485 * Returns: void
487 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
489 u32 val32;
491 val32 = readl(&hldev->common_reg->titan_general_int_status);
495 * __vxge_hw_device_handle_error - Handle error
496 * @hldev: HW device
497 * @vp_id: Vpath Id
498 * @type: Error type. Please see enum vxge_hw_event{}
500 * Handle error.
502 static enum vxge_hw_status
503 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504 enum vxge_hw_event type)
506 switch (type) {
507 case VXGE_HW_EVENT_UNKNOWN:
508 break;
509 case VXGE_HW_EVENT_RESET_START:
510 case VXGE_HW_EVENT_RESET_COMPLETE:
511 case VXGE_HW_EVENT_LINK_DOWN:
512 case VXGE_HW_EVENT_LINK_UP:
513 goto out;
514 case VXGE_HW_EVENT_ALARM_CLEARED:
515 goto out;
516 case VXGE_HW_EVENT_ECCERR:
517 case VXGE_HW_EVENT_MRPCIM_ECCERR:
518 goto out;
519 case VXGE_HW_EVENT_FIFO_ERR:
520 case VXGE_HW_EVENT_VPATH_ERR:
521 case VXGE_HW_EVENT_CRITICAL_ERR:
522 case VXGE_HW_EVENT_SERR:
523 break;
524 case VXGE_HW_EVENT_SRPCIM_SERR:
525 case VXGE_HW_EVENT_MRPCIM_SERR:
526 goto out;
527 case VXGE_HW_EVENT_SLOT_FREEZE:
528 break;
529 default:
530 vxge_assert(0);
531 goto out;
534 /* notify driver */
535 if (hldev->uld_callbacks->crit_err)
536 hldev->uld_callbacks->crit_err(hldev,
537 type, vp_id);
538 out:
540 return VXGE_HW_OK;
544 * __vxge_hw_device_handle_link_down_ind
545 * @hldev: HW device handle.
547 * Link down indication handler. The function is invoked by HW when
548 * Titan indicates that the link is down.
550 static enum vxge_hw_status
551 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
554  * If the link state is already down, there is nothing to do: return.
556 if (hldev->link_state == VXGE_HW_LINK_DOWN)
557 goto exit;
559 hldev->link_state = VXGE_HW_LINK_DOWN;
561 /* notify driver */
562 if (hldev->uld_callbacks->link_down)
563 hldev->uld_callbacks->link_down(hldev);
564 exit:
565 return VXGE_HW_OK;
569 * __vxge_hw_device_handle_link_up_ind
570 * @hldev: HW device handle.
572 * Link up indication handler. The function is invoked by HW when
573  * Titan indicates that the link has been up for a programmable amount of time.
575 static enum vxge_hw_status
576 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
579  * If the link state is already up, there is nothing to do: return.
581 if (hldev->link_state == VXGE_HW_LINK_UP)
582 goto exit;
584 hldev->link_state = VXGE_HW_LINK_UP;
586 /* notify driver */
587 if (hldev->uld_callbacks->link_up)
588 hldev->uld_callbacks->link_up(hldev);
589 exit:
590 return VXGE_HW_OK;
594 * __vxge_hw_vpath_alarm_process - Process Alarms.
595 * @vpath: Virtual Path.
596 * @skip_alarms: Do not clear the alarms
598 * Process vpath alarms.
601 static enum vxge_hw_status
602 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
603 u32 skip_alarms)
605 u64 val64;
606 u64 alarm_status;
607 u64 pic_status;
608 struct __vxge_hw_device *hldev = NULL;
609 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
610 u64 mask64;
611 struct vxge_hw_vpath_stats_sw_info *sw_stats;
612 struct vxge_hw_vpath_reg __iomem *vp_reg;
614 if (vpath == NULL) {
615 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
616 alarm_event);
617 goto out2;
620 hldev = vpath->hldev;
621 vp_reg = vpath->vp_reg;
622 alarm_status = readq(&vp_reg->vpath_general_int_status);
624 if (alarm_status == VXGE_HW_ALL_FOXES) {
625 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
626 alarm_event);
627 goto out;
630 sw_stats = vpath->sw_stats;
632 if (alarm_status & ~(
633 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
637 sw_stats->error_stats.unknown_alarms++;
639 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
640 alarm_event);
641 goto out;
644 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646 val64 = readq(&vp_reg->xgmac_vp_int_status);
648 if (val64 &
649 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653 if (((val64 &
654 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
655 (!(val64 &
656 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
657 ((val64 &
658 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
659 (!(val64 &
660 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
661 ))) {
662 sw_stats->error_stats.network_sustained_fault++;
664 writeq(
665 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
666 &vp_reg->asic_ntwk_vp_err_mask);
668 __vxge_hw_device_handle_link_down_ind(hldev);
669 alarm_event = VXGE_HW_SET_LEVEL(
670 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
673 if (((val64 &
674 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
675 (!(val64 &
676 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
677 ((val64 &
678 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
679 (!(val64 &
680 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
681 ))) {
683 sw_stats->error_stats.network_sustained_ok++;
685 writeq(
686 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
687 &vp_reg->asic_ntwk_vp_err_mask);
689 __vxge_hw_device_handle_link_up_ind(hldev);
690 alarm_event = VXGE_HW_SET_LEVEL(
691 VXGE_HW_EVENT_LINK_UP, alarm_event);
694 writeq(VXGE_HW_INTR_MASK_ALL,
695 &vp_reg->asic_ntwk_vp_err_reg);
697 alarm_event = VXGE_HW_SET_LEVEL(
698 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700 if (skip_alarms)
701 return VXGE_HW_OK;
705 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707 pic_status = readq(&vp_reg->vpath_ppif_int_status);
709 if (pic_status &
710 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712 val64 = readq(&vp_reg->general_errors_reg);
713 mask64 = readq(&vp_reg->general_errors_mask);
715 if ((val64 &
716 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
717 ~mask64) {
718 sw_stats->error_stats.ini_serr_det++;
720 alarm_event = VXGE_HW_SET_LEVEL(
721 VXGE_HW_EVENT_SERR, alarm_event);
724 if ((val64 &
725 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
726 ~mask64) {
727 sw_stats->error_stats.dblgen_fifo0_overflow++;
729 alarm_event = VXGE_HW_SET_LEVEL(
730 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
733 if ((val64 &
734 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
735 ~mask64)
736 sw_stats->error_stats.statsb_pif_chain_error++;
738 if ((val64 &
739 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
740 ~mask64)
741 sw_stats->error_stats.statsb_drop_timeout++;
743 if ((val64 &
744 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
745 ~mask64)
746 sw_stats->error_stats.target_illegal_access++;
748 if (!skip_alarms) {
749 writeq(VXGE_HW_INTR_MASK_ALL,
750 &vp_reg->general_errors_reg);
751 alarm_event = VXGE_HW_SET_LEVEL(
752 VXGE_HW_EVENT_ALARM_CLEARED,
753 alarm_event);
757 if (pic_status &
758 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760 val64 = readq(&vp_reg->kdfcctl_errors_reg);
761 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763 if ((val64 &
764 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
765 ~mask64) {
766 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768 alarm_event = VXGE_HW_SET_LEVEL(
769 VXGE_HW_EVENT_FIFO_ERR,
770 alarm_event);
773 if ((val64 &
774 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
775 ~mask64) {
776 sw_stats->error_stats.kdfcctl_fifo0_poison++;
778 alarm_event = VXGE_HW_SET_LEVEL(
779 VXGE_HW_EVENT_FIFO_ERR,
780 alarm_event);
783 if ((val64 &
784 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
785 ~mask64) {
786 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788 alarm_event = VXGE_HW_SET_LEVEL(
789 VXGE_HW_EVENT_FIFO_ERR,
790 alarm_event);
793 if (!skip_alarms) {
794 writeq(VXGE_HW_INTR_MASK_ALL,
795 &vp_reg->kdfcctl_errors_reg);
796 alarm_event = VXGE_HW_SET_LEVEL(
797 VXGE_HW_EVENT_ALARM_CLEARED,
798 alarm_event);
804 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806 val64 = readq(&vp_reg->wrdma_alarm_status);
808 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810 val64 = readq(&vp_reg->prc_alarm_reg);
811 mask64 = readq(&vp_reg->prc_alarm_mask);
813 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
814 ~mask64)
815 sw_stats->error_stats.prc_ring_bumps++;
817 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
818 ~mask64) {
819 sw_stats->error_stats.prc_rxdcm_sc_err++;
821 alarm_event = VXGE_HW_SET_LEVEL(
822 VXGE_HW_EVENT_VPATH_ERR,
823 alarm_event);
826 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
827 & ~mask64) {
828 sw_stats->error_stats.prc_rxdcm_sc_abort++;
830 alarm_event = VXGE_HW_SET_LEVEL(
831 VXGE_HW_EVENT_VPATH_ERR,
832 alarm_event);
835 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
836 & ~mask64) {
837 sw_stats->error_stats.prc_quanta_size_err++;
839 alarm_event = VXGE_HW_SET_LEVEL(
840 VXGE_HW_EVENT_VPATH_ERR,
841 alarm_event);
844 if (!skip_alarms) {
845 writeq(VXGE_HW_INTR_MASK_ALL,
846 &vp_reg->prc_alarm_reg);
847 alarm_event = VXGE_HW_SET_LEVEL(
848 VXGE_HW_EVENT_ALARM_CLEARED,
849 alarm_event);
853 out:
854 hldev->stats.sw_dev_err_stats.vpath_alarms++;
855 out2:
856 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
857 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
858 return VXGE_HW_OK;
860 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862 if (alarm_event == VXGE_HW_EVENT_SERR)
863 return VXGE_HW_ERR_CRITICAL;
865 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
866 VXGE_HW_ERR_SLOT_FREEZE :
867 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
868 VXGE_HW_ERR_VPATH;
872 * vxge_hw_device_begin_irq - Begin IRQ processing.
873 * @hldev: HW device handle.
874 * @skip_alarms: Do not clear the alarms
875 * @reason: "Reason" for the interrupt, the value of Titan's
876 * general_int_status register.
878  * The function performs two actions: it first checks whether the (possibly
879  * shared) interrupt was raised by the device, and then masks the device interrupts.
881 * Note:
882 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
883 * bridge. Therefore, two back-to-back interrupts are potentially possible.
885  * Returns: 0, if the interrupt is not "ours" (note that in this case the
886  * device remains enabled).
887  * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
888  * status.
890 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
891 u32 skip_alarms, u64 *reason)
893 u32 i;
894 u64 val64;
895 u64 adapter_status;
896 u64 vpath_mask;
897 enum vxge_hw_status ret = VXGE_HW_OK;
899 val64 = readq(&hldev->common_reg->titan_general_int_status);
901 if (unlikely(!val64)) {
902 /* not Titan interrupt */
903 *reason = 0;
904 ret = VXGE_HW_ERR_WRONG_IRQ;
905 goto exit;
908 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910 adapter_status = readq(&hldev->common_reg->adapter_status);
912 if (adapter_status == VXGE_HW_ALL_FOXES) {
914 __vxge_hw_device_handle_error(hldev,
915 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
916 *reason = 0;
917 ret = VXGE_HW_ERR_SLOT_FREEZE;
918 goto exit;
922 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924 *reason = val64;
926 vpath_mask = hldev->vpaths_deployed >>
927 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929 if (val64 &
930 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
931 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933 return VXGE_HW_OK;
936 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938 if (unlikely(val64 &
939 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941 enum vxge_hw_status error_level = VXGE_HW_OK;
943 hldev->stats.sw_dev_err_stats.vpath_alarms++;
945 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
948 continue;
950 ret = __vxge_hw_vpath_alarm_process(
951 &hldev->virtual_paths[i], skip_alarms);
953 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
956 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
957 break;
960 ret = error_level;
962 exit:
963 return ret;
967 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
968 * condition that has caused the Tx and RX interrupt.
969 * @hldev: HW device.
971 * Acknowledge (that is, clear) the condition that has caused
972 * the Tx and Rx interrupt.
973 * See also: vxge_hw_device_begin_irq(),
974 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
976 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
979 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
980 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
981 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
982 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
983 &hldev->common_reg->tim_int_status0);
986 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
987 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
988 __vxge_hw_pio_mem_write32_upper(
989 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
990 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
991 &hldev->common_reg->tim_int_status1);
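/*
 * Illustrative sketch (compiled out, not part of the driver): a minimal
 * top-half handler built on vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_all() and vxge_hw_device_clear_tx_rx(). Registering
 * the handler with IRQF_SHARED and deferring the real work to a bottom
 * half (which would call vxge_hw_device_unmask_all() when done) are
 * assumptions about the caller, not something this file mandates.
 */
#if 0
static irqreturn_t example_vxge_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, interrupt is not ours */

	if (status == VXGE_HW_OK) {
		/* Mask further interrupts, ack the Tx/Rx condition and let
		 * a bottom half (e.g. NAPI poll) do the heavy lifting. */
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_clear_tx_rx(hldev);
	}

	/* Alarm/error statuses: begin_irq already ran alarm processing. */
	return IRQ_HANDLED;
}
#endif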
996 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
997 * @channel: Channel
998 * @dtrh: Buffer to return the DTR pointer
1000 * Allocates a dtr from the reserve array. If the reserve array is empty,
1001 * it swaps the reserve and free arrays.
1004 static enum vxge_hw_status
1005 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007 if (channel->reserve_ptr - channel->reserve_top > 0) {
1008 _alloc_after_swap:
1009 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1011 return VXGE_HW_OK;
1014 /* switch between empty and full arrays */
1016  /* The idea behind this design is that by keeping the free and reserve
1017   * arrays separate we also separate the irq and non-irq paths, i.e. no
1018   * additional locking is needed when a resource is freed. */
1020 if (channel->length - channel->free_ptr > 0) {
1021 swap(channel->reserve_arr, channel->free_arr);
1022 channel->reserve_ptr = channel->length;
1023 channel->reserve_top = channel->free_ptr;
1024 channel->free_ptr = channel->length;
1026 channel->stats->reserve_free_swaps_cnt++;
1028 goto _alloc_after_swap;
1031 channel->stats->full_cnt++;
1033 *dtrh = NULL;
1034 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1038 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1039 * @channelh: Channel
1040 * @dtrh: DTR pointer
1042 * Posts a dtr to work array.
1045 static void
1046 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1048 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1050 channel->work_arr[channel->post_index++] = dtrh;
1052 /* wrap-around */
1053 if (channel->post_index == channel->length)
1054 channel->post_index = 0;
1058 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1059 * @channel: Channel
1060 * @dtr: Buffer to return the next completed DTR pointer
1062  * Returns the next completed dtr without removing it from the work array
1065 void
1066 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1068 vxge_assert(channel->compl_index < channel->length);
1070 *dtrh = channel->work_arr[channel->compl_index];
1071 prefetch(*dtrh);
1075 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1076 * @channel: Channel handle
1078 * Removes the next completed dtr from work array
1081 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1083 channel->work_arr[channel->compl_index] = NULL;
1085 /* wrap-around */
1086 if (++channel->compl_index == channel->length)
1087 channel->compl_index = 0;
1089 channel->stats->total_compl_cnt++;
1093 * vxge_hw_channel_dtr_free - Frees a dtr
1094 * @channel: Channel handle
1095 * @dtr: DTR pointer
1097 * Returns the dtr to free array
1100 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1102 channel->free_arr[--channel->free_ptr] = dtrh;
1106 * vxge_hw_channel_dtr_count
1107 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1109  * Retrieve the number of DTRs available. This function cannot be called
1110  * from the data path. ring_initial_replenish() is the only user.
1112 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1114 return (channel->reserve_ptr - channel->reserve_top) +
1115 (channel->length - channel->free_ptr);
1119 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1120 * @ring: Handle to the ring object used for receive
1121 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1122 * with a valid handle.
1124 * Reserve Rx descriptor for the subsequent filling-in driver
1125 * and posting on the corresponding channel (@channelh)
1126 * via vxge_hw_ring_rxd_post().
1128 * Returns: VXGE_HW_OK - success.
1129 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1132 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1133 void **rxdh)
1135 enum vxge_hw_status status;
1136 struct __vxge_hw_channel *channel;
1138 channel = &ring->channel;
1140 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1142 if (status == VXGE_HW_OK) {
1143 struct vxge_hw_ring_rxd_1 *rxdp =
1144 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1146 rxdp->control_0 = rxdp->control_1 = 0;
1149 return status;
1153 * vxge_hw_ring_rxd_free - Free descriptor.
1154 * @ring: Handle to the ring object used for receive
1155 * @rxdh: Descriptor handle.
1157 * Free the reserved descriptor. This operation is "symmetrical" to
1158 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1159 * lifecycle.
1161 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1162 * be:
1164 * - reserved (vxge_hw_ring_rxd_reserve);
1166 * - posted (vxge_hw_ring_rxd_post);
1168 * - completed (vxge_hw_ring_rxd_next_completed);
1170 * - and recycled again (vxge_hw_ring_rxd_free).
1172 * For alternative state transitions and more details please refer to
1173 * the design doc.
1176 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1178 struct __vxge_hw_channel *channel;
1180 channel = &ring->channel;
1182 vxge_hw_channel_dtr_free(channel, rxdh);
1187 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1188 * @ring: Handle to the ring object used for receive
1189 * @rxdh: Descriptor handle.
1191 * This routine prepares a rxd and posts
1193 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1195 struct __vxge_hw_channel *channel;
1197 channel = &ring->channel;
1199 vxge_hw_channel_dtr_post(channel, rxdh);
1203 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1204 * @ring: Handle to the ring object used for receive
1205 * @rxdh: Descriptor handle.
1207 * Processes rxd after post
1209 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1211 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1212 struct __vxge_hw_channel *channel;
1214 channel = &ring->channel;
1216 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1218 if (ring->stats->common_stats.usage_cnt > 0)
1219 ring->stats->common_stats.usage_cnt--;
1223 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1224 * @ring: Handle to the ring object used for receive
1225 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1227 * Post descriptor on the ring.
1228 * Prior to posting the descriptor should be filled in accordance with
1229 * Host/Titan interface specification for a given service (LL, etc.).
1232 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1234 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1235 struct __vxge_hw_channel *channel;
1237 channel = &ring->channel;
1239 wmb();
1240 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1242 vxge_hw_channel_dtr_post(channel, rxdh);
1244 if (ring->stats->common_stats.usage_cnt > 0)
1245 ring->stats->common_stats.usage_cnt--;
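/*
 * Illustrative sketch (compiled out, not part of the driver): refilling a
 * ring with receive buffers using the reserve/post pair above. The buffer
 * allocation and DMA mapping helpers (example_alloc_rx_buf(),
 * example_map_rx_buf()) and the buffer size are hypothetical, and the use
 * of vxge_hw_ring_rxd_1b_set() assumes the helper declared in
 * vxge-traffic.h.
 */
#if 0
static void example_ring_replenish(struct __vxge_hw_ring *ring, u32 buf_size)
{
	void *rxdh;

	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
		void *buf = example_alloc_rx_buf(buf_size);
		dma_addr_t dma;

		if (!buf) {
			/* No memory: return the descriptor unposted. */
			vxge_hw_ring_rxd_free(ring, rxdh);
			break;
		}
		dma = example_map_rx_buf(buf, buf_size);

		/* Attach the buffer, then hand ownership to the adapter. */
		vxge_hw_ring_rxd_1b_set(rxdh, dma, buf_size);
		vxge_hw_ring_rxd_post(ring, rxdh);
	}
}
#endif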
1249 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1250 * @ring: Handle to the ring object used for receive
1251 * @rxdh: Descriptor handle.
1253 * Processes rxd after post with memory barrier.
1255 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1257 wmb();
1258 vxge_hw_ring_rxd_post_post(ring, rxdh);
1262 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1263 * @ring: Handle to the ring object used for receive
1264 * @rxdh: Descriptor handle. Returned by HW.
1265 * @t_code: Transfer code, as per Titan User Guide,
1266 * Receive Descriptor Format. Returned by HW.
1268 * Retrieve the _next_ completed descriptor.
1269  * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1270 * driver of new completed descriptors. After that
1271 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1272 * completions (the very first completion is passed by HW via
1273 * vxge_hw_ring_callback_f).
1275 * Implementation-wise, the driver is free to call
1276 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1277 * ring callback, or in a deferred fashion and separate (from HW)
1278 * context.
1280 * Non-zero @t_code means failure to fill-in receive buffer(s)
1281 * of the descriptor.
1282  * For instance, a parity error detected during the data transfer.
1283  * In this case Titan will complete the descriptor and indicate
1284  * to the host that the received data is not to be used.
1285 * For details please refer to Titan User Guide.
1287 * Returns: VXGE_HW_OK - success.
1288 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1289 * are currently available for processing.
1291 * See also: vxge_hw_ring_callback_f{},
1292  * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
1294 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1295 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1297 struct __vxge_hw_channel *channel;
1298 struct vxge_hw_ring_rxd_1 *rxdp;
1299 enum vxge_hw_status status = VXGE_HW_OK;
1300 u64 control_0, own;
1302 channel = &ring->channel;
1304 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1306 rxdp = *rxdh;
1307 if (rxdp == NULL) {
1308 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1309 goto exit;
1312 control_0 = rxdp->control_0;
1313 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1314 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1316 /* check whether it is not the end */
1317 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1319                  vxge_assert((rxdp)->host_control != 0);
1322 ++ring->cmpl_cnt;
1323 vxge_hw_channel_dtr_complete(channel);
1325 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1327 ring->stats->common_stats.usage_cnt++;
1328 if (ring->stats->common_stats.usage_max <
1329 ring->stats->common_stats.usage_cnt)
1330 ring->stats->common_stats.usage_max =
1331 ring->stats->common_stats.usage_cnt;
1333 status = VXGE_HW_OK;
1334 goto exit;
1337 /* reset it. since we don't want to return
1338 * garbage to the driver */
1339 *rxdh = NULL;
1340 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1341 exit:
1342 return status;
1346 * vxge_hw_ring_handle_tcode - Handle transfer code.
1347 * @ring: Handle to the ring object used for receive
1348 * @rxdh: Descriptor handle.
1349 * @t_code: One of the enumerated (and documented in the Titan user guide)
1350 * "transfer codes".
1352 * Handle descriptor's transfer code. The latter comes with each completed
1353 * descriptor.
1355 * Returns: one of the enum vxge_hw_status{} enumerated types.
1356 * VXGE_HW_OK - for success.
1357 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1359 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1360 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1362 struct __vxge_hw_channel *channel;
1363 enum vxge_hw_status status = VXGE_HW_OK;
1365 channel = &ring->channel;
1367  /* If the t_code is not supported, and it is other than 0x5
1368   * (an unparseable packet, such as an unknown IPv6 header),
1369   * drop it.
1372 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1373 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1374 status = VXGE_HW_OK;
1375 goto exit;
1378 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1379 status = VXGE_HW_ERR_INVALID_TCODE;
1380 goto exit;
1383 ring->stats->rxd_t_code_err_cnt[t_code]++;
1384 exit:
1385 return status;
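/*
 * Illustrative sketch (compiled out, not part of the driver): draining
 * completed RxDs with vxge_hw_ring_rxd_next_completed(), validating the
 * transfer code with vxge_hw_ring_handle_tcode() and recycling the
 * descriptor. example_deliver() stands in for handing the data to the
 * network stack and is hypothetical.
 */
#if 0
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
								VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
								VXGE_HW_OK) {
			/* Invalid transfer code: drop the data. */
			vxge_hw_ring_rxd_free(ring, rxdh);
			continue;
		}
		example_deliver(ring, rxdh);
		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}
#endif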
1389 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1391  * @fifo: fifo handle
1392 * @txdl_ptr: The starting location of the TxDL in host memory
1393 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1394 * @no_snoop: No snoop flags
1396  * This function posts a non-offload doorbell to the doorbell FIFO
1399 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1400 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1402 struct __vxge_hw_channel *channel;
1404 channel = &fifo->channel;
1406 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1407 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1408 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1409 &fifo->nofl_db->control_0);
1411 mmiowb();
1413 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1415 mmiowb();
1419 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1420 * the fifo
1421 * @fifoh: Handle to the fifo object used for non offload send
1423 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1425 return vxge_hw_channel_dtr_count(&fifoh->channel);
1429 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1430 * @fifoh: Handle to the fifo object used for non offload send
1431 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1432 * with a valid handle.
1433 * @txdl_priv: Buffer to return the pointer to per txdl space
1435  * Reserve a single TxDL (that is, fifo descriptor)
1436  * for subsequent filling-in by the driver
1437 * and posting on the corresponding channel (@channelh)
1438 * via vxge_hw_fifo_txdl_post().
1440 * Note: it is the responsibility of driver to reserve multiple descriptors
1441 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1442 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1444 * Returns: VXGE_HW_OK - success;
1445 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1448 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1449 struct __vxge_hw_fifo *fifo,
1450 void **txdlh, void **txdl_priv)
1452 struct __vxge_hw_channel *channel;
1453 enum vxge_hw_status status;
1454 int i;
1456 channel = &fifo->channel;
1458 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1460 if (status == VXGE_HW_OK) {
1461 struct vxge_hw_fifo_txd *txdp =
1462 (struct vxge_hw_fifo_txd *)*txdlh;
1463 struct __vxge_hw_fifo_txdl_priv *priv;
1465 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1467 /* reset the TxDL's private */
1468 priv->align_dma_offset = 0;
1469 priv->align_vaddr_start = priv->align_vaddr;
1470 priv->align_used_frags = 0;
1471 priv->frags = 0;
1472 priv->alloc_frags = fifo->config->max_frags;
1473 priv->next_txdl_priv = NULL;
1475 *txdl_priv = (void *)(size_t)txdp->host_control;
1477 for (i = 0; i < fifo->config->max_frags; i++) {
1478 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1479 txdp->control_0 = txdp->control_1 = 0;
1483 return status;
1487 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1488 * descriptor.
1489 * @fifo: Handle to the fifo object used for non offload send
1490 * @txdlh: Descriptor handle.
1491 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1492 * (of buffers).
1493 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1494 * @size: Size of the data buffer (in bytes).
1496 * This API is part of the preparation of the transmit descriptor for posting
1497 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1498 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1499 * All three APIs fill in the fields of the fifo descriptor,
1500 * in accordance with the Titan specification.
1503 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1504 void *txdlh, u32 frag_idx,
1505 dma_addr_t dma_pointer, u32 size)
1507 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1508 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1509 struct __vxge_hw_channel *channel;
1511 channel = &fifo->channel;
1513 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1514 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1516 if (frag_idx != 0)
1517 txdp->control_0 = txdp->control_1 = 0;
1518 else {
1519 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1520 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1521 txdp->control_1 |= fifo->interrupt_type;
1522 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1523 fifo->tx_intr_num);
1524 if (txdl_priv->frags) {
1525 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1526 (txdl_priv->frags - 1);
1527 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1528 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1532 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1534 txdp->buffer_pointer = (u64)dma_pointer;
1535 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1536 fifo->stats->total_buffers++;
1537 txdl_priv->frags++;
1541 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1542 * @fifo: Handle to the fifo object used for non offload send
1543 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1544 * @frags: Number of contiguous buffers that are part of a single
1545 * transmit operation.
1547 * Post descriptor on the 'fifo' type channel for transmission.
1548 * Prior to posting the descriptor should be filled in accordance with
1549 * Host/Titan interface specification for a given service (LL, etc.).
1552 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1554 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1555 struct vxge_hw_fifo_txd *txdp_last;
1556 struct vxge_hw_fifo_txd *txdp_first;
1557 struct __vxge_hw_channel *channel;
1559 channel = &fifo->channel;
1561 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1562 txdp_first = txdlh;
1564 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1565 txdp_last->control_0 |=
1566 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1567 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1569 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1571 __vxge_hw_non_offload_db_post(fifo,
1572 (u64)txdl_priv->dma_addr,
1573 txdl_priv->frags - 1,
1574 fifo->no_snoop_bits);
1576 fifo->stats->total_posts++;
1577 fifo->stats->common_stats.usage_cnt++;
1578 if (fifo->stats->common_stats.usage_max <
1579 fifo->stats->common_stats.usage_cnt)
1580 fifo->stats->common_stats.usage_max =
1581 fifo->stats->common_stats.usage_cnt;
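/*
 * Illustrative sketch (compiled out, not part of the driver): transmitting
 * a single, already DMA-mapped buffer by reserving a TxDL, attaching one
 * fragment and posting it. A real caller would first check
 * vxge_hw_fifo_free_txdl_count_get() and stop its queue when descriptors
 * run low; "dma" and "len" are assumed to come from the caller's mapping.
 */
#if 0
static enum vxge_hw_status example_fifo_xmit_one(struct __vxge_hw_fifo *fifo,
						 dma_addr_t dma, u32 len)
{
	enum vxge_hw_status status;
	void *txdlh, *txdl_priv;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;	/* VXGE_HW_INF_OUT_OF_DESCRIPTORS */

	/* One fragment at index 0, then hand the TxDL to the adapter. */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}
#endif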
1585 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1586 * @fifo: Handle to the fifo object used for non offload send
1587 * @txdlh: Descriptor handle. Returned by HW.
1588 * @t_code: Transfer code, as per Titan User Guide,
1589 * Transmit Descriptor Format.
1590 * Returned by HW.
1592 * Retrieve the _next_ completed descriptor.
1593  * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1594 * driver of new completed descriptors. After that
1595 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1596 * completions (the very first completion is passed by HW via
1597 * vxge_hw_channel_callback_f).
1599 * Implementation-wise, the driver is free to call
1600 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1601 * channel callback, or in a deferred fashion and separate (from HW)
1602 * context.
1604 * Non-zero @t_code means failure to process the descriptor.
1605 * The failure could happen, for instance, when the link is
1606 * down, in which case Titan completes the descriptor because it
1607 * is not able to send the data out.
1609 * For details please refer to Titan User Guide.
1611 * Returns: VXGE_HW_OK - success.
1612 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1613 * are currently available for processing.
1616 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1617 struct __vxge_hw_fifo *fifo, void **txdlh,
1618 enum vxge_hw_fifo_tcode *t_code)
1620 struct __vxge_hw_channel *channel;
1621 struct vxge_hw_fifo_txd *txdp;
1622 enum vxge_hw_status status = VXGE_HW_OK;
1624 channel = &fifo->channel;
1626 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1628 txdp = *txdlh;
1629 if (txdp == NULL) {
1630 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1631 goto exit;
1634 /* check whether host owns it */
1635 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1637 vxge_assert(txdp->host_control != 0);
1639 vxge_hw_channel_dtr_complete(channel);
1641 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1643 if (fifo->stats->common_stats.usage_cnt > 0)
1644 fifo->stats->common_stats.usage_cnt--;
1646 status = VXGE_HW_OK;
1647 goto exit;
1650 /* no more completions */
1651 *txdlh = NULL;
1652 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1653 exit:
1654 return status;
1658 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1659 * @fifo: Handle to the fifo object used for non offload send
1660 * @txdlh: Descriptor handle.
1661 * @t_code: One of the enumerated (and documented in the Titan user guide)
1662 * "transfer codes".
1664 * Handle descriptor's transfer code. The latter comes with each completed
1665 * descriptor.
1667 * Returns: one of the enum vxge_hw_status{} enumerated types.
1668 * VXGE_HW_OK - for success.
1669 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1671 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1672 void *txdlh,
1673 enum vxge_hw_fifo_tcode t_code)
1675 struct __vxge_hw_channel *channel;
1677 enum vxge_hw_status status = VXGE_HW_OK;
1678 channel = &fifo->channel;
1680 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1681 status = VXGE_HW_ERR_INVALID_TCODE;
1682 goto exit;
1685 fifo->stats->txd_t_code_err_cnt[t_code]++;
1686 exit:
1687 return status;
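/*
 * Illustrative sketch (compiled out, not part of the driver): reclaiming
 * completed TxDLs. example_unmap_tx_buf() is a hypothetical helper for
 * undoing the caller's DMA mapping; the descriptor must be freed last,
 * matching the lifecycle documented for vxge_hw_fifo_txdl_free() below.
 */
#if 0
static void example_fifo_tx_complete(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
								VXGE_HW_OK) {
		/* A non-zero t_code means the adapter could not send the
		 * data (e.g. link down); report it and reclaim anyway. */
		if (vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code) !=
								VXGE_HW_OK)
			pr_err("vxge: invalid tx transfer code %d\n", t_code);

		example_unmap_tx_buf(fifo, txdlh);
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
#endif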
1691 * vxge_hw_fifo_txdl_free - Free descriptor.
1692 * @fifo: Handle to the fifo object used for non offload send
1693 * @txdlh: Descriptor handle.
1695 * Free the reserved descriptor. This operation is "symmetrical" to
1696 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1697 * lifecycle.
1699 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1700 * be:
1702 * - reserved (vxge_hw_fifo_txdl_reserve);
1704 * - posted (vxge_hw_fifo_txdl_post);
1706 * - completed (vxge_hw_fifo_txdl_next_completed);
1708 * - and recycled again (vxge_hw_fifo_txdl_free).
1710 * For alternative state transitions and more details please refer to
1711 * the design doc.
1714 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1716 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1717 u32 max_frags;
1718 struct __vxge_hw_channel *channel;
1720 channel = &fifo->channel;
1722 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1723 (struct vxge_hw_fifo_txd *)txdlh);
1725 max_frags = fifo->config->max_frags;
1727 vxge_hw_channel_dtr_free(channel, txdlh);
1731 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1732 * to MAC address table.
1733 * @vp: Vpath handle.
1734 * @macaddr: MAC address to be added for this vpath into the list
1735 * @macaddr_mask: MAC address mask for macaddr
1736 * @duplicate_mode: Duplicate MAC address add mode. Please see
1737 * enum vxge_hw_vpath_mac_addr_add_mode{}
1739 * Adds the given mac address and mac address mask into the list for this
1740 * vpath.
1741 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1742 * vxge_hw_vpath_mac_addr_get_next
1745 enum vxge_hw_status
1746 vxge_hw_vpath_mac_addr_add(
1747 struct __vxge_hw_vpath_handle *vp,
1748 u8 (macaddr)[ETH_ALEN],
1749 u8 (macaddr_mask)[ETH_ALEN],
1750 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1752 u32 i;
1753 u64 data1 = 0ULL;
1754 u64 data2 = 0ULL;
1755 enum vxge_hw_status status = VXGE_HW_OK;
1757 if (vp == NULL) {
1758 status = VXGE_HW_ERR_INVALID_HANDLE;
1759 goto exit;
1762 for (i = 0; i < ETH_ALEN; i++) {
1763 data1 <<= 8;
1764 data1 |= (u8)macaddr[i];
1766 data2 <<= 8;
1767 data2 |= (u8)macaddr_mask[i];
1770 switch (duplicate_mode) {
1771 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1772 i = 0;
1773 break;
1774 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1775 i = 1;
1776 break;
1777 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1778 i = 2;
1779 break;
1780 default:
1781 i = 0;
1782 break;
1785 status = __vxge_hw_vpath_rts_table_set(vp,
1786 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1787 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1789 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1790 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1791 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1792 exit:
1793 return status;
1797 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1798 * from MAC address table.
1799 * @vp: Vpath handle.
1800 * @macaddr: First MAC address entry for this vpath in the list
1801 * @macaddr_mask: MAC address mask for macaddr
1803 * Returns the first mac address and mac address mask in the list for this
1804 * vpath.
1805 * see also: vxge_hw_vpath_mac_addr_get_next
1808 enum vxge_hw_status
1809 vxge_hw_vpath_mac_addr_get(
1810 struct __vxge_hw_vpath_handle *vp,
1811 u8 (macaddr)[ETH_ALEN],
1812 u8 (macaddr_mask)[ETH_ALEN])
1814 u32 i;
1815 u64 data1 = 0ULL;
1816 u64 data2 = 0ULL;
1817 enum vxge_hw_status status = VXGE_HW_OK;
1819 if (vp == NULL) {
1820 status = VXGE_HW_ERR_INVALID_HANDLE;
1821 goto exit;
1824 status = __vxge_hw_vpath_rts_table_get(vp,
1825 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1826 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1827 0, &data1, &data2);
1829 if (status != VXGE_HW_OK)
1830 goto exit;
1832 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1834 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1836 for (i = ETH_ALEN; i > 0; i--) {
1837 macaddr[i-1] = (u8)(data1 & 0xFF);
1838 data1 >>= 8;
1840 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1841 data2 >>= 8;
1843 exit:
1844 return status;
1848 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1849 * vpath
1850 * from MAC address table.
1851 * @vp: Vpath handle.
1852 * @macaddr: Next MAC address entry for this vpath in the list
1853 * @macaddr_mask: MAC address mask for macaddr
1855 * Returns the next mac address and mac address mask in the list for this
1856 * vpath.
1857 * see also: vxge_hw_vpath_mac_addr_get
1860 enum vxge_hw_status
1861 vxge_hw_vpath_mac_addr_get_next(
1862 struct __vxge_hw_vpath_handle *vp,
1863 u8 (macaddr)[ETH_ALEN],
1864 u8 (macaddr_mask)[ETH_ALEN])
1866 u32 i;
1867 u64 data1 = 0ULL;
1868 u64 data2 = 0ULL;
1869 enum vxge_hw_status status = VXGE_HW_OK;
1871 if (vp == NULL) {
1872 status = VXGE_HW_ERR_INVALID_HANDLE;
1873 goto exit;
1876 status = __vxge_hw_vpath_rts_table_get(vp,
1877 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1878 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1879 0, &data1, &data2);
1881 if (status != VXGE_HW_OK)
1882 goto exit;
1884 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1886 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1888 for (i = ETH_ALEN; i > 0; i--) {
1889 macaddr[i-1] = (u8)(data1 & 0xFF);
1890 data1 >>= 8;
1892 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1893 data2 >>= 8;
1896 exit:
1897 return status;
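/*
 * Illustrative sketch (compiled out, not part of the driver): walking the
 * whole MAC address table of a vpath with the get/get_next pair above.
 * Printing with the kernel's %pM specifier is only for illustration.
 */
#if 0
static void example_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
	while (status == VXGE_HW_OK) {
		pr_info("vxge: mac %pM mask %pM\n", macaddr, macaddr_mask);
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macaddr_mask);
	}
}
#endif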
1901  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1902  * from the MAC address table.
1903 * @vp: Vpath handle.
1904  * @macaddr: MAC address to be deleted for this vpath from the list
1905 * @macaddr_mask: MAC address mask for macaddr
1907  * Deletes the given mac address and mac address mask from the list for this
1908  * vpath.
1909 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1910 * vxge_hw_vpath_mac_addr_get_next
1913 enum vxge_hw_status
1914 vxge_hw_vpath_mac_addr_delete(
1915 struct __vxge_hw_vpath_handle *vp,
1916 u8 (macaddr)[ETH_ALEN],
1917 u8 (macaddr_mask)[ETH_ALEN])
1919 u32 i;
1920 u64 data1 = 0ULL;
1921 u64 data2 = 0ULL;
1922 enum vxge_hw_status status = VXGE_HW_OK;
1924 if (vp == NULL) {
1925 status = VXGE_HW_ERR_INVALID_HANDLE;
1926 goto exit;
1929 for (i = 0; i < ETH_ALEN; i++) {
1930 data1 <<= 8;
1931 data1 |= (u8)macaddr[i];
1933 data2 <<= 8;
1934 data2 |= (u8)macaddr_mask[i];
1937 status = __vxge_hw_vpath_rts_table_set(vp,
1938 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1939 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1941 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1942 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1943 exit:
1944 return status;
1948 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1949 * to vlan id table.
1950 * @vp: Vpath handle.
1951 * @vid: vlan id to be added for this vpath into the list
1953 * Adds the given vlan id into the list for this vpath.
1954 * see also: vxge_hw_vpath_vid_delete
1957 enum vxge_hw_status
1958 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1960 enum vxge_hw_status status = VXGE_HW_OK;
1962 if (vp == NULL) {
1963 status = VXGE_HW_ERR_INVALID_HANDLE;
1964 goto exit;
1967 status = __vxge_hw_vpath_rts_table_set(vp,
1968 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1969 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1970 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1971 exit:
1972 return status;
1976  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1977  * from the vlan id table.
1978 * @vp: Vpath handle.
1979  * @vid: vlan id to be deleted for this vpath from the list
1981  * Deletes the given vlan id from the list for this vpath.
1982 * see also: vxge_hw_vpath_vid_add
1985 enum vxge_hw_status
1986 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1988 enum vxge_hw_status status = VXGE_HW_OK;
1990 if (vp == NULL) {
1991 status = VXGE_HW_ERR_INVALID_HANDLE;
1992 goto exit;
1995 status = __vxge_hw_vpath_rts_table_set(vp,
1996 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1997 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1998 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1999 exit:
2000 return status;
2004 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
2005 * @vp: Vpath handle.
2007 * Enable promiscuous mode of Titan-e operation.
2009 * See also: vxge_hw_vpath_promisc_disable().
2011 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2012 struct __vxge_hw_vpath_handle *vp)
2014 u64 val64;
2015 struct __vxge_hw_virtualpath *vpath;
2016 enum vxge_hw_status status = VXGE_HW_OK;
2018 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2019 status = VXGE_HW_ERR_INVALID_HANDLE;
2020 goto exit;
2023 vpath = vp->vpath;
2025 /* Enable promiscuous mode for function 0 only */
2026 if (!(vpath->hldev->access_rights &
2027 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2028 return VXGE_HW_OK;
2030 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2032 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2034 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2035 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2036 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2037 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2039 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2041 exit:
2042 return status;
2046 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2047 * @vp: Vpath handle.
2049 * Disable promiscuous mode of Titan-e operation.
2051 * See also: vxge_hw_vpath_promisc_enable().
2053 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2054 struct __vxge_hw_vpath_handle *vp)
2056 u64 val64;
2057 struct __vxge_hw_virtualpath *vpath;
2058 enum vxge_hw_status status = VXGE_HW_OK;
2060 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2061 status = VXGE_HW_ERR_INVALID_HANDLE;
2062 goto exit;
2065 vpath = vp->vpath;
2067 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2069 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2071 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2072 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2073 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2075 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2077 exit:
2078 return status;
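A minimal sketch of how a driver might apply an IFF_PROMISC-style flag using the enable/disable pair above; the helper and its arguments are illustrative assumptions:

	static void example_set_promisc(struct __vxge_hw_vpath_handle *vp, bool on)
	{
		/* Only the privileged function actually enables promiscuous
		 * mode, as the enable path above already checks.
		 */
		if (on)
			vxge_hw_vpath_promisc_enable(vp);
		else
			vxge_hw_vpath_promisc_disable(vp);
	}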
2082 * vxge_hw_vpath_bcast_enable - Enable broadcast
2083 * @vp: Vpath handle.
2085 * Enable receiving broadcasts.
2087 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2088 struct __vxge_hw_vpath_handle *vp)
2090 u64 val64;
2091 struct __vxge_hw_virtualpath *vpath;
2092 enum vxge_hw_status status = VXGE_HW_OK;
2094 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2095 status = VXGE_HW_ERR_INVALID_HANDLE;
2096 goto exit;
2099 vpath = vp->vpath;
2101 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2103 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2104 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2105 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2107 exit:
2108 return status;
2112 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2113 * @vp: Vpath handle.
2115 * Enable Titan-e multicast addresses.
2116 * Returns: VXGE_HW_OK on success.
2119 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2120 struct __vxge_hw_vpath_handle *vp)
2122 u64 val64;
2123 struct __vxge_hw_virtualpath *vpath;
2124 enum vxge_hw_status status = VXGE_HW_OK;
2126 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2127 status = VXGE_HW_ERR_INVALID_HANDLE;
2128 goto exit;
2131 vpath = vp->vpath;
2133 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2135 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2136 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2137 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2139 exit:
2140 return status;
2144 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2145 * @vp: Vpath handle.
2147 * Disable Titan-e multicast addresses.
2148 * Returns: VXGE_HW_OK - success.
2149 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2152 enum vxge_hw_status
2153 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2155 u64 val64;
2156 struct __vxge_hw_virtualpath *vpath;
2157 enum vxge_hw_status status = VXGE_HW_OK;
2159 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2160 status = VXGE_HW_ERR_INVALID_HANDLE;
2161 goto exit;
2164 vpath = vp->vpath;
2166 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2168 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2169 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2170 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2172 exit:
2173 return status;
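As an illustrative sketch, an rx-mode update could keep broadcasts enabled and toggle all-multicast reception with the calls above; the helper name and the 'all_multi' flag are assumptions:

	static void example_update_rx_mode(struct __vxge_hw_vpath_handle *vp,
					   bool all_multi)
	{
		vxge_hw_vpath_bcast_enable(vp);	/* always receive broadcasts */

		if (all_multi)			/* e.g. IFF_ALLMULTI set */
			vxge_hw_vpath_mcast_enable(vp);
		else
			vxge_hw_vpath_mcast_disable(vp);
	}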
2177 * vxge_hw_vpath_alarm_process - Process Alarms.
2178 * @vp: Virtual Path handle.
2179 * @skip_alarms: Do not clear the alarms
2181 * Process vpath alarms.
2184 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2185 struct __vxge_hw_vpath_handle *vp,
2186 u32 skip_alarms)
2188 enum vxge_hw_status status = VXGE_HW_OK;
2190 if (vp == NULL) {
2191 status = VXGE_HW_ERR_INVALID_HANDLE;
2192 goto exit;
2195 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2196 exit:
2197 return status;
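For illustration, an alarm (error) interrupt path might simply forward to this API; passing 0 for skip_alarms lets the hardware layer clear the alarms it handled. The helper and logging are assumptions:

	static void example_handle_alarm(struct __vxge_hw_vpath_handle *vp)
	{
		if (vxge_hw_vpath_alarm_process(vp, 0) != VXGE_HW_OK)
			pr_warn("example: vpath alarm processing failed\n");
	}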
2201 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2202 * alarms
2203 * @vp: Virtual Path handle.
2204 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2205 * interrupts (can be repeated). If the fifo or ring is not enabled,
2206 * the MSIX vector for it should be set to 0
2207 * @alarm_msix_id: MSIX vector for alarm.
2209 * This API associates the given MSIX vector numbers with the four TIM
2210 * interrupts and the alarm interrupt.
2212 void
2213 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2214 int alarm_msix_id)
2216 u64 val64;
2217 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2218 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2219 u32 vp_id = vp->vpath->vp_id;
2221 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2222 (vp_id * 4) + tim_msix_id[0]) |
2223 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2224 (vp_id * 4) + tim_msix_id[1]);
2226 writeq(val64, &vp_reg->interrupt_cfg0);
2228 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2229 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2230 &vp_reg->interrupt_cfg2);
2232 if (vpath->hldev->config.intr_mode ==
2233 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2234 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2235 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2236 0, 32), &vp_reg->one_shot_vect0_en);
2237 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2238 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2239 0, 32), &vp_reg->one_shot_vect1_en);
2240 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2241 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2242 0, 32), &vp_reg->one_shot_vect2_en);
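A hedged sketch of programming the per-vpath MSI-X mapping; the vector numbering used here (0 for the tx TIM, 1 for the rx TIM, 2 for the alarm) is only an example layout, not the driver's actual enumeration:

	static void example_program_msix(struct __vxge_hw_vpath_handle *vp)
	{
		/* One entry per TIM interrupt; unused entries stay 0. */
		int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1};
		int alarm_msix_id = 2;

		vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
	}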
2247 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2248 * @vp: Virtual Path handle.
2249 * @msix_id: MSIX ID
2251 * The function masks the msix interrupt for the given msix_id.
2253 * Returns: nothing (the function has a void return type).
2256 * See also: vxge_hw_vpath_msix_unmask()
2258 void
2259 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2261 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2262 __vxge_hw_pio_mem_write32_upper(
2263 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2264 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2268 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2269 * @vp: Virtual Path handle.
2270 * @msix_id: MSIX ID
2272 * The function clears the msix interrupt for the given msix_id.
2274 * Returns: nothing (the function has a void return type).
2277 * See also: vxge_hw_vpath_msix_mask()
2279 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2281 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2283 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2284 __vxge_hw_pio_mem_write32_upper(
2285 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2286 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2287 else
2288 __vxge_hw_pio_mem_write32_upper(
2289 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2290 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2294 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2295 * @vp: Virtual Path handle.
2296 * @msix_id: MSIX ID
2298 * The function unmasks the msix interrupt for the given msix_id.
2300 * Returns: nothing (the function has a void return type).
2303 * See also: vxge_hw_vpath_msix_mask()
2305 void
2306 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2308 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2309 __vxge_hw_pio_mem_write32_upper(
2310 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2311 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
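A sketch of how a per-vector interrupt handler might use the mask/clear pair above; 'vp' and 'msix_id' are assumed to come from the irq cookie, and the actual servicing step is elided:

	static void example_msix_handler_body(struct __vxge_hw_vpath_handle *vp,
					      int msix_id)
	{
		vxge_hw_vpath_msix_mask(vp, msix_id);

		/* ... service the fifo/ring completions for this vector ... */

		/* Re-arm the vector (one-shot aware) before returning. */
		vxge_hw_vpath_msix_clear(vp, msix_id);
	}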
2315 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2316 * @vp: Virtual Path handle.
2318 * Mask Tx and Rx vpath interrupts.
2320 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2322 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2324 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2325 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2326 u64 val64;
2327 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2329 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2330 tim_int_mask1, vp->vpath->vp_id);
2332 val64 = readq(&hldev->common_reg->tim_int_mask0);
2334 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2335 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2336 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2337 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2338 &hldev->common_reg->tim_int_mask0);
2341 val64 = readl(&hldev->common_reg->tim_int_mask1);
2343 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2344 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2345 __vxge_hw_pio_mem_write32_upper(
2346 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2347 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2348 &hldev->common_reg->tim_int_mask1);
2353 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2354 * @vp: Virtual Path handle.
2356 * Unmask Tx and Rx vpath interrupts.
2358 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2360 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2362 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2363 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2364 u64 val64;
2365 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2367 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2368 tim_int_mask1, vp->vpath->vp_id);
2370 val64 = readq(&hldev->common_reg->tim_int_mask0);
2372 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2373 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2374 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2375 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2376 &hldev->common_reg->tim_int_mask0);
2379 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2380 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2381 __vxge_hw_pio_mem_write32_upper(
2382 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2383 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2384 &hldev->common_reg->tim_int_mask1);
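For INTA (line interrupt) mode, a plausible sketch is to mask the TIM tx/rx interrupts while completions are polled and unmask them afterwards; the helper name and the elided polling are assumptions:

	static void example_inta_poll_window(struct __vxge_hw_vpath_handle *vp)
	{
		vxge_hw_vpath_inta_mask_tx_rx(vp);

		/* ... poll tx and rx completions for this vpath ... */

		vxge_hw_vpath_inta_unmask_tx_rx(vp);
	}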
2389 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2390 * descriptors and process the same.
2391 * @ring: Handle to the ring object used for receive
2393 * The function polls the Rx for the completed descriptors and calls
2394 * the driver via supplied completion callback.
2396 * Returns: VXGE_HW_OK, if the polling completed successfully.
2397 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2398 * descriptors available which are yet to be processed.
2400 * See also: vxge_hw_vpath_poll_tx()
2402 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2404 u8 t_code;
2405 enum vxge_hw_status status = VXGE_HW_OK;
2406 void *first_rxdh;
2407 u64 val64 = 0;
2408 int new_count = 0;
2410 ring->cmpl_cnt = 0;
2412 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2413 if (status == VXGE_HW_OK)
2414 ring->callback(ring, first_rxdh,
2415 t_code, ring->channel.userdata);
2417 if (ring->cmpl_cnt != 0) {
2418 ring->doorbell_cnt += ring->cmpl_cnt;
2419 if (ring->doorbell_cnt >= ring->rxds_limit) {
2421 * Each RxD is of 4 qwords, update the number of
2422 * qwords replenished
2424 new_count = (ring->doorbell_cnt * 4);
2426 /* For each block add 4 more qwords */
2427 ring->total_db_cnt += ring->doorbell_cnt;
2428 if (ring->total_db_cnt >= ring->rxds_per_block) {
2429 new_count += 4;
2430 /* Reset total count */
2431 ring->total_db_cnt %= ring->rxds_per_block;
2433 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2434 &ring->vp_reg->prc_rxd_doorbell);
2435 val64 =
2436 readl(&ring->common_reg->titan_general_int_status);
2437 ring->doorbell_cnt = 0;
2441 return status;
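A minimal sketch of draining one ring from a poll routine; the callback registered on the ring is what actually delivers packets to the stack, so the caller only inspects the status. The helper name is an assumption:

	static void example_drain_rx(struct __vxge_hw_ring *ring)
	{
		enum vxge_hw_status status = vxge_hw_vpath_poll_rx(ring);

		if (status != VXGE_HW_OK)
			pr_debug("example: rx poll returned status %d\n", status);
	}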
2445 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2446 * the same.
2447 * @fifo: Handle to the fifo object used for non offload send
2449 * The function polls the Tx for the completed descriptors and calls
2450 * the driver via supplied completion callback.
2452 * Returns: VXGE_HW_OK, if the polling completed successfully.
2453 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2454 * descriptors available which are yet to be processed.
2456 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2457 struct sk_buff ***skb_ptr, int nr_skb,
2458 int *more)
2460 enum vxge_hw_fifo_tcode t_code;
2461 void *first_txdlh;
2462 enum vxge_hw_status status = VXGE_HW_OK;
2463 struct __vxge_hw_channel *channel;
2465 channel = &fifo->channel;
2467 status = vxge_hw_fifo_txdl_next_completed(fifo,
2468 &first_txdlh, &t_code);
2469 if (status == VXGE_HW_OK)
2470 if (fifo->callback(fifo, first_txdlh, t_code,
2471 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2472 status = VXGE_HW_COMPLETIONS_REMAIN;
2474 return status;
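Finally, a hedged sketch of reaping transmit completions; completed skbs are returned through 'skb_ptr' for the caller to free, and 'more' reports whether work remains. The array size and helper name are assumptions:

	static void example_reap_tx(struct __vxge_hw_fifo *fifo)
	{
		struct sk_buff *skbs[16];
		struct sk_buff **skb_ptr = skbs;
		int more = 0;

		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(skbs), &more);

		/* In a real driver the skbs collected in 'skbs' (up to where
		 * skb_ptr now points) would be freed here.
		 */
	}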