/*
 * vxge-traffic.c: gPXE driver for Neterion Inc's X3100 Series 10GbE
 *              PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 */

FILE_LICENCE(GPL2_ONLY);
#include <gpxe/netdevice.h>
#include <errno.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"
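
/*
 * Note on register access in this file (an observation from how the helpers
 * are used here, not an authoritative definition; see the vxge_config.h and
 * vxge_traffic.h headers for the real declarations): readq()/writeq() access
 * the 64-bit device registers directly, while __vxge_hw_pio_mem_write32_upper()
 * appears to write a 32-bit value into the upper half of a 64-bit register,
 * which is why the (u32) interrupt masks below are written through it.
 */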
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
{
        struct vxge_hw_vpath_reg *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        readq(&vp_reg->vpath_general_int_status); /* Is this needed ? */

        /* Mask unwanted interrupts */
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */
        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
                        0, 32), &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
        return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Disable vpath interrupts, undoing vxge_hw_vpath_intr_enable().
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_general_int_mask);

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}
/*
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}
/*
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}
/*
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        vxge_hw_vpath_intr_enable(&hldev->virtual_path);

        val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

        if (val64 != 0) {
                writeq(val64, &hldev->common_reg->tim_int_status0);

                writeq(~val64, &hldev->common_reg->tim_int_mask0);
        }

        val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

        if (val32 != 0) {
                __vxge_hw_pio_mem_write32_upper(val32,
                                &hldev->common_reg->tim_int_status1);

                __vxge_hw_pio_mem_write32_upper(~val32,
                                &hldev->common_reg->tim_int_mask1);
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        /* We have not enabled the top level interrupt yet.
         * This will be controlled from vxge_irq() entry api.
         */
        return;
}
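
/*
 * Note on the TIM handling in vxge_hw_device_intr_enable() above (a reading
 * of the code, not taken from the hardware documentation): the pending TX/RX
 * TIM bits are first acknowledged through the status registers, then exactly
 * those bits are left unmasked by writing the complemented value to the mask
 * registers; the top-level interrupt itself stays off until vxge_irq()
 * enables it.
 */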
/*
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                        &hldev->common_reg->tim_int_mask1);

        vxge_hw_vpath_intr_disable(&hldev->virtual_path);

        return;
}
/*
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdp: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
                struct vxge_hw_ring_rxd_1 *rxdp)
{
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}
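
/*
 * Re-posting sketch (hypothetical caller; it mirrors the repost path in
 * vxge_hw_vpath_poll_rx() further down, which is the authoritative user):
 *
 *      rxd->control_0 = rxd->control_1 = 0;
 *      vxge_hw_ring_rxd_1b_set(rxd, iobuf, size);
 *      vxge_hw_ring_rxd_post(ring, rxd);
 *      vxge_hw_vpath_doorbell_rx(ring);
 *
 * Here "size" stands for the buffer size handed to the adapter; poll_rx()
 * uses VXGE_LL_MAX_FRAME_SIZE(hldev->vdev).
 */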
/*
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
                u64 txdl_ptr, u32 num_txds)
{
        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
                &fifo->nofl_db->control_0);

        wmb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        wmb();
}
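
/*
 * The wmb() calls above order the control-word write ahead of the TxDL
 * pointer write that lets the adapter fetch the list, and order the pointer
 * write ahead of whatever the caller does next. This is a reading of the
 * barrier placement, not a statement from the Titan documentation.
 */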
/*
 * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
 *
 * @fifo: tx channel handle
 */
struct vxge_hw_fifo_txd *
vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
{
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->sw_offset;
        if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
                vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
                                __func__, __LINE__, fifo->sw_offset);
                return NULL;
        }

        return txdp;
}
/*
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Descriptor handle.
 * @iob: data buffer.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                        struct vxge_hw_fifo_txd *txdp,
                        struct io_buffer *iob)
{
        txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));

        txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
        txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

        txdp->host_control = (intptr_t)iob;
        txdp->buffer_pointer = virt_to_bus(iob->data);
}
/*
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Tx Descriptor
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
                        struct vxge_hw_fifo_txd *txdp)
{
        txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        __vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);

        vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}
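
/*
 * Transmit sketch (hypothetical caller; the real transmit path lives in
 * vxge_main.c and may differ in details): reserve the next free TxD, attach
 * the I/O buffer, then hand the descriptor to the adapter.
 *
 *      struct vxge_hw_fifo_txd *txdp;
 *
 *      txdp = vxge_hw_fifo_free_txdl_get(fifo);
 *      if (!txdp)
 *              return -ENOBUFS;
 *      vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);
 *      vxge_hw_fifo_txdl_post(fifo, txdp);
 *
 * Completion is later picked up by vxge_hw_vpath_poll_tx() below, which
 * calls vxge_xmit_compl() once the adapter clears the OWN bit.
 */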
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 *
 * Process vpath alarms.
 */
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
                        struct __vxge_hw_virtualpath *vpath)
{
        u64 val64;
        u64 alarm_status;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_device *hldev = NULL;
        struct vxge_hw_vpath_reg *vp_reg;

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;
        alarm_status = readq(&vp_reg->vpath_general_int_status);

        if (alarm_status == VXGE_HW_ALL_FOXES) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_ERR_SLOT_FREEZE;
                goto out;
        }

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_FAIL;
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                                (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                                ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_down(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                                (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                                ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_up(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);
                }
        } else {
                vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
                                hldev->ndev->name, __func__, __LINE__,
                                alarm_status);
        }
out:
        return status;
}
/*
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and RX interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
                (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status1);
        }

        return;
}
/*
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 *
 * The function checks whether (on a shared IRQ line) the interrupt was
 * raised by the device, acknowledges any pending Tx/Rx traffic interrupts
 * and processes vpath alarms.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
 * in this case the device interrupts remain enabled); otherwise VXGE_HW_OK
 * or an error status from alarm processing.
 */
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (!val64) {
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (val64 == VXGE_HW_ALL_FOXES) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
                                "occurred\n", hldev->ndev->name,
                                __func__, __LINE__);
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        vpath_mask = hldev->vpaths_deployed >>
                        (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
                        vpath_mask))
                vxge_hw_device_clear_tx_rx(hldev);

        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
                ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);

exit:
        return ret;
}
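
/*
 * IRQ/poll sketch (hypothetical; the real poll hook lives in vxge_main.c,
 * and "ring"/"fifo" below stand for whatever handles the caller obtained
 * when the vpath was opened):
 *
 *      if (vxge_hw_device_begin_irq(hldev) != VXGE_HW_OK)
 *              return;
 *      vxge_hw_vpath_poll_rx(ring);
 *      vxge_hw_vpath_poll_tx(fifo);
 *
 * begin_irq() already acknowledges the Tx/Rx traffic condition via
 * vxge_hw_device_clear_tx_rx(), so the poll routines only have to walk the
 * completed descriptors.
 */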
/*
 * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
 * descriptors posted.
 * @ring: Handle to the ring object used for receive
 *
 * The function writes the number of qwords of rxds posted during
 * replenishment. Since the function is called frequently, a flush is not
 * required to post the write transaction. At the very least, the previous
 * write will be flushed once the subsequent write is made.
 *
 * Returns: None.
 */
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
{
        u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
                        VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        if (ring->total_db_cnt >= rxds_qw_per_block) {
                /* For each block add 4 more qwords */
                ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

                /* Reset total count */
                ring->total_db_cnt -= rxds_qw_per_block;
        }

        if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
                wmb();
                writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
                        ring->doorbell_cnt),
                        &ring->vp_reg->prc_rxd_doorbell);
                ring->doorbell_cnt = 0;
        }
}
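
/*
 * Worked example of the accounting above, assuming (as the "add 4 more
 * qwords" comment suggests) that VXGE_HW_RING_RXD_QWORDS_MODE_1 is 4:
 * every reposted RxD credits 4 qwords, a completed block credits an extra
 * 4 qwords for its header, and the accumulated count is only written to
 * the PRC doorbell once it reaches ring->rxd_qword_limit.
 */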
/*
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors.
 */
#define ETH_FCS_LEN 4
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_ring_rxd_1 *rxd;
        unsigned int len;
        enum vxge_hw_ring_tcode tcode;
        struct io_buffer *rx_iob, *iobuf = NULL;
        u16 poll_count = 0;

        hldev = ring->vpathh->hldev;

        do {
                rxd = &ring->rxdl->rxd[ring->rxd_offset];
                tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);

                /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
                 * possible the ownership bit is still set to the adapter
                 */
                if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
                        && (tcode == VXGE_HW_RING_T_CODE_OK)) {

                        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                        goto err0;
                }

                vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
                                hldev->ndev->name, ring->rxd_offset);

                if (tcode != VXGE_HW_RING_T_CODE_OK) {
                        netdev_rx_err(hldev->ndev, NULL, -EINVAL);
                        vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
                                __func__, __LINE__, tcode);
                        status = VXGE_HW_FAIL;
                        goto err1;
                }

                iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;

                len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
                len -= ETH_FCS_LEN;

                rx_iob = alloc_iob(len);
                if (!rx_iob) {
                        netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
                        vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
                                __func__, __LINE__);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto err1;
                }

                memcpy(iob_put(rx_iob, len), iobuf->data, len);
                /* Add this packet to the receive queue. */
                netdev_rx(hldev->ndev, rx_iob);

err1:
                /* repost the rxd */
                rxd->control_0 = rxd->control_1 = 0;
                vxge_hw_ring_rxd_1b_set(rxd, iobuf,
                                VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
                vxge_hw_ring_rxd_post(ring, rxd);

                /* repost the qword count for doorbell */
                vxge_hw_vpath_doorbell_rx(ring);

                /* increment the descriptor offset */
                vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);

        } while (++poll_count < ring->rx_poll_weight);
err0:
        return status;
}
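
/*
 * Design note (a reading of the loop above, not from the original author):
 * received data is copied into a freshly allocated I/O buffer while the
 * original buffer is always reposted to the ring, even on a receive error
 * or allocation failure, so the ring never runs short of receive buffers
 * at the cost of one memcpy per frame.
 */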
/*
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->hw_offset;
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
                && (txdp->host_control)) {

                vxge_xmit_compl(fifo, txdp,
                        VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));

                vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
        }

        return status;
}