/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stdio.h>
#include <strings.h>
#include <errno.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <bits/byteswap.h>
#include <little_bswap.h>
#include <unistd.h>
#include <gpxe/pci.h>
#include <gpxe/ethernet.h>
#include <gpxe/netdevice.h>
#include <gpxe/iobuf.h>
#include "mtnic.h"

/*
 *	mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
 */

/** Set port number to use
 *
 * 0 - port 1
 * 1 - port 2
 */
#define MTNIC_PORT_NUM 0

/* Note: for verbose printing use 'make ... DEBUG=mtnic' */
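
/*
 * Driver flow (as implemented below): mtnic_probe() resets the card and
 * saves/restores PCI config space, mtnic_init_card() brings up firmware
 * through the command interface, mtnic_open() allocates and configures
 * the CQs and the Tx/Rx rings, and mtnic_poll()/mtnic_transmit() service
 * the rings at runtime.
 */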

/********************************************************************
 *
 * MTNIC allocation functions
 *
 *********************************************************************/
/**
 * mtnic_alloc_aligned
 *
 * @v unsigned int size		size
 * @v void **va			virtual address
 * @v u32 *pa			physical address
 * @v u32 alignment		alignment
 *
 * Allocate an aligned buffer and put its virtual address in 'va'
 * and its physical (bus) address in 'pa'
 */
static int
mtnic_alloc_aligned(unsigned int size, void **va, u32 *pa, unsigned int alignment)
{
	*va = alloc_memblock(size, alignment);
	if (!*va) {
		return MTNIC_ERROR;
	}
	*pa = (u32)virt_to_bus(*va);
	return 0;
}
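
/*
 * The command interface consists of the memory-mapped HCR (host command
 * register) block in BAR0 plus one page-aligned DMA page ('cmd.buf') that
 * serves as the mailbox for commands with non-immediate parameters; its
 * bus address ('cmd.mapping') is written to the HCR in mtnic_cmd() below.
 */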
/**
 * mtnic alloc command interface
 *
 */
static int
mtnic_alloc_cmdif(struct mtnic_priv *priv)
{
	u32 bar = mtnic_pci_dev.dev.bar[0];

	priv->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
	if (!priv->hcr) {
		eprintf("Couldn't map command register.");
		return MTNIC_ERROR;
	}
	mtnic_alloc_aligned(PAGE_SIZE, (void *)&priv->cmd.buf, &priv->cmd.mapping, PAGE_SIZE);
	if (!priv->cmd.buf) {
		eprintf("Error in allocating buffer for command interface\n");
		return MTNIC_ERROR;
	}
	return 0;
}
/**
 * Free RX io buffers
 */
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
	int index;

	for (; ring->cons <= ring->prod; ++ring->cons) {
		index = ring->cons & ring->size_mask;
		if (ring->iobuf[index])
			free_iob(ring->iobuf[index]);
	}
}
/**
 * mtnic alloc and attach io buffers
 *
 */
static int
mtnic_alloc_iobuf(struct mtnic_priv *priv, struct mtnic_ring *ring,
		  unsigned int size)
{
	struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
	u32 index;

	while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
		index = ring->prod & ring->size_mask;
		ring->iobuf[index] = alloc_iob(size);
		if (!ring->iobuf[index]) {
			if (ring->prod <= (ring->cons + 1)) {
				eprintf("Error allocating Rx io "
					"buffer number %lx", index);
				/* In case of error, free the io buffers */
				mtnic_free_io_buffers(ring);
				return MTNIC_ERROR;
			}
			break;
		}

		/* Attach io_buffer to descriptor */
		rx_desc_ptr = ring->buf +
			      (sizeof(struct mtnic_rx_desc) * index);
		rx_desc_ptr->data.count = cpu_to_be32(size);
		rx_desc_ptr->data.mem_type = priv->fw.mem_type_snoop_be;
		rx_desc_ptr->data.addr_l = cpu_to_be32(
			virt_to_bus(ring->iobuf[index]->data));

		++ring->prod;
	}

	/* Update RX producer index (PI) */
	ring->db->count = cpu_to_be32(ring->prod & 0xffff);
	return 0;
}
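
/*
 * Note on ring accounting: 'prod' and 'cons' are free-running counters.
 * Ring sizes are powers of two, so masking with 'size_mask' yields the
 * descriptor index, and (prod - cons) is the number of outstanding
 * entries even after the counters wrap.
 */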
/**
 * mtnic alloc ring
 *
 *	Alloc and configure TX or RX ring
 *
 */
static int
mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring,
		 u32 size, u16 stride, u16 cq, u8 is_rx)
{
	unsigned int i;
	int err;
	struct mtnic_rx_desc *rx_desc;
	struct mtnic_tx_desc *tx_desc;

	ring->size = size; /* Number of descriptors */
	ring->size_mask = size - 1;
	ring->stride = stride; /* Size of each entry */
	ring->cq = cq; /* CQ number associated with this ring */
	ring->cons = 0;
	ring->prod = 0;

	/* Alloc descriptors buffer */
	ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
				       sizeof(struct mtnic_tx_desc));
	err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
				  &ring->dma, PAGE_SIZE);
	if (err) {
		eprintf("Failed allocating descriptor ring of size %lx\n",
			ring->buf_size);
		return MTNIC_ERROR;
	}
	memset(ring->buf, 0, ring->buf_size);

	DBG("Allocated %s ring (addr:%p) - buf:%p size:%lx "
	    "buf_size:%lx dma:%lx\n",
	    is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
	    ring->buf_size, ring->dma);

	if (is_rx) { /* RX ring */
		/* Alloc doorbell */
		err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
					  (void *)&ring->db, &ring->db_dma, 32);
		if (err) {
			eprintf("Failed allocating Rx ring doorbell record\n");
			free(ring->buf);
			return MTNIC_ERROR;
		}

		/* ==- Configure Descriptor -== */
		/* Init ctrl seg of rx desc */
		for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
			rx_desc = ring->buf +
				  (sizeof(struct mtnic_rx_desc) * i);
			/* Pre-link descriptor */
			rx_desc->next = cpu_to_be16(i + 1);
		}
		/* The last ctrl descriptor is '0' and points to the first one */

		/* Alloc IO_BUFFERS */
		err = mtnic_alloc_iobuf(priv, ring, DEF_IOBUF_SIZE);
		if (err) {
			eprintf("ERROR Allocating io buffer");
			free(ring->buf);
			return MTNIC_ERROR;
		}
	} else { /* TX ring */
		/* Set initial ownership of all Tx Desc' to SW (1) */
		for (i = 0; i < ring->size; i++) {
			tx_desc = ring->buf + ring->stride * i;
			tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
		}
		/* DB */
		ring->db_offset = cpu_to_be32(
			((u32) priv->fw.tx_offset[priv->port]) << 8);

		/* Map Tx+CQ doorbells */
		DBG("Mapping TxCQ doorbell at offset:0x%lx\n",
		    priv->fw.txcq_db_offset);
		ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
					priv->fw.txcq_db_offset, PAGE_SIZE);
		if (!ring->txcq_db) {
			eprintf("Couldn't map txcq doorbell, aborting...\n");
			free(ring->buf);
			return MTNIC_ERROR;
		}
	}

	return 0;
}
/**
 * mtnic alloc CQ
 *
 *	Alloc and configure CQ.
 *
 */
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
	       u8 is_rx, u32 size, u32 offset_ind)
{
	int err;
	unsigned int i;

	cq->num = num;
	cq->dev = dev;
	cq->size = size;
	cq->last = 0;
	cq->is_rx = is_rx;
	cq->offset_ind = offset_ind;

	/* Alloc doorbell */
	err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
				  (void *)&cq->db, &cq->db_dma, 32);
	if (err) {
		eprintf("Failed allocating CQ doorbell record\n");
		return MTNIC_ERROR;
	}
	memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));

	/* Alloc CQEs buffer */
	cq->buf_size = size * sizeof(struct mtnic_cqe);
	err = mtnic_alloc_aligned(cq->buf_size,
				  (void *)&cq->buf, &cq->dma, PAGE_SIZE);
	if (err) {
		eprintf("Failed allocating CQ buffer\n");
		free(cq->db);
		return MTNIC_ERROR;
	}
	memset(cq->buf, 0, cq->buf_size);
	DBG("Allocated CQ (addr:%p) - size:%lx buf:%p buf_size:%lx "
	    "dma:%lx db:%p db_dma:%lx\n"
	    "cqn offset:%lx\n", cq, cq->size, cq->buf,
	    cq->buf_size, cq->dma, cq->db,
	    cq->db_dma, offset_ind);

	/* Set ownership of all CQEs to HW */
	DBG("Setting HW ownership for CQ:%d\n", num);
	for (i = 0; i < cq->size; i++) {
		/* Initial HW ownership is 1 */
		cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
	}
	return 0;
}
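
/*
 * CQEs start out owned by hardware (bit set above). Hardware flips the
 * ownership bit each time it writes a completion, so the value that
 * means "completed" alternates on every pass over the ring; the XNOR
 * tests in the process functions below account for this.
 */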
/**
 * mtnic_alloc_resources
 *
 *	Alloc and configure CQs, Tx, Rx
 */
unsigned int
mtnic_alloc_resources(struct net_device *dev)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	int err;
	int cq_ind = 0;
	int cq_offset = priv->fw.cq_offset;

	/* Alloc 1st CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		eprintf("Failed allocating Rx CQ\n");
		return MTNIC_ERROR;
	}

	/* Alloc RX */
	err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_rx_desc), cq_ind, /* RX */ 1);
	if (err) {
		eprintf("Failed allocating Rx Ring\n");
		goto cq0_error;
	}
	++cq_ind;

	/* alloc 2nd CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		eprintf("Failed allocating Tx CQ\n");
		goto rx_error;
	}

	/* Alloc TX */
	err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
	if (err) {
		eprintf("Failed allocating Tx ring\n");
		goto cq1_error;
	}

	return 0;

cq1_error:
	free(priv->cq[1].buf);
	free(priv->cq[1].db);
rx_error:
	free(priv->rx_ring.buf);
	free(priv->rx_ring.db);
	mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
	free(priv->cq[0].buf);
	free(priv->cq[0].db);

	return MTNIC_ERROR;
}
/**
 * mtnic alloc_eq
 *
 * Note: EQ is not used by the driver but must be allocated
 */
static int
mtnic_alloc_eq(struct mtnic_priv *priv)
{
	int err;
	unsigned int i;
	struct mtnic_eqe *eqe_desc = NULL;

	/* Allocating doorbell */
	priv->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
			      priv->fw.eq_db_offset, sizeof(u32));
	if (!priv->eq_db) {
		eprintf("Couldn't map EQ doorbell, aborting...\n");
		return MTNIC_ERROR;
	}

	/* Allocating buffer */
	priv->eq.size = NUM_EQES;
	priv->eq.buf_size = priv->eq.size * sizeof(struct mtnic_eqe);
	err = mtnic_alloc_aligned(priv->eq.buf_size, (void *)&priv->eq.buf,
				  &priv->eq.dma, PAGE_SIZE);
	if (err) {
		eprintf("Failed allocating EQ buffer\n");
		iounmap(priv->eq_db);
		return MTNIC_ERROR;
	}
	memset(priv->eq.buf, 0, priv->eq.buf_size);

	/* Set initial HW ownership of all EQEs */
	for (i = 0; i < priv->eq.size; i++) {
		eqe_desc = priv->eq.buf + (sizeof(struct mtnic_eqe) * i);
		eqe_desc->own |= MTNIC_BIT_EQE_OWN;
	}

	mdelay(20);
	return 0;
}

/********************************************************************
 *
 * Mtnic commands functions
 * -=-=-=-=-=-=-=-=-=-=-=-=
 *
 *********************************************************************/
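
/*
 * Commands are posted by writing the parameters to the HCR and then
 * setting the GO bit together with the opcode; firmware clears GO when
 * the command completes. The toggle (t) bit written alongside GO flips
 * on every command, so cmdif_go_bit() can tell a fresh status word from
 * a stale read: if the t-bit does not match after TBIT_RETRIES reads,
 * the HCR is treated as busy.
 */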
static inline int
cmdif_go_bit(struct mtnic_priv *priv)
{
	struct mtnic_if_cmd_reg *hcr = priv->hcr;
	u32 status;
	int i;

	for (i = 0; i < TBIT_RETRIES; i++) {
		status = be32_to_cpu(readl(&hcr->status_go_opcode));
		if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
		    (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
			/* Read expected t-bit - now return go-bit value */
			return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
		}
	}

	eprintf("Invalid tbit after %d retries!\n", TBIT_RETRIES);
	return 1; /* Return busy... */
}
/* Base Command interface */
static int
mtnic_cmd(struct mtnic_priv *priv, void *in_imm,
	  void *out_imm, u32 in_modifier, u16 op)
{
	struct mtnic_if_cmd_reg *hcr = priv->hcr;
	int err = 0;
	u32 out_param_h = 0;
	u32 out_param_l = 0;
	u32 in_param_h = 0;
	u32 in_param_l = 0;

	static u16 token = 0x8000;
	u32 status;
	unsigned int timeout = 0;

	token++;

	if (cmdif_go_bit(priv)) {
		eprintf("GO BIT BUSY:%p.\n", hcr + 6);
		err = MTNIC_ERROR;
		goto out;
	}
	if (in_imm) {
		in_param_h = *((u32*)in_imm);
		in_param_l = *((u32*)in_imm + 1);
	} else {
		in_param_l = cpu_to_be32(priv->cmd.mapping);
	}
	out_param_l = cpu_to_be32(priv->cmd.mapping);

	/* writing to HCR */
	writel(in_param_h, &hcr->in_param_h);
	writel(in_param_l, &hcr->in_param_l);
	writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
	writel(out_param_h, &hcr->out_param_h);
	writel(out_param_l, &hcr->out_param_l);
	writel((u32)cpu_to_be32(token << 16), &hcr->token);
	wmb();

	/* flip toggle bit before each write to the HCR */
	priv->cmd.tbit = !priv->cmd.tbit;
	writel((u32)
	       cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
	       (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT)) | op),
	       &hcr->status_go_opcode);

	while (cmdif_go_bit(priv) && (timeout <= GO_BIT_TIMEOUT)) {
		mdelay(1);
		++timeout;
	}

	if (cmdif_go_bit(priv)) {
		eprintf("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
		err = MTNIC_ERROR;
		goto out;
	}

	if (out_imm) {
		*((u32 *)out_imm) = readl(&hcr->out_param_h);
		*((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
	}

	status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;
	/*DBG("Command opcode:0x%x token:0x%x returned:0x%lx\n",
	    op, token, status);*/

	if (status) {
		return status;
	}

out:
	return err;
}
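
/*
 * MAP_FW/MAP_PAGES pass an array of page descriptors through the command
 * mailbox: each 16-byte entry carries a page's bus address in its last
 * dword, so one mailbox holds MTNIC_MAILBOX_SIZE / 16 entries and larger
 * page lists are split across several commands, with the entry count
 * passed in the input modifier.
 */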
/* MAP PAGES wrapper */
static int
mtnic_map_cmd(struct mtnic_priv *priv, u16 op, struct mtnic_pages pages)
{
	unsigned int j;
	u32 addr;
	unsigned int len;
	u32 *page_arr = priv->cmd.buf;
	int nent = 0;
	int err = 0;

	memset(page_arr, 0, PAGE_SIZE);

	len = PAGE_SIZE * pages.num;
	pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
	addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
	DBG("Mapping pages: size: %lx address: %p\n", pages.num, pages.buf);

	if (addr & (PAGE_MASK)) {
		eprintf("Got FW area not aligned to %d (%llx/%x)\n",
			PAGE_SIZE, (u64) addr, len);
		return MTNIC_ERROR;
	}

	/* Function maps each PAGE separately */
	for (j = 0; j < len; j += PAGE_SIZE) {
		page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
		if (++nent == MTNIC_MAILBOX_SIZE / 16) {
			err = mtnic_cmd(priv, NULL, NULL, nent, op);
			if (err)
				return MTNIC_ERROR;
			nent = 0;
		}
	}

	if (nent)
		err = mtnic_cmd(priv, NULL, NULL, nent, op);

	return err;
}
/*
 * Query FW
 */
static int
mtnic_QUERY_FW(struct mtnic_priv *priv)
{
	int err;
	struct mtnic_if_query_fw_out_mbox *cmd = priv->cmd.buf;

	err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
	if (err)
		return MTNIC_ERROR;

	/* Get FW and interface versions */
	priv->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
		       ((u64) be16_to_cpu(cmd->rev_min) << 16) |
		       (u64) be16_to_cpu(cmd->rev_smin);
	priv->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);

	/* Get offset for internal error reports (debug) */
	priv->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
	priv->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);

	DBG("Error buf offset is %llx\n", priv->fw.err_buf.offset);

	/* Get number of required FW (4k) pages */
	priv->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);

	return 0;
}
static int
mtnic_OPEN_NIC(struct mtnic_priv *priv)
{
	struct mtnic_if_open_nic_in_mbox *open_nic = priv->cmd.buf;
	u32 extra_pages[2] = {0};
	int err;

	memset(open_nic, 0, sizeof *open_nic);

	/* port 1 */
	open_nic->log_rx_p1 = 0;
	open_nic->log_cq_p1 = 1;
	open_nic->log_tx_p1 = 0;
	open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	/* port 2 */
	open_nic->log_rx_p2 = 0;
	open_nic->log_cq_p2 = 1;
	open_nic->log_tx_p2 = 0;
	open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	err = mtnic_cmd(priv, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);
	priv->fw.extra_pages.num = be32_to_cpu(*(extra_pages + 1));
	DBG("Extra pages num is %lx\n", priv->fw.extra_pages.num);
	return err;
}
static int
mtnic_CONFIG_RX(struct mtnic_priv *priv)
{
	struct mtnic_if_config_rx_in_imm config_rx;

	memset(&config_rx, 0, sizeof config_rx);
	return mtnic_cmd(priv, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}
static int
mtnic_CONFIG_TX(struct mtnic_priv *priv)
{
	struct mtnic_if_config_send_in_imm config_tx;

	/* Zero the whole immediate so no uninitialized bytes reach FW */
	memset(&config_tx, 0, sizeof config_tx);
	config_tx.enph_gpf = 0;
	return mtnic_cmd(priv, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}
static int
mtnic_HEART_BEAT(struct mtnic_priv *priv, u32 *link_state)
{
	struct mtnic_if_heart_beat_out_imm heart_beat;
	int err;
	u32 flags;

	err = mtnic_cmd(priv, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
	if (!err) {
		flags = be32_to_cpu(heart_beat.flags);
		if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
			eprintf("Internal error detected\n");
			return MTNIC_ERROR;
		}
		*link_state = flags &
			      ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
	}
	return err;
}
/*
 * Port commands
 */
static int
mtnic_SET_PORT_DEFAULT_RING(struct mtnic_priv *priv, u8 port, u16 ring)
{
	struct mtnic_if_set_port_default_ring_in_imm def_ring;

	memset(&def_ring, 0, sizeof(def_ring));
	def_ring.ring = ring;
	return mtnic_cmd(priv, &def_ring, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}

static int
mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_priv *priv, int port)
{
	memset(priv->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}

static int
mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_priv *priv, int port)
{
	memset(priv->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}
/*
 * Config commands
 */
static int
mtnic_CONFIG_CQ(struct mtnic_priv *priv, int port,
		u16 cq_ind, struct mtnic_cq *cq)
{
	struct mtnic_if_config_cq_in_mbox *config_cq = priv->cmd.buf;

	memset(config_cq, 0, sizeof *config_cq);
	config_cq->cq = cq_ind;
	config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
	config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
	config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
	config_cq->page_address[1] = cpu_to_be32(cq->dma);
	DBG("config cq address: %lx dma_address: %lx "
	    "offset: %d size %d index: %d\n",
	    config_cq->page_address[1], cq->dma,
	    config_cq->offset, config_cq->size, config_cq->cq);

	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_CQ);
}
static int
mtnic_CONFIG_TX_RING(struct mtnic_priv *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->cmd.buf;

	memset(config_tx_ring, 0, sizeof *config_tx_ring);
	config_tx_ring->ring = cpu_to_be16(ring_ind);
	config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
	config_tx_ring->cq = cpu_to_be16(ring->cq);
	config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_TX_RING);
}
static int
mtnic_CONFIG_RX_RING(struct mtnic_priv *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->cmd.buf;

	memset(config_rx_ring, 0, sizeof *config_rx_ring);
	config_rx_ring->ring = ring_ind;
	MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
		     MTNIC_MASK_CONFIG_RX_RING_SIZE);
	MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
		     MTNIC_MASK_CONFIG_RX_RING_STRIDE);
	config_rx_ring->cq = cpu_to_be16(ring->cq);
	config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);

	DBG("Config RX ring starting at address:%lx\n", ring->dma);

	config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_RX_RING);
}
static int
mtnic_CONFIG_EQ(struct mtnic_priv *priv)
{
	struct mtnic_if_config_eq_in_mbox *eq = priv->cmd.buf;

	if (priv->eq.dma & (PAGE_MASK)) {
		eprintf("misaligned eq buffer:%lx\n",
			priv->eq.dma);
		return MTNIC_ERROR;
	}

	memset(eq, 0, sizeof *eq);
	MTNIC_BC_PUT(eq->offset, priv->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
	MTNIC_BC_PUT(eq->size, fls(priv->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
	MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
	eq->page_address[1] = cpu_to_be32(priv->eq.dma);

	return mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}
static int
mtnic_SET_RX_RING_ADDR(struct mtnic_priv *priv, u8 port, u64 *mac)
{
	struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
	u32 modifier = ((u32) port + 1) << 16;

	memset(&ring_addr, 0, sizeof(ring_addr));

	ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
	ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
	ring_addr.flags_vlan_id |= cpu_to_be16(
		MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));

	return mtnic_cmd(priv, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
}
static int
mtnic_SET_PORT_STATE(struct mtnic_priv *priv, u8 port, u8 state)
{
	struct mtnic_if_set_port_state_in_imm port_state;

	port_state.state = state ? cpu_to_be32(
		MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
	port_state.reserved = 0;
	return mtnic_cmd(priv, &port_state, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_STATE);
}
static int
mtnic_SET_PORT_MTU(struct mtnic_priv *priv, u8 port, u16 mtu)
{
	struct mtnic_if_set_port_mtu_in_imm set_mtu;

	memset(&set_mtu, 0, sizeof(set_mtu));
	set_mtu.mtu = cpu_to_be16(mtu);
	return mtnic_cmd(priv, &set_mtu, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_MTU);
}
static int
mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_priv *priv, int port)
{
	struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->cmd.buf;

	/* When no VLANs are configured we disable the filter
	 * (i.e., pass all VLANs) because we ignore them anyhow */
	memset(vlan_filter, 0xff, sizeof(*vlan_filter));
	return mtnic_cmd(priv, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}
static int
mtnic_RELEASE_RESOURCE(struct mtnic_priv *priv, u8 port, u8 type, u8 index)
{
	struct mtnic_if_release_resource_in_imm rel;

	memset(&rel, 0, sizeof rel);
	rel.index = index;
	rel.type = type;
	return mtnic_cmd(priv,
			 &rel, NULL, (type == MTNIC_IF_RESOURCE_TYPE_EQ) ?
			 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE);
}
static int
mtnic_QUERY_CAP(struct mtnic_priv *priv, u8 index, u8 mod, u64 *result)
{
	struct mtnic_if_query_cap_in_imm cap;
	u32 out_imm[2];
	int err;

	memset(&cap, 0, sizeof cap);
	cap.cap_index = index;
	cap.cap_modifier = mod;
	err = mtnic_cmd(priv, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);

	*((u32*)result) = be32_to_cpu(*(out_imm + 1));
	*((u32*)result + 1) = be32_to_cpu(*out_imm);

	DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
	    " error:%d\n", index, mod, *result, err);
	return err;
}
#define DO_QUERY_CAP(cap, mod, var)				\
	err = mtnic_QUERY_CAP(priv, cap, mod, &result);		\
	if (err)						\
		return err;					\
	(var) = result
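
/* DO_QUERY_CAP expands in place and relies on locals named 'err' and
 * 'result' in the calling function, and on an early 'return err' on
 * failure - both query functions below declare them accordingly. */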
static int
mtnic_query_cap(struct mtnic_priv *priv)
{
	int err = 0;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, priv->fw.num_ports);
	for (i = 0; i < priv->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, priv->fw.mac[i]);
	}

	return 0;
}
static int
mtnic_query_offsets(struct mtnic_priv *priv)
{
	int err;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
		     MTNIC_IF_MEM_TYPE_SNOOP,
		     priv->fw.mem_type_snoop_be);
	priv->fw.mem_type_snoop_be = cpu_to_be32(priv->fw.mem_type_snoop_be);
	DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, priv->fw.txcq_db_offset);
	DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, priv->fw.eq_db_offset);

	for (i = 0; i < priv->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, priv->fw.cq_offset);
		DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, priv->fw.tx_offset[i]);
		DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, priv->fw.rx_offset[i]);
		DBG("--> Port %d CQ offset:0x%x\n", i, priv->fw.cq_offset);
		DBG("--> Port %d Tx offset:0x%x\n", i, priv->fw.tx_offset[i]);
		DBG("--> Port %d Rx offset:0x%x\n", i, priv->fw.rx_offset[i]);
	}

	mdelay(20);
	return 0;
}

/********************************************************************
 *
 * MTNIC initialization functions
 *
 *********************************************************************/
/*
 * Reset device
 */
void
mtnic_reset(void)
{
	void *reset = ioremap(mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET, 4);

	writel(cpu_to_be32(1), reset);
	iounmap(reset);
}
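
/*
 * Presumably the reset above clobbers PCI configuration space, which is
 * why a copy saved at probe time by mtnic_init_pci() is written back
 * afterwards; note that dwords 22 and 23 (config offsets 0x58 and 0x5c)
 * are deliberately skipped on restore.
 */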
/*
 * Restore PCI config
 */
static int
restore_config(void)
{
	int i;
	int rc;

	for (i = 0; i < 64; ++i) {
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
						    i << 2,
						    mtnic_pci_dev.dev.
						    dev_config_space[i]);
			if (rc)
				return rc;
		}
	}
	return 0;
}
/*
 * Init PCI configuration
 */
static int
mtnic_init_pci(struct pci_device *dev)
{
	int i;
	int err;

	/* save bars */
	DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		mtnic_pci_dev.dev.bar[i] =
			pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		DBG("bar[%d]= 0x%08lx\n", i, mtnic_pci_dev.dev.bar[i]);
	}

	/* save config space */
	for (i = 0; i < 64; ++i) {
		err = pci_read_config_dword(dev, i << 2,
					    &mtnic_pci_dev.dev.
					    dev_config_space[i]);
		if (err) {
			eprintf("Cannot save configuration space");
			return err;
		}
	}

	mtnic_pci_dev.dev.dev = dev;

	return 0;
}
/*
 * Initialize hardware
 */
static inline
int mtnic_init_card(struct net_device *dev)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	int err = 0;

	/* Set state */
	priv->state = CARD_DOWN;
	/* Set port */
	priv->port = MTNIC_PORT_NUM;

	/* Alloc command interface */
	err = mtnic_alloc_cmdif(priv);
	if (err) {
		eprintf("Failed to init command interface, aborting.\n");
		return MTNIC_ERROR;
	}

	/*
	 * Bring up HW
	 */
	err = mtnic_QUERY_FW(priv);
	if (err) {
		eprintf("QUERY_FW command failed, aborting.\n");
		goto cmd_error;
	}

	DBG("Command interface revision:%d\n", priv->fw.ifc_rev);

	/* Allocate memory for FW and start it */
	err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_FW, priv->fw.fw_pages);
	if (err) {
		eprintf("Error in MAP_FW\n");
		if (priv->fw.fw_pages.buf)
			free(priv->fw.fw_pages.buf);
		goto cmd_error;
	}

	/* Run firmware */
	err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
	if (err) {
		eprintf("Error in RUN FW\n");
		goto map_fw_error;
	}

	DBG("FW version:%d.%d.%d\n",
	    (u16) (priv->fw_ver >> 32),
	    (u16) ((priv->fw_ver >> 16) & 0xffff),
	    (u16) (priv->fw_ver & 0xffff));

	/* Get device information */
	err = mtnic_query_cap(priv);
	if (err) {
		eprintf("Insufficient resources, aborting.\n");
		goto map_fw_error;
	}

	/* Open NIC */
	err = mtnic_OPEN_NIC(priv);
	if (err) {
		eprintf("Failed opening NIC, aborting.\n");
		goto map_fw_error;
	}

	/* Allocate and map pages workspace */
	err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_PAGES, priv->fw.extra_pages);
	if (err) {
		eprintf("Couldn't allocate %lx FW extra pages, aborting.\n",
			priv->fw.extra_pages.num);
		if (priv->fw.extra_pages.buf)
			free(priv->fw.extra_pages.buf);
		goto map_fw_error;
	}

	/* Get device offsets */
	err = mtnic_query_offsets(priv);
	if (err) {
		eprintf("Failed retrieving resource offsets, aborting.\n");
		goto map_extra_error;
	}

	/* Alloc EQ */
	err = mtnic_alloc_eq(priv);
	if (err) {
		eprintf("Failed init shared resources. error: %d\n", err);
		goto map_extra_error;
	}

	/* Configure HW */
	err = mtnic_CONFIG_EQ(priv);
	if (err) {
		eprintf("Failed configuring EQ\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_RX(priv);
	if (err) {
		eprintf("Failed Rx configuration\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_TX(priv);
	if (err) {
		eprintf("Failed Tx configuration\n");
		goto eq_error;
	}

	DBG("Activating port:%d\n", MTNIC_PORT_NUM + 1);

	priv->state = CARD_INITIALIZED;

	return 0;

eq_error:
	iounmap(priv->eq_db);
	free(priv->eq.buf);
map_extra_error:
	free(priv->fw.extra_pages.buf);
map_fw_error:
	free(priv->fw.fw_pages.buf);

cmd_error:
	iounmap(priv->hcr);
	free(priv->cmd.buf);
	free(priv);

	return MTNIC_ERROR;
}

/*******************************************************************
 *
 * Process functions
 *
 *	process completions of TX and RX
 *
 ********************************************************************/
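
/*
 * Ownership test used below: (cq->last & cq->size) is zero on even
 * passes over the CQ and non-zero on odd ones, so XNOR-ing it with the
 * CQE's ownership bit is true exactly for entries completed in the
 * current pass; the loop stops at the first CQE still owned by hardware.
 */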
void mtnic_process_tx_cq(struct mtnic_priv *priv, struct net_device *dev,
			 struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe = cq->buf;
	struct mtnic_ring *ring = &priv->tx_ring;
	u16 index;

	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Owner bit changes every round */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		netdev_tx_complete(dev, ring->iobuf[index]);
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;
}
int mtnic_process_rx_cq(struct mtnic_priv *priv, struct net_device *dev, struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe;
	struct mtnic_ring *ring = &priv->rx_ring;
	int index;
	int err;
	struct io_buffer *rx_iob;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		/* Drop packet on bad receive or bad checksum */
		if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
			DBG("CQE completed with error - vendor\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}
		if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
			DBG("Dropping packet with bad FCS\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		rx_iob = ring->iobuf[index];
		iob_put(rx_iob, DEF_IOBUF_SIZE);
		/* Add this packet to the receive queue. */
		netdev_rx(dev, rx_iob);
		ring->iobuf[index] = NULL;

next:
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;

	if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
		err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
		if (err) {
			eprintf("ERROR Allocating io buffer");
			return MTNIC_ERROR;
		}
	}

	return 0;
}

/********************************************************************
 *
 * net_device functions
 *
 *	open, poll, close, probe, disable, irq
 *
 *********************************************************************/
static int
mtnic_open(struct net_device *dev)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	int err = 0;
	struct mtnic_ring *ring;
	struct mtnic_cq *cq;
	int cq_ind = 0;
	u32 dev_link_state;

	DBG("starting port:%d\n", priv->port);

	/* Alloc and configure CQs, TX, RX */
	err = mtnic_alloc_resources(dev);
	if (err) {
		eprintf("Error allocating resources\n");
		return MTNIC_ERROR;
	}

	/* Pass CQs configuration to HW */
	for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
		cq = &priv->cq[cq_ind];
		err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
		if (err) {
			eprintf("Failed configuring CQ:%d error %d\n",
				cq_ind, err);
			if (cq_ind)
				goto cq_error;
			else
				return MTNIC_ERROR;
		}
		/* Update consumer index */
		cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	}

	/* Pass Tx configuration to HW */
	ring = &priv->tx_ring;
	err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
	if (err) {
		eprintf("Failed configuring Tx ring:0\n");
		goto cq_error;
	}

	/* Pass RX configuration to HW */
	ring = &priv->rx_ring;
	err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
	if (err) {
		eprintf("Failed configuring Rx ring:0\n");
		goto tx_error;
	}

	/* Configure Rx steering */
	err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
	if (!err)
		err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
	if (err) {
		eprintf("Failed configuring RSS steering\n");
		goto rx_error;
	}

	/* Set the port default ring to ring 0 */
	err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
	if (err) {
		eprintf("Failed setting default ring\n");
		goto rx_error;
	}

	/* Set Mac address */
	err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->fw.mac[priv->port]);
	if (err) {
		eprintf("Failed setting default MAC address\n");
		goto rx_error;
	}

	/* Set MTU */
	err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
	if (err) {
		eprintf("Failed setting MTU\n");
		goto rx_error;
	}

	/* Configure VLAN filter */
	err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
	if (err) {
		eprintf("Failed configuring VLAN filter\n");
		goto rx_error;
	}

	/* Bring up physical link */
	err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
	if (err) {
		eprintf("Failed bringing up port\n");
		goto rx_error;
	}
	mdelay(300); /* Let link state stabilize if cable was connected */

	priv->state = CARD_UP;

	err = mtnic_HEART_BEAT(priv, &dev_link_state);
	if (err) {
		eprintf("Failed getting device link state\n");
		return MTNIC_ERROR;
	}
	if (!(dev_link_state & 0x3)) {
		eprintf("Link down, check cables and restart\n");
		return MTNIC_ERROR;
	}

	return 0;

rx_error:
	err = mtnic_RELEASE_RESOURCE(priv, priv->port,
				     MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
	err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
				      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
cq_error:
	while (cq_ind) {
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
	}
	if (err)
		DBG("Error releasing resources\n");

	return MTNIC_ERROR;
}

/** Check whether we got completions for receive and transmit, and
 * check the link with the heart_beat command */
static void
mtnic_poll(struct net_device *dev)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	struct mtnic_cq *cq;
	u32 dev_link_state;
	int err;
	unsigned int i;

	/* In case of a previous error, return */
	if (priv->state != CARD_UP)
		return;

	/* We do not check the device on every poll call,
	   since that would slow it down */
	if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
		/* Check device */
		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			eprintf("Device has internal error\n");
			priv->state = CARD_DOWN;
			return;
		}
		if (!(dev_link_state & 0x3)) {
			eprintf("Link down, check cables and restart\n");
			priv->state = CARD_DOWN;
			return;
		}
	}

	/* Poll both CQs (Rx and Tx) */
	for (i = 0; i < NUM_CQS; i++) {
		cq = &priv->cq[i];
		if (cq->is_rx) {
			err = mtnic_process_rx_cq(priv, cq->dev, cq);
			if (err) {
				priv->state = CARD_DOWN;
				eprintf(" Error allocating RX buffers\n");
				return;
			}
		} else {
			mtnic_process_tx_cq(priv, cq->dev, cq);
		}
	}
	++priv->poll_counter;
}
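
/*
 * Tx descriptor ownership follows the same round-parity scheme as the
 * CQs: (ring->prod & ring->size) selects whether MTNIC_BIT_DESC_OWN is
 * set for this pass over the ring. After the descriptor is written, a
 * single doorbell write (ring->db_offset into the Tx+CQ doorbell page)
 * hands it to the hardware; the wmb() before it makes sure the
 * descriptor contents are visible first.
 */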
static int
mtnic_transmit(struct net_device *dev, struct io_buffer *iobuf)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	struct mtnic_ring *ring;
	struct mtnic_tx_desc *tx_desc;
	struct mtnic_data_seg *data;
	u32 index;

	/* In case of an error, return */
	if (priv->state != CARD_UP)
		return MTNIC_ERROR;

	ring = &priv->tx_ring;

	index = ring->prod & ring->size_mask;
	if ((ring->prod - ring->cons) >= ring->size) {
		DBG("No space left for descriptors!!! cons: %lx prod: %lx\n",
		    ring->cons, ring->prod);
		mdelay(5);
		return MTNIC_ERROR; /* no space left */
	}

	/* get current descriptor */
	tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));

	/* Prepare ctrl segment */
	tx_desc->ctrl.size_vlan = cpu_to_be32(2);
	tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
					  MTNIC_BIT_NO_ICRC);
	tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
			       ((ring->prod & ring->size) ?
				cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);

	/* Prepare Data Seg */
	data = &tx_desc->data;
	data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
	data->count = cpu_to_be32(iob_len(iobuf));
	data->mem_type = priv->fw.mem_type_snoop_be;

	/* Attach io_buffer */
	ring->iobuf[index] = iobuf;

	/* Update producer index */
	++ring->prod;

	/* Ring doorbell! */
	wmb();
	writel((u32) ring->db_offset, &ring->txcq_db->send_db);

	return 0;
}
static void
mtnic_close(struct net_device *dev)
{
	struct mtnic_priv *priv = netdev_priv(dev);
	int err = 0;

	DBG("Close called for port:%d\n", priv->port);

	if (priv->state == CARD_UP) {
		/* Disable port */
		err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
		/*
		 * Stop HW associated with this port
		 */
		mdelay(5);

		/* Stop RX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);

		/* Stop TX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

		/* Stop CQs */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 0);
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 1);
		if (err) {
			DBG("Close reported error %d", err);
		}

		/* Free memory */
		free(priv->tx_ring.buf);
		iounmap(priv->tx_ring.txcq_db);
		free(priv->cq[1].buf);
		free(priv->cq[1].db);

		/* Free RX buffers */
		mtnic_free_io_buffers(&priv->rx_ring);

		free(priv->rx_ring.buf);
		free(priv->rx_ring.db);
		free(priv->cq[0].buf);
		free(priv->cq[0].db);

		priv->state = CARD_INITIALIZED;
	}
}
static void
mtnic_disable(struct pci_device *pci)
{
	int err;
	struct net_device *dev = pci_get_drvdata(pci);
	struct mtnic_priv *priv = netdev_priv(dev);

	/* Should NOT happen! but just in case */
	if (priv->state == CARD_UP)
		mtnic_close(dev);

	if (priv->state == CARD_INITIALIZED) {
		err = mtnic_RELEASE_RESOURCE(priv, 0,
					     MTNIC_IF_RESOURCE_TYPE_EQ, 0);
		DBG("Calling MTNIC_CLOSE command\n");
		err |= mtnic_cmd(priv, NULL, NULL, 0,
				 MTNIC_IF_CMD_CLOSE_NIC);
		if (err) {
			DBG("Error releasing resources %d\n", err);
		}

		free(priv->cmd.buf);
		iounmap(priv->hcr);
		ufree((u32)priv->fw.fw_pages.buf);
		ufree((u32)priv->fw.extra_pages.buf);
		free(priv->eq.buf);
		iounmap(priv->eq_db);
		priv->state = CARD_DOWN;
	}

	unregister_netdev(dev);
	netdev_nullify(dev);
	netdev_put(dev);
}
static void
mtnic_irq(struct net_device *netdev __unused, int enable __unused)
{
	/* Not implemented */
}

/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
	.open		= mtnic_open,
	.close		= mtnic_close,
	.transmit	= mtnic_transmit,
	.poll		= mtnic_poll,
	.irq		= mtnic_irq,
};
static int
mtnic_probe(struct pci_device *pci,
	    const struct pci_device_id *id __unused)
{
	struct net_device *dev;
	struct mtnic_priv *priv;
	int err;
	u64 mac;
	u32 result = 0;
	void *dev_id;
	int i;

	if (pci->vendor != MELLANOX_VENDOR_ID) {
		eprintf("");
		return 0;
	}
	printf("\nMellanox Technologies LTD - Boot over MTNIC implementation\n");

	adjust_pci_device(pci);

	err = mtnic_init_pci(pci);
	if (err) {
		eprintf("Error in pci_init\n");
		return MTNIC_ERROR;
	}

	mtnic_reset();
	mdelay(1000);

	err = restore_config();
	if (err) {
		eprintf("");
		return err;
	}

	/* Checking MTNIC device ID */
	dev_id = ioremap(mtnic_pci_dev.dev.bar[0] +
			 MTNIC_DEVICE_ID_OFFSET, 4);
	result = ntohl(readl(dev_id));
	iounmap(dev_id);
	if (result != MTNIC_DEVICE_ID) {
		eprintf("Wrong Device ID (0x%lx) !!!", result);
		return MTNIC_ERROR;
	}

	/* Initializing net device */
	dev = alloc_etherdev(sizeof(struct mtnic_priv));
	if (dev == NULL) {
		eprintf("Net device allocation failed\n");
		return MTNIC_ERROR;
	}
	/*
	 * Initialize driver private data
	 */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mtnic_priv));
	priv->dev = dev;
	priv->pdev = pci;
	priv->dev->dev = &pci->dev;
	/* Attach pci device */
	pci_set_drvdata(pci, priv->dev);
	netdev_init(dev, &mtnic_operations);

	/* Initialize hardware */
	err = mtnic_init_card(dev);
	if (err) {
		eprintf("Error in init_card\n");
		return MTNIC_ERROR;
	}

	/* Program the MAC address */
	mac = priv->fw.mac[priv->port];
	printf("Port %d Mac address: 0x%12llx\n", MTNIC_PORT_NUM + 1, mac);
	for (i = 0; i < MAC_ADDRESS_SIZE; ++i) {
		dev->ll_addr[MAC_ADDRESS_SIZE - i - 1] = mac & 0xFF;
		mac = mac >> 8;
	}

	if (register_netdev(dev)) {
		eprintf("Netdev registration failed\n");
		return MTNIC_ERROR;
	}

	return 0;
}
static struct pci_device_id mtnic_nics[] = {
	PCI_ROM(0x15b3, 0x6368, "mtnic", "Mellanox MTNIC driver"),
};

struct pci_driver mtnic_driver __pci_driver = {
	.ids = mtnic_nics,
	.id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
	.probe = mtnic_probe,
	.remove = mtnic_disable,
};