/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
FILE_LICENCE ( GPL2_ONLY );
#include <strings.h>
#include <errno.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <byteswap.h>
#include <unistd.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/ethernet.h>
#include <gpxe/netdevice.h>
#include <gpxe/iobuf.h>
#include "mtnic.h"

/*
 * mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
 */
/********************************************************************
 *
 *	MTNIC allocation functions
 *
 *********************************************************************/
/**
 * mtnic_alloc_aligned
 *
 * @v size		Buffer size
 * @v va		Returned virtual address
 * @v pa		Returned bus (physical) address
 * @v alignment		Required alignment
 *
 * Allocates an aligned buffer, storing its virtual address in 'va'
 * and its aligned bus address in 'pa'
 */
static int
mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned int alignment)
{
	*va = alloc_memblock(size, alignment);
	if (!*va) {
		return -EADDRINUSE;
	}
	*pa = (u32)virt_to_bus(*va);

	return 0;
}
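
/*
 * A minimal usage sketch for mtnic_alloc_aligned() (illustrative only,
 * not part of the driver): callers receive both a virtual address for
 * CPU access and a bus address to hand to the NIC for DMA.
 *
 *	void *va;
 *	unsigned long pa;
 *
 *	if ( mtnic_alloc_aligned ( PAGE_SIZE, &va, &pa, PAGE_SIZE ) == 0 ) {
 *		memset ( va, 0, PAGE_SIZE );	// CPU writes via 'va'
 *		// ... pass 'pa' to the hardware, e.g. in a command mailbox
 *		free_memblock ( va, PAGE_SIZE );
 *	}
 */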
/**
 * mtnic alloc command interface
 *
 */
static int
mtnic_alloc_cmdif(struct mtnic *mtnic)
{
	u32 bar = mtnic_pci_dev.dev.bar[0];

	mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
	if ( !mtnic->hcr ) {
		DBG("Couldn't map command register\n");
		return -EADDRINUSE;
	}

	mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf, &mtnic->cmd.mapping, PAGE_SIZE);
	if ( !mtnic->cmd.buf ) {
		DBG("Error in allocating buffer for command interface\n");
		return -EADDRINUSE;
	}

	return 0;
}
/**
 * Free RX io buffers
 */
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
	int index;

	for (; ring->cons <= ring->prod; ++ring->cons) {
		index = ring->cons & ring->size_mask;
		if ( ring->iobuf[index] ) {
			free_iob(ring->iobuf[index]);
		}
	}
}
/**
 * mtnic alloc and attach io buffers
 */
static int
mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
		  unsigned int size)
{
	struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
	u32 index;

	while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
		index = ring->prod & ring->size_mask;
		ring->iobuf[index] = alloc_iob(size);
		if (!ring->iobuf[index]) {
			if (ring->prod <= (ring->cons + 1)) {
				DBG ( "Dropping packet, buffer is full\n" );
			}
			break;
		}

		/* Attach io_buffer to descriptor */
		rx_desc_ptr = ring->buf +
			      (sizeof(struct mtnic_rx_desc) * index);
		rx_desc_ptr->data.count = cpu_to_be32(size);
		rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
		rx_desc_ptr->data.addr_l = cpu_to_be32(
			virt_to_bus(ring->iobuf[index]->data));

		++ ring->prod;
	}

	/* Update RX producer index (PI) */
	ring->db->count = cpu_to_be32(ring->prod & 0xffff);

	return 0;
}
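
/*
 * Note on the ring accounting above: 'prod' and 'cons' are free-running
 * counters and 'size_mask' (size - 1, with size a power of two) converts
 * them to ring slots.  For example, with size = 8 and prod = 10, the next
 * descriptor filled is slot 10 & 7 = 2, and (prod - cons) is the number
 * of buffers currently posted.  The doorbell write to ring->db->count
 * tells the NIC how far the producer index has advanced.
 */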
/**
 * mtnic alloc ring
 *
 *	Alloc and configure TX or RX ring
 *
 */
static int
mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
		 u32 size, u16 stride, u16 cq, u8 is_rx)
{
	unsigned int i;
	int err;
	struct mtnic_rx_desc *rx_desc;
	struct mtnic_tx_desc *tx_desc;

	ring->size = size; /* Number of descriptors */
	ring->size_mask = size - 1;
	ring->stride = stride; /* Size of each entry */
	ring->cq = cq; /* CQ number associated with this ring */
	ring->cons = 0;
	ring->prod = 0;

	/* Alloc descriptors buffer */
	ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
				       sizeof(struct mtnic_tx_desc));
	err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
				  &ring->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating descriptor ring size:%x\n",
		    ring->buf_size);
		return -EADDRINUSE;
	}
	memset(ring->buf, 0, ring->buf_size);

	DBG("Allocated %s ring (addr:%p) - buf:%p size:%x "
	    "buf_size:%x dma:%lx\n",
	    is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
	    ring->buf_size, ring->dma);

	if (is_rx) { /* RX ring */
		/* Alloc doorbell */
		err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
					  (void *)&ring->db, &ring->db_dma, 32);
		if (err) {
			DBG("Failed allocating Rx ring doorbell record\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

		/* ==- Configure Descriptor -== */
		/* Init ctrl seg of rx desc */
		for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
			rx_desc = ring->buf +
				  (sizeof(struct mtnic_rx_desc) * i);
			/* Pre-link descriptor */
			rx_desc->next = cpu_to_be16(i + 1);
		}
		/* The last ctrl descriptor is '0' and points to the first one */

		/* Alloc IO_BUFFERS */
		err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

	} else { /* TX ring */
		/* Set initial ownership of all Tx Desc' to SW (1) */
		for (i = 0; i < ring->size; i++) {
			tx_desc = ring->buf + ring->stride * i;
			tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
		}

		/* DB */
		ring->db_offset = cpu_to_be32(
			((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);

		/* Map Tx+CQ doorbells */
		DBG("Mapping TxCQ doorbell at offset:0x%x\n",
		    priv->mtnic->fw.txcq_db_offset);
		ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
					priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
		if (!ring->txcq_db) {
			DBG("Couldn't map txcq doorbell, aborting...\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	}

	return 0;
}
/**
 * mtnic alloc CQ
 *
 *	Alloc and configure CQ.
 *
 */
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
	       u8 is_rx, u32 size, u32 offset_ind)
{
	int err;
	unsigned int i;

	cq->num = num;
	cq->dev = dev;
	cq->size = size;
	cq->last = 0;
	cq->is_rx = is_rx;
	cq->offset_ind = offset_ind;

	/* Alloc doorbell */
	err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
				  (void *)&cq->db, &cq->db_dma, 32);
	if (err) {
		DBG("Failed allocating CQ doorbell record\n");
		return -EADDRINUSE;
	}
	memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));

	/* Alloc CQEs buffer */
	cq->buf_size = size * sizeof(struct mtnic_cqe);
	err = mtnic_alloc_aligned(cq->buf_size,
				  (void *)&cq->buf, &cq->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating CQ buffer\n");
		free_memblock(cq->db, sizeof(struct mtnic_cq_db_record));
		return -EADDRINUSE;
	}
	memset(cq->buf, 0, cq->buf_size);

	DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
	    "dma:%lx db:%p db_dma:%lx "
	    "cqn offset:%x\n", cq, cq->size, cq->buf,
	    cq->buf_size, cq->dma, cq->db,
	    cq->db_dma, offset_ind);

	/* Set ownership of all CQEs to HW */
	DBG("Setting HW ownership for CQ:%d\n", num);
	for (i = 0; i < cq->size; i++) {
		/* Initial HW ownership is 1 */
		cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
	}

	return 0;
}
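
/*
 * Setting MTNIC_BIT_CQ_OWN on every CQE means each entry initially
 * belongs to the hardware; the NIC flips the effective sense of the bit
 * on every lap around the ring as it reports completions, and
 * mtnic_process_tx_cq()/mtnic_process_rx_cq() below detect completed
 * entries with an XNOR test against 'cq->last & cq->size' (see the
 * worked example next to mtnic_process_tx_cq()).
 */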
/**
 * mtnic_alloc_resources
 *
 *	Alloc and configure CQs, Tx, Rx
 */
unsigned int
mtnic_alloc_resources(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err;
	int cq_ind = 0;
	int cq_offset = priv->mtnic->fw.cq_offset;

	/* Alloc 1st CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Rx CQ\n");
		return -EADDRINUSE;
	}

	/* Alloc RX */
	err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_rx_desc), cq_ind, /* RX */ 1);
	if (err) {
		DBG("Failed allocating Rx Ring\n");
		goto cq0_error;
	}

	++cq_ind;

	/* Alloc 2nd CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Tx CQ\n");
		goto rx_error;
	}

	/* Alloc TX */
	err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
	if (err) {
		DBG("Failed allocating Tx ring\n");
		goto cq1_error;
	}

	return 0;

cq1_error:
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));

rx_error:
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	return -EADDRINUSE;
}
/**
 * mtnic alloc_eq
 *
 * Note: EQ is not used by the driver but must be allocated
 */
static int
mtnic_alloc_eq(struct mtnic *mtnic)
{
	int err;
	unsigned int i;
	struct mtnic_eqe *eqe_desc = NULL;

	/* Allocating doorbell */
	mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
			       mtnic->fw.eq_db_offset, sizeof(u32));
	if (!mtnic->eq_db) {
		DBG("Couldn't map EQ doorbell, aborting...\n");
		return -EADDRINUSE;
	}

	/* Allocating buffer */
	mtnic->eq.size = NUM_EQES;
	mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
	err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
				  &mtnic->eq.dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating EQ buffer\n");
		iounmap(mtnic->eq_db);
		return -EADDRINUSE;
	}
	memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);

	/* Set initial ownership of all EQEs to HW */
	for (i = 0; i < mtnic->eq.size; i++) {
		eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
		eqe_desc->own |= MTNIC_BIT_EQE_OWN;
	}

	mdelay(20);
	return 0;
}
/********************************************************************
 *
 * Mtnic commands functions
 * -=-=-=-=-=-=-=-=-=-=-=-=
 *
 *********************************************************************/
static inline int
cmdif_go_bit(struct mtnic *mtnic)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	u32 status;
	int i;

	for (i = 0; i < TBIT_RETRIES; i++) {
		status = be32_to_cpu(readl(&hcr->status_go_opcode));
		if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
		    (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
			/* Read expected t-bit - now return go-bit value */
			return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
		}
	}

	DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
	return -EBUSY; /* Return busy... */
}
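
/*
 * Summary of the go/t-bit handshake implemented above: software may post
 * a command only while the GO bit is clear, and the driver flips its
 * copy of the toggle (t) bit on every command it writes.  If a status
 * read returns a t-bit that does not match mtnic->cmd.tbit, the read
 * raced with the device, so cmdif_go_bit() retries up to TBIT_RETRIES
 * times before reporting busy.
 */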
/* Base Command interface */
static int
mtnic_cmd(struct mtnic *mtnic, void *in_imm,
	  void *out_imm, u32 in_modifier, u16 op)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	int err = 0;
	u32 out_param_h = 0;
	u32 out_param_l = 0;
	u32 in_param_h = 0;
	u32 in_param_l = 0;

	static u16 token = 0x8000;
	u32 status;
	unsigned int timeout = 0;

	token++;

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("GO BIT BUSY:%p.\n", hcr + 6);
		err = -EBUSY;
		goto out;
	}
	if (in_imm) {
		in_param_h = *((u32*)in_imm);
		in_param_l = *((u32*)in_imm + 1);
	} else {
		in_param_l = cpu_to_be32(mtnic->cmd.mapping);
	}
	out_param_l = cpu_to_be32(mtnic->cmd.mapping);

	/* writing to MCR */
	writel(in_param_h, &hcr->in_param_h);
	writel(in_param_l, &hcr->in_param_l);
	writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
	writel(out_param_h, &hcr->out_param_h);
	writel(out_param_l, &hcr->out_param_l);
	writel((u32)cpu_to_be32(token << 16), &hcr->token);
	wmb();

	/* flip toggle bit before each write to the HCR */
	mtnic->cmd.tbit = !mtnic->cmd.tbit;
	writel( ( u32 )
		cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
			    ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
		&hcr->status_go_opcode);

	while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
		mdelay ( 1 );
		++timeout;
	}

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
		err = -EBUSY;
		goto out;
	}

	if (out_imm) {
		*((u32 *)out_imm) = readl(&hcr->out_param_h);
		*((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
	}

	status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;

	if (status) {
		DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
		    op, token, status);
		return status;
	}

out:
	return err;
}
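
/*
 * The command flow implemented by mtnic_cmd(), in outline:
 *
 *	1. wait for GO == 0 (cmdif_go_bit)
 *	2. write in/out parameters, modifier and token to the HCR
 *	3. flip the t-bit and set GO together with the opcode
 *	4. poll until GO clears (up to GO_BIT_TIMEOUT ms)
 *	5. read back any immediate output plus the 8-bit status
 *
 * Commands whose input or output exceeds the immediate registers use
 * the DMA mailbox at mtnic->cmd.mapping instead, which is why the bus
 * address of cmd.buf is written as the default in/out parameter.
 */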
/* MAP PAGES wrapper */
static int
mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
{
	unsigned int j;
	u32 addr;
	unsigned int len;
	u32 *page_arr = mtnic->cmd.buf;
	int nent = 0;
	int err = 0;

	memset(page_arr, 0, PAGE_SIZE);

	len = PAGE_SIZE * pages.num;
	pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
	if (!pages.buf) {
		DBG("Couldn't allocate pages buffer\n");
		return -EADDRINUSE;
	}
	addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
	DBG("Mapping pages: size: %x address: %p\n", pages.num, pages.buf);

	if (addr & (PAGE_MASK)) {
		DBG("Got FW area not aligned to %d (%llx/%x)\n",
		    PAGE_SIZE, (u64) addr, len);
		return -EADDRINUSE;
	}

	/* Function maps each PAGE separately */
	for (j = 0; j < len; j += PAGE_SIZE) {
		page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
		if (++nent == MTNIC_MAILBOX_SIZE / 16) {
			err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
			if (err)
				return -EIO;
			nent = 0;
		}
	}

	if (nent) {
		err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
	}

	return err;
}
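
/*
 * Each mailbox entry built above occupies 16 bytes (four u32s, with the
 * page bus address in the fourth word), so a single mailbox holds
 * MTNIC_MAILBOX_SIZE / 16 page entries.  The loop issues the MAP command
 * and resets the entry count whenever the mailbox fills, then flushes
 * any remainder with one final command.
 */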
/*
 * Query FW
 */
static int
mtnic_QUERY_FW ( struct mtnic *mtnic )
{
	int err;
	struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;

	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
	if (err)
		return -EIO;

	/* Get FW and interface versions */
	mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
			((u64) be16_to_cpu(cmd->rev_min) << 16) |
			(u64) be16_to_cpu(cmd->rev_smin);
	mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);

	/* Get offset for internal error reports (debug) */
	mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
	mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);

	DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);

	/* Get number of required FW (4k) pages */
	mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);

	return 0;
}
static int
mtnic_OPEN_NIC(struct mtnic *mtnic)
{
	struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
	u32 extra_pages[2] = {0};
	int err;

	memset(open_nic, 0, sizeof *open_nic);

	/* port 1 */
	open_nic->log_rx_p1 = 0;
	open_nic->log_cq_p1 = 1;

	open_nic->log_tx_p1 = 0;
	open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	/* port 2 */
	open_nic->log_rx_p2 = 0;
	open_nic->log_cq_p2 = 1;

	open_nic->log_tx_p2 = 0;
	open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);

	mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages + 1));
	DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);
	return err;
}
static int
mtnic_CONFIG_RX(struct mtnic *mtnic)
{
	struct mtnic_if_config_rx_in_imm config_rx;

	memset(&config_rx, 0, sizeof config_rx);
	return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}
static int
mtnic_CONFIG_TX(struct mtnic *mtnic)
{
	struct mtnic_if_config_send_in_imm config_tx;

	memset(&config_tx, 0, sizeof config_tx); /* zero the reserved field too */
	config_tx.enph_gpf = 0;
	return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}
static int
mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
{
	struct mtnic_if_heart_beat_out_imm heart_beat;

	int err;
	u32 flags;
	err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
	if (!err) {
		flags = be32_to_cpu(heart_beat.flags);
		if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
			DBG("Internal error detected\n");
			return -EIO;
		}
		*link_state = flags &
			      ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
	}
	return err;
}
/*
 * Port commands
 */
static int
mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
{
	struct mtnic_if_set_port_default_ring_in_imm def_ring;

	memset(&def_ring, 0, sizeof(def_ring));
	def_ring.ring = ring;
	return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}
static int
mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}
static int
mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}
/*
 * Config commands
 */
static int
mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
		u16 cq_ind, struct mtnic_cq *cq)
{
	struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;

	memset(config_cq, 0, sizeof *config_cq);
	config_cq->cq = cq_ind;
	config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
	config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
	config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
	config_cq->page_address[1] = cpu_to_be32(cq->dma);
	DBG("config cq address: %x dma_address: %lx "
	    "offset: %d size %d index: %d\n",
	    config_cq->page_address[1], cq->dma,
	    config_cq->offset, config_cq->size, config_cq->cq );

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_CQ);
}
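
/*
 * The CQ size is passed to firmware as a log2 value: fls() returns the
 * index of the highest set bit, so e.g. if UNITS_BUFFER_SIZE were 64,
 * fls(63) = 6 and the device would use 2^6 = 64 CQEs.  This is one of
 * the reasons ring and CQ sizes in this driver must be powers of two.
 */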
static int
mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;
	memset(config_tx_ring, 0, sizeof *config_tx_ring);
	config_tx_ring->ring = cpu_to_be16(ring_ind);
	config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
	config_tx_ring->cq = cpu_to_be16(ring->cq);
	config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_TX_RING);
}
static int
mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;
	memset(config_rx_ring, 0, sizeof *config_rx_ring);
	config_rx_ring->ring = ring_ind;
	MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
		     MTNIC_MASK_CONFIG_RX_RING_SIZE);
	MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
		     MTNIC_MASK_CONFIG_RX_RING_STRIDE);
	config_rx_ring->cq = cpu_to_be16(ring->cq);
	config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);

	DBG("Config RX ring starting at address:%lx\n", ring->dma);

	config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_RX_RING);
}
static int
mtnic_CONFIG_EQ(struct mtnic *mtnic)
{
	struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;

	if (mtnic->eq.dma & (PAGE_MASK)) {
		DBG("misaligned eq buffer:%lx\n",
		    mtnic->eq.dma);
		return -EADDRINUSE;
	}

	memset(eq, 0, sizeof *eq);
	MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
	MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
	MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
	eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);

	return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}
static int
mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64 *mac)
{
	struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
	u32 modifier = ((u32) port + 1) << 16;

	memset(&ring_addr, 0, sizeof(ring_addr));

	ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
	ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
	ring_addr.flags_vlan_id |= cpu_to_be16(
		MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));

	return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
}
static int
mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
{
	struct mtnic_if_set_port_state_in_imm port_state;

	port_state.state = state ? cpu_to_be32(
		MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
	port_state.reserved = 0;
	return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_STATE);
}
static int
mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
{
	struct mtnic_if_set_port_mtu_in_imm set_mtu;

	memset(&set_mtu, 0, sizeof(set_mtu));
	set_mtu.mtu = cpu_to_be16(mtu);
	return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_MTU);
}
static int
mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
{
	struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;

	/* When no VLANs are configured, we disable the filter
	 * (i.e., pass all VLANs) because we ignore them anyhow */
	memset(vlan_filter, 0xff, sizeof(*vlan_filter));
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}
static int
mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
{
	struct mtnic_if_release_resource_in_imm rel;
	memset(&rel, 0, sizeof rel);
	rel.index = index;
	rel.type = type;
	return mtnic_cmd ( priv->mtnic,
			   &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
			   0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
}
static int
mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
{
	struct mtnic_if_query_cap_in_imm cap;
	u32 out_imm[2];
	int err;

	memset(&cap, 0, sizeof cap);
	cap.cap_index = index;
	cap.cap_modifier = mod;
	err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);

	*((u32*)result) = be32_to_cpu(*(out_imm + 1));
	*((u32*)result + 1) = be32_to_cpu(*out_imm);

	DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
	    " error:%d\n", index, mod, *result, err);
	return err;
}
#define DO_QUERY_CAP(cap, mod, var)				\
	err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);	\
	if (err)						\
		return err;					\
	(var) = result
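
/*
 * DO_QUERY_CAP expands in place and relies on 'err' and 'result' being
 * declared at the call site.  For example,
 *
 *	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);
 *
 * expands to
 *
 *	err = mtnic_QUERY_CAP(mtnic, MTNIC_IF_CAP_NUM_PORTS, 0, &result);
 *	if (err)
 *		return err;
 *	(mtnic->fw.num_ports) = result;
 */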
static int
mtnic_query_num_ports(struct mtnic *mtnic)
{
	int err = 0;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);

	return 0;
}
static int
mtnic_query_mac(struct mtnic *mtnic)
{
	int err = 0;
	int i;
	u64 result;

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
	}

	return 0;
}
static int
mtnic_query_offsets(struct mtnic *mtnic)
{
	int err;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
		     MTNIC_IF_MEM_TYPE_SNOOP,
		     mtnic->fw.mem_type_snoop_be);
	mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);
	DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
	DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
		DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
		DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
		DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
		DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
		DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
	}

	mdelay(20);
	return 0;
}
/********************************************************************
 *
 *	MTNIC initialization functions
 *
 *********************************************************************/
/**
 * Reset device
 */
void
mtnic_reset ( void )
{
	void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
				4 );
	writel ( cpu_to_be32 ( 1 ), reset );
	iounmap ( reset );
}
/**
 * Restore PCI config
 */
static int
restore_config(void)
{
	int i;
	int rc;

	for (i = 0; i < 64; ++i) {
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
						    i << 2,
						    mtnic_pci_dev.dev.
						    dev_config_space[i]);
			if (rc)
				return rc;
		}
	}
	return 0;
}
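
/*
 * Note: dwords 22 and 23 (config space offsets 0x58-0x5f) are
 * deliberately skipped during the restore; writing them back on this
 * device appears to be unsafe after a reset.  The precise reason is
 * hardware specific and not documented here.
 */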
/**
 * Init PCI configuration
 */
static int
mtnic_init_pci(struct pci_device *dev)
{
	int i;
	int err;

	/* save bars */
	DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		mtnic_pci_dev.dev.bar[i] =
			pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		DBG("bar[%d]= 0x%08lx\n", i, mtnic_pci_dev.dev.bar[i]);
	}

	/* save config space */
	for (i = 0; i < 64; ++i) {
		err = pci_read_config_dword(dev, i << 2,
					    &mtnic_pci_dev.dev.
					    dev_config_space[i]);
		if (err) {
			DBG("Cannot save configuration space\n");
			return err;
		}
	}

	mtnic_pci_dev.dev.dev = dev;

	return 0;
}
/**
 * Initialize hardware
 */
static inline
int mtnic_init_card(struct mtnic *mtnic)
{
	int err = 0;


	/* Alloc command interface */
	err = mtnic_alloc_cmdif ( mtnic );
	if (err) {
		DBG("Failed to init command interface, aborting\n");
		return -EADDRINUSE;
	}


	/*
	 * Bring up HW
	 */
	err = mtnic_QUERY_FW ( mtnic );
	if (err) {
		DBG("QUERY_FW command failed, aborting\n");
		goto cmd_error;
	}
	DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);

	/* Allocate memory for FW and start it */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
	if (err) {
		DBG("Error in MAP_FW\n");
		if (mtnic->fw.fw_pages.buf)
			ufree((intptr_t)mtnic->fw.fw_pages.buf);
		goto cmd_error;
	}

	/* Run firmware */
	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
	if (err) {
		DBG("Error in RUN FW\n");
		goto map_fw_error;
	}

	DBG("FW version:%d.%d.%d\n",
	    (u16) (mtnic->fw_ver >> 32),
	    (u16) ((mtnic->fw_ver >> 16) & 0xffff),
	    (u16) (mtnic->fw_ver & 0xffff));


	/* Query num ports */
	err = mtnic_query_num_ports(mtnic);
	if (err) {
		DBG("Insufficient resources, aborting\n");
		goto map_fw_error;
	}

	/* Open NIC */
	err = mtnic_OPEN_NIC(mtnic);
	if (err) {
		DBG("Failed opening NIC, aborting\n");
		goto map_fw_error;
	}

	/* Allocate and map pages workspace */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
	if (err) {
		DBG("Couldn't allocate %x FW extra pages, aborting\n",
		    mtnic->fw.extra_pages.num);
		if (mtnic->fw.extra_pages.buf)
			ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_fw_error;
	}


	/* Get device information */
	err = mtnic_query_mac(mtnic);
	if (err) {
		DBG("Insufficient resources in query mac, aborting\n");
		goto map_fw_error;
	}

	/* Get device offsets */
	err = mtnic_query_offsets(mtnic);
	if (err) {
		DBG("Failed retrieving resource offsets, aborting\n");
		ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_extra_error;
	}


	/* Alloc EQ */
	err = mtnic_alloc_eq(mtnic);
	if (err) {
		DBG("Failed init shared resources. error: %d\n", err);
		goto map_extra_error;
	}

	/* Configure HW */
	err = mtnic_CONFIG_EQ(mtnic);
	if (err) {
		DBG("Failed configuring EQ\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_RX(mtnic);
	if (err) {
		DBG("Failed Rx configuration\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_TX(mtnic);
	if (err) {
		DBG("Failed Tx configuration\n");
		goto eq_error;
	}


	return 0;


eq_error:
	iounmap(mtnic->eq_db);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
map_extra_error:
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
map_fw_error:
	ufree((intptr_t)mtnic->fw.fw_pages.buf);

cmd_error:
	iounmap(mtnic->hcr);
	free_memblock(mtnic->cmd.buf, PAGE_SIZE);

	return -EADDRINUSE;
}
/*******************************************************************
 *
 * Process functions
 *
 *	process completions of TX and RX
 *
 ********************************************************************/
void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
			 struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe = cq->buf;
	struct mtnic_ring *ring = &priv->tx_ring;
	u16 index;


	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Owner bit changes every round */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		netdev_tx_complete (dev, ring->iobuf[index]);
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;
}
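
/*
 * Worked example of the ownership test above: entries are reused every
 * cq->size completions, and the hardware inverts the meaning of
 * MTNIC_BIT_CQ_OWN on every lap.  Since cq->size is a power of two,
 * 'cq->last & cq->size' is 0 on even laps and non-zero on odd laps, so
 * XNOR(owner bit, lap bit) is true exactly for CQEs completed during
 * the current lap, and the loop stops at the first entry still owned
 * by hardware.
 */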
int mtnic_process_rx_cq(struct mtnic_port *priv,
			struct net_device *dev,
			struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe;
	struct mtnic_ring *ring = &priv->rx_ring;
	int index;
	int err;
	struct io_buffer *rx_iob;
	unsigned int length;


	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		/* Drop packet on bad receive or bad checksum */
		if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
			DBG("CQE completed with error - vendor\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}
		if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
			DBG("Dropping packet with bad FCS\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		rx_iob = ring->iobuf[index];
		iob_put(rx_iob, length);

		/* Add this packet to the receive queue. */
		netdev_rx(dev, rx_iob);
		ring->iobuf[index] = NULL;

next:
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}


	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;

	if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
		err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			return -EADDRINUSE;
		}
	}

	return 0;
}
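
/*
 * Refill policy: once the number of Rx buffers still posted
 * (ring->prod - ring->cons) drops below MAX_GAP_PROD_CONS, the ring is
 * topped back up by mtnic_alloc_iobuf(), which posts fresh io_buffers
 * up to UNITS_BUFFER_SIZE outstanding and rings the Rx doorbell.
 */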
/********************************************************************
 *
 * net_device functions
 *
 *	open, poll, close, probe, disable, irq
 *
 *********************************************************************/
static int
mtnic_open(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);

	int err = 0;
	struct mtnic_ring *ring;
	struct mtnic_cq *cq;
	int cq_ind = 0;
	u32 dev_link_state;
	int link_check;

	DBG("starting port:%d, MAC Address: 0x%12llx\n",
	    priv->port, priv->mtnic->fw.mac[priv->port]);

	/* Alloc and configure CQs, TX, RX */
	err = mtnic_alloc_resources ( dev );
	if (err) {
		DBG("Error allocating resources\n");
		return -EADDRINUSE;
	}

	/* Pass CQs configuration to HW */
	for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
		cq = &priv->cq[cq_ind];
		err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
		if (err) {
			DBG("Failed configuring CQ:%d error %d\n",
			    cq_ind, err);
			if (cq_ind)
				goto cq_error;
			else
				goto allocation_error;
		}
		/* Update consumer index */
		cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	}


	/* Pass Tx configuration to HW */
	ring = &priv->tx_ring;
	err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Tx ring:0\n");
		goto cq_error;
	}

	/* Pass RX configuration to HW */
	ring = &priv->rx_ring;
	err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Rx ring:0\n");
		goto tx_error;
	}

	/* Configure Rx steering */
	err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
	if (!err)
		err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
	if (err) {
		DBG("Failed configuring RSS steering\n");
		goto rx_error;
	}


	/* Set the port default ring to ring 0 */
	err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
	if (err) {
		DBG("Failed setting default ring\n");
		goto rx_error;
	}

	/* Set Mac address */
	err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]);
	if (err) {
		DBG("Failed setting default MAC address\n");
		goto rx_error;
	}

	/* Set MTU */
	err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
	if (err) {
		DBG("Failed setting MTU\n");
		goto rx_error;
	}

	/* Configure VLAN filter */
	/* Enabling this function makes the second port drop packets:
	err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
	if (err) {
		DBG("Failed configuring VLAN filter\n");
		goto rx_error;
	}
	*/


	/* Bring up physical link */
	err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
	if (err) {
		DBG("Failed bringing up port\n");
		goto rx_error;
	}

	/* PORT IS UP */
	priv->state = CARD_UP;


	/* Check that the link is up */
	DBG ( "Checking if link is up\n" );


	for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check ++ ) {
		/* Let link state stabilize if cable was connected */
		mdelay ( DELAY_LINK_CHECK );

		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Failed getting device link state\n");
			return -ENETDOWN;
		}

		if ( dev_link_state & priv->port ) {
			/* Link is up */
			break;
		}
	}


	if ( ! ( dev_link_state & 0x3 ) ) {
		DBG("Link down, check cables and restart\n");
		netdev_link_down ( dev );
		return -ENETDOWN;
	}

	DBG ( "Link is up!\n" );

	/* Mark as link up */
	netdev_link_up ( dev );

	return 0;

rx_error:
	err = mtnic_RELEASE_RESOURCE(priv, priv->port,
				     MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
	err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
				      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

cq_error:
	while (cq_ind) {
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
	}
	if (err)
		DBG("Error releasing resources\n");

allocation_error:

	free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
	iounmap(priv->tx_ring.txcq_db);
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	mtnic_free_io_buffers(&priv->rx_ring);

	return -ENETDOWN;
}
/** Check whether we got completions for receive and transmit, and
 * check the link state with the heart beat command */
static void
mtnic_poll ( struct net_device *dev )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_cq *cq;
	u32 dev_link_state;
	int err;
	unsigned int i;

	/* If an error occurred earlier, do nothing */
	if (priv->state != CARD_UP)
		return;

	/* We do not check the device on every _poll call,
	   since that would slow us down */
	if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
		/* Check device */
		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Device has internal error\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
		if (!(dev_link_state & 0x3)) {
			DBG("Link down, check cables and restart\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
	}

	/* Poll the CQs */
	for (i = 0; i < NUM_CQS; i++) {
		cq = &priv->cq[i]; /* Pass over both CQs */

		if (cq->is_rx) {
			err = mtnic_process_rx_cq(priv, cq->dev, cq);
			if (err) {
				priv->state = CARD_LINK_DOWN;
				DBG(" Error allocating RX buffers\n");
				return;
			}
		} else {
			mtnic_process_tx_cq(priv, cq->dev, cq);
		}
	}
	++ priv->poll_counter;
}
static int
mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_ring *ring;
	struct mtnic_tx_desc *tx_desc;
	struct mtnic_data_seg *data;
	u32 index;

	/* If a previous error has occurred, return */
	if (priv->state != CARD_UP)
		return -ENETDOWN;

	ring = &priv->tx_ring;

	index = ring->prod & ring->size_mask;
	if ((ring->prod - ring->cons) >= ring->size) {
		DBG("No space left for descriptors!!! cons: %x prod: %x\n",
		    ring->cons, ring->prod);
		mdelay(5);
		return -EAGAIN; /* no space left */
	}

	/* get current descriptor */
	tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));

	/* Prepare Data Seg */
	data = &tx_desc->data;
	data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
	data->count = cpu_to_be32(iob_len(iobuf));
	data->mem_type = priv->mtnic->fw.mem_type_snoop_be;

	/* Prepare ctrl segment */
	tx_desc->ctrl.size_vlan = cpu_to_be32(2);
	tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
					  MTNIC_BIT_NO_ICRC);
	tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
			       ((ring->prod & ring->size) ?
				cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);

	/* Attach io_buffer */
	ring->iobuf[index] = iobuf;

	/* Update producer index */
	++ring->prod;

	/* Ring doorbell! */
	wmb();
	writel((u32) ring->db_offset, &ring->txcq_db->send_db);

	return 0;
}
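
/*
 * Tx ownership works like the CQ ownership above, but on descriptors:
 * the MTNIC_BIT_DESC_OWN value written into op_own alternates meaning
 * on every lap of the ring ('ring->prod & ring->size'), so descriptors
 * left over from the previous lap are never mistaken for new work.  The
 * wmb() guarantees the descriptor is visible in memory before the
 * doorbell write triggers the NIC to fetch it.
 */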
static void
mtnic_close(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err = 0;
	DBG("Close called for port:%d\n", priv->port);

	if ( ( priv->state == CARD_UP ) ||
	     ( priv->state == CARD_LINK_DOWN ) ) {

		/* Disable port */
		err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
		/*
		 * Stop HW associated with this port
		 */
		mdelay(5);

		/* Stop RX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);

		/* Stop TX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

		/* Stop CQs */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 0);
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 1);
		if (err) {
			DBG("Close reported error %d\n", err);
		}

		mdelay ( 10 );

		/* free memory */
		free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
		iounmap(priv->tx_ring.txcq_db);
		free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
		free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
		free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
		free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

		/* Free RX buffers */
		mtnic_free_io_buffers(&priv->rx_ring);
	}

	priv->state = CARD_INITIALIZED;
}
static void
mtnic_disable(struct pci_device *pci)
{
	int err;
	int i;
	struct mtnic *mtnic = pci_get_drvdata(pci);

	struct net_device *dev;
	struct mtnic_port *priv;

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {

		dev = mtnic->netdev[i];

		priv = netdev_priv(dev);

		/* Just in case */
		if ( ( priv->state == CARD_UP ) ||
		     ( priv->state == CARD_LINK_DOWN ) )
			mtnic_close ( dev );
	}

	/* Releasing EQ */
	priv = netdev_priv ( mtnic->netdev[0] );
	err = mtnic_RELEASE_RESOURCE(priv, 1,
				     MTNIC_IF_RESOURCE_TYPE_EQ, 0);

	DBG("Calling MTNIC_CLOSE command\n");
	err |= mtnic_cmd(mtnic, NULL, NULL, 0,
			 MTNIC_IF_CMD_CLOSE_NIC);
	if (err) {
		DBG("Error releasing resources %d\n", err);
	}

	free_memblock(mtnic->cmd.buf, PAGE_SIZE);
	iounmap(mtnic->hcr);
	ufree((intptr_t)mtnic->fw.fw_pages.buf);
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
	iounmap(mtnic->eq_db);

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
		dev = mtnic->netdev[i];
		unregister_netdev ( dev );
		netdev_nullify ( dev );
		netdev_put ( dev );
	}

	free ( mtnic );

	mtnic_reset ();
	mdelay ( 1000 );
	/* Restore config, in case we would like to retry booting */
	restore_config ();
}
static void
mtnic_irq(struct net_device *netdev __unused, int enable __unused)
{
	/* Not implemented */
}
/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
	.open		= mtnic_open,
	.close		= mtnic_close,
	.transmit	= mtnic_transmit,
	.poll		= mtnic_poll,
	.irq		= mtnic_irq,
};
static int
mtnic_probe(struct pci_device *pci,
	    const struct pci_device_id *id __unused)
{
	struct mtnic_port *priv;
	struct mtnic *mtnic;
	int err;
	u64 mac;
	int port_index;


	adjust_pci_device(pci);

	err = mtnic_init_pci(pci);
	if (err) {
		DBG("Error in pci_init\n");
		return -EIO;
	}

	mtnic_reset();
	mdelay(1000);

	err = restore_config();
	if (err) {
		DBG("Error in restoring config\n");
		return err;
	}

	mtnic = zalloc ( sizeof ( *mtnic ) );
	if ( ! mtnic ) {
		DBG ( "Error Allocating mtnic buffer\n" );
		return -EADDRINUSE;
	}

	pci_set_drvdata(pci, mtnic);

	mtnic->pdev = pci;


	/* Initialize hardware */
	err = mtnic_init_card ( mtnic );
	if (err) {
		DBG("Error in init_card\n");
		goto err_init_card;
	}

	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
		/* Initializing net device */
		mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
		if ( mtnic->netdev[port_index] == NULL ) {
			DBG("Net device allocation failed\n");
			goto err_alloc_mtnic;
		}

		/*
		 * Initialize driver private data
		 */

		mtnic->netdev[port_index]->dev = &pci->dev;
		priv = netdev_priv ( mtnic->netdev[port_index] );
		memset ( priv, 0, sizeof ( struct mtnic_port ) );
		priv->mtnic = mtnic;
		priv->netdev = mtnic->netdev[port_index];

		/* Attach pci device */
		netdev_init(mtnic->netdev[port_index], &mtnic_operations);

		/* Set port number */
		priv->port = port_index;

		/* Set state */
		priv->state = CARD_DOWN;
	}


	int mac_idx;
	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
		priv = netdev_priv ( mtnic->netdev[port_index] );
		/* Program the MAC address */
		mac = priv->mtnic->fw.mac[port_index];
		for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
			mtnic->netdev[port_index]->hw_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
			mac = mac >> 8;
		}

		if ( register_netdev ( mtnic->netdev[port_index] ) ) {
			DBG("Netdev registration failed\n");
			priv->state = CARD_INITIALIZED;
			goto err_alloc_mtnic;
		}
	}


	return 0;

err_alloc_mtnic:
	free ( mtnic );
err_init_card:
	return -EIO;
}
static struct pci_device_id mtnic_nics[] = {
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver", 0 ),
};
struct pci_driver mtnic_driver __pci_driver = {
	.ids = mtnic_nics,
	.id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
	.probe = mtnic_probe,
	.remove = mtnic_disable,
};