2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <gpxe/malloc.h>
37 #include <gpxe/umalloc.h>
38 #include <bits/byteswap.h>
39 #include <little_bswap.h>
42 #include <gpxe/ethernet.h>
43 #include <gpxe/netdevice.h>
44 #include <gpxe/iobuf.h>
51 mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
59 /** Set port number to use
64 #define MTNIC_PORT_NUM 0
65 /* Note: for verbose printing do Make ... DEBUG=mtnic */
70 /********************************************************************
72 * MTNIC allocation functions
74 *********************************************************************/
78 * @v unsigned int size size
79 * @v void **va virtual address
80 * @v u32 *pa physical address
81 * @v u32 aligment aligment
83 * Function allocate aligned buffer and put it's virtual address in 'va'
84 * and it's physical aligned address in 'pa'
/* Allocate an 'alignment'-aligned buffer of 'size' bytes via
 * alloc_memblock(); the virtual address is returned through *va and the
 * 32-bit bus (DMA) address through *pa.
 * NOTE(review): this extract is missing lines (original numbering jumps
 * 87 -> 89 -> 93); the alloc_memblock() failure check presumably lives in
 * the gap -- confirm against the full source. */
87 mtnic_alloc_aligned(unsigned int size
, void **va
, u32
*pa
, unsigned int alignment
)
/* Allocate the aligned block; virtual address stored in *va. */
89 *va
= alloc_memblock(size
, alignment
);
/* Translate to a bus address (truncated to u32) for the NIC's DMA engine. */
93 *pa
= (u32
)virt_to_bus(*va
);
101 * mtnic alloc command interface
105 mtnic_alloc_cmdif(struct mtnic_priv
*priv
)
107 u32 bar
= mtnic_pci_dev
.dev
.bar
[0];
109 priv
->hcr
= ioremap(bar
+ MTNIC_HCR_BASE
, MTNIC_HCR_SIZE
);
111 eprintf("Couldn't map command register.");
114 mtnic_alloc_aligned(PAGE_SIZE
, (void *)&priv
->cmd
.buf
, &priv
->cmd
.mapping
, PAGE_SIZE
);
115 if (!priv
->cmd
.buf
) {
116 eprintf("Error in allocating buffer for command interface\n");
/* Free every io_buffer still attached to 'ring', walking the consumer
 * index up to (and including) the producer index.  Used both on the Rx
 * teardown path and when partial allocation fails.
 * NOTE(review): the closing brace(s) and any trailing statements of this
 * function were dropped by the extraction -- structure is incomplete here. */
126 mtnic_free_io_buffers(struct mtnic_ring
*ring
)
/* Walk cons..prod; cons is advanced in the loop header itself. */
130 for (; ring
->cons
<= ring
->prod
; ++ring
->cons
) {
/* Mask the ring index into the descriptor table. */
131 index
= ring
->cons
& ring
->size_mask
;
/* Only free slots that actually hold a buffer. */
132 if (ring
->iobuf
[index
])
133 free_iob(ring
->iobuf
[index
]);
141 * mtnic alloc and attach io buffers
/* Allocate io_buffers of 'size' bytes for the Rx ring and attach each one
 * to its Rx descriptor (count, memory key, low DMA address), then publish
 * the new producer index to the HW doorbell record.
 *
 * FIX(review): the allocation-failure test used to read
 * 'if (!&ring->iobuf[index])'.  The address of an array element is never
 * NULL, so the check could never fire and a failed alloc_iob() went
 * undetected.  It now tests the pointer value just stored by alloc_iob().
 *
 * NOTE(review): this extract is missing interior lines (original numbering
 * jumps, e.g. 159 -> 166), so error-path returns/braces are not visible
 * here -- confirm against the full source. */
145 mtnic_alloc_iobuf(struct mtnic_priv
*priv
, struct mtnic_ring
*ring
,
148 struct mtnic_rx_desc
*rx_desc_ptr
= ring
->buf
;
/* Refill until the ring holds UNITS_BUFFER_SIZE outstanding buffers. */
151 while ((u32
)(ring
->prod
- ring
->cons
) < UNITS_BUFFER_SIZE
) {
152 index
= ring
->prod
& ring
->size_mask
;
153 ring
->iobuf
[index
] = alloc_iob(size
);
/* Check the allocated pointer itself, not its (never-NULL) address. */
154 if (!ring
->iobuf
[index
]) {
155 if (ring
->prod
<= (ring
->cons
+ 1)) {
156 eprintf("Error allocating Rx io "
157 "buffer number %lx", index
);
158 /* In case of error freeing io buffer */
159 mtnic_free_io_buffers(ring
);
166 /* Attach io_buffer to descriptor */
167 rx_desc_ptr
= ring
->buf
+
168 (sizeof(struct mtnic_rx_desc
) * index
);
/* Data segment: byte count, snooping memory key, low 32 DMA bits. */
169 rx_desc_ptr
->data
.count
= cpu_to_be32(size
);
170 rx_desc_ptr
->data
.mem_type
= priv
->fw
.mem_type_snoop_be
;
171 rx_desc_ptr
->data
.addr_l
= cpu_to_be32(
172 virt_to_bus(ring
->iobuf
[index
]->data
));
177 /* Update RX producer index (PI) */
178 ring
->db
->count
= cpu_to_be32(ring
->prod
& 0xffff);
186 * Alloc and configure TX or RX ring
190 mtnic_alloc_ring(struct mtnic_priv
*priv
, struct mtnic_ring
*ring
,
191 u32 size
, u16 stride
, u16 cq
, u8 is_rx
)
195 struct mtnic_rx_desc
*rx_desc
;
196 struct mtnic_tx_desc
*tx_desc
;
198 ring
->size
= size
; /* Number of descriptors */
199 ring
->size_mask
= size
- 1;
200 ring
->stride
= stride
; /* Size of each entry */
201 ring
->cq
= cq
; /* CQ number associated with this ring */
205 /* Alloc descriptors buffer */
206 ring
->buf_size
= ring
->size
* ((is_rx
) ? sizeof(struct mtnic_rx_desc
) :
207 sizeof(struct mtnic_tx_desc
));
208 err
= mtnic_alloc_aligned(ring
->buf_size
, (void *)&ring
->buf
,
209 &ring
->dma
, PAGE_SIZE
);
211 eprintf("Failed allocating descriptor ring sizeof %lx\n",
215 memset(ring
->buf
, 0, ring
->buf_size
);
217 DBG("Allocated %s ring (addr:%p) - buf:%p size:%lx"
218 "buf_size:%lx dma:%lx\n",
219 is_rx
? "Rx" : "Tx", ring
, ring
->buf
, ring
->size
,
220 ring
->buf_size
, ring
->dma
);
223 if (is_rx
) { /* RX ring */
225 err
= mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record
),
226 (void *)&ring
->db
, &ring
->db_dma
, 32);
228 eprintf("Failed allocating Rx ring doorbell record\n");
233 /* ==- Configure Descriptor -== */
234 /* Init ctrl seg of rx desc */
235 for (i
= 0; i
< UNITS_BUFFER_SIZE
; ++i
) {
236 rx_desc
= ring
->buf
+
237 (sizeof(struct mtnic_rx_desc
) * i
);
238 /* Pre-link descriptor */
239 rx_desc
->next
= cpu_to_be16(i
+ 1);
241 /*The last ctrl descriptor is '0' and points to the first one*/
243 /* Alloc IO_BUFFERS */
244 err
= mtnic_alloc_iobuf(priv
, ring
, DEF_IOBUF_SIZE
);
246 eprintf("ERROR Allocating io buffer");
251 } else { /* TX ring */
252 /* Set initial ownership of all Tx Desc' to SW (1) */
253 for (i
= 0; i
< ring
->size
; i
++) {
254 tx_desc
= ring
->buf
+ ring
->stride
* i
;
255 tx_desc
->ctrl
.op_own
= cpu_to_be32(MTNIC_BIT_DESC_OWN
);
258 ring
->db_offset
= cpu_to_be32(
259 ((u32
) priv
->fw
.tx_offset
[priv
->port
]) << 8);
261 /* Map Tx+CQ doorbells */
262 DBG("Mapping TxCQ doorbell at offset:0x%lx\n",
263 priv
->fw
.txcq_db_offset
);
264 ring
->txcq_db
= ioremap(mtnic_pci_dev
.dev
.bar
[2] +
265 priv
->fw
.txcq_db_offset
, PAGE_SIZE
);
266 if (!ring
->txcq_db
) {
267 eprintf("Couldn't map txcq doorbell, aborting...\n");
281 * Alloc and configure CQ.
285 mtnic_alloc_cq(struct net_device
*dev
, int num
, struct mtnic_cq
*cq
,
286 u8 is_rx
, u32 size
, u32 offset_ind
)
296 cq
->offset_ind
= offset_ind
;
299 err
= mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record
),
300 (void *)&cq
->db
, &cq
->db_dma
, 32);
302 eprintf("Failed allocating CQ doorbell record\n");
305 memset(cq
->db
, 0, sizeof(struct mtnic_cq_db_record
));
307 /* Alloc CQEs buffer */
308 cq
->buf_size
= size
* sizeof(struct mtnic_cqe
);
309 err
= mtnic_alloc_aligned(cq
->buf_size
,
310 (void *)&cq
->buf
, &cq
->dma
, PAGE_SIZE
);
312 eprintf("Failed allocating CQ buffer\n");
316 memset(cq
->buf
, 0, cq
->buf_size
);
317 DBG("Allocated CQ (addr:%p) - size:%lx buf:%p buf_size:%lx "
318 "dma:%lx db:%p db_dma:%lx\n"
319 "cqn offset:%lx \n", cq
, cq
->size
, cq
->buf
,
320 cq
->buf_size
, cq
->dma
, cq
->db
,
321 cq
->db_dma
, offset_ind
);
324 /* Set ownership of all CQEs to HW */
325 DBG("Setting HW ownership for CQ:%d\n", num
);
326 for (i
= 0; i
< cq
->size
; i
++) {
327 /* Initial HW ownership is 1 */
328 cq
->buf
[i
].op_tr_own
= MTNIC_BIT_CQ_OWN
;
336 * mtnic_alloc_resources
338 * Alloc and configure CQs, Tx, Rx
341 mtnic_alloc_resources(struct net_device
*dev
)
343 struct mtnic_priv
*priv
= netdev_priv(dev
);
346 int cq_offset
= priv
->fw
.cq_offset
;
349 err
= mtnic_alloc_cq(dev
, cq_ind
, &priv
->cq
[cq_ind
], 1 /* RX */,
350 UNITS_BUFFER_SIZE
, cq_offset
+ cq_ind
);
352 eprintf("Failed allocating Rx CQ\n");
357 err
= mtnic_alloc_ring(priv
, &priv
->rx_ring
, UNITS_BUFFER_SIZE
,
358 sizeof(struct mtnic_rx_desc
), cq_ind
, /* RX */1);
360 eprintf("Failed allocating Rx Ring\n");
367 err
= mtnic_alloc_cq(dev
, cq_ind
, &priv
->cq
[cq_ind
], 0 /* TX */,
368 UNITS_BUFFER_SIZE
, cq_offset
+ cq_ind
);
370 eprintf("Failed allocating Tx CQ\n");
375 err
= mtnic_alloc_ring(priv
, &priv
->tx_ring
, UNITS_BUFFER_SIZE
,
376 sizeof(struct mtnic_tx_desc
), cq_ind
, /* TX */ 0);
378 eprintf("Failed allocating Tx ring\n");
385 free(priv
->cq
[1].buf
);
386 free(priv
->cq
[1].db
);
388 free(priv
->rx_ring
.buf
);
389 free(priv
->rx_ring
.db
);
390 mtnic_free_io_buffers(&priv
->rx_ring
);
392 free(priv
->cq
[0].buf
);
393 free(priv
->cq
[0].db
);
402 * Note: EQ is not used by the driver but must be allocated
405 mtnic_alloc_eq(struct mtnic_priv
*priv
)
409 struct mtnic_eqe
*eqe_desc
= NULL
;
411 /* Allocating doorbell */
412 priv
->eq_db
= ioremap(mtnic_pci_dev
.dev
.bar
[2] +
413 priv
->fw
.eq_db_offset
, sizeof(u32
));
415 eprintf("Couldn't map EQ doorbell, aborting...\n");
419 /* Allocating buffer */
420 priv
->eq
.size
= NUM_EQES
;
421 priv
->eq
.buf_size
= priv
->eq
.size
* sizeof(struct mtnic_eqe
);
422 err
= mtnic_alloc_aligned(priv
->eq
.buf_size
, (void *)&priv
->eq
.buf
,
423 &priv
->eq
.dma
, PAGE_SIZE
);
425 eprintf("Failed allocating EQ buffer\n");
426 iounmap(priv
->eq_db
);
429 memset(priv
->eq
.buf
, 0, priv
->eq
.buf_size
);
431 for (i
= 0; i
< priv
->eq
.size
; i
++)
432 eqe_desc
= priv
->eq
.buf
+ (sizeof(struct mtnic_eqe
) * i
);
433 eqe_desc
->own
|= MTNIC_BIT_EQE_OWN
;
449 /********************************************************************
451 * Mtnic commands functions
452 * -=-=-=-=-=-=-=-=-=-=-=-=
456 *********************************************************************/
/* Poll the HCR status register until the toggle (t) bit matches the value
 * we expect for the current command round, then report the go-bit: non-zero
 * means the HCR is still busy.  After TBIT_RETRIES mismatches, give up and
 * report busy. */
458 cmdif_go_bit(struct mtnic_priv
*priv
)
460 struct mtnic_if_cmd_reg
*hcr
= priv
->hcr
;
/* Re-read the register until the t-bit is the one we expect. */
464 for (i
= 0; i
< TBIT_RETRIES
; i
++) {
465 status
= be32_to_cpu(readl(&hcr
->status_go_opcode
));
466 if ((status
& MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT
)) ==
467 (priv
->cmd
.tbit
<< MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT
))) {
468 /* Read expected t-bit - now return go-bit value */
469 return status
& MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT
);
/* Never saw the expected t-bit: treat the interface as busy. */
473 eprintf("Invalid tbit after %d retries!\n", TBIT_RETRIES
);
474 return 1; /* Return busy... */
477 /* Base Command interface */
479 mtnic_cmd(struct mtnic_priv
*priv
, void *in_imm
,
480 void *out_imm
, u32 in_modifier
, u16 op
)
483 struct mtnic_if_cmd_reg
*hcr
= priv
->hcr
;
491 static u16 token
= 0x8000;
493 unsigned int timeout
= 0;
497 if (cmdif_go_bit(priv
)) {
498 eprintf("GO BIT BUSY:%p.\n", hcr
+ 6);
503 in_param_h
= *((u32
*)in_imm
);
504 in_param_l
= *((u32
*)in_imm
+ 1);
506 in_param_l
= cpu_to_be32(priv
->cmd
.mapping
);
508 out_param_l
= cpu_to_be32(priv
->cmd
.mapping
);
511 writel(in_param_h
, &hcr
->in_param_h
);
512 writel(in_param_l
, &hcr
->in_param_l
);
513 writel((u32
) cpu_to_be32(in_modifier
), &hcr
->input_modifier
);
514 writel(out_param_h
, &hcr
->out_param_h
);
515 writel(out_param_l
, &hcr
->out_param_l
);
516 writel((u32
)cpu_to_be32(token
<< 16), &hcr
->token
);
519 /* flip toggle bit before each write to the HCR */
520 priv
->cmd
.tbit
= !priv
->cmd
.tbit
;
522 cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT
) |
523 (priv
->cmd
.tbit
<< MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT
)) | op
),
524 &hcr
->status_go_opcode
);
526 while (cmdif_go_bit(priv
) && (timeout
<= GO_BIT_TIMEOUT
)) {
531 if (cmdif_go_bit(priv
)) {
532 eprintf("Command opcode:0x%x token:0x%x TIMEOUT.\n", op
, token
);
538 *((u32
*)out_imm
) = readl(&hcr
->out_param_h
);
539 *((u32
*)out_imm
+ 1) = readl(&hcr
->out_param_l
);
542 status
= be32_to_cpu((u32
)readl(&hcr
->status_go_opcode
)) >> 24;
543 /*DBG("Command opcode:0x%x token:0x%x returned:0x%lx\n",
544 op, token, status);*/
554 /* MAP PAGES wrapper */
556 mtnic_map_cmd(struct mtnic_priv
*priv
, u16 op
, struct mtnic_pages pages
)
561 u32
*page_arr
= priv
->cmd
.buf
;
565 memset(page_arr
, 0, PAGE_SIZE
);
567 len
= PAGE_SIZE
* pages
.num
;
568 pages
.buf
= (u32
*)umalloc(PAGE_SIZE
* (pages
.num
+ 1));
569 addr
= PAGE_SIZE
+ ((virt_to_bus(pages
.buf
) & 0xfffff000) + PAGE_SIZE
);
570 DBG("Mapping pages: size: %lx address: %p\n", pages
.num
, pages
.buf
);
572 if (addr
& (PAGE_MASK
)) {
573 eprintf("Got FW area not aligned to %d (%llx/%x)\n",
574 PAGE_SIZE
, (u64
) addr
, len
);
578 /* Function maps each PAGE seperately */
579 for (j
= 0; j
< len
; j
+= PAGE_SIZE
) {
580 page_arr
[nent
* 4 + 3] = cpu_to_be32(addr
+ j
);
581 if (++nent
== MTNIC_MAILBOX_SIZE
/ 16) {
582 err
= mtnic_cmd(priv
, NULL
, NULL
, nent
, op
);
590 err
= mtnic_cmd(priv
, NULL
, NULL
, nent
, op
);
601 mtnic_QUERY_FW(struct mtnic_priv
*priv
)
604 struct mtnic_if_query_fw_out_mbox
*cmd
= priv
->cmd
.buf
;
606 err
= mtnic_cmd(priv
, NULL
, NULL
, 0, MTNIC_IF_CMD_QUERY_FW
);
610 /* Get FW and interface versions */
611 priv
->fw_ver
= ((u64
) be16_to_cpu(cmd
->rev_maj
) << 32) |
612 ((u64
) be16_to_cpu(cmd
->rev_min
) << 16) |
613 (u64
) be16_to_cpu(cmd
->rev_smin
);
614 priv
->fw
.ifc_rev
= be16_to_cpu(cmd
->ifc_rev
);
616 /* Get offset for internal error reports (debug) */
617 priv
->fw
.err_buf
.offset
= be64_to_cpu(cmd
->err_buf_start
);
618 priv
->fw
.err_buf
.size
= be32_to_cpu(cmd
->err_buf_size
);
620 DBG("Error buf offset is %llx\n", priv
->fw
.err_buf
.offset
);
622 /* Get number of required FW (4k) pages */
623 priv
->fw
.fw_pages
.num
= be16_to_cpu(cmd
->fw_pages
);
630 mtnic_OPEN_NIC(struct mtnic_priv
*priv
)
633 struct mtnic_if_open_nic_in_mbox
*open_nic
= priv
->cmd
.buf
;
634 u32 extra_pages
[2] = {0};
637 memset(open_nic
, 0, sizeof *open_nic
);
640 open_nic
->log_rx_p1
= 0;
641 open_nic
->log_cq_p1
= 1;
643 open_nic
->log_tx_p1
= 0;
644 open_nic
->steer_p1
= MTNIC_IF_STEER_RSS
;
645 /* MAC + VLAN - leave reserved */
648 open_nic
->log_rx_p2
= 0;
649 open_nic
->log_cq_p2
= 1;
651 open_nic
->log_tx_p2
= 0;
652 open_nic
->steer_p2
= MTNIC_IF_STEER_RSS
;
653 /* MAC + VLAN - leave reserved */
655 err
= mtnic_cmd(priv
, NULL
, extra_pages
, 0, MTNIC_IF_CMD_OPEN_NIC
);
656 priv
->fw
.extra_pages
.num
= be32_to_cpu(*(extra_pages
+1));
657 DBG("Extra pages num is %lx\n", priv
->fw
.extra_pages
.num
);
/* CONFIG_RX command wrapper: send an all-zero immediate Rx-configuration
 * structure to the firmware.  Returns mtnic_cmd()'s status. */
662 mtnic_CONFIG_RX(struct mtnic_priv
*priv
)
664 struct mtnic_if_config_rx_in_imm config_rx
;
666 memset(&config_rx
, 0, sizeof config_rx
);
667 return mtnic_cmd(priv
, &config_rx
, NULL
, 0, MTNIC_IF_CMD_CONFIG_RX
);
/* CONFIG_TX command wrapper.
 * NOTE(review): only 'enph_gpf' is visibly initialized here (no memset);
 * other fields may be set in lines dropped from this extract, or left
 * uninitialized -- confirm against the full source. */
671 mtnic_CONFIG_TX(struct mtnic_priv
*priv
)
673 struct mtnic_if_config_send_in_imm config_tx
;
675 config_tx
.enph_gpf
= 0;
676 return mtnic_cmd(priv
, &config_tx
, NULL
, 0, MTNIC_IF_CMD_CONFIG_TX
);
/* HEART_BEAT command wrapper: query device liveness/link flags.  On
 * success, *link_state receives the flags with the internal-error bit
 * masked off.  An internal-error flag is reported via eprintf (the error
 * return path lies in lines missing from this extract). */
680 mtnic_HEART_BEAT(struct mtnic_priv
*priv
, u32
*link_state
)
682 struct mtnic_if_heart_beat_out_imm heart_beat
;
686 err
= mtnic_cmd(priv
, NULL
, &heart_beat
, 0, MTNIC_IF_CMD_HEART_BEAT
);
/* Flags come back big-endian from the device. */
688 flags
= be32_to_cpu(heart_beat
.flags
);
689 if (flags
& MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR
)) {
690 eprintf("Internal error detected\n");
/* Strip the internal-error bit; remaining bits describe link state. */
693 *link_state
= flags
&
694 ~((u32
) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR
));
/* SET_PORT_DEFAULT_RING wrapper: make 'ring' the default Rx ring for
 * 'port'.  Ports are 1-based on the wire, hence 'port + 1' as the
 * command's input modifier (same convention in the wrappers below). */
705 mtnic_SET_PORT_DEFAULT_RING(struct mtnic_priv
*priv
, u8 port
, u16 ring
)
707 struct mtnic_if_set_port_default_ring_in_imm def_ring
;
709 memset(&def_ring
, 0, sizeof(def_ring
));
710 def_ring
.ring
= ring
;
711 return mtnic_cmd(priv
, &def_ring
, NULL
, port
+ 1,
712 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING
);
/* CONFIG_PORT_RSS_STEER wrapper: send a zeroed mailbox (priv->cmd.buf is
 * the shared command mailbox page) to configure RSS steering for 'port'. */
716 mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_priv
*priv
, int port
)
718 memset(priv
->cmd
.buf
, 0, PAGE_SIZE
);
719 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
720 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER
);
/* SET_PORT_RSS_INDIRECTION wrapper: zeroed indirection table -> all
 * traffic lands on ring 0. */
724 mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_priv
*priv
, int port
)
727 memset(priv
->cmd
.buf
, 0, PAGE_SIZE
);
728 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
729 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION
);
737 mtnic_CONFIG_CQ(struct mtnic_priv
*priv
, int port
,
738 u16 cq_ind
, struct mtnic_cq
*cq
)
740 struct mtnic_if_config_cq_in_mbox
*config_cq
= priv
->cmd
.buf
;
742 memset(config_cq
, 0, sizeof *config_cq
);
743 config_cq
->cq
= cq_ind
;
744 config_cq
->size
= fls(UNITS_BUFFER_SIZE
- 1);
745 config_cq
->offset
= ((cq
->dma
) & (PAGE_MASK
)) >> 6;
746 config_cq
->db_record_addr_l
= cpu_to_be32(cq
->db_dma
);
747 config_cq
->page_address
[1] = cpu_to_be32(cq
->dma
);
748 DBG("config cq address: %lx dma_address: %lx"
749 "offset: %d size %d index: %d "
750 , config_cq
->page_address
[1],cq
->dma
,
751 config_cq
->offset
, config_cq
->size
, config_cq
->cq
);
753 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
754 MTNIC_IF_CMD_CONFIG_CQ
);
759 mtnic_CONFIG_TX_RING(struct mtnic_priv
*priv
, u8 port
,
760 u16 ring_ind
, struct mtnic_ring
*ring
)
762 struct mtnic_if_config_send_ring_in_mbox
*config_tx_ring
= priv
->cmd
.buf
;
763 memset(config_tx_ring
, 0, sizeof *config_tx_ring
);
764 config_tx_ring
->ring
= cpu_to_be16(ring_ind
);
765 config_tx_ring
->size
= fls(UNITS_BUFFER_SIZE
- 1);
766 config_tx_ring
->cq
= cpu_to_be16(ring
->cq
);
767 config_tx_ring
->page_address
[1] = cpu_to_be32(ring
->dma
);
769 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
770 MTNIC_IF_CMD_CONFIG_TX_RING
);
774 mtnic_CONFIG_RX_RING(struct mtnic_priv
*priv
, u8 port
,
775 u16 ring_ind
, struct mtnic_ring
*ring
)
777 struct mtnic_if_config_rx_ring_in_mbox
*config_rx_ring
= priv
->cmd
.buf
;
778 memset(config_rx_ring
, 0, sizeof *config_rx_ring
);
779 config_rx_ring
->ring
= ring_ind
;
780 MTNIC_BC_PUT(config_rx_ring
->stride_size
, fls(UNITS_BUFFER_SIZE
- 1),
781 MTNIC_MASK_CONFIG_RX_RING_SIZE
);
782 MTNIC_BC_PUT(config_rx_ring
->stride_size
, 1,
783 MTNIC_MASK_CONFIG_RX_RING_STRIDE
);
784 config_rx_ring
->cq
= cpu_to_be16(ring
->cq
);
785 config_rx_ring
->db_record_addr_l
= cpu_to_be32(ring
->db_dma
);
787 DBG("Config RX ring starting at address:%lx\n", ring
->dma
);
789 config_rx_ring
->page_address
[1] = cpu_to_be32(ring
->dma
);
791 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
792 MTNIC_IF_CMD_CONFIG_RX_RING
);
796 mtnic_CONFIG_EQ(struct mtnic_priv
*priv
)
798 struct mtnic_if_config_eq_in_mbox
*eq
= priv
->cmd
.buf
;
800 if (priv
->eq
.dma
& (PAGE_MASK
)) {
801 eprintf("misalligned eq buffer:%lx\n",
806 memset(eq
, 0, sizeof *eq
);
807 MTNIC_BC_PUT(eq
->offset
, priv
->eq
.dma
>> 6, MTNIC_MASK_CONFIG_EQ_OFFSET
);
808 MTNIC_BC_PUT(eq
->size
, fls(priv
->eq
.size
- 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE
);
809 MTNIC_BC_PUT(eq
->int_vector
, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC
);
810 eq
->page_address
[1] = cpu_to_be32(priv
->eq
.dma
);
812 return mtnic_cmd(priv
, NULL
, NULL
, 0, MTNIC_IF_CMD_CONFIG_EQ
);
819 mtnic_SET_RX_RING_ADDR(struct mtnic_priv
*priv
, u8 port
, u64
* mac
)
821 struct mtnic_if_set_rx_ring_addr_in_imm ring_addr
;
822 u32 modifier
= ((u32
) port
+ 1) << 16;
824 memset(&ring_addr
, 0, sizeof(ring_addr
));
826 ring_addr
.mac_31_0
= cpu_to_be32(*mac
& 0xffffffff);
827 ring_addr
.mac_47_32
= cpu_to_be16((*mac
>> 32) & 0xffff);
828 ring_addr
.flags_vlan_id
|= cpu_to_be16(
829 MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC
));
831 return mtnic_cmd(priv
, &ring_addr
, NULL
, modifier
, MTNIC_IF_CMD_SET_RX_RING_ADDR
);
/* SET_PORT_STATE wrapper: bring the physical port up (state != 0) or down
 * (state == 0) by setting/clearing the big-endian port-state mask bit. */
835 mtnic_SET_PORT_STATE(struct mtnic_priv
*priv
, u8 port
, u8 state
)
837 struct mtnic_if_set_port_state_in_imm port_state
;
839 port_state
.state
= state
? cpu_to_be32(
840 MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE
)) : 0;
841 port_state
.reserved
= 0;
842 return mtnic_cmd(priv
, &port_state
, NULL
, port
+ 1,
843 MTNIC_IF_CMD_SET_PORT_STATE
);
/* SET_PORT_MTU wrapper: program the port MTU (big-endian 16-bit). */
847 mtnic_SET_PORT_MTU(struct mtnic_priv
*priv
, u8 port
, u16 mtu
)
849 struct mtnic_if_set_port_mtu_in_imm set_mtu
;
851 memset(&set_mtu
, 0, sizeof(set_mtu
));
852 set_mtu
.mtu
= cpu_to_be16(mtu
);
853 return mtnic_cmd(priv
, &set_mtu
, NULL
, port
+ 1,
854 MTNIC_IF_CMD_SET_PORT_MTU
);
/* CONFIG_PORT_VLAN_FILTER wrapper: fill the filter mailbox with 0xff to
 * effectively pass all VLANs (the driver ignores VLAN tags). */
859 mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_priv
*priv
, int port
)
861 struct mtnic_if_config_port_vlan_filter_in_mbox
*vlan_filter
= priv
->cmd
.buf
;
863 /* When no vlans are configured we disable the filter
864 * (i.e., pass all vlans) because we ignore them anyhow */
865 memset(vlan_filter
, 0xff, sizeof(*vlan_filter
));
866 return mtnic_cmd(priv
, NULL
, NULL
, port
+ 1,
867 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER
);
/* RELEASE_RESOURCE wrapper: release a HW resource (ring/CQ/EQ).  EQs are
 * global, so their modifier is 0; per-port resources use port + 1.
 * NOTE(review): original lines 876-877 are missing from this extract --
 * presumably they store 'type'/'index' into 'rel'; confirm against the
 * full source. */
872 mtnic_RELEASE_RESOURCE(struct mtnic_priv
*priv
, u8 port
, u8 type
, u8 index
)
874 struct mtnic_if_release_resource_in_imm rel
;
875 memset(&rel
, 0, sizeof rel
);
878 return mtnic_cmd(priv
,
879 &rel
, NULL
, (type
== MTNIC_IF_RESOURCE_TYPE_EQ
) ?
880 0 : port
+ 1, MTNIC_IF_CMD_RELEASE_RESOURCE
);
/* QUERY_CAP wrapper: ask the firmware for capability 'index' (with
 * modifier 'mod') and assemble the two 32-bit immediate output words into
 * the 64-bit *result (low word from out_imm[1], high word from out_imm[0],
 * each byte-swapped from big-endian). */
885 mtnic_QUERY_CAP(struct mtnic_priv
*priv
, u8 index
, u8 mod
, u64
*result
)
887 struct mtnic_if_query_cap_in_imm cap
;
891 memset(&cap
, 0, sizeof cap
);
892 cap
.cap_index
= index
;
893 cap
.cap_modifier
= mod
;
894 err
= mtnic_cmd(priv
, &cap
, &out_imm
, 0, MTNIC_IF_CMD_QUERY_CAP
);
/* Reassemble the 64-bit capability value from the two output words. */
896 *((u32
*)result
) = be32_to_cpu(*(out_imm
+1));
897 *((u32
*)result
+ 1) = be32_to_cpu(*out_imm
);
899 DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
900 " error:%d\n", index
, mod
, *result
, err
);
905 #define DO_QUERY_CAP(cap, mod, var) \
906 err = mtnic_QUERY_CAP(priv, cap, mod, &result); \
912 mtnic_query_cap(struct mtnic_priv
*priv
)
918 DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS
, 0, priv
->fw
.num_ports
);
919 for (i
= 0; i
< priv
->fw
.num_ports
; i
++) {
920 DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC
, i
+ 1, priv
->fw
.mac
[i
]);
927 mtnic_query_offsets(struct mtnic_priv
*priv
)
933 DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY
,
934 MTNIC_IF_MEM_TYPE_SNOOP
,
935 priv
->fw
.mem_type_snoop_be
);
936 priv
->fw
.mem_type_snoop_be
= cpu_to_be32(priv
->fw
.mem_type_snoop_be
);
937 DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET
, 0, priv
->fw
.txcq_db_offset
);
938 DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET
, 0, priv
->fw
.eq_db_offset
);
940 for (i
= 0; i
< priv
->fw
.num_ports
; i
++) {
941 DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET
, i
+ 1, priv
->fw
.cq_offset
);
942 DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET
, i
+ 1, priv
->fw
.tx_offset
[i
]);
943 DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET
, i
+ 1, priv
->fw
.rx_offset
[i
]);
944 DBG("--> Port %d CQ offset:0x%x\n", i
, priv
->fw
.cq_offset
);
945 DBG("--> Port %d Tx offset:0x%x\n", i
, priv
->fw
.tx_offset
[i
]);
946 DBG("--> Port %d Rx offset:0x%x\n", i
, priv
->fw
.rx_offset
[i
]);
963 /********************************************************************
965 * MTNIC initalization functions
970 *********************************************************************/
978 void *reset
= ioremap(mtnic_pci_dev
.dev
.bar
[0] + MTNIC_RESET_OFFSET
, 4);
979 writel(cpu_to_be32(1), reset
);
993 for (i
= 0; i
< 64; ++i
) {
994 if (i
!= 22 && i
!= 23) {
995 rc
= pci_write_config_dword(mtnic_pci_dev
.dev
.dev
,
998 dev_config_space
[i
]);
1009 * Init PCI configuration
1012 mtnic_init_pci(struct pci_device
*dev
)
1018 DBG("bus=%d devfn=0x%x", dev
->bus
, dev
->devfn
);
1019 for (i
= 0; i
< 6; ++i
) {
1020 mtnic_pci_dev
.dev
.bar
[i
] =
1021 pci_bar_start(dev
, PCI_BASE_ADDRESS_0
+ (i
<< 2));
1022 DBG("bar[%d]= 0x%08lx \n", i
, mtnic_pci_dev
.dev
.bar
[i
]);
1025 /* save config space */
1026 for (i
= 0; i
< 64; ++i
) {
1027 err
= pci_read_config_dword(dev
, i
<< 2,
1029 dev_config_space
[i
]);
1031 eprintf("Can not save configuration space");
1036 mtnic_pci_dev
.dev
.dev
= dev
;
1045 int mtnic_init_card(struct net_device
*dev
)
1047 struct mtnic_priv
*priv
= netdev_priv(dev
);
1052 priv
->state
= CARD_DOWN
;
1054 priv
->port
= MTNIC_PORT_NUM
;
1056 /* Alloc command interface */
1057 err
= mtnic_alloc_cmdif(priv
);
1059 eprintf("Failed to init command interface, aborting.\n");
1068 err
= mtnic_QUERY_FW(priv
);
1070 eprintf("QUERY_FW command failed, aborting.\n");
1074 DBG("Command interface revision:%d\n", priv
->fw
.ifc_rev
);
1076 /* Allocate memory for FW and start it */
1077 err
= mtnic_map_cmd(priv
, MTNIC_IF_CMD_MAP_FW
, priv
->fw
.fw_pages
);
1079 eprintf("Eror In MAP_FW\n");
1080 if (priv
->fw
.fw_pages
.buf
)
1081 free(priv
->fw
.fw_pages
.buf
);
1086 err
= mtnic_cmd(priv
, NULL
, NULL
, 0, MTNIC_IF_CMD_RUN_FW
);
1088 eprintf("Eror In RUN FW\n");
1092 DBG("FW version:%d.%d.%d\n",
1093 (u16
) (priv
->fw_ver
>> 32),
1094 (u16
) ((priv
->fw_ver
>> 16) & 0xffff),
1095 (u16
) (priv
->fw_ver
& 0xffff));
1098 /* Get device information */
1099 err
= mtnic_query_cap(priv
);
1101 eprintf("Insufficient resources, aborting.\n");
1106 err
= mtnic_OPEN_NIC(priv
);
1108 eprintf("Failed opening NIC, aborting.\n");
1112 /* Allocate and map pages worksace */
1113 err
= mtnic_map_cmd(priv
, MTNIC_IF_CMD_MAP_PAGES
, priv
->fw
.extra_pages
);
1115 eprintf("Couldn't allocate %lx FW extra pages, aborting.\n",
1116 priv
->fw
.extra_pages
.num
);
1117 if (priv
->fw
.extra_pages
.buf
)
1118 free(priv
->fw
.extra_pages
.buf
);
1122 /* Get device offsets */
1123 err
= mtnic_query_offsets(priv
);
1125 eprintf("Failed retrieving resource offests, aborting.\n");
1126 free(priv
->fw
.extra_pages
.buf
);
1127 goto map_extra_error
;
1132 err
= mtnic_alloc_eq(priv
);
1134 eprintf("Failed init shared resources. error: %d\n", err
);
1135 goto map_extra_error
;
1139 err
= mtnic_CONFIG_EQ(priv
);
1141 eprintf("Failed configuring EQ\n");
1144 err
= mtnic_CONFIG_RX(priv
);
1146 eprintf("Failed Rx configuration\n");
1149 err
= mtnic_CONFIG_TX(priv
);
1151 eprintf("Failed Tx configuration\n");
1155 DBG("Activating port:%d\n", MTNIC_PORT_NUM
+ 1);
1157 priv
->state
= CARD_INITIALIZED
;
1163 iounmap(priv
->eq_db
);
1166 free(priv
->fw
.extra_pages
.buf
);
1168 free(priv
->fw
.fw_pages
.buf
);
1172 free(priv
->cmd
.buf
);
1187 /*******************************************************************
1191 * process compliations of TX and RX
1194 ********************************************************************/
1195 void mtnic_process_tx_cq(struct mtnic_priv
*priv
, struct net_device
*dev
,
1196 struct mtnic_cq
*cq
)
1198 struct mtnic_cqe
*cqe
= cq
->buf
;
1199 struct mtnic_ring
*ring
= &priv
->tx_ring
;
1203 index
= cq
->last
& (cq
->size
-1);
1204 cqe
= &cq
->buf
[index
];
1206 /* Owner bit changes every round */
1207 while (XNOR(cqe
->op_tr_own
& MTNIC_BIT_CQ_OWN
, cq
->last
& cq
->size
)) {
1208 netdev_tx_complete (dev
, ring
->iobuf
[index
]);
1210 index
= cq
->last
& (cq
->size
-1);
1211 cqe
= &cq
->buf
[index
];
1214 /* Update consumer index */
1215 cq
->db
->update_ci
= cpu_to_be32(cq
->last
& 0xffffff);
1216 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
1217 ring
->cons
= cq
->last
;
1221 int mtnic_process_rx_cq(struct mtnic_priv
*priv
, struct net_device
*dev
, struct mtnic_cq
*cq
)
1223 struct mtnic_cqe
*cqe
;
1224 struct mtnic_ring
*ring
= &priv
->rx_ring
;
1227 struct io_buffer
*rx_iob
;
1230 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
1231 * descriptor offset can be deduced from the CQE index instead of
1232 * reading 'cqe->index' */
1233 index
= cq
->last
& (cq
->size
-1);
1234 cqe
= &cq
->buf
[index
];
1236 /* Process all completed CQEs */
1237 while (XNOR(cqe
->op_tr_own
& MTNIC_BIT_CQ_OWN
, cq
->last
& cq
->size
)) {
1238 /* Drop packet on bad receive or bad checksum */
1239 if ((cqe
->op_tr_own
& 0x1f) == MTNIC_OPCODE_ERROR
) {
1240 DBG("CQE completed with error - vendor \n");
1241 free_iob(ring
->iobuf
[index
]);
1244 if (cqe
->enc_bf
& MTNIC_BIT_BAD_FCS
) {
1245 DBG("Accepted packet with bad FCS\n");
1246 free_iob(ring
->iobuf
[index
]);
1251 * Packet is OK - process it.
1253 rx_iob
= ring
->iobuf
[index
];
1254 iob_put(rx_iob
, DEF_IOBUF_SIZE
);
1255 /* Add this packet to the receive queue. */
1256 netdev_rx(dev
, rx_iob
);
1257 ring
->iobuf
[index
] = NULL
;
1261 index
= cq
->last
& (cq
->size
-1);
1262 cqe
= &cq
->buf
[index
];
1265 /* Update consumer index */
1266 cq
->db
->update_ci
= cpu_to_be32(cq
->last
& 0xffffff);
1267 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
1268 ring
->cons
= cq
->last
;
1270 if (ring
->prod
- ring
->cons
< (MAX_GAP_PROD_CONS
)) {
1271 err
= mtnic_alloc_iobuf(priv
, &priv
->rx_ring
, DEF_IOBUF_SIZE
);
1273 eprintf("ERROR Allocating io buffer");
1296 /********************************************************************
1298 * net_device functions
1301 * open, poll, close, probe, disable, irq
1303 *********************************************************************/
1305 mtnic_open(struct net_device
*dev
)
1307 struct mtnic_priv
*priv
= netdev_priv(dev
);
1309 struct mtnic_ring
*ring
;
1310 struct mtnic_cq
*cq
;
1314 DBG("starting port:%d", priv
->port
);
1316 /* Alloc and configure CQs, TX, RX */
1317 err
= mtnic_alloc_resources(dev
);
1319 eprintf("Error allocating resources\n");
1323 /* Pass CQs configuration to HW */
1324 for (cq_ind
= 0; cq_ind
< NUM_CQS
; ++cq_ind
) {
1325 cq
= &priv
->cq
[cq_ind
];
1326 err
= mtnic_CONFIG_CQ(priv
, priv
->port
, cq_ind
, cq
);
1328 eprintf("Failed configuring CQ:%d error %d\n",
1335 /* Update consumer index */
1336 cq
->db
->update_ci
= cpu_to_be32(cq
->last
& 0xffffff);
1340 /* Pass Tx configuration to HW */
1341 ring
= &priv
->tx_ring
;
1342 err
= mtnic_CONFIG_TX_RING(priv
, priv
->port
, 0, ring
);
1344 eprintf("Failed configuring Tx ring:0\n");
1348 /* Pass RX configuration to HW */
1349 ring
= &priv
->rx_ring
;
1350 err
= mtnic_CONFIG_RX_RING(priv
, priv
->port
, 0, ring
);
1352 eprintf("Failed configuring Rx ring:0\n");
1356 /* Configure Rx steering */
1357 err
= mtnic_CONFIG_PORT_RSS_STEER(priv
, priv
->port
);
1359 err
= mtnic_SET_PORT_RSS_INDIRECTION(priv
, priv
->port
);
1361 eprintf("Failed configuring RSS steering\n");
1365 /* Set the port default ring to ring 0 */
1366 err
= mtnic_SET_PORT_DEFAULT_RING(priv
, priv
->port
, 0);
1368 eprintf("Failed setting default ring\n");
1372 /* Set Mac address */
1373 err
= mtnic_SET_RX_RING_ADDR(priv
, priv
->port
, &priv
->fw
.mac
[priv
->port
]);
1375 eprintf("Failed setting default MAC address\n");
1380 err
= mtnic_SET_PORT_MTU(priv
, priv
->port
, DEF_MTU
);
1382 eprintf("Failed setting MTU\n");
1386 /* Configure VLAN filter */
1387 err
= mtnic_CONFIG_PORT_VLAN_FILTER(priv
, priv
->port
);
1389 eprintf("Failed configuring VLAN filter\n");
1393 /* Bring up physical link */
1394 err
= mtnic_SET_PORT_STATE(priv
, priv
->port
, 1);
1396 eprintf("Failed bringing up port\n");
1399 mdelay(300); /* Let link state stabilize if cable was connected */
1401 priv
->state
= CARD_UP
;
1403 err
= mtnic_HEART_BEAT(priv
, &dev_link_state
);
1405 eprintf("Failed getting device link state\n");
1408 if (!(dev_link_state
& 0x3)) {
1409 eprintf("Link down, check cables and restart\n");
1417 err
= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1418 MTNIC_IF_RESOURCE_TYPE_RX_RING
, 0);
1420 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1421 MTNIC_IF_RESOURCE_TYPE_TX_RING
, 0);
1424 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1425 MTNIC_IF_RESOURCE_TYPE_CQ
, --cq_ind
);
1428 DBG("Eror Releasing resources\n");
1435 /** Check if we got completion for receive and transmit and
1436 * check the line with heart_bit command */
1438 mtnic_poll(struct net_device
*dev
)
1440 struct mtnic_priv
*priv
= netdev_priv(dev
);
1441 struct mtnic_cq
*cq
;
1446 /* In case of an old error then return */
1447 if (priv
->state
!= CARD_UP
)
1450 /* We do not check the device every call _poll call,
1451 since it will slow it down */
1452 if ((priv
->poll_counter
% ROUND_TO_CHECK
) == 0) {
1454 err
= mtnic_HEART_BEAT(priv
, &dev_link_state
);
1456 eprintf("Device has internal error\n");
1457 priv
->state
= CARD_DOWN
;
1460 if (!(dev_link_state
& 0x3)) {
1461 eprintf("Link down, check cables and restart\n");
1462 priv
->state
= CARD_DOWN
;
1468 for (i
= 0; i
< NUM_CQS
; i
++) {
1469 cq
= &priv
->cq
[i
]; //Passing on the 2 cqs.
1472 err
= mtnic_process_rx_cq(priv
, cq
->dev
, cq
);
1474 priv
->state
= CARD_DOWN
;
1475 eprintf(" Error allocating RX buffers\n");
1479 mtnic_process_tx_cq(priv
, cq
->dev
, cq
);
1482 ++ priv
->poll_counter
;
1486 mtnic_transmit( struct net_device
*dev
, struct io_buffer
*iobuf
)
1489 struct mtnic_priv
*priv
= netdev_priv(dev
);
1490 struct mtnic_ring
*ring
;
1491 struct mtnic_tx_desc
*tx_desc
;
1492 struct mtnic_data_seg
*data
;
1495 /* In case of an error then return */
1496 if (priv
->state
!= CARD_UP
)
1499 ring
= &priv
->tx_ring
;
1501 index
= ring
->prod
& ring
->size_mask
;
1502 if ((ring
->prod
- ring
->cons
) >= ring
->size
) {
1503 DBG("No space left for descriptors!!! cons: %lx prod: %lx\n",
1504 ring
->cons
, ring
->prod
);
1506 return MTNIC_ERROR
;/* no space left */
1509 /* get current descriptor */
1510 tx_desc
= ring
->buf
+ (index
* sizeof(struct mtnic_tx_desc
));
1512 /* Prepare ctrl segement */
1513 tx_desc
->ctrl
.size_vlan
= cpu_to_be32(2);
1514 tx_desc
->ctrl
.flags
= cpu_to_be32(MTNIC_BIT_TX_COMP
|
1516 tx_desc
->ctrl
.op_own
= cpu_to_be32(MTNIC_OPCODE_SEND
) |
1517 ((ring
->prod
& ring
->size
) ?
1518 cpu_to_be32(MTNIC_BIT_DESC_OWN
) : 0);
1520 /* Prepare Data Seg */
1521 data
= &tx_desc
->data
;
1522 data
->addr_l
= cpu_to_be32((u32
)virt_to_bus(iobuf
->data
));
1523 data
->count
= cpu_to_be32(iob_len(iobuf
));
1524 data
->mem_type
= priv
->fw
.mem_type_snoop_be
;
1526 /* Attach io_buffer */
1527 ring
->iobuf
[index
] = iobuf
;
1529 /* Update producer index */
1532 /* Ring doorbell! */
1534 writel((u32
) ring
->db_offset
, &ring
->txcq_db
->send_db
);
1541 mtnic_close(struct net_device
*dev
)
1543 struct mtnic_priv
*priv
= netdev_priv(dev
);
1545 DBG("Close called for port:%d\n", priv
->port
);
1547 if (priv
->state
== CARD_UP
) {
1549 err
|= mtnic_SET_PORT_STATE(priv
, priv
->port
, 0);
1551 * Stop HW associated with this port
1556 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1557 MTNIC_IF_RESOURCE_TYPE_RX_RING
, 0);
1560 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1561 MTNIC_IF_RESOURCE_TYPE_TX_RING
, 0);
1564 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1565 MTNIC_IF_RESOURCE_TYPE_CQ
, 0);
1566 err
|= mtnic_RELEASE_RESOURCE(priv
, priv
->port
,
1567 MTNIC_IF_RESOURCE_TYPE_CQ
, 1);
1569 DBG("Close reported error %d", err
);
1573 free(priv
->tx_ring
.buf
);
1574 iounmap(priv
->tx_ring
.txcq_db
);
1575 free(priv
->cq
[1].buf
);
1576 free(priv
->cq
[1].db
);
1578 /* Free RX buffers */
1579 mtnic_free_io_buffers(&priv
->rx_ring
);
1581 free(priv
->rx_ring
.buf
);
1582 free(priv
->rx_ring
.db
);
1583 free(priv
->cq
[0].buf
);
1584 free(priv
->cq
[0].db
);
1586 priv
->state
= CARD_INITIALIZED
;
1595 mtnic_disable(struct pci_device
*pci
)
1599 struct net_device
*dev
= pci_get_drvdata(pci
);
1600 struct mtnic_priv
*priv
= netdev_priv(dev
);
1602 /* Should NOT happen! but just in case */
1603 if (priv
->state
== CARD_UP
)
1606 if (priv
->state
== CARD_INITIALIZED
) {
1607 err
= mtnic_RELEASE_RESOURCE(priv
, 0,
1608 MTNIC_IF_RESOURCE_TYPE_EQ
, 0);
1609 DBG("Calling MTNIC_CLOSE command\n");
1610 err
|= mtnic_cmd(priv
, NULL
, NULL
, 0,
1611 MTNIC_IF_CMD_CLOSE_NIC
);
1613 DBG("Error Releasing resources %d\n", err
);
1616 free(priv
->cmd
.buf
);
1618 ufree((u32
)priv
->fw
.fw_pages
.buf
);
1619 ufree((u32
)priv
->fw
.extra_pages
.buf
);
1621 iounmap(priv
->eq_db
);
1622 priv
->state
= CARD_DOWN
;
1625 unregister_netdev(dev
);
1626 netdev_nullify(dev
);
1633 mtnic_irq(struct net_device
*netdev __unused
, int enable __unused
)
1635 /* Not implemented */
1640 /** mtnic net device operations */
1641 static struct net_device_operations mtnic_operations
= {
1643 .close
= mtnic_close
,
1644 .transmit
= mtnic_transmit
,
1656 mtnic_probe(struct pci_device
*pci
,
1657 const struct pci_device_id
*id __unused
)
1659 struct net_device
*dev
;
1660 struct mtnic_priv
*priv
;
1667 if (pci
->vendor
!= MELLANOX_VENDOR_ID
) {
1671 printf("\nMellanox Technologies LTD - Boot over MTNIC implementaion\n");
1673 adjust_pci_device(pci
);
1675 err
= mtnic_init_pci(pci
);
1677 eprintf("Error in pci_init\n");
1684 err
= restore_config();
1690 /* Checking MTNIC device ID */
1691 dev_id
= ioremap(mtnic_pci_dev
.dev
.bar
[0] +
1692 MTNIC_DEVICE_ID_OFFSET
, 4);
1693 result
= ntohl(readl(dev_id
));
1695 if (result
!= MTNIC_DEVICE_ID
) {
1696 eprintf("Wrong Devie ID (0x%lx) !!!", result
);
1700 /* Initializing net device */
1701 dev
= alloc_etherdev(sizeof(struct mtnic_priv
));
1703 eprintf("Net device allocation failed\n");
1707 * Initialize driver private data
1709 priv
= netdev_priv(dev
);
1710 memset(priv
, 0, sizeof(struct mtnic_priv
));
1713 priv
->dev
->dev
= &pci
->dev
;
1714 /* Attach pci device */
1715 pci_set_drvdata(pci
, priv
->dev
);
1716 netdev_init(dev
, &mtnic_operations
);
1719 /* Initialize hardware */
1720 err
= mtnic_init_card(dev
);
1722 eprintf("Error in init_card\n");
1726 /* Program the MAC address */
1727 mac
= priv
->fw
.mac
[priv
->port
];
1728 printf("Port %d Mac address: 0x%12llx\n", MTNIC_PORT_NUM
+ 1, mac
);
1729 for (i
= 0;i
< MAC_ADDRESS_SIZE
; ++i
) {
1730 dev
->ll_addr
[MAC_ADDRESS_SIZE
- i
- 1] = mac
& 0xFF;
1734 if (register_netdev(dev
)) {
1735 eprintf("Netdev registration failed\n");
1748 static struct pci_device_id mtnic_nics
[] = {
1749 PCI_ROM(0x15b3, 0x6368, "mtnic", "Mellanox MTNIC driver"),
1752 struct pci_driver mtnic_driver __pci_driver
= {
1754 .id_count
= sizeof(mtnic_nics
) / sizeof(mtnic_nics
[0]),
1755 .probe
= mtnic_probe
,
1756 .remove
= mtnic_disable
,