/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
FILE_LICENCE ( GPL2_ONLY );
#include <strings.h>
#include <errno.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <byteswap.h>
#include <unistd.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/ethernet.h>
#include <gpxe/netdevice.h>
#include <gpxe/iobuf.h>
#include "mtnic.h"

/*
 *	mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN
 */


/********************************************************************
 *
 *	MTNIC allocation functions
 *
 *********************************************************************/
/**
 * mtnic_alloc_aligned
 *
 * @v unsigned int size		size
 * @v void **va			virtual address
 * @v unsigned long *pa		physical address
 * @v unsigned int alignment	alignment
 *
 * Allocate an aligned buffer, storing its virtual address in 'va'
 * and its aligned physical (bus) address in 'pa'
 */
static int
mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa,
		    unsigned int alignment)
{
	*va = alloc_memblock(size, alignment);
	if (!*va) {
		return -EADDRINUSE;
	}
	*pa = (u32)virt_to_bus(*va);

	return 0;
}

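/* A typical call, as used by mtnic_alloc_cmdif() below, allocates one
 * page-aligned page and records both addresses; the returned bus
 * address is what is later handed to the hardware for DMA:
 *
 *	mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf,
 *			    &mtnic->cmd.mapping, PAGE_SIZE);
 */
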
/**
 * mtnic_alloc_cmdif
 *
 * Allocate the command interface
 */
static int
mtnic_alloc_cmdif(struct mtnic *mtnic)
{
	u32 bar = mtnic_pci_dev.dev.bar[0];

	mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
	if ( !mtnic->hcr ) {
		DBG("Couldn't map command register\n");
		return -EADDRINUSE;
	}
	mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf,
			    &mtnic->cmd.mapping, PAGE_SIZE);
	if ( !mtnic->cmd.buf ) {
		DBG("Error in allocating buffer for command interface\n");
		return -EADDRINUSE;
	}
	return 0;
}

/**
 * Free RX io buffers
 */
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
	int index;

	for (; ring->cons <= ring->prod; ++ring->cons) {
		index = ring->cons & ring->size_mask;
		if ( ring->iobuf[index] ) {
			free_iob(ring->iobuf[index]);
			ring->iobuf[index] = NULL;
		}
	}
}

/**
 * mtnic_alloc_iobuf
 *
 * Allocate and attach RX io buffers
 */
static int
mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
		  unsigned int size)
{
	struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
	u32 index;

	while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
		index = ring->prod & ring->size_mask;
		ring->iobuf[index] = alloc_iob(size);
		if (!ring->iobuf[index]) {
			if (ring->prod <= (ring->cons + 1)) {
				DBG ( "Dropping packet, buffer is full\n" );
			}
			break;
		}

		/* Attach io_buffer to descriptor */
		rx_desc_ptr = ring->buf +
			      (sizeof(struct mtnic_rx_desc) * index);
		rx_desc_ptr->data.count = cpu_to_be32(size);
		rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
		rx_desc_ptr->data.addr_l = cpu_to_be32(
			virt_to_bus(ring->iobuf[index]->data));

		++ring->prod;
	}

	/* Update RX producer index (PI) */
	ring->db->count = cpu_to_be32(ring->prod & 0xffff);
	return 0;
}

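/* Ring accounting sketch: 'prod' and 'cons' are free-running counters
 * and '& size_mask' maps a counter onto a slot. Assuming a ring of
 * size 8 (size_mask 7):
 *
 *	prod = 11, cons = 5  =>  6 buffers outstanding,
 *	next slot = 11 & 7 = 3
 *
 * so the loop above keeps at most UNITS_BUFFER_SIZE buffers posted
 * without ever resetting the counters.
 */
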
/**
 * Alloc and configure TX or RX ring
 *
 */
static int
mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
		 u32 size, u16 stride, u16 cq, u8 is_rx)
{
	unsigned int i;
	int err;
	struct mtnic_rx_desc *rx_desc;
	struct mtnic_tx_desc *tx_desc;

	ring->size = size; /* Number of descriptors */
	ring->size_mask = size - 1;
	ring->stride = stride; /* Size of each entry */
	ring->cq = cq; /* CQ number associated with this ring */
	ring->cons = 0;
	ring->prod = 0;
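
	/* NOTE: the '& size_mask' indexing assumes 'size' is a power of
	 * two; the fls(UNITS_BUFFER_SIZE - 1) computations in the
	 * CONFIG_* commands below rely on the same assumption. */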

	/* Alloc descriptors buffer */
	ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
				       sizeof(struct mtnic_tx_desc));
	err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
				  &ring->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating descriptor ring size %x\n",
		    ring->buf_size);
		return -EADDRINUSE;
	}
	memset(ring->buf, 0, ring->buf_size);

	DBG("Allocated %s ring (addr:%p) - buf:%p size:%x "
	    "buf_size:%x dma:%lx\n",
	    is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
	    ring->buf_size, ring->dma);

	if (is_rx) { /* RX ring */
		/* Alloc doorbell */
		err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
					  (void *)&ring->db, &ring->db_dma, 32);
		if (err) {
			DBG("Failed allocating Rx ring doorbell record\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

		/* ==- Configure Descriptor -== */
		/* Init ctrl seg of rx desc */
		for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
			rx_desc = ring->buf +
				  (sizeof(struct mtnic_rx_desc) * i);
			/* Pre-link descriptor */
			rx_desc->next = cpu_to_be16(i + 1);
		}
		/* The last ctrl descriptor is '0' and points to the first one */

		/* Alloc IO_BUFFERS */
		err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	} else { /* TX ring */
		/* Set initial ownership of all Tx Desc' to SW (1) */
		for (i = 0; i < ring->size; i++) {
			tx_desc = ring->buf + ring->stride * i;
			tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
		}
		/* DB */
		ring->db_offset = cpu_to_be32(
			((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);

		/* Map Tx+CQ doorbells */
		DBG("Mapping TxCQ doorbell at offset:0x%x\n",
		    priv->mtnic->fw.txcq_db_offset);
		ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
					priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
		if (!ring->txcq_db) {
			DBG("Couldn't map txcq doorbell, aborting...\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	}

	return 0;
}

/**
 * Alloc and configure CQ.
 *
 */
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
	       u8 is_rx, u32 size, u32 offset_ind)
{
	int err;
	unsigned int i;

	cq->num = num;
	cq->dev = dev;
	cq->size = size;
	cq->last = 0;
	cq->is_rx = is_rx;
	cq->offset_ind = offset_ind;

	/* Alloc doorbell */
	err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
				  (void *)&cq->db, &cq->db_dma, 32);
	if (err) {
		DBG("Failed allocating CQ doorbell record\n");
		return -EADDRINUSE;
	}
	memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));

	/* Alloc CQEs buffer */
	cq->buf_size = size * sizeof(struct mtnic_cqe);
	err = mtnic_alloc_aligned(cq->buf_size,
				  (void *)&cq->buf, &cq->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating CQ buffer\n");
		free_memblock(cq->db, sizeof(struct mtnic_cq_db_record));
		return -EADDRINUSE;
	}
	memset(cq->buf, 0, cq->buf_size);
	DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
	    "dma:%lx db:%p db_dma:%lx "
	    "cqn offset:%x\n", cq, cq->size, cq->buf,
	    cq->buf_size, cq->dma, cq->db,
	    cq->db_dma, offset_ind);

	/* Set ownership of all CQEs to HW */
	DBG("Setting HW ownership for CQ:%d\n", num);
	for (i = 0; i < cq->size; i++) {
		/* Initial HW ownership is 1 */
		cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
	}
	return 0;
}

/**
 * mtnic_alloc_resources
 *
 * Alloc and configure CQs, Tx, Rx
 */
static int
mtnic_alloc_resources(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err;
	int cq_ind = 0;
	int cq_offset = priv->mtnic->fw.cq_offset;

	/* Alloc 1st CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Rx CQ\n");
		return -EADDRINUSE;
	}

	/* Alloc RX ring */
	err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_rx_desc), cq_ind, /* RX */ 1);
	if (err) {
		DBG("Failed allocating Rx Ring\n");
		goto cq0_error;
	}

	++cq_ind;

	/* Alloc 2nd CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Tx CQ\n");
		goto rx_error;
	}

	/* Alloc TX ring */
	err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
	if (err) {
		DBG("Failed allocating Tx ring\n");
		goto cq1_error;
	}

	return 0;

cq1_error:
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
rx_error:
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	return -EADDRINUSE;
}

/**
 * mtnic alloc Event queue
 *
 * Note: EQ is not used by the driver but must be allocated
 */
static int
mtnic_alloc_eq(struct mtnic *mtnic)
{
	int err;
	unsigned int i;
	struct mtnic_eqe *eqe_desc = NULL;

	/* Allocating doorbell */
	mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
			       mtnic->fw.eq_db_offset, sizeof(u32));
	if (!mtnic->eq_db) {
		DBG("Couldn't map EQ doorbell, aborting...\n");
		return -EADDRINUSE;
	}

	/* Allocating buffer */
	mtnic->eq.size = NUM_EQES;
	mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
	err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
				  &mtnic->eq.dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating EQ buffer\n");
		iounmap(mtnic->eq_db);
		return -EADDRINUSE;
	}
	memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);

	/* Set initial ownership of all EQEs to HW */
	for (i = 0; i < mtnic->eq.size; i++) {
		eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
		eqe_desc->own |= MTNIC_BIT_EQE_OWN;
	}

	return 0;
}



/********************************************************************
 *
 * Mtnic command functions
 * -=-=-=-=-=-=-=-=-=-=-=-=
 *
 *********************************************************************/
static inline int
cmdif_go_bit(struct mtnic *mtnic)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	u32 status;
	int i;

	for (i = 0; i < TBIT_RETRIES; i++) {
		status = be32_to_cpu(readl(&hcr->status_go_opcode));
		if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
		    (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
			/* Read expected t-bit - now return go-bit value */
			return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
		}
	}

	DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
	return -EBUSY; /* Return busy... */
}

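/* HCR handshake sketch: software sets the go bit (and flips the t-bit)
 * when it posts a command; hardware clears the go bit when the command
 * completes. Reading a status whose t-bit does not match our recorded
 * 'cmd.tbit' means the register is mid-update, so we retry before
 * trusting the go bit. */
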
/* Base Command interface */
static int
mtnic_cmd(struct mtnic *mtnic, void *in_imm,
	  void *out_imm, u32 in_modifier, u16 op)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	int err = 0;
	u32 status;
	u32 in_param_h = 0;
	u32 in_param_l = 0;
	u32 out_param_h = 0;
	u32 out_param_l = 0;
	static u16 token = 0x8000;
	unsigned int timeout = 0;

	token++;

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("GO BIT BUSY:%p.\n", hcr + 6);
		err = -EBUSY;
		goto out;
	}
	if (in_imm) {
		in_param_h = *((u32 *)in_imm);
		in_param_l = *((u32 *)in_imm + 1);
	} else {
		in_param_l = cpu_to_be32(mtnic->cmd.mapping);
	}
	out_param_l = cpu_to_be32(mtnic->cmd.mapping);

	/* writing the command parameters */
	writel(in_param_h, &hcr->in_param_h);
	writel(in_param_l, &hcr->in_param_l);
	writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
	writel(out_param_h, &hcr->out_param_h);
	writel(out_param_l, &hcr->out_param_l);
	writel((u32) cpu_to_be32(token << 16), &hcr->token);
	wmb();

	/* flip toggle bit before each write to the HCR */
	mtnic->cmd.tbit = !mtnic->cmd.tbit;
	writel(cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
			   ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
	       &hcr->status_go_opcode);

	while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
		mdelay ( 1 );
		++timeout;
	}

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
		err = -EBUSY;
		goto out;
	}

	if (out_imm) {
		*((u32 *)out_imm) = readl(&hcr->out_param_h);
		*((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
	}

	status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;

	if (status) {
		DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
		    op, token, status);
		return status;
	}

out:
	return err;
}

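/* Two parameter conventions flow through mtnic_cmd(): 'immediate'
 * commands pass a pair of u32s directly via in_imm/out_imm, while
 * mailbox commands leave them NULL and exchange data through the page
 * at mtnic->cmd.buf, whose bus address (cmd.mapping) is loaded into
 * the parameter registers instead. */
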
/* MAP PAGES wrapper */
static int
mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
{
	unsigned int j;
	u32 addr;
	unsigned int len;
	u32 *page_arr = mtnic->cmd.buf;
	int nent = 0;
	int err = 0;

	memset(page_arr, 0, PAGE_SIZE);

	len = PAGE_SIZE * pages.num;
	pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
	addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
	DBG("Mapping pages: size: %x address: %p\n", pages.num, pages.buf);

	if (addr & (PAGE_MASK)) {
		DBG("Got FW area not aligned to %d (%llx/%x)\n",
		    PAGE_SIZE, (u64) addr, len);
		return -EADDRINUSE;
	}

	/* Function maps each PAGE separately */
	for (j = 0; j < len; j += PAGE_SIZE) {
		page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
		if (++nent == MTNIC_MAILBOX_SIZE / 16) {
			err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
			if (err)
				return -EIO;
			nent = 0;
		}
	}

	if (nent) {
		err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
	}
	return err;
}

/*
 * Query FW
 */
static int
mtnic_QUERY_FW ( struct mtnic *mtnic )
{
	int err;
	struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;

	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
	if (err)
		return -EIO;

	/* Get FW and interface versions */
	mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
			((u64) be16_to_cpu(cmd->rev_min) << 16) |
			(u64) be16_to_cpu(cmd->rev_smin);
	mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);

	/* Get offset for internal error reports (debug) */
	mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
	mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);

	DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);

	/* Get number of required FW (4k) pages */
	mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);

	return 0;
}

static int
mtnic_OPEN_NIC(struct mtnic *mtnic)
{
	struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
	u32 extra_pages[2] = {0};
	int err;

	memset(open_nic, 0, sizeof *open_nic);

	/* port 1 */
	open_nic->log_rx_p1 = 0;
	open_nic->log_cq_p1 = 1;

	open_nic->log_tx_p1 = 0;
	open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	/* port 2 */
	open_nic->log_rx_p2 = 0;
	open_nic->log_cq_p2 = 1;

	open_nic->log_tx_p2 = 0;
	open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);

	mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages + 1));
	DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);
	return err;
}

static int
mtnic_CONFIG_RX(struct mtnic *mtnic)
{
	struct mtnic_if_config_rx_in_imm config_rx;

	memset(&config_rx, 0, sizeof config_rx);
	return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}

static int
mtnic_CONFIG_TX(struct mtnic *mtnic)
{
	struct mtnic_if_config_send_in_imm config_tx;

	config_tx.enph_gpf = 0;
	return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}

static int
mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
{
	struct mtnic_if_heart_beat_out_imm heart_beat;
	int err;
	u32 flags;

	err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
	if (!err) {
		flags = be32_to_cpu(heart_beat.flags);
		if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
			DBG("Internal error detected\n");
			return -EIO;
		}
		*link_state = flags &
			      ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
	}
	return err;
}

static int
mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
{
	struct mtnic_if_set_port_default_ring_in_imm def_ring;

	memset(&def_ring, 0, sizeof(def_ring));
	def_ring.ring = ring;
	return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}

static int
mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}

static int
mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}

static int
mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
		u16 cq_ind, struct mtnic_cq *cq)
{
	struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;

	memset(config_cq, 0, sizeof *config_cq);
	config_cq->cq = cq_ind;
	config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
	config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
	config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
	config_cq->page_address[1] = cpu_to_be32(cq->dma);
	DBG("config cq address: %x dma_address: %lx "
	    "offset: %d size %d index: %d\n",
	    config_cq->page_address[1], cq->dma,
	    config_cq->offset, config_cq->size, config_cq->cq);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_CQ);
}

static int
mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;

	memset(config_tx_ring, 0, sizeof *config_tx_ring);
	config_tx_ring->ring = cpu_to_be16(ring_ind);
	config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
	config_tx_ring->cq = cpu_to_be16(ring->cq);
	config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_TX_RING);
}

static int
mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;

	memset(config_rx_ring, 0, sizeof *config_rx_ring);
	config_rx_ring->ring = ring_ind;
	MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
		     MTNIC_MASK_CONFIG_RX_RING_SIZE);
	MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
		     MTNIC_MASK_CONFIG_RX_RING_STRIDE);
	config_rx_ring->cq = cpu_to_be16(ring->cq);
	config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);

	DBG("Config RX ring starting at address:%lx\n", ring->dma);

	config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_RX_RING);
}

static int
mtnic_CONFIG_EQ(struct mtnic *mtnic)
{
	struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;

	if (mtnic->eq.dma & (PAGE_MASK)) {
		DBG("misaligned eq buffer:%lx\n",
		    mtnic->eq.dma);
		return -EADDRINUSE;
	}

	memset(eq, 0, sizeof *eq);
	MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
	MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
	MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
	eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);

	return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}

static int
mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64 *mac)
{
	struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
	u32 modifier = ((u32) port + 1) << 16;

	memset(&ring_addr, 0, sizeof(ring_addr));

	ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
	ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
	ring_addr.flags_vlan_id |= cpu_to_be16(
		MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));

	return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier,
			 MTNIC_IF_CMD_SET_RX_RING_ADDR);
}

static int
mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
{
	struct mtnic_if_set_port_state_in_imm port_state;

	port_state.state = state ? cpu_to_be32(
		MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
	port_state.reserved = 0;
	return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_STATE);
}

static int
mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
{
	struct mtnic_if_set_port_mtu_in_imm set_mtu;

	memset(&set_mtu, 0, sizeof(set_mtu));
	set_mtu.mtu = cpu_to_be16(mtu);
	return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_MTU);
}

static int
mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
{
	struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;

	/* When no VLANs are configured we disable the filter
	 * (i.e. pass all VLANs), since we ignore them anyhow */
	memset(vlan_filter, 0xff, sizeof(*vlan_filter));
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}

static int
mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
{
	struct mtnic_if_release_resource_in_imm rel;

	memset(&rel, 0, sizeof rel);
	rel.index = index;
	rel.type = type;
	return mtnic_cmd ( priv->mtnic,
			   &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
			   0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
}

static int
mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
{
	struct mtnic_if_query_cap_in_imm cap;
	u32 out_imm[2];
	int err;

	memset(&cap, 0, sizeof cap);
	cap.cap_index = index;
	cap.cap_modifier = mod;
	err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);

	*((u32 *)result) = be32_to_cpu(*(out_imm + 1));
	*((u32 *)result + 1) = be32_to_cpu(*out_imm);

	DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
	    " error:%d\n", index, mod, *result, err);
	return err;
}

#define DO_QUERY_CAP(cap, mod, var)				\
	err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);	\
	if (err)						\
		return err;					\
	(var) = result

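/* The macro expands in-line in its callers, which are assumed to
 * declare 'int err' and 'u64 result' locally; e.g.
 *
 *	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);
 *
 * queries one capability and bails out of the caller on failure. */
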
static int
mtnic_query_num_ports(struct mtnic *mtnic)
{
	int err = 0;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);

	return 0;
}

static int
mtnic_query_mac(struct mtnic *mtnic)
{
	int err = 0;
	int i;
	u64 result;

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
	}

	return 0;
}

static int
mtnic_query_offsets(struct mtnic *mtnic)
{
	int err = 0;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
		     MTNIC_IF_MEM_TYPE_SNOOP,
		     mtnic->fw.mem_type_snoop_be);
	mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);
	DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
	DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
		DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
		DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
		DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
		DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
		DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
	}

	return 0;
}



/********************************************************************
 *
 * MTNIC initialization functions
 *
 *********************************************************************/
/**
 * Reset device
 */
void
mtnic_reset ( void )
{
	void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
				4 );
	writel ( cpu_to_be32 ( 1 ), reset );
	iounmap ( reset );
}

/**
 * Restore PCI config space
 */
static int
restore_config(void)
{
	int i;
	int rc;

	for (i = 0; i < 64; ++i) {
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
						    i << 2,
						    mtnic_pci_dev.dev.
						    dev_config_space[i]);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * Init PCI configuration
 */
static int
mtnic_init_pci(struct pci_device *dev)
{
	int i;
	int err;

	/* save bars */
	DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		mtnic_pci_dev.dev.bar[i] =
			pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		DBG("bar[%d]= 0x%08lx \n", i, mtnic_pci_dev.dev.bar[i]);
	}

	/* save config space */
	for (i = 0; i < 64; ++i) {
		err = pci_read_config_dword(dev, i << 2,
					    &mtnic_pci_dev.dev.
					    dev_config_space[i]);
		if (err) {
			DBG("Can not save configuration space\n");
			return err;
		}
	}

	mtnic_pci_dev.dev.dev = dev;

	return 0;
}

/**
 * Initialize hardware
 */
static inline
int mtnic_init_card(struct mtnic *mtnic)
{
	int err = 0;


	/* Alloc command interface */
	err = mtnic_alloc_cmdif ( mtnic );
	if (err) {
		DBG("Failed to init command interface, aborting\n");
		return -EADDRINUSE;
	}


	/*
	 * Bring up HW (FW)
	 */

	/* Query FW */
	err = mtnic_QUERY_FW ( mtnic );
	if (err) {
		DBG("QUERY_FW command failed, aborting\n");
		goto cmd_error;
	}
	DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);

	/* Allocate memory for FW and start it */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
	if (err) {
		DBG("Error in MAP_FW\n");
		if (mtnic->fw.fw_pages.buf)
			ufree((intptr_t)mtnic->fw.fw_pages.buf);
		goto cmd_error;
	}

	/* Run firmware */
	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
	if (err) {
		DBG("Error in RUN FW\n");
		goto map_fw_error;
	}

	DBG("FW version:%d.%d.%d\n",
	    (u16) (mtnic->fw_ver >> 32),
	    (u16) ((mtnic->fw_ver >> 16) & 0xffff),
	    (u16) (mtnic->fw_ver & 0xffff));


	/* Query num ports */
	err = mtnic_query_num_ports(mtnic);
	if (err) {
		DBG("Insufficient resources, aborting\n");
		goto map_fw_error;
	}

	/* Open NIC */
	err = mtnic_OPEN_NIC(mtnic);
	if (err) {
		DBG("Failed opening NIC, aborting\n");
		goto map_fw_error;
	}

	/* Allocate and map pages workspace */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
	if (err) {
		DBG("Couldn't allocate %x FW extra pages, aborting\n",
		    mtnic->fw.extra_pages.num);
		if (mtnic->fw.extra_pages.buf)
			ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_fw_error;
	}


	/* Get device information */
	err = mtnic_query_mac(mtnic);
	if (err) {
		DBG("Insufficient resources in query mac, aborting\n");
		goto map_fw_error;
	}

	/* Get device offsets */
	err = mtnic_query_offsets(mtnic);
	if (err) {
		DBG("Failed retrieving resource offsets, aborting\n");
		goto map_extra_error;
	}


	/* Alloc EQ */
	err = mtnic_alloc_eq(mtnic);
	if (err) {
		DBG("Failed init shared resources. error: %d\n", err);
		goto map_extra_error;
	}

	/* Configure HW */
	err = mtnic_CONFIG_EQ(mtnic);
	if (err) {
		DBG("Failed configuring EQ\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_RX(mtnic);
	if (err) {
		DBG("Failed Rx configuration\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_TX(mtnic);
	if (err) {
		DBG("Failed Tx configuration\n");
		goto eq_error;
	}


	return 0;


eq_error:
	iounmap(mtnic->eq_db);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
map_extra_error:
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
map_fw_error:
	ufree((intptr_t)mtnic->fw.fw_pages.buf);

cmd_error:
	iounmap(mtnic->hcr);
	free_memblock(mtnic->cmd.buf, PAGE_SIZE);

	return -EADDRINUSE;
}

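/* Bring-up order matters here: the command interface must exist before
 * QUERY_FW can run, firmware pages must be mapped before RUN_FW, and
 * the EQ must be allocated before CONFIG_EQ; the labels above unwind
 * those steps in reverse on failure. */
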


/*******************************************************************
 *
 * Process commands
 *
 * Process completions of TX and RX
 *
 ********************************************************************/
void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
			 struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe = cq->buf;
	struct mtnic_ring *ring = &priv->tx_ring;
	u32 index;

	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Owner bit changes every round */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		netdev_tx_complete (dev, ring->iobuf[index]);
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;
}

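/* CQE ownership works by parity: hardware toggles MTNIC_BIT_CQ_OWN on
 * each pass over the CQ, so a CQE belongs to software exactly when its
 * ownership bit matches the wrap state of our consumer counter,
 * 'cq->last & cq->size'. The XNOR in the loop condition tests just
 * that, with no explicit valid flag needed. */
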
int mtnic_process_rx_cq(struct mtnic_port *priv,
			struct net_device *dev,
			struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe;
	struct mtnic_ring *ring = &priv->rx_ring;
	int index;
	int err;
	struct io_buffer *rx_iob;
	unsigned int length;


	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->last & (cq->size - 1);
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		/* Drop packet on bad receive or bad checksum */
		if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
			DBG("CQE completed with error - vendor\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}
		if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
			DBG("Accepted packet with bad FCS\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		rx_iob = ring->iobuf[index];
		iob_put(rx_iob, length);

		/* Add this packet to the receive queue. */
		netdev_rx(dev, rx_iob);
		ring->iobuf[index] = NULL;

next:
		++cq->last;
		index = cq->last & (cq->size - 1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;

	if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
		err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			return -EADDRINUSE;
		}
	}

	return 0;
}



/********************************************************************
 *
 * net_device functions
 *
 * open, poll, close, probe, disable, irq
 *
 *********************************************************************/
static int
mtnic_open(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);

	int err = 0;
	struct mtnic_ring *ring;
	struct mtnic_cq *cq;
	int cq_ind = 0;
	u32 dev_link_state;
	int link_check;

	DBG("starting port:%d, MAC Address: 0x%12llx\n",
	    priv->port, priv->mtnic->fw.mac[priv->port]);

	/* Alloc and configure CQs, TX, RX */
	err = mtnic_alloc_resources ( dev );
	if (err) {
		DBG("Error allocating resources\n");
		return -EADDRINUSE;
	}

	/* Pass CQs configuration to HW */
	for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
		cq = &priv->cq[cq_ind];
		err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
		if (err) {
			DBG("Failed configuring CQ:%d error %d\n",
			    cq_ind, err);
			if (cq_ind)
				goto cq_error;
			else
				goto allocation_error;
		}
		/* Update consumer index */
		cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	}


	/* Pass Tx configuration to HW */
	ring = &priv->tx_ring;
	err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Tx ring:0\n");
		goto cq_error;
	}

	/* Pass RX configuration to HW */
	ring = &priv->rx_ring;
	err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Rx ring:0\n");
		goto tx_error;
	}

	/* Configure Rx steering */
	err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
	if (!err)
		err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
	if (err) {
		DBG("Failed configuring RSS steering\n");
		goto rx_error;
	}


	/* Set the port default ring to ring 0 */
	err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
	if (err) {
		DBG("Failed setting default ring\n");
		goto rx_error;
	}

	/* Set Mac address */
	err = mtnic_SET_RX_RING_ADDR(priv, priv->port,
				     &priv->mtnic->fw.mac[priv->port]);
	if (err) {
		DBG("Failed setting default MAC address\n");
		goto rx_error;
	}

	/* Set MTU */
	err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
	if (err) {
		DBG("Failed setting MTU\n");
		goto rx_error;
	}

	/* Configure VLAN filter */
	/* Enabling this call stops the second port from accepting
	   packets, so it is left disabled:
	err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
	if (err) {
		DBG("Failed configuring VLAN filter\n");
		goto rx_error;
	}
	*/


	/* Bring up physical link */
	err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
	if (err) {
		DBG("Failed bringing up port\n");
		goto rx_error;
	}

	/* PORT IS UP */
	priv->state = CARD_UP;


	/* Checking Link is up */
	DBG ( "Checking if link is up\n" );
	for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check++ ) {
		/* Let link state stabilize if cable was connected */
		mdelay ( DELAY_LINK_CHECK );

		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Failed getting device link state\n");
			return -ENETDOWN;
		}

		if ( dev_link_state & priv->port ) {
			/* Link is up */
			break;
		}
	}


	if ( ! ( dev_link_state & 0x3 ) ) {
		DBG("Link down, check cables and restart\n");
		netdev_link_down ( dev );
		return -ENETDOWN;
	}

	DBG ( "Link is up!\n" );

	/* Mark as link up */
	netdev_link_up ( dev );

	return 0;

rx_error:
	err = mtnic_RELEASE_RESOURCE(priv, priv->port,
				     MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
	err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
				      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
cq_error:
	while (cq_ind) {
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
	}
	if (err)
		DBG("Error releasing resources\n");

allocation_error:
	free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
	iounmap(priv->tx_ring.txcq_db);
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	/* Free RX buffers */
	mtnic_free_io_buffers(&priv->rx_ring);

	return -ENETDOWN;
}

/** Check if we got completion for receive and transmit and
 * check the link with the heart_beat command */
static void
mtnic_poll ( struct net_device *dev )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_cq *cq;
	u32 dev_link_state;
	int err;
	unsigned int i;

	/* In case of an old error then return */
	if (priv->state != CARD_UP)
		return;

	/* We do not check the device on every poll call,
	   since that would slow it down */
	if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
		/* Check device */
		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Device has internal error\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
		if (!(dev_link_state & 0x3)) {
			DBG("Link down, check cables and restart\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
	}

	/* Poll both CQs */
	for (i = 0; i < NUM_CQS; i++) {
		cq = &priv->cq[i];

		if (cq->is_rx) {
			err = mtnic_process_rx_cq(priv, cq->dev, cq);
			if (err) {
				priv->state = CARD_LINK_DOWN;
				DBG(" Error allocating RX buffers\n");
				return;
			}
		} else {
			mtnic_process_tx_cq(priv, cq->dev, cq);
		}
	}
	++priv->poll_counter;
}

static int
mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_ring *ring;
	struct mtnic_tx_desc *tx_desc;
	struct mtnic_data_seg *data;
	u32 index;

	/* In case of an error then return */
	if (priv->state != CARD_UP)
		return -ENETDOWN;

	ring = &priv->tx_ring;

	index = ring->prod & ring->size_mask;
	if ((ring->prod - ring->cons) >= ring->size) {
		DBG("No space left for descriptors!!! cons: %x prod: %x\n",
		    ring->cons, ring->prod);
		return -EAGAIN; /* no space left */
	}

	/* get current descriptor */
	tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));

	/* Prepare Data Seg */
	data = &tx_desc->data;
	data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
	data->count = cpu_to_be32(iob_len(iobuf));
	data->mem_type = priv->mtnic->fw.mem_type_snoop_be;

	/* Prepare ctrl segment */
	tx_desc->ctrl.size_vlan = cpu_to_be32(2);
	tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
					  MTNIC_BIT_NO_ICRC);
	tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
			       ((ring->prod & ring->size) ?
				cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);

	/* Attach io_buffer */
	ring->iobuf[index] = iobuf;

	/* Update producer index */
	++ring->prod;

	/* Ring doorbell! */
	wmb();
	writel((u32) ring->db_offset, &ring->txcq_db->send_db);

	return 0;
}

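/* Tx descriptor ownership mirrors the CQ parity scheme: the DESC_OWN
 * bit is set on every other lap of the ring ('ring->prod & ring->size'),
 * so the hardware can tell a freshly posted descriptor from a stale
 * one without software ever clearing the bit back. */
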
static void
mtnic_close(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err = 0;

	DBG("Close called for port:%d\n", priv->port);

	if ( ( priv->state == CARD_UP ) ||
	     ( priv->state == CARD_LINK_DOWN ) ) {

		/* Disable port */
		err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
		/*
		 * Stop HW associated with this port
		 */

		/* Stop RX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);

		/* Stop TX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

		/* Stop CQs */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 0);
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 1);
		if (err) {
			DBG("Close reported error %d\n", err);
		}

		/* Free memory */
		free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
		iounmap(priv->tx_ring.txcq_db);
		free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
		free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
		free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
		free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

		/* Free RX buffers */
		mtnic_free_io_buffers(&priv->rx_ring);
	}

	priv->state = CARD_INITIALIZED;
}

static void
mtnic_disable(struct pci_device *pci)
{
	int err;
	int i;
	struct mtnic *mtnic = pci_get_drvdata(pci);

	struct net_device *dev;
	struct mtnic_port *priv;

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {

		dev = mtnic->netdev[i];

		priv = netdev_priv(dev);

		/* Just in case */
		if ( ( priv->state == CARD_UP ) ||
		     ( priv->state == CARD_LINK_DOWN ) )
			mtnic_close ( dev );
	}

	/* Releasing EQ */
	priv = netdev_priv ( mtnic->netdev[0] );
	err = mtnic_RELEASE_RESOURCE(priv, 1,
				     MTNIC_IF_RESOURCE_TYPE_EQ, 0);

	DBG("Calling MTNIC_CLOSE command\n");
	err |= mtnic_cmd(mtnic, NULL, NULL, 0,
			 MTNIC_IF_CMD_CLOSE_NIC);
	if (err) {
		DBG("Error releasing resources %d\n", err);
	}

	free_memblock(mtnic->cmd.buf, PAGE_SIZE);
	iounmap(mtnic->hcr);
	ufree((intptr_t)mtnic->fw.fw_pages.buf);
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
	iounmap(mtnic->eq_db);

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
		dev = mtnic->netdev[i];
		unregister_netdev ( dev );
		netdev_nullify ( dev );
		netdev_put ( dev );
	}

	free ( mtnic );

	/* Restore config, in case we would like to retry booting */
	restore_config();
}

static void
mtnic_irq(struct net_device *netdev __unused, int enable __unused)
{
	/* Not implemented */
}

/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
	.open		= mtnic_open,
	.close		= mtnic_close,
	.transmit	= mtnic_transmit,
	.poll		= mtnic_poll,
	.irq		= mtnic_irq,
};

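/* gPXE drives the NIC entirely through these hooks: open/close from
 * netdev_open()/netdev_close(), transmit and poll from the network
 * core's polling loop, and irq (a no-op here) since this driver runs
 * purely polled. */
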
static int
mtnic_probe(struct pci_device *pci,
	    const struct pci_device_id *id __unused)
{
	struct mtnic_port *priv;
	struct mtnic *mtnic;
	int err;
	u64 mac;
	int port_index;
	int mac_idx;


	adjust_pci_device(pci);

	err = mtnic_init_pci(pci);
	if (err) {
		DBG("Error in pci_init\n");
		return -EIO;
	}

	/* Reset device, then restore its PCI config space */
	mtnic_reset();
	mdelay(1000);

	err = restore_config();
	if (err) {
		DBG("Error in restoring config\n");
		return err;
	}

	mtnic = zalloc ( sizeof ( *mtnic ) );
	if ( ! mtnic ) {
		DBG ( "Error Allocating mtnic buffer\n" );
		return -EADDRINUSE;
	}

	pci_set_drvdata(pci, mtnic);


	/* Initialize hardware */
	err = mtnic_init_card ( mtnic );
	if (err) {
		DBG("Error in init_card\n");
		goto err_alloc_mtnic;
	}

	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index++ ) {
		/* Initializing net device */
		mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
		if ( mtnic->netdev[port_index] == NULL ) {
			DBG("Net device allocation failed\n");
			goto err_alloc_mtnic;
		}

		/*
		 * Initialize driver private data
		 */

		mtnic->netdev[port_index]->dev = &pci->dev;
		priv = netdev_priv ( mtnic->netdev[port_index] );
		memset ( priv, 0, sizeof ( struct mtnic_port ) );
		priv->mtnic = mtnic;
		priv->netdev = mtnic->netdev[port_index];

		/* Attach pci device */
		netdev_init(mtnic->netdev[port_index], &mtnic_operations);

		/* Set port number */
		priv->port = port_index;

		/* Set state */
		priv->state = CARD_DOWN;
	}

	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index++ ) {
		priv = netdev_priv ( mtnic->netdev[port_index] );
		/* Program the MAC address */
		mac = priv->mtnic->fw.mac[port_index];
		for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
			mtnic->netdev[port_index]->hw_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
			mac = mac >> 8;
		}

		if ( register_netdev ( mtnic->netdev[port_index] ) ) {
			DBG("Netdev registration failed\n");
			priv->state = CARD_INITIALIZED;
			goto err_alloc_mtnic;
		}
	}

	return 0;

err_alloc_mtnic:
	free ( mtnic );
	return -EIO;
}

static struct pci_device_id mtnic_nics[] = {
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver", 0 ),
};

struct pci_driver mtnic_driver __pci_driver = {
	.ids		= mtnic_nics,
	.id_count	= sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
	.probe		= mtnic_probe,
	.remove		= mtnic_disable,
};