/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 * Copyright (C) 2008 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <assert.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/pcibackup.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/infiniband.h>
#include <gpxe/ib_smc.h>
#include "hermon.h"

/**
 * @file
 *
 * Mellanox Hermon Infiniband HCA
 *
 */
/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @v num_bits		Number of contiguous bits to allocate within bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
				  unsigned int bits_len,
				  unsigned int num_bits ) {
	unsigned int bit = 0;
	hermon_bitmask_t mask = 1;
	unsigned int found = 0;

	/* Search bits for num_bits contiguous free bits */
	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			if ( ++found == num_bits )
				goto found;
		} else {
			found = 0;
		}
		bit++;
		/* Rotate the single-bit mask left; when it wraps back
		 * to bit zero, advance to the next bitmask word.
		 */
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENFILE;

 found:
	/* Mark bits as in-use, rotating back over the run just found */
	do {
		*bits |= mask;
		if ( mask == 1 )
			bits--;
		mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
	} while ( --found );

	return ( bit - num_bits + 1 );
}
/**
 * Free offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Starting bit within bitmask
 * @v num_bits		Number of contiguous bits to free within bitmask
 */
static void hermon_bitmask_free ( hermon_bitmask_t *bits,
				  int bit, unsigned int num_bits ) {
	hermon_bitmask_t mask;

	for ( ; num_bits ; bit++, num_bits-- ) {
		mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
		bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
	}
}
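
/* Usage sketch (illustrative only; "inuse" is a hypothetical bitmask):
 *
 *	hermon_bitmask_t inuse[2] = { 0, 0 };
 *	int base = hermon_bitmask_alloc ( inuse, 64, 4 );
 *	if ( base >= 0 )
 *		hermon_bitmask_free ( inuse, base, 4 );
 *
 * hermon_bitmask_alloc() returns the index of the first bit of a
 * contiguous run of four free bits (marking the run as in-use), or a
 * negative error if no such run exists.
 */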
/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Hermon command completion
 *
 * @v hermon		Hermon device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int hermon_cmd_wait ( struct hermon *hermon,
			     struct hermonprm_hca_command_register *hcr ) {
	unsigned int wait;

	for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
		if ( ( MLX_GET ( hcr, go ) == 0 ) &&
		     ( MLX_GET ( hcr, t ) == hermon->toggle ) )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}
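
/* Note on the handshake: the hardware clears the "go" bit when it has
 * finished executing a command, and echoes the software-maintained
 * toggle bit "t".  Polling for ( go == 0 && t == hermon->toggle )
 * therefore distinguishes completion of our most recent command from a
 * stale completion of the previous command, which would carry the
 * opposite toggle value.
 */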
/**
 * Issue HCA command
 *
 * @v hermon		Hermon device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int hermon_cmd ( struct hermon *hermon, unsigned long command,
			unsigned int op_mod, const void *in,
			unsigned int in_mod, void *out ) {
	struct hermonprm_hca_command_register hcr;
	unsigned int opcode = HERMON_HCR_OPCODE ( command );
	size_t in_len = HERMON_HCR_IN_LEN ( command );
	size_t out_len = HERMON_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= HERMON_MBOX_SIZE );
	assert ( out_len <= HERMON_MBOX_SIZE );

	DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
		hermon, opcode, in_len,
		( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p command interface locked\n",
		       hermon );
		return rc;
	}

	/* Flip HCR toggle */
	hermon->toggle = ( 1 - hermon->toggle );

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		in_buffer = hermon->mailbox_in;
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
		out_buffer = hermon->mailbox_out;
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_4 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1,
		     t, hermon->toggle );
	DBGC ( hermon, "Hermon %p issuing command %04x\n",
	       hermon, opcode );
	DBGC2_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
		    &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		DBGC2 ( hermon, "Input mailbox:\n" );
		DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 hermon->config + HERMON_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
		       hermon );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
		       hermon, status );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( hermon, "Output%s:\n",
			( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
		DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
			    ( ( out_len < 512 ) ? out_len : 512 ) );
	}

	return 0;
}
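
/* Flow summary (illustrative): a command with an output mailbox, such
 * as QUERY_FW, travels as follows: hermon_cmd() writes the opcode and
 * the bus address of hermon->mailbox_out into the HCR, sets "go" with
 * the new toggle value, polls via hermon_cmd_wait(), and finally
 * copies out_len bytes back from the mailbox into the caller's
 * parameter block.
 */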
static inline int
hermon_cmd_query_dev_cap ( struct hermon *hermon,
			   struct hermonprm_query_dev_cap *dev_cap ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
						 1, sizeof ( *dev_cap ) ),
			    0, NULL, 0, dev_cap );
}

static inline int
hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
						 1, sizeof ( *fw ) ),
			    0, NULL, 0, fw );
}

static inline int
hermon_cmd_init_hca ( struct hermon *hermon,
		      const struct hermonprm_init_hca *init_hca ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
						1, sizeof ( *init_hca ) ),
			    0, init_hca, 0, NULL );
}

static inline int
hermon_cmd_close_hca ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_init_port ( struct hermon *hermon, unsigned int port,
		       const struct hermonprm_init_port *init_port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_PORT,
						1, sizeof ( *init_port ) ),
			    0, init_port, port, NULL );
}

static inline int
hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT ),
			    0, NULL, port, NULL );
}

static inline int
hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mpt *mpt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT,
						1, sizeof ( *mpt ) ),
			    0, mpt, index, NULL );
}

static inline int
hermon_cmd_write_mtt ( struct hermon *hermon,
		       const struct hermonprm_write_mtt *write_mtt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
						1, sizeof ( *write_mtt ) ),
			    0, write_mtt, 1, NULL );
}

static inline int
hermon_cmd_map_eq ( struct hermon *hermon, unsigned long index_map,
		    const struct hermonprm_event_mask *mask ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_EQ,
						0, sizeof ( *mask ) ),
			    0, mask, index_map, NULL );
}

static inline int
hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
		      const struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ,
						1, sizeof ( *eqctx ) ),
			    0, eqctx, index, NULL );
}

static inline int
hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_EQ,
						 1, sizeof ( *eqctx ) ),
			    1, NULL, index, eqctx );
}

static inline int
hermon_cmd_query_eq ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_EQ,
						 1, sizeof ( *eqctx ) ),
			    0, NULL, index, eqctx );
}

static inline int
hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
		      const struct hermonprm_completion_queue_context *cqctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ,
						1, sizeof ( *cqctx ) ),
			    0, cqctx, cqn, NULL );
}

static inline int
hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
		      struct hermonprm_completion_queue_context *cqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ,
						 1, sizeof ( *cqctx ) ),
			    0, NULL, cqn, cqctx );
}

static inline int
hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rts2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTS2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP ),
			    0x03, NULL, qpn, NULL );
}

static inline int
hermon_cmd_query_qp ( struct hermon *hermon, unsigned long qpn,
		      struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_QP,
						 1, sizeof ( *ctx ) ),
			    0, NULL, qpn, ctx );
}

static inline int
hermon_cmd_conf_special_qp ( struct hermon *hermon, unsigned int internal_qps,
			     unsigned long base_qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CONF_SPECIAL_QP ),
			    internal_qps, NULL, base_qpn, NULL );
}

static inline int
hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
		     union hermonprm_mad *mad ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC,
						   1, sizeof ( *mad ),
						   1, sizeof ( *mad ) ),
			    0x03, mad, port, mad );
}

static inline int
hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG,
						 1, sizeof ( *mcg ) ),
			    0, NULL, index, mcg );
}

static inline int
hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG,
						1, sizeof ( *mcg ) ),
			    0, mcg, index, NULL );
}

static inline int
hermon_cmd_mgid_hash ( struct hermon *hermon, const struct ib_gid *gid,
		       struct hermonprm_mgm_hash *hash ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH,
						   1, sizeof ( *gid ),
						   0, sizeof ( *hash ) ),
			    0, gid, 0, hash );
}

static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
		       const struct hermonprm_scalar_parameter *offset ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM,
						0, sizeof ( *offset ) ),
			    0, offset, page_count, NULL );
}

static inline int
hermon_cmd_map_icm ( struct hermon *hermon,
		     const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_icm_aux ( struct hermon *hermon,
			 const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_set_icm_size ( struct hermon *hermon,
			  const struct hermonprm_scalar_parameter *icm_size,
			  struct hermonprm_scalar_parameter *icm_aux_size ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
						   0, sizeof ( *icm_size ),
						   0, sizeof (*icm_aux_size) ),
			    0, icm_size, 0, icm_aux_size );
}

static inline int
hermon_cmd_unmap_fa ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_fa ( struct hermon *hermon,
		    const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
			struct hermonprm_sense_port *port_type ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
						 1, sizeof ( *port_type ) ),
			    0, NULL, port, port_type );
}
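
/* Pattern sketch: each wrapper above simply encodes its opcode and
 * parameter-block sizes via the HERMON_HCR_*_CMD() macros and delegates
 * to hermon_cmd().  A hypothetical wrapper for an output-only command
 * would follow the same shape:
 *
 *	static inline int
 *	hermon_cmd_example ( struct hermon *hermon, unsigned int index,
 *			     struct hermonprm_example *out ) {
 *		return hermon_cmd ( hermon,
 *				    HERMON_HCR_OUT_CMD ( HERMON_HCR_EXAMPLE,
 *							 1, sizeof ( *out ) ),
 *				    0, NULL, index, out );
 *	}
 *
 * (HERMON_HCR_EXAMPLE and struct hermonprm_example are illustrative
 * names only, not part of the driver.)
 */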
/***************************************************************************
 *
 * Memory translation table operations
 *
 ***************************************************************************
 */

/**
 * Allocate MTT entries
 *
 * @v hermon		Hermon device
 * @v memory		Memory to map into MTT
 * @v len		Length of memory to map
 * @v mtt		MTT descriptor to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_mtt ( struct hermon *hermon,
			      const void *memory, size_t len,
			      struct hermon_mtt *mtt ) {
	struct hermonprm_write_mtt write_mtt;
	physaddr_t start;
	unsigned int page_offset;
	unsigned int num_pages;
	int mtt_offset;
	unsigned int mtt_base_addr;
	unsigned int i;
	int rc;

	/* Find available MTT entries */
	start = virt_to_phys ( memory );
	page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
	start -= page_offset;
	len += page_offset;
	num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
	mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
					    num_pages );
	if ( mtt_offset < 0 ) {
		DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
		       hermon, num_pages );
		rc = mtt_offset;
		goto err_mtt_offset;
	}
	mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
			  hermon->cap.mtt_entry_size );

	/* Fill in MTT structure */
	mtt->mtt_offset = mtt_offset;
	mtt->num_pages = num_pages;
	mtt->mtt_base_addr = mtt_base_addr;
	mtt->page_offset = page_offset;

	/* Construct and issue WRITE_MTT commands */
	for ( i = 0 ; i < num_pages ; i++ ) {
		memset ( &write_mtt, 0, sizeof ( write_mtt ) );
		MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
			     value, mtt_base_addr );
		MLX_FILL_2 ( &write_mtt.mtt, 1,
			     p, 1,
			     ptag_l, ( start >> 3 ) );
		if ( ( rc = hermon_cmd_write_mtt ( hermon,
						   &write_mtt ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
			       hermon, mtt_base_addr );
			goto err_write_mtt;
		}
		start += HERMON_PAGE_SIZE;
		mtt_base_addr += hermon->cap.mtt_entry_size;
	}

	return 0;

 err_write_mtt:
	hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
 err_mtt_offset:
	return rc;
}
/**
 * Free MTT entries
 *
 * @v hermon		Hermon device
 * @v mtt		MTT descriptor
 */
static void hermon_free_mtt ( struct hermon *hermon,
			      struct hermon_mtt *mtt ) {
	hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
			      mtt->num_pages );
}
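
/* Worked example (illustrative numbers): mapping a 6000-byte buffer at
 * physical address 0x12345800 with a 4096-byte HERMON_PAGE_SIZE gives
 *
 *	page_offset = 0x800, start = 0x12345000,
 *	len = 6000 + 0x800 = 8048  =>  num_pages = 2
 *
 * so two MTT entries are written, covering pages 0x12345000 and
 * 0x12346000, and mtt->page_offset records where the buffer actually
 * begins within the first page.
 */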
/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int hermon_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_mad mad_ifc;
	int rc;

	linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
			mad_size_mismatch );

	/* Copy in request packet */
	memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );

	/* Issue MAD */
	if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
					 &mad_ifc ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not issue MAD IFC: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );

	if ( mad->hdr.status != 0 ) {
		DBGC ( hermon, "Hermon %p MAD IFC status %04x\n",
		       hermon, ntohs ( mad->hdr.status ) );
		return -EIO;
	}
	return 0;
}
/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int hermon_create_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq;
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
					    HERMON_MAX_CQS, 1 );
	if ( cqn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of completion queues\n",
		       hermon );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
	if ( ! hermon_cq ) {
		rc = -ENOMEM;
		goto err_hermon_cq;
	}

	/* Allocate completion queue itself */
	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
				      sizeof ( hermon_cq->cqe[0] ) );
	if ( ! hermon_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
	}

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
				       hermon_cq->cqe_size,
				       &hermon_cq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2,
		     page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, HERMON_UAR_NON_EQ_PAGE,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
		     ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
	MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
		     ( virt_to_phys ( &hermon_cq->doorbell ) >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p SW2HW_CQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( hermon, "Hermon %p CQN %#lx ring at [%p,%p)\n",
	       hermon, cq->cqn, hermon_cq->cqe,
	       ( ( ( void * ) hermon_cq->cqe ) + hermon_cq->cqe_size ) );
	ib_cq_set_drvdata ( cq, hermon_cq );
	return 0;

 err_sw2hw_cq:
	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
	free ( hermon_cq );
 err_hermon_cq:
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
 err_cqn_offset:
	return rc;
}
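
/* Note: log_cq_size is filled with fls ( cq->num_cqes - 1 ), i.e. the
 * base-2 logarithm of the (power-of-two) queue size.  For example, for
 * num_cqes = 8, fls ( 7 ) = 3 and the hardware sees a 2^3-entry ring.
 */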
/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void hermon_destroy_cq ( struct ib_device *ibdev,
				struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL HW2SW_CQ failed on CQN %#lx: "
		       "%s\n", hermon, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_cq->mtt );

	/* Free memory */
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
	free ( hermon_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );

	ib_cq_set_drvdata ( cq, NULL );
}
/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Assign queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_alloc_qpn ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	unsigned int port_offset;
	int qpn_offset;

	/* Calculate queue pair number */
	port_offset = ( ibdev->port - HERMON_PORT_BASE );

	switch ( qp->type ) {
	case IB_QPT_SMI:
		qp->qpn = ( hermon->special_qpn_base + port_offset );
		return 0;
	case IB_QPT_GSI:
		qp->qpn = ( hermon->special_qpn_base + 2 + port_offset );
		return 0;
	case IB_QPT_UD:
	case IB_QPT_RC:
		/* Find a free queue pair number */
		qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
						    HERMON_MAX_QPS, 1 );
		if ( qpn_offset < 0 ) {
			DBGC ( hermon, "Hermon %p out of queue pairs\n",
			       hermon );
			return qpn_offset;
		}
		qp->qpn = ( ( random() & HERMON_QPN_RANDOM_MASK ) |
			    ( hermon->qpn_base + qpn_offset ) );
		return 0;
	default:
		DBGC ( hermon, "Hermon %p unsupported QP type %d\n",
		       hermon, qp->type );
		return -ENOTSUP;
	}
}

/**
 * Free queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_free_qpn ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int qpn_offset;

	qpn_offset = ( ( qp->qpn & ~HERMON_QPN_RANDOM_MASK )
		       - hermon->qpn_base );
	if ( qpn_offset >= 0 )
		hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
}
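
/* Note: for UD/RC queue pairs the QPN combines a bitmask-allocated
 * offset with randomised high-order bits: with (say) qpn_base 0x50 and
 * qpn_offset 2 the low bits are 0x52, and HERMON_QPN_RANDOM_MASK
 * selects which remaining bits come from random().  Randomising the
 * QPN makes it unlikely that a rebooted node reuses a QPN that peers
 * still associate with a stale connection.
 */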
/**
 * Calculate transmission rate
 *
 * @v av		Address vector
 * @ret hermon_rate	Hermon rate
 */
static unsigned int hermon_rate ( struct ib_address_vector *av ) {
	return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
		 ? ( av->rate + 5 ) : 0 );
}

/**
 * Calculate schedule queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret sched_queue	Schedule queue
 */
static unsigned int hermon_sched_queue ( struct ib_device *ibdev,
					 struct ib_queue_pair *qp ) {
	return ( ( ( qp->type == IB_QPT_SMI ) ?
		   HERMON_SCHED_QP0 : HERMON_SCHED_DEFAULT ) |
		 ( ( ibdev->port - 1 ) << 6 ) );
}
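
/* Note: the schedule queue value encodes the port via
 * ( ibdev->port - 1 ) << 6, with the low bits selecting either the
 * dedicated QP0 scheduler (for the SMI queue pair) or the default
 * scheduler for everything else.
 */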
/** Queue pair transport service type map */
static uint8_t hermon_qp_st[] = {
	[IB_QPT_SMI] = HERMON_ST_MLX,
	[IB_QPT_GSI] = HERMON_ST_MLX,
	[IB_QPT_UD] = HERMON_ST_UD,
	[IB_QPT_RC] = HERMON_ST_RC,
};
/**
 * Dump queue pair context (for debugging only)
 *
 * @v hermon		Hermon device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static inline int hermon_dump_qpctx ( struct hermon *hermon,
				      struct ib_queue_pair *qp ) {
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p QUERY_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}
	DBGC ( hermon, "Hermon %p QPN %lx context:\n", hermon, qp->qpn );
	DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2],
		   ( sizeof ( qpctx ) - 8 ) );

	return 0;
}
/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_create_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp;
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	/* Calculate queue pair number */
	if ( ( rc = hermon_alloc_qpn ( ibdev, qp ) ) != 0 )
		goto err_alloc_qpn;

	/* Allocate control structures */
	hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
	if ( ! hermon_qp ) {
		rc = -ENOMEM;
		goto err_hermon_qp;
	}

	/* Calculate doorbell address */
	hermon_qp->send.doorbell =
		( hermon->uar + HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE +
		  HERMON_DB_POST_SND_OFFSET );

	/* Allocate work queue buffer */
	hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
				( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
	hermon_qp->send.num_wqes =
		( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
	hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
				     sizeof ( hermon_qp->send.wqe[0] ) );
	hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
				     sizeof ( hermon_qp->recv.wqe[0] ) );
	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
				hermon_qp->recv.wqe_size );
	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
				      sizeof ( hermon_qp->send.wqe[0] ) );
	if ( ! hermon_qp->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	hermon_qp->send.wqe = hermon_qp->wqe;
	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
	hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
	memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
				       hermon_qp->wqe_size,
				       &hermon_qp->mtt ) ) != 0 ) {
		goto err_alloc_mtt;
	}

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 2,
		     qpc_eec_data.pm_state, HERMON_PM_STATE_MIGRATED,
		     qpc_eec_data.st, hermon_qp_st[qp->type] );
	MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size,
		     fls ( hermon_qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, HERMON_UAR_NON_EQ_PAGE );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_4 ( &qpctx, 38,
		     qpc_eec_data.rre, 1,
		     qpc_eec_data.rwe, 1,
		     qpc_eec_data.rae, 1,
		     qpc_eec_data.page_offset,
		     ( hermon_qp->mtt.page_offset >> 6 ) );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
		     ( virt_to_phys ( &hermon_qp->recv.doorbell ) >> 2 ) );
	MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
		     ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p RST2INIT_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_rst2init_qp;
	}
	hermon_qp->state = HERMON_QP_ST_INIT;

	DBGC ( hermon, "Hermon %p QPN %#lx send ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->send.wqe,
	       ( ((void *)hermon_qp->send.wqe ) + hermon_qp->send.wqe_size ) );
	DBGC ( hermon, "Hermon %p QPN %#lx receive ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->recv.wqe,
	       ( ((void *)hermon_qp->recv.wqe ) + hermon_qp->recv.wqe_size ) );
	ib_qp_set_drvdata ( qp, hermon_qp );
	return 0;

 err_rst2init_qp:
	hermon_cmd_2rst_qp ( hermon, qp->qpn );
	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
	free ( hermon_qp );
 err_hermon_qp:
	hermon_free_qpn ( ibdev, qp );
 err_alloc_qpn:
	return rc;
}
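
/* Note on send-ring sizing: the ring is grown beyond the caller's
 * request by one entry plus 2048 bytes of headroom and then rounded up
 * to a power of two, since the doorbell and ownership logic relies on
 * power-of-two rings.  For example, with hypothetical 64-byte WQEs and
 * qp->send.num_wqes = 8:  8 + 1 + ( 2048 / 64 ) = 41, which rounds up
 * to 64 entries.
 */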
/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_modify_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	/* Transition queue to RTR state, if applicable */
	if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_2 ( &qpctx, 4,
			     qpc_eec_data.mtu, HERMON_MTU_2048,
			     qpc_eec_data.msg_max, 31 );
		MLX_FILL_1 ( &qpctx, 7,
			     qpc_eec_data.remote_qpn_een, qp->av.qpn );
		MLX_FILL_1 ( &qpctx, 9,
			     qpc_eec_data.primary_address_path.rlid,
			     qp->av.lid );
		MLX_FILL_1 ( &qpctx, 10,
			     qpc_eec_data.primary_address_path.max_stat_rate,
			     hermon_rate ( &qp->av ) );
		memcpy ( &qpctx.u.dwords[12], &qp->av.gid,
			 sizeof ( qp->av.gid ) );
		MLX_FILL_1 ( &qpctx, 16,
			     qpc_eec_data.primary_address_path.sched_queue,
			     hermon_sched_queue ( ibdev, qp ) );
		MLX_FILL_1 ( &qpctx, 39,
			     qpc_eec_data.next_rcv_psn, qp->recv.psn );
		if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
						     &qpctx ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p INIT2RTR_QP failed: %s\n",
			       hermon, strerror ( rc ) );
			return rc;
		}
		hermon_qp->state = HERMON_QP_ST_RTR;
	}

	/* Transition queue to RTS state */
	if ( hermon_qp->state < HERMON_QP_ST_RTS ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_1 ( &qpctx, 10,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */ );
		MLX_FILL_2 ( &qpctx, 30,
			     qpc_eec_data.retry_count, HERMON_RETRY_MAX,
			     qpc_eec_data.rnr_retry, HERMON_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 32,
			     qpc_eec_data.next_send_psn, qp->send.psn );
		if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn,
						    &qpctx ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p RTR2RTS_QP failed: %s\n",
			       hermon, strerror ( rc ) );
			return rc;
		}
		hermon_qp->state = HERMON_QP_ST_RTS;
	}

	/* Update parameters in RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, HERMON_QP_OPT_PARAM_QKEY );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( hermon, "Hermon %p RTS2RTS_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_destroy_qp ( struct ib_device *ibdev,
				struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL 2RST_QP failed on QPN %#lx: "
		       "%s\n", hermon, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_qp->mtt );

	/* Free memory */
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
	free ( hermon_qp );

	/* Mark queue number as free */
	hermon_free_qpn ( ibdev, qp );

	ib_qp_set_drvdata ( qp, NULL );
}
/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Construct UD send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_ud_send_wqe ( struct ib_device *ibdev,
			  struct ib_queue_pair *qp __unused,
			  struct ib_address_vector *av,
			  struct io_buffer *iobuf,
			  union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	MLX_FILL_1 ( &wqe->ud.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->ud ), data[1] ) / 16 ) ) );
	MLX_FILL_1 ( &wqe->ud.ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_2 ( &wqe->ud.ud, 0,
		     ud_address_vector.pd, HERMON_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud.ud, 1,
		     ud_address_vector.rlid, av->lid,
		     ud_address_vector.g, av->gid_present );
	MLX_FILL_1 ( &wqe->ud.ud, 2,
		     ud_address_vector.max_stat_rate, hermon_rate ( av ) );
	MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, av->sl );
	memcpy ( &wqe->ud.ud.u.dwords[4], &av->gid, sizeof ( av->gid ) );
	MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, av->qpn );
	MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, av->qkey );
	MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->ud.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}
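
/* Note: the "ds" (descriptor size) field counts 16-byte chunks, so
 * offsetof ( typeof ( wqe->ud ), data[1] ) / 16 covers the control
 * segment, the UD address-vector segment and exactly one data
 * segment.  A WQE using two data segments would measure up to data[2]
 * instead, as the MLX constructor below does.
 */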
/**
 * Construct MLX send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_mlx_send_wqe ( struct ib_device *ibdev,
			   struct ib_queue_pair *qp,
			   struct ib_address_vector *av,
			   struct io_buffer *iobuf,
			   union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct io_buffer headers;

	/* Construct IB headers */
	iob_populate ( &headers, &wqe->mlx.headers, 0,
		       sizeof ( wqe->mlx.headers ) );
	iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), av );

	/* Fill work queue entry */
	MLX_FILL_1 ( &wqe->mlx.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->mlx ), data[2] ) / 16 ) ) );
	MLX_FILL_5 ( &wqe->mlx.ctrl, 2,
		     c, 0x03 /* generate completion */,
		     icrc, 0 /* generate ICRC */,
		     max_statrate, hermon_rate ( av ),
		     slr, 1,
		     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
	MLX_FILL_1 ( &wqe->mlx.ctrl, 3, rlid, av->lid );
	MLX_FILL_1 ( &wqe->mlx.data[0], 0,
		     byte_count, iob_len ( &headers ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->mlx.data[0], 3,
		     local_address_l, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 0,
		     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->mlx.data[1], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}
/**
 * Construct RC send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_rc_send_wqe ( struct ib_device *ibdev,
			  struct ib_queue_pair *qp __unused,
			  struct ib_address_vector *av __unused,
			  struct io_buffer *iobuf,
			  union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	MLX_FILL_1 ( &wqe->rc.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->rc ), data[1] ) / 16 ) ) );
	MLX_FILL_1 ( &wqe->rc.ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->rc.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}
/** Work queue entry constructors */
static unsigned int
( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       struct ib_address_vector *av,
			       struct io_buffer *iobuf,
			       union hermon_send_wqe *wqe ) = {
	[IB_QPT_SMI] = hermon_fill_mlx_send_wqe,
	[IB_QPT_GSI] = hermon_fill_mlx_send_wqe,
	[IB_QPT_UD] = hermon_fill_ud_send_wqe,
	[IB_QPT_RC] = hermon_fill_rc_send_wqe,
};
/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_send ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *av,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
	union hermon_send_wqe *wqe;
	union hermonprm_doorbell_register db_reg;
	unsigned int wqe_idx_mask;
	unsigned int opcode;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p send queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_send_wq->wqe[ wq->next_idx &
				    ( hermon_send_wq->num_wqes - 1 ) ];

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
		 ( sizeof ( *wqe ) - 4 ) );
	assert ( qp->type < ( sizeof ( hermon_fill_send_wqe ) /
			      sizeof ( hermon_fill_send_wqe[0] ) ) );
	assert ( hermon_fill_send_wqe[qp->type] != NULL );
	opcode = hermon_fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf, wqe );
	barrier();
	MLX_FILL_2 ( &wqe->ctrl, 0,
		     opcode, opcode,
		     owner,
		     ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 ) );
	DBGCP ( hermon, "Hermon %p posting send WQE:\n", hermon );
	DBGCP_HD ( hermon, wqe, sizeof ( *wqe ) );
	barrier();

	/* Ring doorbell register */
	MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
	DBGCP ( hermon, "Ringing doorbell %08lx with %08x\n",
		virt_to_phys ( hermon_send_wq->doorbell ), db_reg.dword[0] );
	writel ( db_reg.dword[0], ( hermon_send_wq->doorbell ) );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
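
/* Note: ctrl.owner is deliberately written last.  The memset above
 * skips the first four bytes of the WQE, and a barrier() separates
 * filling the WQE body from setting the opcode/owner word, so the
 * hardware never sees a half-built entry whose ownership bit already
 * claims it is valid.  The owner bit alternates each time next_idx
 * wraps the ring, which is what
 * ( wq->next_idx & hermon_send_wq->num_wqes ) computes.
 */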
/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_recv ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
	struct hermonprm_recv_wqe *wqe;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p receive queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update work queue's index */
	wq->next_idx++;

	/* Update doorbell record */
	barrier();
	MLX_FILL_1 ( &hermon_recv_wq->doorbell, 0, receive_wqe_counter,
		     ( wq->next_idx & 0xffff ) );

	return 0;
}
/**
 * Handle completion
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v cqe		Hardware completion queue entry
 * @ret rc		Return status code
 */
static int hermon_complete ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     union hermonprm_completion_entry *cqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct hermon_queue_pair *hermon_qp;
	struct io_buffer *iobuf;
	struct ib_address_vector recv_av;
	struct ib_global_route_header *grh;
	struct ib_address_vector *av;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned int wqe_idx;
	size_t len;
	int rc = 0;

	/* Parse completion */
	qpn = MLX_GET ( &cqe->normal, qpn );
	is_send = MLX_GET ( &cqe->normal, s_r );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
		DBGC ( hermon, "Hermon %p CQN %lx syndrome %x vendor %x\n",
		       hermon, cq->cqn, MLX_GET ( &cqe->error, syndrome ),
		       MLX_GET ( &cqe->error, vendor_error_syndrome ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( hermon, "Hermon %p CQN %lx unknown %s QPN %lx\n",
		       hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	hermon_qp = ib_qp_get_drvdata ( qp );

	/* Identify I/O buffer */
	wqe_idx = ( MLX_GET ( &cqe->normal, wqe_counter ) &
		    ( wq->num_wqes - 1 ) );
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( hermon, "Hermon %p CQN %lx QPN %lx empty WQE %x\n",
		       hermon, cq->cqn, qp->qpn, wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	if ( is_send ) {
		/* Hand off to completion handler */
		ib_complete_send ( ibdev, qp, iobuf, rc );
	} else {
		/* Set received length */
		len = MLX_GET ( &cqe->normal, byte_cnt );
		assert ( len <= iob_tailroom ( iobuf ) );
		iob_put ( iobuf, len );
		switch ( qp->type ) {
		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			assert ( iob_len ( iobuf ) >= sizeof ( *grh ) );
			grh = iobuf->data;
			iob_pull ( iobuf, sizeof ( *grh ) );
			/* Construct address vector */
			av = &recv_av;
			memset ( av, 0, sizeof ( *av ) );
			av->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
			av->lid = MLX_GET ( &cqe->normal, slid_smac47_32 );
			av->sl = MLX_GET ( &cqe->normal, sl );
			av->gid_present = MLX_GET ( &cqe->normal, g );
			memcpy ( &av->gid, &grh->sgid, sizeof ( av->gid ) );
			break;
		case IB_QPT_RC:
			av = &qp->av;
			break;
		default:
			assert ( 0 );
			return -EINVAL;
		}
		/* Hand off to completion handler */
		ib_complete_recv ( ibdev, qp, av, iobuf, rc );
	}

	return rc;
}
/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void hermon_poll_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	union hermonprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) ^
		     ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p completion:\n", hermon );
		DBGCP_HD ( hermon, cqe, sizeof ( *cqe ) );

		/* Handle completion */
		if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p failed to complete: %s\n",
			       hermon, strerror ( rc ) );
			DBGC_HD ( hermon, cqe, sizeof ( *cqe ) );
		}

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		barrier();
		MLX_FILL_1 ( &hermon_cq->doorbell, 0, update_ci,
			     ( cq->next_idx & 0x00ffffffUL ) );
	}
}
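
/* Ownership example: with num_cqes = 4, software initially owns no
 * entries (hermon_create_cq() sets every owner bit to 1).  On the
 * first pass ( cq->next_idx & cq->num_cqes ) == 0, so software
 * consumes entries whose owner bit reads 0 (freshly written by
 * hardware); after index 4 the parity flips and software consumes
 * entries whose owner bit reads 1.  This trick avoids having to clear
 * consumed entries on every poll.
 */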
/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

/**
 * Create event queue
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_create_eq ( struct hermon *hermon ) {
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	struct hermonprm_eqc eqctx;
	struct hermonprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number */
	hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
	if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
		hermon_eq->eqn = hermon->cap.reserved_eqs;

	/* Calculate doorbell address */
	hermon_eq->doorbell =
		( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );

	/* Allocate event queue itself */
	hermon_eq->eqe_size =
		( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
	hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
				      sizeof ( hermon_eq->eqe[0] ) );
	if ( ! hermon_eq->eqe ) {
		rc = -ENOMEM;
		goto err_eqe;
	}
	memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
	for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
		MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
	}

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
				       hermon_eq->eqe_size,
				       &hermon_eq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware */
	memset ( &eqctx, 0, sizeof ( eqctx ) );
	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
	MLX_FILL_1 ( &eqctx, 2,
		     page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
	MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
		     ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
					  &eqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p SW2HW_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_sw2hw_eq;
	}

	/* Map events to this event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = hermon_cmd_map_eq ( hermon,
					( HERMON_MAP_EQ | hermon_eq->eqn ),
					&mask ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p MAP_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_eq;
	}

	DBGC ( hermon, "Hermon %p EQN %#lx ring at [%p,%p)\n",
	       hermon, hermon_eq->eqn, hermon_eq->eqe,
	       ( ( ( void * ) hermon_eq->eqe ) + hermon_eq->eqe_size ) );
	return 0;

 err_map_eq:
	hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
 err_sw2hw_eq:
	hermon_free_mtt ( hermon, &hermon_eq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
 err_eqe:
	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
	return rc;
}

/**
 * Destroy event queue
 *
 * @v hermon		Hermon device
 */
static void hermon_destroy_eq ( struct hermon *hermon ) {
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	struct hermonprm_eqc eqctx;
	struct hermonprm_event_mask mask;
	int rc;

	/* Unmap events from event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = hermon_cmd_map_eq ( hermon,
					( HERMON_UNMAP_EQ | hermon_eq->eqn ),
					&mask ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL MAP_EQ failed to unmap: %s\n",
		       hermon, strerror ( rc ) );
		/* Continue; HCA may die but system should survive */
	}

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
					  &eqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL HW2SW_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_eq->mtt );

	/* Free memory */
	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
}
/**
 * Handle port state event
 *
 * @v hermon		Hermon device
 * @v eqe		Port state change event queue entry
 */
static void hermon_event_port_state_change ( struct hermon *hermon,
					     union hermonprm_event_entry *eqe){
	unsigned int port;
	int link_up;

	/* Get port and link status */
	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
	DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
	       ( link_up ? "up" : "down" ) );

	/* Sanity check */
	if ( port >= hermon->cap.num_ports ) {
		DBGC ( hermon, "Hermon %p port %d does not exist!\n",
		       hermon, ( port + 1 ) );
		return;
	}

	/* Update MAD parameters */
	ib_smc_update ( hermon->ibdev[port], hermon_mad );

	/* Notify Infiniband core of link state change */
	ib_link_state_changed ( hermon->ibdev[port] );
}
/**
 * Poll event queue
 *
 * @v ibdev		Infiniband device
 */
static void hermon_poll_eq ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	union hermonprm_event_entry *eqe;
	union hermonprm_doorbell_register db_reg;
	unsigned int eqe_idx_mask;
	unsigned int event_type;

	while ( 1 ) {
		/* Look for event entry */
		eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
		eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
		if ( MLX_GET ( &eqe->generic, owner ) ^
		     ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p event:\n", hermon );
		DBGCP_HD ( hermon, eqe, sizeof ( *eqe ) );

		/* Handle event */
		event_type = MLX_GET ( &eqe->generic, event_type );
		switch ( event_type ) {
		case HERMON_EV_PORT_STATE_CHANGE:
			hermon_event_port_state_change ( hermon, eqe );
			break;
		default:
			DBGC ( hermon, "Hermon %p unrecognised event type "
			       "%#x:\n", hermon, event_type );
			DBGC_HD ( hermon, eqe, sizeof ( *eqe ) );
			break;
		}

		/* Update event queue's index */
		hermon_eq->next_idx++;

		/* Ring doorbell */
		MLX_FILL_1 ( &db_reg.event, 0,
			     ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
		DBGCP ( hermon, "Ringing doorbell %08lx with %08x\n",
			virt_to_phys ( hermon_eq->doorbell ),
			db_reg.dword[0] );
		writel ( db_reg.dword[0], hermon_eq->doorbell );
	}
}
/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Sense port type
 *
 * @v ibdev		Infiniband device
 * @ret port_type	Port type, or negative error
 */
static int hermon_sense_port_type ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_sense_port sense_port;
	int port_type;
	int rc;

	/* If DPDP is not supported, always assume Infiniband */
	if ( ! hermon->cap.dpdp )
		return HERMON_PORT_TYPE_IB;

	/* Sense the port type */
	if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
					    &sense_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}
	port_type = MLX_GET ( &sense_port, port_type );

	DBGC ( hermon, "Hermon %p port %d type %d\n",
	       hermon, ibdev->port, port_type );
	return port_type;
}
/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int hermon_open ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_init_port init_port;
	int port_type;
	int rc;

	/* Check we are connected to an Infiniband network */
	if ( ( rc = port_type = hermon_sense_port_type ( ibdev ) ) < 0 )
		return rc;
	if ( port_type != HERMON_PORT_TYPE_IB ) {
		DBGC ( hermon, "Hermon %p port %d not connected to an "
		       "Infiniband network", hermon, ibdev->port );
		return -ENOTCONN;
	}

	/* Init Port */
	memset ( &init_port, 0, sizeof ( init_port ) );
	MLX_FILL_2 ( &init_port, 0,
		     port_width_cap, 3,
		     vl_cap, 1 );
	MLX_FILL_2 ( &init_port, 1,
		     mtu, HERMON_MTU_2048,
		     max_gid, 1 );
	MLX_FILL_1 ( &init_port, 2, max_pkey, 64 );
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port,
					   &init_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise port: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	/* Update MAD parameters */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;
}
/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void hermon_close ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not close port: %s\n",
		       hermon, strerror ( rc ) );
		/* Nothing we can do about this */
	}
}
/**
 * Inform embedded subnet management agent of a received MAD
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int hermon_inform_sma ( struct ib_device *ibdev,
			       union ib_mad *mad ) {
	int rc;

	/* Send the MAD to the embedded SMA */
	if ( ( rc = hermon_mad ( ibdev, mad ) ) != 0 )
		return rc;

	/* Update parameters held in software */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;
}
/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int hermon_mcast_attach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
		       hermon, index );
		return -EBUSY;
	}

	/* Update hash table entry */
	MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
	MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
	memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void hermon_mcast_detach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp __unused,
				  struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mcg, 0, sizeof ( mcg ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return;
	}
}
/** Hermon Infiniband operations */
static struct ib_device_operations hermon_ib_operations = {
	.create_cq	= hermon_create_cq,
	.destroy_cq	= hermon_destroy_cq,
	.create_qp	= hermon_create_qp,
	.modify_qp	= hermon_modify_qp,
	.destroy_qp	= hermon_destroy_qp,
	.post_send	= hermon_post_send,
	.post_recv	= hermon_post_recv,
	.poll_cq	= hermon_poll_cq,
	.poll_eq	= hermon_poll_eq,
	.open		= hermon_open,
	.close		= hermon_close,
	.mcast_attach	= hermon_mcast_attach,
	.mcast_detach	= hermon_mcast_detach,
	.set_port_info	= hermon_inform_sma,
	.set_pkey_table	= hermon_inform_sma,
};
/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */

/**
 * Map virtual to physical address for firmware usage
 *
 * @v hermon		Hermon device
 * @v map		Mapping function
 * @v va		Virtual address
 * @v pa		Physical address
 * @v len		Length of region
 * @ret rc		Return status code
 */
static int hermon_map_vpm ( struct hermon *hermon,
			    int ( *map ) ( struct hermon *hermon,
			    const struct hermonprm_virtual_physical_mapping* ),
			    uint64_t va, physaddr_t pa, size_t len ) {
	struct hermonprm_virtual_physical_mapping mapping;
	int rc;

	assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );

	/* These mappings tend to generate huge volumes of
	 * uninteresting debug data, which basically makes it
	 * impossible to use debugging otherwise.
	 */
	DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );

	while ( len ) {
		memset ( &mapping, 0, sizeof ( mapping ) );
		MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
		MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
		MLX_FILL_2 ( &mapping, 3,
			     log2size, 0,
			     pa_l, ( pa >> 12 ) );
		if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
			DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
			DBGC ( hermon, "Hermon %p could not map %llx => %lx: "
			       "%s\n", hermon, va, pa, strerror ( rc ) );
			return rc;
		}
		pa += HERMON_PAGE_SIZE;
		va += HERMON_PAGE_SIZE;
		len -= HERMON_PAGE_SIZE;
	}

	DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
	return 0;
}
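
/* Worked example (illustrative numbers): mapping 3 pages at physical
 * 0x00200000 to firmware virtual address 0 issues three single-page
 * mapping commands with ( va, pa ) = ( 0x0000, 0x00200000 ),
 * ( 0x1000, 0x00201000 ) and ( 0x2000, 0x00202000 ); log2size 0 means
 * each command covers exactly one 4kB page.
 */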
/**
 * Start firmware running
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_start_firmware ( struct hermon *hermon ) {
	struct hermonprm_query_fw fw;
	unsigned int fw_pages;
	size_t fw_size;
	physaddr_t fw_base;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
	       hermon, fw_pages, ( fw_pages * ( HERMON_PAGE_SIZE / 1024 ) ) );

	/* Allocate firmware pages and map firmware area */
	fw_size = ( fw_pages * HERMON_PAGE_SIZE );
	hermon->firmware_area = umalloc ( fw_size );
	if ( ! hermon->firmware_area ) {
		rc = -ENOMEM;
		goto err_alloc_fa;
	}
	fw_base = user_to_phys ( hermon->firmware_area, 0 );
	DBGC ( hermon, "Hermon %p firmware area at physical [%lx,%lx)\n",
	       hermon, fw_base, ( fw_base + fw_size ) );
	if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_fa,
				     0, fw_base, fw_size ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( hermon, "Hermon %p firmware started\n", hermon );
	return 0;

 err_run_fw:
 err_map_fa:
	hermon_cmd_unmap_fa ( hermon );
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
 err_alloc_fa:
 err_query_fw:
	return rc;
}
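
/* QUERY_FW reports the firmware area size as a page count rather than a
 * byte count; the conversion above is simply fw_size = fw_pages *
 * HERMON_PAGE_SIZE.  For example (illustrative figure), fw_pages = 13
 * with a 4kB page would give a 52 kB firmware area.
 */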
/**
 * Stop firmware running
 *
 * @v hermon		Hermon device
 */
static void hermon_stop_firmware ( struct hermon *hermon ) {
	int rc;

	if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
}
/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */
/**
 * Get device limits
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_get_cap ( struct hermon *hermon ) {
	struct hermonprm_query_dev_cap dev_cap;
	int rc;

	if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
	hermon->cap.reserved_qps =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
	hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
	hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
	hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
	hermon->cap.reserved_srqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
	hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
	hermon->cap.reserved_cqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
	hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
	hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
	hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
	hermon->cap.reserved_mtts =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
	hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
	hermon->cap.reserved_mrws =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
	hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
	hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
	hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
	hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );

	/* Sanity check */
	if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
		DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
		       hermon, hermon->cap.num_ports, HERMON_MAX_PORTS );
		hermon->cap.num_ports = HERMON_MAX_PORTS;
	}

	return 0;
}
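
/* QUERY_DEV_CAP reports most reserved-resource counts as log2 values,
 * which the code above expands into plain counts.  For example
 * (illustrative value), log2_rsvd_qps = 6 would yield
 * hermon->cap.reserved_qps = ( 1 << 6 ) = 64.
 */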
/**
 * Calculate ICM usage
 *
 * @v log_num_entries	Log2 of the number of entries
 * @v entry_size	Entry size
 * @ret usage		Usage size in ICM
 */
static size_t icm_usage ( unsigned int log_num_entries, size_t entry_size ) {
	size_t usage;

	usage = ( ( 1 << log_num_entries ) * entry_size );
	usage = ( ( usage + HERMON_PAGE_SIZE - 1 ) &
		  ~( HERMON_PAGE_SIZE - 1 ) );
	return usage;
}
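
/* Worked example of the rounding above (values illustrative): with
 * log_num_entries = 5 and entry_size = 0x68, the raw usage is
 * 32 * 0x68 = 0xd00 bytes; assuming a 4kB HERMON_PAGE_SIZE this rounds
 * up to one full page:
 *
 *	( 0xd00 + 0xfff ) & ~0xfff == 0x1000
 */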
/**
 * Allocate ICM
 *
 * @v hermon		Hermon device
 * @v init_hca		INIT_HCA structure to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_icm ( struct hermon *hermon,
			      struct hermonprm_init_hca *init_hca ) {
	struct hermonprm_scalar_parameter icm_size;
	struct hermonprm_scalar_parameter icm_aux_size;
	uint64_t icm_offset = 0;
	unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
	unsigned int log_num_mtts, log_num_mpts;
	size_t cmpt_max_len;
	size_t qp_cmpt_len, srq_cmpt_len, cq_cmpt_len, eq_cmpt_len;
	size_t icm_len, icm_aux_len;
	physaddr_t icm_phys;
	int i;
	int rc;

	/*
	 * Start by carving up the ICM virtual address space
	 *
	 */

	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( hermon->cap.reserved_qps +
			    HERMON_RSVD_SPECIAL_QPS + HERMON_MAX_QPS - 1 );
	log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
	log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
	log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
	log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );

	/* ICM starts with the cMPT tables, which are sparse */
	cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
			 ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
	qp_cmpt_len = icm_usage ( log_num_qps, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_QP_CMPT].len = qp_cmpt_len;
	icm_offset += cmpt_max_len;
	srq_cmpt_len = icm_usage ( log_num_srqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = srq_cmpt_len;
	icm_offset += cmpt_max_len;
	cq_cmpt_len = icm_usage ( log_num_cqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_CQ_CMPT].len = cq_cmpt_len;
	icm_offset += cmpt_max_len;
	eq_cmpt_len = icm_usage ( log_num_eqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_EQ_CMPT].len = eq_cmpt_len;
	icm_offset += cmpt_max_len;

	hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;

	/* Queue pair contexts */
	MLX_FILL_1 ( init_hca, 12,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( hermon, "Hermon %p ICM QPC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps, hermon->cap.qpc_entry_size );

	/* Extended alternate path contexts */
	MLX_FILL_1 ( init_hca, 24,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM ALTC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.altc_entry_size );

	/* Extended auxiliary contexts */
	MLX_FILL_1 ( init_hca, 28,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM AUXC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.auxc_entry_size );

	/* Shared receive queue contexts */
	MLX_FILL_1 ( init_hca, 18,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( hermon, "Hermon %p ICM SRQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_srqs,
				  hermon->cap.srqc_entry_size );

	/* Completion queue contexts */
	MLX_FILL_1 ( init_hca, 20,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( hermon, "Hermon %p ICM CQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_cqs, hermon->cap.cqc_entry_size );

	/* Event queue contexts */
	MLX_FILL_1 ( init_hca, 32,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
		     log_num_eqs );
	DBGC ( hermon, "Hermon %p ICM EQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_eqs, hermon->cap.eqc_entry_size );

	/* Memory translation table */
	MLX_FILL_1 ( init_hca, 64,
		     tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( hermon, "Hermon %p ICM MTT base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_mtts,
				  hermon->cap.mtt_entry_size );

	/* Memory protection table */
	log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
	MLX_FILL_1 ( init_hca, 60,
		     tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.dmpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_dmpt_sz, log_num_mpts );
	DBGC ( hermon, "Hermon %p ICM DMPT base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_mpts,
				  hermon->cap.dmpt_entry_size );

	/* Multicast table */
	MLX_FILL_1 ( init_hca, 48,
		     multicast_parameters.mc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.log_mc_table_hash_sz, 3 );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz, 3 );
	DBGC ( hermon, "Hermon %p ICM MC base = %llx\n", hermon, icm_offset );
	icm_offset += ( ( 8 * sizeof ( struct hermonprm_mcg_entry ) +
			  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );

	hermon->icm_map[HERMON_ICM_OTHER].len =
		( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );

	/*
	 * Allocate and map physical memory for (portions of) ICM
	 *
	 * Map is:
	 *   ICM AUX area (aligned to its own size)
	 *   cMPT areas
	 *   Other areas
	 */

	/* Calculate physical memory required for ICM */
	icm_len = 0;
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		icm_len += hermon->icm_map[i].len;
	}

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
	if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
					      &icm_aux_size ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
		       hermon, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );

	/* Allocate ICM data and auxiliary area */
	DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	hermon->icm = umalloc ( icm_aux_len + icm_len );
	if ( ! hermon->icm ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	icm_phys = user_to_phys ( hermon->icm, 0 );

	/* Map ICM auxiliary area */
	DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
	       hermon, icm_phys );
	if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm_aux,
				     0, icm_phys, icm_aux_len ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += icm_aux_len;

	/* Map ICM area */
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
		       hermon, hermon->icm_map[i].offset,
		       hermon->icm_map[i].len, icm_phys );
		if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm,
					     hermon->icm_map[i].offset,
					     icm_phys,
					     hermon->icm_map[i].len ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
			       hermon, strerror ( rc ) );
			goto err_map_icm;
		}
		icm_phys += hermon->icm_map[i].len;
	}

	return 0;

 err_map_icm:
	assert ( i == 0 ); /* We don't handle partial failure at present */
 err_map_icm_aux:
	hermon_cmd_unmap_icm_aux ( hermon );
	ufree ( hermon->icm );
	hermon->icm = UNULL;
 err_alloc:
 err_set_icm_size:
	return rc;
}
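
/* The ICM virtual layout carved out above is, in order: the four sparse
 * cMPT regions (QP, SRQ, CQ, EQ), spaced cmpt_max_len apart, followed by
 * the HERMON_ICM_OTHER region holding the QPC, ALTC, AUXC, SRQC, CQC,
 * EQC, MTT, DMPT and multicast tables.  Only the icm_usage()-sized
 * prefix of each cMPT region is ever backed by physical memory.
 */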
/**
 * Free ICM
 *
 * @v hermon		Hermon device
 */
static void hermon_free_icm ( struct hermon *hermon ) {
	struct hermonprm_scalar_parameter unmap_icm;
	int i;

	for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
		memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
		MLX_FILL_1 ( &unmap_icm, 0, value_hi,
			     ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &unmap_icm, 1, value,
			     hermon->icm_map[i].offset );
		hermon_cmd_unmap_icm ( hermon,
				       ( 1 << fls ( ( hermon->icm_map[i].len /
						      HERMON_PAGE_SIZE ) - 1 )),
				       &unmap_icm );
	}
	hermon_cmd_unmap_icm_aux ( hermon );
	ufree ( hermon->icm );
	hermon->icm = UNULL;
}
/***************************************************************************
 *
 * Initialisation
 *
 ***************************************************************************
 */
/**
 * Set up memory protection table
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_setup_mpt ( struct hermon *hermon ) {
	struct hermonprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
	hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );

	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_7 ( &mpt, 0,
		     atomic, 1,
		     rw, 1,
		     rr, 1,
		     lw, 1,
		     lr, 1,
		     pa, 1,
		     r_w, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_1 ( &mpt, 3,
		     pd, HERMON_GLOBAL_PD );
	MLX_FILL_1 ( &mpt, 10, len64, 1 );
	if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
					   hermon->cap.reserved_mrws,
					   &mpt ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
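
/* The lkey derivation above is a one-byte left rotation of the key.  A
 * worked example, assuming HERMON_MKEY_PREFIX is 0x77000000 and
 * reserved_mrws is 1:
 *
 *	key          = 0x77000001;
 *	hermon->lkey = ( 0x77000001 << 8 ) | ( 0x77000001 >> 24 )
 *	             = 0x00000177;
 *
 * i.e. the prefix byte ends up in the low byte of the key as presented
 * to consumers of hermon->lkey.
 */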
/**
 * Configure special queue pairs
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_configure_special_qps ( struct hermon *hermon ) {
	int rc;

	/* Special QP block must be aligned on its own size */
	hermon->special_qpn_base = ( ( hermon->cap.reserved_qps +
				       HERMON_NUM_SPECIAL_QPS - 1 )
				     & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
	hermon->qpn_base = ( hermon->special_qpn_base +
			     HERMON_NUM_SPECIAL_QPS );
	DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
	       hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );

	/* Issue command to configure special QPs */
	if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
					  hermon->special_qpn_base ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not configure special QPs: "
		       "%s\n", hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
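
/* Worked example of the alignment above (values illustrative): if
 * reserved_qps were 100 and HERMON_NUM_SPECIAL_QPS were 8, then
 * special_qpn_base = ( 100 + 7 ) & ~7 = 104, and ordinary queue pairs
 * would be numbered from qpn_base = 112 upwards.
 */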
/**
 * Reset device
 *
 * @v hermon		Hermon device
 * @v pci		PCI device
 */
static void hermon_reset ( struct hermon *hermon,
			   struct pci_device *pci ) {
	struct pci_config_backup backup;
	static const uint8_t backup_exclude[] =
		PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );

	pci_backup ( pci, &backup, backup_exclude );
	writel ( HERMON_RESET_MAGIC,
		 ( hermon->config + HERMON_RESET_OFFSET ) );
	mdelay ( HERMON_RESET_WAIT_TIME_MS );
	pci_restore ( pci, &backup, backup_exclude );
}
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @v id		PCI ID
 * @ret rc		Return status code
 */
static int hermon_probe ( struct pci_device *pci,
			  const struct pci_device_id *id __unused ) {
	struct hermon *hermon;
	struct ib_device *ibdev;
	struct hermonprm_init_hca init_hca;
	unsigned int i;
	int rc;

	/* Allocate Hermon device */
	hermon = zalloc ( sizeof ( *hermon ) );
	if ( ! hermon ) {
		rc = -ENOMEM;
		goto err_alloc_hermon;
	}
	pci_set_drvdata ( pci, hermon );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ),
				   HERMON_PCI_CONFIG_BAR_SIZE );
	hermon->uar = ioremap ( pci_bar_start ( pci, HERMON_PCI_UAR_BAR ),
				HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE );

	/* Reset device */
	hermon_reset ( hermon, pci );

	/* Allocate space for mailboxes */
	hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
					  HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_in ) {
		rc = -ENOMEM;
		goto err_mailbox_in;
	}
	hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
					   HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_out ) {
		rc = -ENOMEM;
		goto err_mailbox_out;
	}

	/* Start firmware */
	if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
		goto err_start_firmware;

	/* Get device limits */
	if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
		goto err_get_cap;

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		hermon->ibdev[i] = ibdev;
		ibdev->op = &hermon_ib_operations;
		ibdev->dev = &pci->dev;
		ibdev->port = ( HERMON_PORT_BASE + i );
		ib_set_drvdata ( ibdev, hermon );
	}

	/* Allocate ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
		goto err_alloc_icm;

	/* Initialise HCA */
	MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
	MLX_FILL_1 ( &init_hca, 5, udp, 1 );
	MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
	if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
		goto err_setup_mpt;
	for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
		hermon->ibdev[i]->rdma_key = hermon->lkey;

	/* Set up event queue */
	if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
		goto err_create_eq;

	/* Configure special QPs */
	if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
		goto err_conf_special_qps;

	/* Update IPoIB MAC address */
	for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
		ib_smc_update ( hermon->ibdev[i], hermon_mad );
	}

	/* Register Infiniband devices */
	for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
		if ( ( rc = register_ibdev ( hermon->ibdev[i] ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not register IB "
			       "device: %s\n", hermon, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	return 0;

	i = hermon->cap.num_ports;
 err_register_ibdev:
	for ( i-- ; ( signed int ) i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
 err_conf_special_qps:
	hermon_destroy_eq ( hermon );
 err_create_eq:
 err_setup_mpt:
	hermon_cmd_close_hca ( hermon );
 err_init_hca:
	hermon_free_icm ( hermon );
 err_alloc_icm:
	i = hermon->cap.num_ports;
 err_alloc_ibdev:
	for ( i-- ; ( signed int ) i >= 0 ; i-- )
		ibdev_put ( hermon->ibdev[i] );
 err_get_cap:
	hermon_stop_firmware ( hermon );
 err_start_firmware:
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
 err_mailbox_out:
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 err_mailbox_in:
	free ( hermon );
 err_alloc_hermon:
	return rc;
}
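
/* The error ladder above unwinds in strict reverse order of
 * acquisition; hermon_remove() below performs the same teardown in
 * full for a successfully probed device.
 */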
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void hermon_remove ( struct pci_device *pci ) {
	struct hermon *hermon = pci_get_drvdata ( pci );
	int i;

	for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
	hermon_destroy_eq ( hermon );
	hermon_cmd_close_hca ( hermon );
	hermon_free_icm ( hermon );
	hermon_stop_firmware ( hermon );
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
	for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
		ibdev_put ( hermon->ibdev[i] );
	free ( hermon );
}
static struct pci_device_id hermon_nics[] = {
	PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
};

struct pci_driver hermon_driver __pci_driver = {
	.ids = hermon_nics,
	.id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
	.probe = hermon_probe,
	.remove = hermon_remove,
};