/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 * Copyright (C) 2008 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <gpxe/pci.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/infiniband.h>
#include "hermon.h"

/**
 * @file
 *
 * Mellanox Hermon Infiniband HCA
 *
 */
/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @v num_bits		Number of contiguous bits to allocate within bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
				  unsigned int bits_len,
				  unsigned int num_bits ) {
	unsigned int bit = 0;
	hermon_bitmask_t mask = 1;
	unsigned int found = 0;

	/* Search bits for num_bits contiguous free bits */
	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			if ( ++found == num_bits )
				goto found;
		} else {
			found = 0;
		}
		bit++;
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENFILE;

 found:
	/* Mark bits as in-use */
	do {
		*bits |= mask;
		if ( mask == 1 )
			bits--;
		mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
	} while ( --found );

	return ( bit - num_bits + 1 );
}
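
/* Note on the loops above: the single-bit mask is rotated left one
 * position per search iteration, wrapping from the top bit of one
 * hermon_bitmask_t word to the bottom bit of the next, and the
 * matching right-rotation in the marking loop walks back across the
 * bits just found.  This relies on hermon_bitmask_t being an unsigned
 * type, so that the shifts cannot sign-extend.
 */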
/**
 * Free offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Starting bit within bitmask
 * @v num_bits		Number of contiguous bits to free within bitmask
 */
static void hermon_bitmask_free ( hermon_bitmask_t *bits,
				  int bit, unsigned int num_bits ) {
	hermon_bitmask_t mask;

	for ( ; num_bits ; bit++, num_bits-- ) {
		mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
		bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
	}
}
/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Hermon command completion
 *
 * @v hermon		Hermon device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int hermon_cmd_wait ( struct hermon *hermon,
			     struct hermonprm_hca_command_register *hcr ) {
	unsigned int wait;

	for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
		if ( ( MLX_GET ( hcr, go ) == 0 ) &&
		     ( MLX_GET ( hcr, t ) == hermon->toggle ) )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}
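
/* Note: hardware clears the "go" bit when it has finished executing a
 * command, and the "t" bit must additionally match the
 * software-maintained toggle; waiting for both conditions guards
 * against mistaking a stale HCR image from the previous command for
 * completion of the current one.
 */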
/**
 * Issue HCA command
 *
 * @v hermon		Hermon device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int hermon_cmd ( struct hermon *hermon, unsigned long command,
			unsigned int op_mod, const void *in,
			unsigned int in_mod, void *out ) {
	struct hermonprm_hca_command_register hcr;
	unsigned int opcode = HERMON_HCR_OPCODE ( command );
	size_t in_len = HERMON_HCR_IN_LEN ( command );
	size_t out_len = HERMON_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= HERMON_MBOX_SIZE );
	assert ( out_len <= HERMON_MBOX_SIZE );

	DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
		hermon, opcode, in_len,
		( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p command interface locked\n",
		       hermon );
		return rc;
	}

	/* Flip HCR toggle */
	hermon->toggle = ( 1 - hermon->toggle );

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		in_buffer = hermon->mailbox_in;
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
		out_buffer = hermon->mailbox_out;
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_4 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1,
		     t, hermon->toggle );
	DBGC ( hermon, "Hermon %p issuing command:\n", hermon );
	DBGC_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
		   &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		DBGC2 ( hermon, "Input mailbox:\n" );
		DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 hermon->config + HERMON_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
		       hermon );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
		       hermon, status );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( hermon, "Output%s:\n",
			( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
		DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
			    ( ( out_len < 512 ) ? out_len : 512 ) );
	}

	return 0;
}
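
/* Note: parameter blocks small enough to fit in the HCR are passed
 * inline (input in dwords[0-1], output in dwords[3-4]); anything
 * larger is staged through the DMA-capable mailbox_in/mailbox_out
 * buffers, whose bus addresses are written into the HCR instead.
 */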
static inline int
hermon_cmd_query_dev_cap ( struct hermon *hermon,
			   struct hermonprm_query_dev_cap *dev_cap ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
						 1, sizeof ( *dev_cap ) ),
			    0, NULL, 0, dev_cap );
}

static inline int
hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
						 1, sizeof ( *fw ) ),
			    0, NULL, 0, fw );
}

static inline int
hermon_cmd_init_hca ( struct hermon *hermon,
		      const struct hermonprm_init_hca *init_hca ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
						1, sizeof ( *init_hca ) ),
			    0, init_hca, 0, NULL );
}

static inline int
hermon_cmd_close_hca ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_init_port ( struct hermon *hermon, unsigned int port,
		       const struct hermonprm_init_port *init_port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_PORT,
						1, sizeof ( *init_port ) ),
			    0, init_port, port, NULL );
}

static inline int
hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT ),
			    0, NULL, port, NULL );
}

static inline int
hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mpt *mpt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT,
						1, sizeof ( *mpt ) ),
			    0, mpt, index, NULL );
}

static inline int
hermon_cmd_write_mtt ( struct hermon *hermon,
		       const struct hermonprm_write_mtt *write_mtt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
						1, sizeof ( *write_mtt ) ),
			    0, write_mtt, 1, NULL );
}

static inline int
hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
		      const struct hermonprm_eqc *eqc ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ,
						1, sizeof ( *eqc ) ),
			    0, eqc, index, NULL );
}

static inline int
hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_HW2SW_EQ ),
			    1, NULL, index, NULL );
}

static inline int
hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
		      const struct hermonprm_completion_queue_context *cqctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ,
						1, sizeof ( *cqctx ) ),
			    0, cqctx, cqn, NULL );
}

static inline int
hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
		      struct hermonprm_completion_queue_context *cqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ,
						 1, sizeof ( *cqctx ) ),
			    0, NULL, cqn, cqctx );
}

static inline int
hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP ),
			    0x03, NULL, qpn, NULL );
}

static inline int
hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
		     union hermonprm_mad *mad ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC,
						   1, sizeof ( *mad ),
						   1, sizeof ( *mad ) ),
			    0x03, mad, port, mad );
}

static inline int
hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG,
						 1, sizeof ( *mcg ) ),
			    0, NULL, index, mcg );
}

static inline int
hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG,
						1, sizeof ( *mcg ) ),
			    0, mcg, index, NULL );
}

static inline int
hermon_cmd_mgid_hash ( struct hermon *hermon, const struct ib_gid *gid,
		       struct hermonprm_mgm_hash *hash ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH,
						   1, sizeof ( *gid ),
						   0, sizeof ( *hash ) ),
			    0, gid, 0, hash );
}

static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
		       const struct hermonprm_scalar_parameter *offset ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM,
						0, sizeof ( *offset ) ),
			    0, offset, page_count, NULL );
}

static inline int
hermon_cmd_map_icm ( struct hermon *hermon,
		     const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_icm_aux ( struct hermon *hermon,
			 const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_set_icm_size ( struct hermon *hermon,
			  const struct hermonprm_scalar_parameter *icm_size,
			  struct hermonprm_scalar_parameter *icm_aux_size ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
						   0, sizeof ( *icm_size ),
						   0, sizeof (*icm_aux_size) ),
			    0, icm_size, 0, icm_aux_size );
}

static inline int
hermon_cmd_unmap_fa ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_fa ( struct hermon *hermon,
		    const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}
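
/* The wrappers above follow a single pattern: each HERMON_HCR_*_CMD
 * macro packs the opcode, the input/output lengths and the
 * mailbox-required flags into the "command" word, which hermon_cmd()
 * decodes via HERMON_HCR_OPCODE, HERMON_HCR_IN_LEN and
 * HERMON_HCR_OUT_LEN.  Supporting a new firmware command should need
 * only another thin wrapper of this form.
 */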
/***************************************************************************
 *
 * Memory translation table operations
 *
 ***************************************************************************
 */

/**
 * Allocate MTT entries
 *
 * @v hermon		Hermon device
 * @v memory		Memory to map into MTT
 * @v len		Length of memory to map
 * @v mtt		MTT descriptor to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_mtt ( struct hermon *hermon,
			      const void *memory, size_t len,
			      struct hermon_mtt *mtt ) {
	struct hermonprm_write_mtt write_mtt;
	physaddr_t start;
	unsigned int page_offset;
	unsigned int num_pages;
	int mtt_offset;
	unsigned int mtt_base_addr;
	unsigned int i;
	int rc;

	/* Find available MTT entries */
	start = virt_to_phys ( memory );
	page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
	start -= page_offset;
	len += page_offset;
	num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
	mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
					    num_pages );
	if ( mtt_offset < 0 ) {
		DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
		       hermon, num_pages );
		rc = mtt_offset;
		goto err_mtt_offset;
	}
	mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
			  hermon->cap.mtt_entry_size );

	/* Fill in MTT structure */
	mtt->mtt_offset = mtt_offset;
	mtt->num_pages = num_pages;
	mtt->mtt_base_addr = mtt_base_addr;
	mtt->page_offset = page_offset;

	/* Construct and issue WRITE_MTT commands */
	for ( i = 0 ; i < num_pages ; i++ ) {
		memset ( &write_mtt, 0, sizeof ( write_mtt ) );
		MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
			     value, mtt_base_addr );
		MLX_FILL_2 ( &write_mtt.mtt, 1,
			     p, 1,
			     ptag_l, ( start >> 3 ) );
		if ( ( rc = hermon_cmd_write_mtt ( hermon,
						   &write_mtt ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
			       hermon, mtt_base_addr );
			goto err_write_mtt;
		}
		start += HERMON_PAGE_SIZE;
		mtt_base_addr += hermon->cap.mtt_entry_size;
	}

	return 0;

 err_write_mtt:
	hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
 err_mtt_offset:
	return rc;
}
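
/* Note: the mapped buffer need not be page-aligned.  The offset into
 * the first page is recorded separately (mtt->page_offset) and the
 * length is extended by that offset, so num_pages counts every page
 * the buffer touches; each WRITE_MTT entry then maps one full page,
 * with the physical tag expressed in 8-byte units ( start >> 3 ).
 */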
/**
 * Free MTT entries
 *
 * @v hermon		Hermon device
 * @v mtt		MTT descriptor
 */
static void hermon_free_mtt ( struct hermon *hermon,
			      struct hermon_mtt *mtt ) {
	hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
			      mtt->num_pages );
}
/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int hermon_create_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq;
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
					    HERMON_MAX_CQS, 1 );
	if ( cqn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of completion queues\n",
		       hermon );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
	if ( ! hermon_cq ) {
		rc = -ENOMEM;
		goto err_hermon_cq;
	}

	/* Allocate completion queue itself */
	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
				      sizeof ( hermon_cq->cqe[0] ) );
	if ( ! hermon_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
	}
	barrier();

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
				       hermon_cq->cqe_size,
				       &hermon_cq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2,
		     page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, HERMON_UAR_PAGE,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
		     ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
	MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
		     ( virt_to_phys ( &hermon_cq->doorbell ) >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p SW2HW_CQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( hermon, "Hermon %p CQN %#lx ring at [%p,%p)\n",
	       hermon, cq->cqn, hermon_cq->cqe,
	       ( ( ( void * ) hermon_cq->cqe ) + hermon_cq->cqe_size ) );
	ib_cq_set_drvdata ( cq, hermon_cq );
	return 0;

 err_sw2hw_cq:
	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
	free ( hermon_cq );
 err_hermon_cq:
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
 err_cqn_offset:
	return rc;
}
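
/* Note: every CQE's owner bit is initialised to 1 above so that all
 * entries start out owned by hardware; hermon_poll_cq() treats an
 * entry as valid only once hardware has written it back with the
 * owner bit matching the consumer's current wrap state.
 */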
/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void hermon_destroy_cq ( struct ib_device *ibdev,
				struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL HW2SW_CQ failed on CQN %#lx: "
		       "%s\n", hermon, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_cq->mtt );

	/* Free memory */
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
	free ( hermon_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );

	ib_cq_set_drvdata ( cq, NULL );
}
/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_create_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp;
	struct hermonprm_qp_ee_state_transitions qpctx;
	int qpn_offset;
	int rc;

	/* Find a free queue pair number */
	qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
					    HERMON_MAX_QPS, 1 );
	if ( qpn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of queue pairs\n", hermon );
		rc = qpn_offset;
		goto err_qpn_offset;
	}
	qp->qpn = ( HERMON_QPN_BASE + hermon->cap.reserved_qps +
		    qpn_offset );

	/* Allocate control structures */
	hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
	if ( ! hermon_qp ) {
		rc = -ENOMEM;
		goto err_hermon_qp;
	}

	/* Allocate work queue buffer */
	hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
				( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
	hermon_qp->send.num_wqes =
		( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
	hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
				     sizeof ( hermon_qp->send.wqe[0] ) );
	hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
				     sizeof ( hermon_qp->recv.wqe[0] ) );
	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
				hermon_qp->recv.wqe_size );
	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
				      sizeof ( hermon_qp->send.wqe[0] ) );
	if ( ! hermon_qp->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	hermon_qp->send.wqe = hermon_qp->wqe;
	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
	hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
	memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
				       hermon_qp->wqe_size,
				       &hermon_qp->mtt ) ) != 0 ) {
		goto err_alloc_mtt;
	}

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 2,
		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
		     qpc_eec_data.st, HERMON_ST_UD );
	MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size,
		     fls ( hermon_qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, HERMON_UAR_PAGE );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.page_offset,
		     ( hermon_qp->mtt.page_offset >> 6 ) );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
		     ( virt_to_phys ( &hermon_qp->recv.doorbell ) >> 2 ) );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
		     ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p RST2INIT_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_rst2init_qp;
	}

	/* Transition queue to RTR state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 4,
		     qpc_eec_data.mtu, HERMON_MTU_2048,
		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
	MLX_FILL_1 ( &qpctx, 16,
		     qpc_eec_data.primary_address_path.sched_queue,
		     ( 0x83 /* default policy */ |
		       ( ( ibdev->port - 1 ) << 6 ) ) );
	if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p INIT2RTR_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init2rtr_qp;
	}

	/* Transition queue to RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( hermon, "Hermon %p RTR2RTS_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_rtr2rts_qp;
	}

	DBGC ( hermon, "Hermon %p QPN %#lx send ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->send.wqe,
	       ( ((void *)hermon_qp->send.wqe ) + hermon_qp->send.wqe_size ) );
	DBGC ( hermon, "Hermon %p QPN %#lx receive ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->recv.wqe,
	       ( ((void *)hermon_qp->recv.wqe ) + hermon_qp->recv.wqe_size ) );
	ib_qp_set_drvdata ( qp, hermon_qp );
	return 0;

 err_rtr2rts_qp:
 err_init2rtr_qp:
	hermon_cmd_2rst_qp ( hermon, qp->qpn );
 err_rst2init_qp:
	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
	free ( hermon_qp );
 err_hermon_qp:
	hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
 err_qpn_offset:
	return rc;
}
/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_destroy_qp ( struct ib_device *ibdev,
				struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	int qpn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL 2RST_QP failed on QPN %#lx: "
		       "%s\n", hermon, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_qp->mtt );

	/* Free memory */
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
	free ( hermon_qp );

	/* Mark queue number as free */
	qpn_offset = ( qp->qpn - HERMON_QPN_BASE -
		       hermon->cap.reserved_qps );
	hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );

	ib_qp_set_drvdata ( qp, NULL );
}
/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/** GID used for GID-less send work queue entries */
static const struct ib_gid hermon_no_gid = {
	{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
};
/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_send ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *av,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
	struct hermonprm_ud_send_wqe *wqe;
	const struct ib_gid *gid;
	union hermonprm_doorbell_register db_reg;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p send queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_send_wq->wqe[ wq->next_idx &
				    ( hermon_send_wq->num_wqes - 1 ) ].ud;

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
		 ( sizeof ( *wqe ) - 4 ) );
	MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( *wqe ) / 16 ) );
	MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_2 ( &wqe->ud, 0,
		     ud_address_vector.pd, HERMON_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud, 1,
		     ud_address_vector.rlid, av->dlid,
		     ud_address_vector.g, av->gid_present );
	MLX_FILL_1 ( &wqe->ud, 2,
		     ud_address_vector.max_stat_rate,
		     ( ( ( av->rate < 2 ) || ( av->rate > 10 ) ) ?
		       8 : ( av->rate + 5 ) ) );
	MLX_FILL_1 ( &wqe->ud, 3, ud_address_vector.sl, av->sl );
	gid = ( av->gid_present ? &av->gid : &hermon_no_gid );
	memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
	MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
	MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	barrier();
	MLX_FILL_2 ( &wqe->ctrl, 0,
		     opcode, HERMON_OPCODE_SEND,
		     owner,
		     ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 ) );
	DBGCP ( hermon, "Hermon %p posting send WQE:\n", hermon );
	DBGCP_HD ( hermon, wqe, sizeof ( *wqe ) );
	barrier();

	/* Ring doorbell register */
	MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
	DBGCP ( hermon, "Ringing doorbell %08lx with %08lx\n",
		virt_to_phys ( hermon->uar + HERMON_DB_POST_SND_OFFSET ),
		db_reg.dword[0] );
	writel ( db_reg.dword[0], ( hermon->uar + HERMON_DB_POST_SND_OFFSET ));

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
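
/* Note on ordering in hermon_post_send(): the first 4 bytes of the
 * WQE (containing ctrl.owner) are deliberately skipped by the initial
 * memset, and the final MLX_FILL_2 that sets the opcode and owner
 * bits is separated from the body writes by barrier(), so hardware
 * can never observe a half-constructed WQE as valid.
 */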
/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_recv ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
	struct hermonprm_recv_wqe *wqe;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p receive queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update work queue's index */
	wq->next_idx++;

	/* Update doorbell record */
	barrier();
	MLX_FILL_1 ( &hermon_recv_wq->doorbell, 0, receive_wqe_counter,
		     ( wq->next_idx & 0xffff ) );

	return 0;
}
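
/* Note: unlike the send path, posting a receive WQE does not touch
 * the UAR; it only advances the receive_wqe_counter in the in-memory
 * doorbell record (after a barrier), which hardware reads to learn
 * about newly posted receive buffers.
 */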
/**
 * Handle completion
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v cqe		Hardware completion queue entry
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 * @ret rc		Return status code
 */
static int hermon_complete ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     union hermonprm_completion_entry *cqe,
			     ib_completer_t complete_send,
			     ib_completer_t complete_recv ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct ib_completion completion;
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct hermon_queue_pair *hermon_qp;
	struct io_buffer *iobuf;
	ib_completer_t complete;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned int wqe_idx;
	int rc = 0;

	/* Parse completion */
	memset ( &completion, 0, sizeof ( completion ) );
	qpn = MLX_GET ( &cqe->normal, qpn );
	is_send = MLX_GET ( &cqe->normal, s_r );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
		completion.syndrome = MLX_GET ( &cqe->error, syndrome );
		DBGC ( hermon, "Hermon %p CQN %lx syndrome %x vendor %lx\n",
		       hermon, cq->cqn, completion.syndrome,
		       MLX_GET ( &cqe->error, vendor_error_syndrome ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( hermon, "Hermon %p CQN %lx unknown %s QPN %lx\n",
		       hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	hermon_qp = ib_qp_get_drvdata ( qp );

	/* Identify I/O buffer */
	wqe_idx = ( MLX_GET ( &cqe->normal, wqe_counter ) &
		    ( wq->num_wqes - 1 ) );
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( hermon, "Hermon %p CQN %lx QPN %lx empty WQE %x\n",
		       hermon, cq->cqn, qpn, wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	/* Fill in length for received packets */
	if ( ! is_send ) {
		completion.len = MLX_GET ( &cqe->normal, byte_cnt );
		if ( completion.len > iob_tailroom ( iobuf ) ) {
			DBGC ( hermon, "Hermon %p CQN %lx QPN %lx IDX %x "
			       "overlength received packet length %zd\n",
			       hermon, cq->cqn, qpn, wqe_idx, completion.len );
			return -EIO;
		}
	}

	/* Pass off to caller's completion handler */
	complete = ( is_send ? complete_send : complete_recv );
	complete ( ibdev, qp, &completion, iobuf );

	return rc;
}
/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 */
static void hermon_poll_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     ib_completer_t complete_send,
			     ib_completer_t complete_recv ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	union hermonprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) ^
		     ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p completion:\n", hermon );
		DBGCP_HD ( hermon, cqe, sizeof ( *cqe ) );

		/* Handle completion */
		if ( ( rc = hermon_complete ( ibdev, cq, cqe, complete_send,
					      complete_recv ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p failed to complete: %s\n",
			       hermon, strerror ( rc ) );
			DBGC_HD ( hermon, cqe, sizeof ( *cqe ) );
		}

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		MLX_FILL_1 ( &hermon_cq->doorbell, 0, update_ci,
			     ( cq->next_idx & 0xffffffUL ) );
	}
}
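
/* Note: ( cq->next_idx & cq->num_cqes ) extracts the ring wrap bit
 * (num_cqes is a power of two), so the expected owner value alternates
 * on each pass around the ring; this is how software and hardware
 * agree on CQE ownership without software ever clearing entries.
 */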
/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int hermon_open ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_init_port init_port;
	int rc;

	memset ( &init_port, 0, sizeof ( init_port ) );
	MLX_FILL_2 ( &init_port, 0,
		     port_width_cap, 3,
		     vl_cap, 1 );
	MLX_FILL_2 ( &init_port, 1,
		     mtu, HERMON_MTU_2048,
		     max_gid, 1 );
	MLX_FILL_1 ( &init_port, 2, max_pkey, 64 );
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port,
					   &init_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise port: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void hermon_close ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not close port: %s\n",
		       hermon, strerror ( rc ) );
		/* Nothing we can do about this */
	}
}
/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int hermon_mcast_attach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
		       hermon, index );
		return -EBUSY;
	}

	/* Update hash table entry */
	MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
	MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
	memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void hermon_mcast_detach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp __unused,
				  struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mcg, 0, sizeof ( mcg ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return;
	}
}
/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */
/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @v len		Length of management datagram
 * @ret rc		Return status code
 */
static int hermon_mad ( struct ib_device *ibdev, struct ib_mad_hdr *mad,
			size_t len ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_mad mad_ifc;
	int rc;

	/* Copy in request packet */
	memset ( &mad_ifc, 0, sizeof ( mad_ifc ) );
	assert ( len <= sizeof ( mad_ifc.mad ) );
	memcpy ( &mad_ifc.mad, mad, len );

	/* Issue MAD */
	if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
					 &mad_ifc ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not issue MAD IFC: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, len );

	if ( mad->status != 0 ) {
		DBGC ( hermon, "Hermon %p MAD IFC status %04x\n",
		       hermon, ntohs ( mad->status ) );
		return -EIO;
	}
	return 0;
}
/** Hermon Infiniband operations */
static struct ib_device_operations hermon_ib_operations = {
	.create_cq	= hermon_create_cq,
	.destroy_cq	= hermon_destroy_cq,
	.create_qp	= hermon_create_qp,
	.destroy_qp	= hermon_destroy_qp,
	.post_send	= hermon_post_send,
	.post_recv	= hermon_post_recv,
	.poll_cq	= hermon_poll_cq,
	.open		= hermon_open,
	.close		= hermon_close,
	.mcast_attach	= hermon_mcast_attach,
	.mcast_detach	= hermon_mcast_detach,
	.mad		= hermon_mad,
};
/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */
/**
 * Start firmware running
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_start_firmware ( struct hermon *hermon ) {
	struct hermonprm_query_fw fw;
	struct hermonprm_virtual_physical_mapping map_fa;
	unsigned int fw_pages;
	unsigned int log2_fw_pages;
	size_t fw_size;
	physaddr_t fw_base;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( hermon, "Hermon %p firmware version %ld.%ld.%ld\n", hermon,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	log2_fw_pages = fls ( fw_pages - 1 );
	fw_pages = ( 1 << log2_fw_pages );
	DBGC ( hermon, "Hermon %p requires %d kB for firmware\n",
	       hermon, ( fw_pages * 4 ) );

	/* Allocate firmware pages and map firmware area */
	fw_size = ( fw_pages * HERMON_PAGE_SIZE );
	hermon->firmware_area = umalloc ( fw_size );
	if ( ! hermon->firmware_area ) {
		rc = -ENOMEM;
		goto err_alloc_fa;
	}
	fw_base = ( user_to_phys ( hermon->firmware_area, fw_size ) &
		    ~( fw_size - 1 ) );
	DBGC ( hermon, "Hermon %p firmware area at physical [%lx,%lx)\n",
	       hermon, fw_base, ( fw_base + fw_size ) );
	memset ( &map_fa, 0, sizeof ( map_fa ) );
	MLX_FILL_2 ( &map_fa, 3,
		     log2size, log2_fw_pages,
		     pa_l, ( fw_base >> 12 ) );
	if ( ( rc = hermon_cmd_map_fa ( hermon, &map_fa ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( hermon, "Hermon %p firmware started\n", hermon );
	return 0;

 err_run_fw:
	hermon_cmd_unmap_fa ( hermon );
 err_map_fa:
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
 err_alloc_fa:
 err_query_fw:
	return rc;
}
/**
 * Stop firmware running
 *
 * @v hermon		Hermon device
 */
static void hermon_stop_firmware ( struct hermon *hermon ) {
	int rc;

	if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
}
/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */

/**
 * Get device limits
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_get_cap ( struct hermon *hermon ) {
	struct hermonprm_query_dev_cap dev_cap;
	int rc;

	if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
	hermon->cap.reserved_qps =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
	hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
	hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
	hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
	hermon->cap.reserved_srqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
	hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
	hermon->cap.reserved_cqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
	hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
	hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
	hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
	hermon->cap.reserved_mtts =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
	hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
	hermon->cap.reserved_mrws =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
	hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
	hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );

	return 0;
}
/**
 * Get ICM usage
 *
 * @v log_num_entries	Log2 of the number of entries
 * @v entry_size	Entry size
 * @ret usage		Usage size in ICM
 */
static size_t icm_usage ( unsigned int log_num_entries, size_t entry_size ) {
	size_t usage;

	usage = ( ( 1 << log_num_entries ) * entry_size );
	usage = ( ( usage + HERMON_PAGE_SIZE - 1 ) &
		  ~( HERMON_PAGE_SIZE - 1 ) );
	return usage;
}
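
/* For example, assuming the usual 4096-byte HERMON_PAGE_SIZE:
 * icm_usage ( 10, 32 ) = 1024 entries * 32 bytes = 32768 bytes, which
 * is already page-aligned and returned unchanged, while
 * icm_usage ( 5, 100 ) = 3200 bytes rounds up to one 4096-byte page.
 */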
/**
 * Allocate ICM
 *
 * @v hermon		Hermon device
 * @v init_hca		INIT_HCA structure to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_icm ( struct hermon *hermon,
			      struct hermonprm_init_hca *init_hca ) {
	struct hermonprm_scalar_parameter icm_size;
	struct hermonprm_scalar_parameter icm_aux_size;
	struct hermonprm_virtual_physical_mapping map_icm_aux;
	struct hermonprm_virtual_physical_mapping map_icm;
	uint64_t icm_offset = 0;
	unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
	unsigned int log_num_mtts, log_num_mpts;
	size_t cmpt_max_len;
	size_t qp_cmpt_len, srq_cmpt_len, cq_cmpt_len, eq_cmpt_len;
	size_t icm_len, icm_aux_len;
	physaddr_t icm_phys;
	int i;
	int rc;

	/*
	 * Start by carving up the ICM virtual address space
	 */

	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( hermon->cap.reserved_qps + HERMON_MAX_QPS - 1 );
	log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
	log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
	log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
	log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );

	/* ICM starts with the cMPT tables, which are sparse */
	cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
			 ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
	qp_cmpt_len = icm_usage ( log_num_qps, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_QP_CMPT].len = qp_cmpt_len;
	icm_offset += cmpt_max_len;
	srq_cmpt_len = icm_usage ( log_num_srqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = srq_cmpt_len;
	icm_offset += cmpt_max_len;
	cq_cmpt_len = icm_usage ( log_num_cqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_CQ_CMPT].len = cq_cmpt_len;
	icm_offset += cmpt_max_len;
	eq_cmpt_len = icm_usage ( log_num_eqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_EQ_CMPT].len = eq_cmpt_len;
	icm_offset += cmpt_max_len;

	hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;

	/* Queue pair contexts */
	MLX_FILL_1 ( init_hca, 12,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( hermon, "Hermon %p ICM QPC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps, hermon->cap.qpc_entry_size );

	/* Extended alternate path contexts */
	MLX_FILL_1 ( init_hca, 24,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM ALTC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.altc_entry_size );

	/* Extended auxiliary contexts */
	MLX_FILL_1 ( init_hca, 28,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM AUXC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.auxc_entry_size );

	/* Shared receive queue contexts */
	MLX_FILL_1 ( init_hca, 18,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( hermon, "Hermon %p ICM SRQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_srqs,
				  hermon->cap.srqc_entry_size );

	/* Completion queue contexts */
	MLX_FILL_1 ( init_hca, 20,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( hermon, "Hermon %p ICM CQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_cqs, hermon->cap.cqc_entry_size );

	/* Event queue contexts */
	MLX_FILL_1 ( init_hca, 32,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
		     log_num_eqs );
	DBGC ( hermon, "Hermon %p ICM EQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_eqs, hermon->cap.eqc_entry_size );

	/* Memory translation table */
	MLX_FILL_1 ( init_hca, 64,
		     tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( hermon, "Hermon %p ICM MTT base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_mtts,
				  hermon->cap.mtt_entry_size );

	/* Memory protection table */
	log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
	MLX_FILL_1 ( init_hca, 60,
		     tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.dmpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_dmpt_sz, log_num_mpts );
	DBGC ( hermon, "Hermon %p ICM DMPT base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_mpts,
				  hermon->cap.dmpt_entry_size );

	/* Multicast table */
	MLX_FILL_1 ( init_hca, 48,
		     multicast_parameters.mc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.log_mc_table_hash_sz, 3 );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz, 3 );
	DBGC ( hermon, "Hermon %p ICM MC base = %llx\n", hermon, icm_offset );
	icm_offset += ( ( 8 * sizeof ( struct hermonprm_mcg_entry ) +
			  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );

	hermon->icm_map[HERMON_ICM_OTHER].len =
		( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );

	/*
	 * Allocate and map physical memory for (portions of) ICM
	 *
	 * Map is:
	 *   ICM AUX area (aligned to its own size)
	 *   cMPT areas
	 *   Other areas
	 */

	/* Calculate physical memory required for ICM */
	icm_len = 0;
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		icm_len += hermon->icm_map[i].len;
	}

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
	if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
					      &icm_aux_size ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
		       hermon, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
	/* Must round up to nearest power of two :( */
	icm_aux_len = ( 1 << fls ( icm_aux_len - 1 ) );

	/* Allocate ICM data and auxiliary area */
	DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	hermon->icm = umalloc ( 2 * icm_aux_len + icm_len );
	if ( ! hermon->icm ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	icm_phys = user_to_phys ( hermon->icm, 0 );

	/* Map ICM auxiliary area */
	icm_phys = ( ( icm_phys + icm_aux_len - 1 ) & ~( icm_aux_len - 1 ) );
	memset ( &map_icm_aux, 0, sizeof ( map_icm_aux ) );
	MLX_FILL_2 ( &map_icm_aux, 3,
		     log2size, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
		     pa_l, ( icm_phys >> 12 ) );
	DBGC ( hermon, "Hermon %p mapping ICM AUX (2^%d pages) => %08lx\n",
	       hermon, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
	       icm_phys );
	if ( ( rc = hermon_cmd_map_icm_aux ( hermon, &map_icm_aux ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += icm_aux_len;

	/* MAP ICM area */
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		memset ( &map_icm, 0, sizeof ( map_icm ) );
		MLX_FILL_1 ( &map_icm, 0,
			     va_h, ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &map_icm, 1,
			     va_l, ( hermon->icm_map[i].offset >> 12 ) );
		MLX_FILL_2 ( &map_icm, 3,
			     log2size,
			     fls ( ( hermon->icm_map[i].len /
				     HERMON_PAGE_SIZE ) - 1 ),
			     pa_l, ( icm_phys >> 12 ) );
		DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx (2^%d pages) "
		       "=> %08lx\n", hermon, hermon->icm_map[i].offset,
		       hermon->icm_map[i].len,
		       fls ( ( hermon->icm_map[i].len /
			       HERMON_PAGE_SIZE ) - 1 ), icm_phys );
		if ( ( rc = hermon_cmd_map_icm ( hermon, &map_icm ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
			       hermon, strerror ( rc ) );
			goto err_map_icm;
		}
		icm_phys += hermon->icm_map[i].len;
	}

	return 0;

 err_map_icm:
	assert ( i == 0 ); /* We don't handle partial failure at present */
	hermon_cmd_unmap_icm_aux ( hermon );
 err_map_icm_aux:
	ufree ( hermon->icm );
	hermon->icm = UNULL;
 err_alloc:
 err_set_icm_size:
	return rc;
}
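
/* Note: the single umalloc() of ( 2 * icm_aux_len + icm_len ) above
 * over-allocates by one extra AUX length so that icm_phys can be
 * rounded up to a boundary aligned to icm_aux_len (a power of two,
 * as required for the AUX mapping) while still leaving room for the
 * AUX area plus all ICM regions behind it.
 */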
/**
 * Free ICM
 *
 * @v hermon		Hermon device
 */
static void hermon_free_icm ( struct hermon *hermon ) {
	struct hermonprm_scalar_parameter unmap_icm;
	int i;

	for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
		memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
		MLX_FILL_1 ( &unmap_icm, 0, value_hi,
			     ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &unmap_icm, 1, value,
			     hermon->icm_map[i].offset );
		hermon_cmd_unmap_icm ( hermon,
				       ( 1 << fls ( ( hermon->icm_map[i].len /
						      HERMON_PAGE_SIZE ) - 1)),
				       &unmap_icm );
	}
	hermon_cmd_unmap_icm_aux ( hermon );
	ufree ( hermon->icm );
	hermon->icm = UNULL;
}
/***************************************************************************
 *
 * PCI interface
 *
 ***************************************************************************
 */
/**
 * Set up memory protection table
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_setup_mpt ( struct hermon *hermon ) {
	struct hermonprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
	hermon->reserved_lkey = ( ( key << 8 ) | ( key >> 24 ) );

	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_4 ( &mpt, 0,
		     r_w, 1,
		     pa, 1,
		     lr, 1,
		     lw, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_1 ( &mpt, 3, pd, HERMON_GLOBAL_PD );
	MLX_FILL_1 ( &mpt, 10, len64, 1 );
	if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
					   hermon->cap.reserved_mrws,
					   &mpt ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
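
/* Note: the reserved lkey published to the work queue code is the MPT
 * key rotated left by one byte ( ( key << 8 ) | ( key >> 24 ) ); this
 * appears to match the key format that hardware expects in the l_key
 * field of WQE data segments, and it is the value used by
 * hermon_post_send() and hermon_post_recv() above.
 */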
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @v id		PCI ID
 * @ret rc		Return status code
 */
static int hermon_probe ( struct pci_device *pci,
			  const struct pci_device_id *id __unused ) {
	struct hermon *hermon;
	struct ib_device *ibdev;
	struct hermonprm_init_hca init_hca;
	int i;
	int rc;

	/* Allocate Hermon device */
	hermon = zalloc ( sizeof ( *hermon ) );
	if ( ! hermon ) {
		rc = -ENOMEM;
		goto err_alloc_hermon;
	}
	pci_set_drvdata ( pci, hermon );

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < HERMON_NUM_PORTS ; i++ ) {
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		hermon->ibdev[i] = ibdev;
		ibdev->op = &hermon_ib_operations;
		ibdev->dev = &pci->dev;
		ibdev->port = ( HERMON_PORT_BASE + i );
		ib_set_drvdata ( ibdev, hermon );
	}

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ),
				   HERMON_PCI_CONFIG_BAR_SIZE );
	hermon->uar = ioremap ( ( pci_bar_start ( pci, HERMON_PCI_UAR_BAR ) +
				  HERMON_UAR_PAGE * HERMON_PAGE_SIZE ),
				HERMON_PAGE_SIZE );

	/* Allocate space for mailboxes */
	hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
					  HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_in ) {
		rc = -ENOMEM;
		goto err_mailbox_in;
	}
	hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
					   HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_out ) {
		rc = -ENOMEM;
		goto err_mailbox_out;
	}

	/* Start firmware */
	if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
		goto err_start_firmware;

	/* Get device limits */
	if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
		goto err_get_cap;

	/* Allocate ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
		goto err_alloc_icm;

	/* Initialise HCA */
	MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
	MLX_FILL_1 ( &init_hca, 5, udp, 1 );
	MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
	if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
		goto err_setup_mpt;

	/* Register Infiniband devices */
	for ( i = 0 ; i < HERMON_NUM_PORTS ; i++ ) {
		if ( ( rc = register_ibdev ( hermon->ibdev[i] ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not register IB "
			       "device: %s\n", hermon, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	return 0;

	i = ( HERMON_NUM_PORTS - 1 );
 err_register_ibdev:
	for ( ; i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
 err_setup_mpt:
	hermon_cmd_close_hca ( hermon );
 err_init_hca:
	hermon_free_icm ( hermon );
 err_alloc_icm:
 err_get_cap:
	hermon_stop_firmware ( hermon );
 err_start_firmware:
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
 err_mailbox_out:
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 err_mailbox_in:
	i = ( HERMON_NUM_PORTS - 1 );
 err_alloc_ibdev:
	for ( ; i >= 0 ; i-- )
		free_ibdev ( hermon->ibdev[i] );
	free ( hermon );
 err_alloc_hermon:
	return rc;
}
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void hermon_remove ( struct pci_device *pci ) {
	struct hermon *hermon = pci_get_drvdata ( pci );
	int i;

	for ( i = ( HERMON_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
	hermon_cmd_close_hca ( hermon );
	hermon_free_icm ( hermon );
	hermon_stop_firmware ( hermon );
	hermon_stop_firmware ( hermon );
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
	for ( i = ( HERMON_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		free_ibdev ( hermon->ibdev[i] );
	free ( hermon );
}
static struct pci_device_id hermon_nics[] = {
	PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver" ),
	PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver" ),
};
struct pci_driver hermon_driver __pci_driver = {
	.ids = hermon_nics,
	.id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
	.probe = hermon_probe,
	.remove = hermon_remove,
};