/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Firmware Infiniband Interface code for POWER
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Gerd Bayer <gerd.bayer@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
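
/*
 * All hCalls issued by the wrappers below may optionally be serialized
 * through this lock (see the ehca_lock_hcalls checks), as a workaround
 * for a firmware issue.
 */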
static DEFINE_SPINLOCK(hcall_lock);
static u32 get_longbusy_msecs(int longbusy_rc)
{
        switch (longbusy_rc) {
        case H_LONG_BUSY_ORDER_1_MSEC:
                return 1;
        case H_LONG_BUSY_ORDER_10_MSEC:
                return 10;
        case H_LONG_BUSY_ORDER_100_MSEC:
                return 100;
        case H_LONG_BUSY_ORDER_1_SEC:
                return 1000;
        case H_LONG_BUSY_ORDER_10_SEC:
                return 10000;
        case H_LONG_BUSY_ORDER_100_SEC:
                return 100000;
        default:
                return 1;
        }
}
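
/*
 * Thin wrapper around plpar_hcall_norets(): traces the arguments at debug
 * level 2, optionally serializes the hCall, and retries up to five times
 * when firmware answers with one of the H_LONG_BUSY codes, sleeping for
 * the hinted interval in between.
 */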
static long ehca_plpar_hcall_norets(unsigned long opcode,
                                    unsigned long arg1,
                                    unsigned long arg2,
                                    unsigned long arg3,
                                    unsigned long arg4,
                                    unsigned long arg5,
                                    unsigned long arg6,
                                    unsigned long arg7)
{
        long ret;
        int i, sleep_msecs;
        unsigned long flags = 0;

        if (unlikely(ehca_debug_level >= 2))
                ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
                             opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

        for (i = 0; i < 5; i++) {
                /* serialize hCalls to work around firmware issue */
                if (ehca_lock_hcalls)
                        spin_lock_irqsave(&hcall_lock, flags);

                ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
                                         arg5, arg6, arg7);

                if (ehca_lock_hcalls)
                        spin_unlock_irqrestore(&hcall_lock, flags);

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);
                        continue;
                }

                if (ret < H_SUCCESS)
                        ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
                                     opcode, ret, arg1, arg2, arg3,
                                     arg4, arg5, arg6, arg7);
                else if (unlikely(ehca_debug_level >= 2))
                        ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

                return ret;
        }

        return H_BUSY;
}
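
/*
 * Same retry and serialization logic as ehca_plpar_hcall_norets(), but for
 * the nine-register hCall variant; the output registers are returned to the
 * caller through *outs.
 */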
static long ehca_plpar_hcall9(unsigned long opcode,
                              unsigned long *outs, /* array of 9 outputs */
                              unsigned long arg1,
                              unsigned long arg2,
                              unsigned long arg3,
                              unsigned long arg4,
                              unsigned long arg5,
                              unsigned long arg6,
                              unsigned long arg7,
                              unsigned long arg8,
                              unsigned long arg9)
{
        long ret;
        int i, sleep_msecs;
        unsigned long flags = 0;

        if (unlikely(ehca_debug_level >= 2))
                ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
                             arg1, arg2, arg3, arg4, arg5,
                             arg6, arg7, arg8, arg9);

        for (i = 0; i < 5; i++) {
                /* serialize hCalls to work around firmware issue */
                if (ehca_lock_hcalls)
                        spin_lock_irqsave(&hcall_lock, flags);

                ret = plpar_hcall9(opcode, outs,
                                   arg1, arg2, arg3, arg4, arg5,
                                   arg6, arg7, arg8, arg9);

                if (ehca_lock_hcalls)
                        spin_unlock_irqrestore(&hcall_lock, flags);

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);
                        continue;
                }

                if (ret < H_SUCCESS) {
                        ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
                                     opcode, arg1, arg2, arg3, arg4, arg5,
                                     arg6, arg7, arg8, arg9);
                        ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
                                     ret, outs[0], outs[1], outs[2], outs[3],
                                     outs[4], outs[5], outs[6], outs[7],
                                     outs[8]);
                } else if (unlikely(ehca_debug_level >= 2))
                        ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
                                     ret, outs[0], outs[1], outs[2], outs[3],
                                     outs[4], outs[5], outs[6], outs[7],
                                     outs[8]);

                return ret;
        }

        return H_BUSY;
}
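
/*
 * Allocates an event queue via H_ALLOC_RESOURCE; neq_control == 1 selects
 * the notification event queue. The entry count, page count and interrupt
 * source token actually granted by firmware are passed back to the caller.
 */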
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_pfeq *pfeq,
                             const u32 neq_control,
                             const u32 number_of_entries,
                             struct ipz_eq_handle *eq_handle,
                             u32 *act_nr_of_entries,
                             u32 *act_pages,
                             u32 *eq_ist)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];
        u64 allocate_controls;

        /* resource type */
        allocate_controls = 3ULL;

        /* ISN is associated */
        if (neq_control != 1)
                allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
        else /* notification event queue */
                allocate_controls = (1ULL << 63) | allocate_controls;

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */
                                allocate_controls,     /* r5 */
                                number_of_entries,     /* r6 */
                                0, 0, 0, 0, 0, 0);
        eq_handle->handle = outs[0];
        *act_nr_of_entries = (u32)outs[3];
        *act_pages = (u32)outs[4];
        *eq_ist = (u32)outs[5];

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resource - ret=%lli ", ret);

        return ret;
}
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
                       struct ipz_eq_handle eq_handle,
                       const u64 event_mask)
{
        return ehca_plpar_hcall_norets(H_RESET_EVENTS,
                                       adapter_handle.handle, /* r4 */
                                       eq_handle.handle,      /* r5 */
                                       event_mask,            /* r6 */
                                       0, 0, 0, 0);
}
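
/*
 * Allocates a completion queue. On success the GAL physical addresses
 * returned in outs[5]/outs[6] are mapped via hcp_galpas_ctor(); if that
 * mapping fails, the freshly allocated CQ is freed again.
 */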
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_cq *cq,
                             struct ehca_alloc_cq_parms *param)
{
        int rc;
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle,   /* r4 */
                                2,                       /* r5 */
                                param->eq_handle.handle, /* r6 */
                                cq->token,               /* r7 */
                                param->nr_cqe,           /* r8 */
                                0, 0, 0, 0);
        cq->ipz_cq_handle.handle = outs[0];
        param->act_nr_of_entries = (u32)outs[3];
        param->act_pages = (u32)outs[4];

        if (ret == H_SUCCESS) {
                rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
                if (rc) {
                        ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
                                     rc, outs[5]);

                        ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                                adapter_handle.handle,    /* r4 */
                                                cq->ipz_cq_handle.handle, /* r5 */
                                                0, 0, 0, 0, 0);
                        ret = H_NO_MEM;
                }
        }

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lli", ret);

        return ret;
}
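
/*
 * Allocates a QP (or an SRQ, depending on parms->ext_type). The requested
 * capabilities are packed into the r5 and r10-r12 input registers using the
 * H_ALL_RES_QP_* bit masks above, and the values actually granted by
 * firmware are unpacked from the output registers back into *parms.
 */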
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_alloc_qp_parms *parms, int is_user)
{
        int rc;
        u64 ret;
        u64 allocate_controls, max_r10_reg, r11, r12;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        allocate_controls =
                EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
                | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
                | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
                                 parms->squeue.page_size)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
                                 parms->rqueue.page_size)
                | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
                                 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
                | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
                                 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
                | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
                                 parms->ud_av_l_key_ctl)
                | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

        max_r10_reg =
                EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
                               parms->squeue.max_wr + 1)
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
                                 parms->rqueue.max_wr + 1)
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
                                 parms->squeue.max_sge)
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
                                 parms->rqueue.max_sge);

        r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

        if (parms->ext_type == EQPT_SRQ)
                r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
        else
                r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */
                                allocate_controls,     /* r5 */
                                parms->send_cq_handle.handle,
                                parms->recv_cq_handle.handle,
                                parms->eq_handle.handle,
                                ((u64)parms->token << 32) | parms->pd.value,
                                max_r10_reg, r11, r12);

        parms->qp_handle.handle = outs[0];
        parms->real_qp_num = (u32)outs[1];
        parms->squeue.act_nr_wqes =
                (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
        parms->rqueue.act_nr_wqes =
                (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
        parms->squeue.act_nr_sges =
                (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
        parms->rqueue.act_nr_sges =
                (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
        parms->squeue.queue_size =
                (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
        parms->rqueue.queue_size =
                (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

        if (ret == H_SUCCESS) {
                rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
                if (rc) {
                        ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
                                     rc, outs[6]);

                        ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                                adapter_handle.handle,   /* r4 */
                                                parms->qp_handle.handle, /* r5 */
                                                0, 0, 0, 0, 0);
                        ret = H_NO_MEM;
                }
        }

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lli", ret);

        return ret;
}
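
/*
 * Port and HCA query/modify calls. The response blocks are written by
 * firmware and therefore must be page aligned.
 */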
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                      const u8 port_id,
                      struct hipz_query_port *query_port_response_block)
{
        u64 ret;
        u64 r_cb = virt_to_abs(query_port_response_block);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("response block not page aligned");
                return H_PARAMETER;
        }

        ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
                                      adapter_handle.handle, /* r4 */
                                      port_id,               /* r5 */
                                      r_cb,                  /* r6 */
                                      0, 0, 0, 0);

        if (ehca_debug_level >= 2)
                ehca_dmp(query_port_response_block, 64, "response_block");

        return ret;
}
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
                       const u8 port_id, const u32 port_cap,
                       const u8 init_type, const int modify_mask)
{
        u64 port_attributes = port_cap;

        if (modify_mask & IB_PORT_SHUTDOWN)
                port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
        if (modify_mask & IB_PORT_INIT_TYPE)
                port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
        if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
                port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

        return ehca_plpar_hcall_norets(H_MODIFY_PORT,
                                       adapter_handle.handle, /* r4 */
                                       port_id,               /* r5 */
                                       port_attributes,       /* r6 */
                                       0, 0, 0, 0);
}
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
                     struct hipz_query_hca *query_hca_rblock)
{
        u64 r_cb = virt_to_abs(query_hca_rblock);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("response_block=%p not page aligned",
                             query_hca_rblock);
                return H_PARAMETER;
        }

        return ehca_plpar_hcall_norets(H_QUERY_HCA,
                                       adapter_handle.handle, /* r4 */
                                       r_cb,                  /* r5 */
                                       0, 0, 0, 0, 0);
}
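
/*
 * Generic resource-page registration; queue type and page size are packed
 * together into r5. The EQ/CQ/QP/MR variants below all funnel into this
 * helper.
 */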
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
                          const u8 pagesize,
                          const u8 queue_type,
                          const u64 resource_handle,
                          const u64 logical_address_of_page,
                          u64 count)
{
        return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
                                       adapter_handle.handle,   /* r4 */
                                       (u64)queue_type | ((u64)pagesize) << 8,
                                       /* r5 */
                                       resource_handle,         /* r6 */
                                       logical_address_of_page, /* r7 */
                                       count,                   /* r8 */
                                       0, 0);
}
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_eq_handle eq_handle,
                             struct ehca_pfeq *pfeq,
                             const u8 pagesize,
                             const u8 queue_type,
                             const u64 logical_address_of_page,
                             const u64 count)
{
        if (count != 1) {
                ehca_gen_err("Page counter=%llx", count);
                return H_PARAMETER;
        }
        return hipz_h_register_rpage(adapter_handle,
                                     pagesize,
                                     queue_type,
                                     eq_handle.handle,
                                     logical_address_of_page, count);
}
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
                           struct ehca_pfeq *pfeq)
{
        u64 ret;

        ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
                                      adapter_handle.handle, /* r4 */
                                      0, 0, 0, 0, 0, 0);

        if (ret != H_SUCCESS && ret != H_BUSY)
                ehca_gen_err("Could not query interrupt state.");

        return ret;
}
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_cq_handle cq_handle,
                             struct ehca_pfcq *pfcq,
                             const u8 pagesize,
                             const u8 queue_type,
                             const u64 logical_address_of_page,
                             const u64 count,
                             const struct h_galpa gal)
{
        if (count != 1) {
                ehca_gen_err("Page counter=%llx", count);
                return H_PARAMETER;
        }

        return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
                                     cq_handle.handle, logical_address_of_page,
                                     count);
}
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_qp_handle qp_handle,
                             struct ehca_pfqp *pfqp,
                             const u8 pagesize,
                             const u8 queue_type,
                             const u64 logical_address_of_page,
                             const u64 count,
                             const struct h_galpa galpa)
{
        if (count > 1) {
                ehca_gen_err("Page counter=%llx", count);
                return H_PARAMETER;
        }

        return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
                                     qp_handle.handle, logical_address_of_page,
                                     count);
}
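
/*
 * Disables the QP via H_DISABLE_AND_GETC and, depending on
 * dis_and_get_function_code, hands back the logical addresses of the next
 * send and receive WQEs still to be processed.
 */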
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
                               const struct ipz_qp_handle qp_handle,
                               struct ehca_pfqp *pfqp,
                               void **log_addr_next_sq_wqe2processed,
                               void **log_addr_next_rq_wqe2processed,
                               int dis_and_get_function_code)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
                                adapter_handle.handle,     /* r4 */
                                dis_and_get_function_code, /* r5 */
                                qp_handle.handle,          /* r6 */
                                0, 0, 0, 0, 0, 0);
        if (log_addr_next_sq_wqe2processed)
                *log_addr_next_sq_wqe2processed = (void *)outs[0];
        if (log_addr_next_rq_wqe2processed)
                *log_addr_next_rq_wqe2processed = (void *)outs[1];

        return ret;
}
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
                     const struct ipz_qp_handle qp_handle,
                     struct ehca_pfqp *pfqp,
                     const u64 update_mask,
                     struct hcp_modify_qp_control_block *mqpcb,
                     struct h_galpa gal)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
                                adapter_handle.handle, /* r4 */
                                qp_handle.handle,      /* r5 */
                                update_mask,           /* r6 */
                                virt_to_abs(mqpcb),    /* r7 */
                                0, 0, 0, 0, 0);

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Insufficient resources ret=%lli", ret);

        return ret;
}
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
                    const struct ipz_qp_handle qp_handle,
                    struct ehca_pfqp *pfqp,
                    struct hcp_modify_qp_control_block *qqpcb,
                    struct h_galpa gal)
{
        return ehca_plpar_hcall_norets(H_QUERY_QP,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */
                                       virt_to_abs(qqpcb),    /* r6 */
                                       0, 0, 0, 0);
}
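
/*
 * QP teardown: unmap the GAL resources, disable the QP via
 * H_DISABLE_AND_GETC, then release it with H_FREE_RESOURCE.
 */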
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
                      struct ehca_qp *qp)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = hcp_galpas_dtor(&qp->galpas);
        if (ret) {
                ehca_gen_err("Could not destruct qp->galpas");
                return H_RESOURCE;
        }

        ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
                                adapter_handle.handle,    /* r4 */
                                /* function code */
                                1,                        /* r5 */
                                qp->ipz_qp_handle.handle, /* r6 */
                                0, 0, 0, 0, 0, 0);
        if (ret == H_HARDWARE)
                ehca_gen_err("HCA not operational. ret=%lli", ret);

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,    /* r4 */
                                      qp->ipz_qp_handle.handle, /* r5 */
                                      0, 0, 0, 0, 0);

        if (ret == H_RESOURCE)
                ehca_gen_err("Resource still in use. ret=%lli", ret);

        return ret;
}
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       struct h_galpa gal,
                       u32 port)
{
        return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */
                                       port,                  /* r6 */
                                       0, 0, 0, 0);
}
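
/*
 * Defines the special QP1 alias on the given port; firmware returns the
 * numbers of the associated PMA and BMA QPs.
 */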
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       struct h_galpa gal,
                       u32 port, u32 *pma_qp_nr,
                       u32 *bma_qp_nr)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
                                adapter_handle.handle, /* r4 */
                                qp_handle.handle,      /* r5 */
                                port,                  /* r6 */
                                0, 0, 0, 0, 0, 0);
        *pma_qp_nr = (u32)outs[0];
        *bma_qp_nr = (u32)outs[1];

        if (ret == H_ALIAS_EXIST)
                ehca_gen_err("AQP1 already exists. ret=%lli", ret);

        return ret;
}
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       struct h_galpa gal,
                       u16 mcg_dlid,
                       u64 subnet_prefix, u64 interface_id)
{
        u64 ret;

        ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
                                      adapter_handle.handle, /* r4 */
                                      qp_handle.handle,      /* r5 */
                                      mcg_dlid,              /* r6 */
                                      interface_id,          /* r7 */
                                      subnet_prefix,         /* r8 */
                                      0, 0);

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lli", ret);

        return ret;
}
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       struct h_galpa gal,
                       u16 mcg_dlid,
                       u64 subnet_prefix, u64 interface_id)
{
        return ehca_plpar_hcall_norets(H_DETACH_MCQP,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */
                                       mcg_dlid,              /* r6 */
                                       interface_id,          /* r7 */
                                       subnet_prefix,         /* r8 */
                                       0, 0);
}
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
                      struct ehca_cq *cq,
                      u8 force_flag)
{
        u64 ret;

        ret = hcp_galpas_dtor(&cq->galpas);
        if (ret) {
                ehca_gen_err("Could not destruct cq->galpas");
                return H_RESOURCE;
        }

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,     /* r4 */
                                      cq->ipz_cq_handle.handle,  /* r5 */
                                      force_flag != 0 ? 1L : 0L, /* r6 */
                                      0, 0, 0, 0);

        if (ret == H_RESOURCE)
                ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);

        return ret;
}
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
                      struct ehca_eq *eq)
{
        u64 ret;

        ret = hcp_galpas_dtor(&eq->galpas);
        if (ret) {
                ehca_gen_err("Could not destruct eq->galpas");
                return H_RESOURCE;
        }

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,    /* r4 */
                                      eq->ipz_eq_handle.handle, /* r5 */
                                      0, 0, 0, 0, 0);

        if (ret == H_RESOURCE)
                ehca_gen_err("Resource in use. ret=%lli ", ret);

        return ret;
}
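
/*
 * Memory region / memory window calls. The hipzout parameter blocks carry
 * the handles and keys (lkey/rkey) returned by firmware back to the caller.
 */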
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mr *mr,
                             const u64 vaddr,
                             const u64 length,
                             const u32 access_ctrl,
                             const struct ipz_pd pd,
                             struct ehca_mr_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle,         /* r4 */
                                5,                             /* r5 */
                                vaddr,                         /* r6 */
                                length,                        /* r7 */
                                (((u64)access_ctrl) << 32ULL), /* r8 */
                                pd.value,                      /* r9 */
                                0, 0, 0);
        outparms->handle.handle = outs[0];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

        return ret;
}
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mr *mr,
                             const u8 pagesize,
                             const u8 queue_type,
                             const u64 logical_address_of_page,
                             const u64 count)
{
        u64 ret;

        if (unlikely(ehca_debug_level >= 3)) {
                int i;
                u64 *kpage;

                if (count > 1) {
                        kpage = (u64 *)abs_to_virt(logical_address_of_page);
                        for (i = 0; i < count; i++)
                                ehca_gen_dbg("kpage[%d]=%p",
                                             i, (void *)kpage[i]);
                } else
                        ehca_gen_dbg("kpage=%p",
                                     (void *)logical_address_of_page);
        }

        if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
                ehca_gen_err("logical_address_of_page not on a 4k boundary "
                             "adapter_handle=%llx mr=%p mr_handle=%llx "
                             "pagesize=%x queue_type=%x "
                             "logical_address_of_page=%llx count=%llx",
                             adapter_handle.handle, mr,
                             mr->ipz_mr_handle.handle, pagesize, queue_type,
                             logical_address_of_page, count);
                ret = H_PARAMETER;
        } else
                ret = hipz_h_register_rpage(adapter_handle, pagesize,
                                            queue_type,
                                            mr->ipz_mr_handle.handle,
                                            logical_address_of_page, count);
        return ret;
}
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
                    const struct ehca_mr *mr,
                    struct ehca_mr_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
                                adapter_handle.handle,    /* r4 */
                                mr->ipz_mr_handle.handle, /* r5 */
                                0, 0, 0, 0, 0, 0, 0);
        outparms->len = outs[0];
        outparms->vaddr = outs[1];
        outparms->acl = outs[4] >> 32;
        outparms->lkey = (u32)(outs[5] >> 32);
        outparms->rkey = (u32)(outs[5] & (0xffffffff));

        return ret;
}
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
                            const struct ehca_mr *mr)
{
        return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                       adapter_handle.handle,    /* r4 */
                                       mr->ipz_mr_handle.handle, /* r5 */
                                       0, 0, 0, 0, 0);
}
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
                          const struct ehca_mr *mr,
                          const u64 vaddr_in,
                          const u64 length,
                          const u32 access_ctrl,
                          const struct ipz_pd pd,
                          const u64 mr_addr_cb,
                          struct ehca_mr_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
                                adapter_handle.handle,    /* r4 */
                                mr->ipz_mr_handle.handle, /* r5 */
                                vaddr_in,                 /* r6 */
                                length,                   /* r7 */
                                /* r8 */
                                ((((u64)access_ctrl) << 32ULL) | pd.value),
                                mr_addr_cb,               /* r9 */
                                0, 0, 0);
        outparms->vaddr = outs[1];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

        return ret;
}
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
                        const struct ehca_mr *mr,
                        const struct ehca_mr *orig_mr,
                        const u64 vaddr_in,
                        const u32 access_ctrl,
                        const struct ipz_pd pd,
                        struct ehca_mr_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
                                adapter_handle.handle,         /* r4 */
                                orig_mr->ipz_mr_handle.handle, /* r5 */
                                vaddr_in,                      /* r6 */
                                (((u64)access_ctrl) << 32ULL), /* r7 */
                                pd.value,                      /* r8 */
                                0, 0, 0, 0);
        outparms->handle.handle = outs[0];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

        return ret;
}
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mw *mw,
                             const struct ipz_pd pd,
                             struct ehca_mw_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */
                                6,                     /* r5 */
                                pd.value,              /* r6 */
                                0, 0, 0, 0, 0, 0);
        outparms->handle.handle = outs[0];
        outparms->rkey = (u32)outs[3];

        return ret;
}
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
                    const struct ehca_mw *mw,
                    struct ehca_mw_hipzout_parms *outparms)
{
        u64 ret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
                                adapter_handle.handle,    /* r4 */
                                mw->ipz_mw_handle.handle, /* r5 */
                                0, 0, 0, 0, 0, 0, 0);
        outparms->rkey = (u32)outs[3];

        return ret;
}
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
                            const struct ehca_mw *mw)
{
        return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                       adapter_handle.handle,    /* r4 */
                                       mw->ipz_mw_handle.handle, /* r5 */
                                       0, 0, 0, 0, 0);
}
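
/*
 * Fetches firmware error data for the given resource into rblock, which
 * must be page aligned.
 */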
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
                      const u64 ressource_handle,
                      void *rblock,
                      unsigned long *byte_count)
{
        u64 r_cb = virt_to_abs(rblock);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("rblock not page aligned.");
                return H_PARAMETER;
        }

        return ehca_plpar_hcall_norets(H_ERROR_DATA,
                                       adapter_handle.handle,
                                       ressource_handle,
                                       r_cb,
                                       0, 0, 0, 0);
}
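
/*
 * Signals end-of-interrupt for the given IRQ directly via the H_EOI hCall;
 * the XIRR value passed to firmware combines a 0xff priority byte with the
 * interrupt number.
 */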
u64 hipz_h_eoi(int irq)
{
        unsigned long xirr;

        iosync();
        xirr = (0xffULL << 24) | irq;

        return plpar_hcall_norets(H_EOI, xirr);
}