drivers/infiniband/hw/ehca/hcp_if.c

/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * Firmware Infiniband Interface code for POWER
 *
 * Authors: Christoph Raisch <raisch@de.ibm.com>
 *          Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *          Joachim Fenkes <fenkes@de.ibm.com>
 *          Gerd Bayer <gerd.bayer@de.ibm.com>
 *          Waleri Fomin <fomin@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * All rights reserved.
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

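/*
 * The H_ALL_RES_* and H_MP_* field definitions below use IBM bit
 * numbering, i.e. bit 0 is the most significant bit of the 64-bit
 * register; the EHCA_BMASK_IBM()/EHCA_BMASK_SET()/EHCA_BMASK_GET()
 * helpers come from ehca_tools.h.
 */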
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

static DEFINE_SPINLOCK(hcall_lock);

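/*
 * Map an H_LONG_BUSY_* return code from firmware to the suggested
 * retry delay in milliseconds (defaults to 1 ms for unknown codes).
 */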
static u32 get_longbusy_msecs(int longbusy_rc)
{
	switch (longbusy_rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}

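/*
 * Wrappers around plpar_hcall_norets()/plpar_hcall9(): when
 * ehca_lock_hcalls is set, all hCalls are serialized behind hcall_lock
 * to work around a firmware issue. A long-busy return code is retried
 * up to five times, sleeping for the interval suggested by firmware;
 * if the call is still busy after that, H_BUSY is returned.
 */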
static long ehca_plpar_hcall_norets(unsigned long opcode,
		unsigned long arg1,
		unsigned long arg2,
		unsigned long arg3,
		unsigned long arg4,
		unsigned long arg5,
		unsigned long arg6,
		unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
		     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else
			ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

		return ret;
	}

	return H_BUSY;
}

static long ehca_plpar_hcall9(unsigned long opcode,
		unsigned long *outs, /* array of 9 outputs */
		unsigned long arg1,
		unsigned long arg2,
		unsigned long arg3,
		unsigned long arg4,
		unsigned long arg5,
		unsigned long arg6,
		unsigned long arg7,
		unsigned long arg8,
		unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
		     arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);

		return ret;
	}

	return H_BUSY;
}

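/*
 * Allocate an event queue: H_ALLOC_RESOURCE with resource type 3.
 * On success, firmware returns the EQ handle, the actual number of
 * entries and pages, and the interrupt source number (eq_ist).
 */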
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type */
	allocate_controls = 3ULL;

	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls, /* r5 */
				number_of_entries, /* r6 */
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%li", ret);

	return ret;
}

u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle.handle, /* r4 */
				       eq_handle.handle, /* r5 */
				       event_mask, /* r6 */
				       0, 0, 0, 0);
}

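/*
 * Allocate a completion queue: H_ALLOC_RESOURCE with resource type 2.
 * Returns the CQ handle and the actual number of entries and pages;
 * on success the remaining outputs are handed to hcp_galpas_ctor() to
 * set up register access for the CQ.
 */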
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				2, /* r5 */
				param->eq_handle.handle, /* r6 */
				cq->token, /* r7 */
				param->nr_cqe, /* r8 */
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%li", ret);

	return ret;
}

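/*
 * Allocate a queue pair: H_ALLOC_RESOURCE with resource type 1.
 * The requested attributes (service type, signaling type, LLQP
 * completion flags, small-page sizes, maximum WRs and SGEs, SRQ
 * token/limit) are packed into r5 and r10-r12; the outputs report the
 * QP handle, the real QP number and the actual queue capacities.
 */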
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms)
{
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls, /* r5 */
				parms->send_cq_handle.handle, /* r6 */
				parms->recv_cq_handle.handle, /* r7 */
				parms->eq_handle.handle, /* r8 */
				((u64)parms->token << 32) | parms->pd.value,
				/* r9 */
				max_r10_reg, r11, r12);

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%li", ret);

	return ret;
}

u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}

	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle, /* r4 */
				      port_id, /* r5 */
				      r_cb, /* r6 */
				      0, 0, 0, 0);

	if (ehca_debug_level)
		ehca_dmp(query_port_response_block, 64, "response_block");

	return ret;
}

u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 port_attributes = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle, /* r4 */
				       port_id, /* r5 */
				       port_attributes, /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle, /* r4 */
				       r_cb, /* r5 */
				       0, 0, 0, 0, 0);
}

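/*
 * Common page-registration helper used for EQs, CQs, QPs and MRs:
 * H_REGISTER_RPAGES with the queue type in the low byte of r5 and the
 * page size in the byte above it.
 */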
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle, /* r4 */
				       (u64)queue_type | ((u64)pagesize) << 8,
				       /* r5 */
				       resource_handle, /* r6 */
				       logical_address_of_page, /* r7 */
				       count, /* r8 */
				       0, 0);
}

u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%lx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}

u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
				      adapter_handle.handle, /* r4 */
				      ist, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret != H_SUCCESS && ret != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return ret;
}

u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%lx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%lx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle, /* r4 */
				dis_and_get_function_code, /* r5 */
				qp_handle.handle, /* r6 */
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];

	return ret;
}

u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle, /* r5 */
				update_mask, /* r6 */
				virt_to_abs(mqpcb), /* r7 */
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%li", ret);

	return ret;
}

u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle, /* r5 */
				       virt_to_abs(qqpcb), /* r6 */
				       0, 0, 0, 0);
}

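/*
 * Destroy a QP: tear down the galpa mappings, disable the QP via
 * H_DISABLE_AND_GETC (function code 1), then release it with
 * H_FREE_RESOURCE.
 */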
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle, /* r4 */
				/* function code */
				1, /* r5 */
				qp->ipz_qp_handle.handle, /* r6 */
				0, 0, 0, 0, 0, 0);
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%li", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle, /* r4 */
				      qp->ipz_qp_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%li", ret);

	return ret;
}

u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle, /* r5 */
				       port, /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 *pma_qp_nr,
		       u32 *bma_qp_nr)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle, /* r5 */
				port, /* r6 */
				0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%li", ret);

	return ret;
}

u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				      adapter_handle.handle, /* r4 */
				      qp_handle.handle, /* r5 */
				      mcg_dlid, /* r6 */
				      interface_id, /* r7 */
				      subnet_prefix, /* r8 */
				      0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%li", ret);

	return ret;
}

u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle, /* r5 */
				       mcg_dlid, /* r6 */
				       interface_id, /* r7 */
				       subnet_prefix, /* r8 */
				       0, 0);
}

u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle, /* r4 */
				      cq->ipz_cq_handle.handle, /* r5 */
				      force_flag != 0 ? 1L : 0L, /* r6 */
				      0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed. ret=%li", ret);

	return ret;
}

u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 ret;

	ret = hcp_galpas_dtor(&eq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle, /* r4 */
				      eq->ipz_eq_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%li", ret);

	return ret;
}

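/*
 * Allocate a memory region: H_ALLOC_RESOURCE with resource type 5.
 * Firmware returns the MR handle plus lkey and rkey.
 */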
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
		     "vaddr=%lx length=%lx",
		     (u32)PAGE_SIZE, access_ctrl, vaddr, length);
	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				5, /* r5 */
				vaddr, /* r6 */
				length, /* r7 */
				(((u64)access_ctrl) << 32ULL), /* r8 */
				pd.value, /* r9 */
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 2)) {
		if (count > 1) {
			u64 *kpage;
			int i;
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%d]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}

	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%lx mr=%p mr_handle=%lx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%lx count=%lx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}

u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle, /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl = outs[4] >> 32;
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}

u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle, /* r4 */
				       mr->ipz_mr_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle, /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in, /* r6 */
				length, /* r7 */
				/* r8 */
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb, /* r9 */
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle, /* r4 */
				orig_mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in, /* r6 */
				(((u64)access_ctrl) << 32ULL), /* r7 */
				pd.value, /* r8 */
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

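/*
 * Allocate a memory window: H_ALLOC_RESOURCE with resource type 6.
 * Firmware returns the MW handle and its rkey.
 */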
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				6, /* r5 */
				pd.value, /* r6 */
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
				adapter_handle.handle, /* r4 */
				mw->ipz_mw_handle.handle, /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle, /* r4 */
				       mw->ipz_mw_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

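/*
 * Fetch error data for a resource into a page-aligned response block
 * via H_ERROR_DATA.
 */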
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 ressource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       ressource_handle,
				       r_cb,
				       0, 0, 0, 0);
}