/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Firmware Infiniband Interface code for POWER
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Gerd Bayer <gerd.bayer@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

static DEFINE_SPINLOCK(hcall_lock);

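/*
 * Map an H_LONG_BUSY_* return code to the retry delay (in milliseconds)
 * suggested by the hypervisor.
 */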
static u32 get_longbusy_msecs(int longbusy_rc)
{
	switch (longbusy_rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}

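/*
 * Wrapper around plpar_hcall_norets(): optionally serializes hCalls via
 * hcall_lock and retries up to five times while the hypervisor reports a
 * "long busy" condition, sleeping for the suggested interval in between.
 */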
static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else
			if (unlikely(ehca_debug_level >= 2))
				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

		return ret;
	}

	return H_BUSY;
}

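/*
 * Same retry/serialization logic as ehca_plpar_hcall_norets(), but for
 * plpar_hcall9(), which returns up to nine output registers in outs[].
 */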
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
			     arg1, arg2, arg3, arg4, arg5,
			     arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);

		return ret;
	}

	return H_BUSY;
}

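/*
 * Allocate an event queue (resource type 3). On success, firmware returns
 * the EQ handle, the actual number of entries and pages, and the interrupt
 * source token (IST) in the output registers.
 */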
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type */
	allocate_controls = 3ULL;

	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls,     /* r5 */
				number_of_entries,     /* r6 */
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resource - ret=%lli ", ret);

	return ret;
}

u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle.handle, /* r4 */
				       eq_handle.handle,      /* r5 */
				       event_mask,            /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,   /* r4 */
				2,                       /* r5 */
				param->eq_handle.handle, /* r6 */
				cq->token,               /* r7 */
				param->nr_cqe,           /* r8 */
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

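/*
 * Allocate a queue pair. The requested QP attributes are packed into the
 * allocate_controls and max_r10_reg registers via the H_ALL_RES_QP_* bit
 * masks; the values actually granted by firmware are unpacked from the
 * output registers back into *parms.
 */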
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls,     /* r5 */
				parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

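/* Query port attributes into a page-aligned response block. */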
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}

	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle, /* r4 */
				      port_id,               /* r5 */
				      r_cb,                  /* r6 */
				      0, 0, 0, 0);

	if (ehca_debug_level >= 2)
		ehca_dmp(query_port_response_block, 64, "response_block");

	return ret;
}

u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 port_attributes = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle, /* r4 */
				       port_id,               /* r5 */
				       port_attributes,       /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle, /* r4 */
				       r_cb,                  /* r5 */
				       0, 0, 0, 0, 0);
}

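/*
 * Register one or more resource pages with firmware. This is the common
 * backend used by the EQ/CQ/QP/MR specific rpage registration helpers below.
 */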
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle,      /* r4 */
				       (u64)queue_type | ((u64)pagesize) << 8,
				       /* r5 */
				       resource_handle,            /* r6 */
				       logical_address_of_page,    /* r7 */
				       count,                      /* r8 */
				       0, 0);
}

u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}

u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 ret;
	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
				      adapter_handle.handle, /* r4 */
				      ist,                   /* r5 */
				      0, 0, 0, 0, 0);

	if (ret != H_SUCCESS && ret != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return ret;
}

u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,      /* r4 */
				dis_and_get_function_code,  /* r5 */
				qp_handle.handle,           /* r6 */
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];

	return ret;
}

u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				update_mask,           /* r6 */
				virt_to_abs(mqpcb),    /* r7 */
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lli", ret);

	return ret;
}

u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       virt_to_abs(qqpcb),    /* r6 */
				       0, 0, 0, 0);
}

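/*
 * Destroy a queue pair: tear down the galpas mapping, disable the QP via
 * H_DISABLE_AND_GETC (function code 1), then free the resource.
 */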
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,     /* r4 */
				/* function code */
				1,                         /* r5 */
				qp->ipz_qp_handle.handle,  /* r6 */
				0, 0, 0, 0, 0, 0);
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%lli", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,    /* r4 */
				      qp->ipz_qp_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%lli", ret);

	return ret;
}

u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       port,                  /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 *pma_qp_nr,
		       u32 *bma_qp_nr)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				port,                  /* r6 */
				0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lli", ret);

	return ret;
}

u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				      adapter_handle.handle, /* r4 */
				      qp_handle.handle,      /* r5 */
				      mcg_dlid,              /* r6 */
				      interface_id,          /* r7 */
				      subnet_prefix,         /* r8 */
				      0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       mcg_dlid,              /* r6 */
				       interface_id,          /* r7 */
				       subnet_prefix,         /* r8 */
				       0, 0);
}

u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,       /* r4 */
				      cq->ipz_cq_handle.handle,    /* r5 */
				      force_flag != 0 ? 1L : 0L,   /* r6 */
				      0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);

	return ret;
}

u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 ret;

	ret = hcp_galpas_dtor(&eq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,    /* r4 */
				      eq->ipz_eq_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%lli ", ret);

	return ret;
}

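/*
 * Allocate a memory region (resource type 5) covering the range
 * [vaddr, vaddr + length) and return its handle, lkey and rkey in *outparms.
 */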
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,            /* r4 */
				5,                                /* r5 */
				vaddr,                            /* r6 */
				length,                           /* r7 */
				(((u64)access_ctrl) << 32ULL),    /* r8 */
				pd.value,                         /* r9 */
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 3)) {
		if (count > 1) {
			u64 *kpage;
			int i;
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%d]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}

	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%llx mr=%p mr_handle=%llx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%llx count=%llx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}

u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle,     /* r4 */
				mr->ipz_mr_handle.handle,  /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl = outs[4] >> 32;
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}

u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,    /* r4 */
				       mr->ipz_mr_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle,    /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in,                 /* r6 */
				length,                   /* r7 */
				/* r8 */
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb,               /* r9 */
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle,          /* r4 */
				orig_mr->ipz_mr_handle.handle,  /* r5 */
				vaddr_in,                       /* r6 */
				(((u64)access_ctrl) << 32ULL),  /* r7 */
				pd.value,                       /* r8 */
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				6,                     /* r5 */
				pd.value,              /* r6 */
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
				adapter_handle.handle,    /* r4 */
				mw->ipz_mw_handle.handle, /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,    /* r4 */
				       mw->ipz_mw_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

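/* Fetch adapter error data for a resource into a page-aligned rblock. */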
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 ressource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       ressource_handle,
				       r_cb,
				       0, 0, 0, 0);
}

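/* Signal end-of-interrupt for the given IRQ directly via the H_EOI hCall. */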
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr;

	iosync();
	xirr = (0xffULL << 24) | irq;

	return plpar_hcall_norets(H_EOI, xirr);
}