drivers/infiniband/hw/ehca/hcp_if.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Firmware Infiniband Interface code for POWER
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Gerd Bayer <gerd.bayer@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

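/*
 * The H_ALL_RES_QP_* and H_MP_* field definitions below appear to use IBM
 * bit numbering (bit 0 is the most significant bit of the 64-bit register),
 * as encoded by EHCA_BMASK_IBM()/EHCA_BMASK_SET() from ehca_tools.h.
 */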
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

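/*
 * Both hcall wrappers below retry an hcall up to five times: hcalls are
 * optionally serialized via hcall_lock (ehca_lock_hcalls) to work around a
 * firmware issue, long-busy return codes are translated into a sleep via
 * get_longbusy_msecs() before retrying, and H_BUSY is returned once the
 * retries are exhausted.
 */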
static DEFINE_SPINLOCK(hcall_lock);

static u32 get_longbusy_msecs(int longbusy_rc)
{
	switch (longbusy_rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}

static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else
			if (unlikely(ehca_debug_level >= 2))
				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

		return ret;
	}

	return H_BUSY;
}

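/*
 * Nine-output variant: outs[] must hold PLPAR_HCALL9_BUFSIZE entries and
 * receives the hypervisor's return registers; on failure both the input
 * and the output registers are logged.
 */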
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
			     arg1, arg2, arg3, arg4, arg5,
			     arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);

		return ret;
	}

	return H_BUSY;
}

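/*
 * Allocate an event queue. r5 carries the allocate controls (the resource
 * type plus the NEQ/ISN flag set below); firmware returns the EQ handle,
 * the actual number of entries and pages granted, and the EQ's interrupt
 * source token in *eq_ist.
 */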
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type */
	allocate_controls = 3ULL;

	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls,     /* r5 */
				number_of_entries,     /* r6 */
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources - ret=%lli", ret);

	return ret;
}

u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle.handle, /* r4 */
				       eq_handle.handle,      /* r5 */
				       event_mask,            /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	int rc;
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,   /* r4 */
				2,                       /* r5 */
				param->eq_handle.handle, /* r6 */
				cq->token,               /* r7 */
				param->nr_cqe,           /* r8 */
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[5]);

			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,    /* r4 */
						cq->ipz_cq_handle.handle, /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

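/*
 * Allocate a QP (or SRQ). allocate_controls (r5) and max_r10_reg (r10) pack
 * the requested attributes using the H_ALL_RES_QP_* fields defined above;
 * r11 carries the SRQ token and r12 either the SRQ limit (for an SRQ) or
 * the associated SRQ's QP number.
 */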
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls,     /* r5 */
				parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);

			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,   /* r4 */
						parms->qp_handle.handle, /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

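/*
 * hipz_h_query_port() and hipz_h_query_hca() hand the hypervisor a response
 * block by its real address (virt_to_abs()); a block that is not page
 * aligned is rejected with H_PARAMETER before the hcall is made.
 */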
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}

	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle, /* r4 */
				      port_id,               /* r5 */
				      r_cb,                  /* r6 */
				      0, 0, 0, 0);

	if (ehca_debug_level >= 2)
		ehca_dmp(query_port_response_block, 64, "response_block");

	return ret;
}

u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 port_attributes = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle, /* r4 */
				       port_id,               /* r5 */
				       port_attributes,       /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle, /* r4 */
				       r_cb,                  /* r5 */
				       0, 0, 0, 0, 0);
}

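/*
 * Generic H_REGISTER_RPAGES wrapper: r5 packs queue_type in the low byte
 * and pagesize in the byte above it; the EQ/CQ/QP/MR helpers below feed
 * their respective resource handles through this call.
 */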
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle,        /* r4 */
				       (u64)queue_type | ((u64)pagesize) << 8,
				       /* r5 */
				       resource_handle,              /* r6 */
				       logical_address_of_page,      /* r7 */
				       count,                        /* r8 */
				       0, 0);
}

u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}

u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 ret;
	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
				      adapter_handle.handle, /* r4 */
				      ist,                   /* r5 */
				      0, 0, 0, 0, 0);

	if (ret != H_SUCCESS && ret != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return ret;
}

u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,     /* r4 */
				dis_and_get_function_code, /* r5 */
				qp_handle.handle,          /* r6 */
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];

	return ret;
}

u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				update_mask,           /* r6 */
				virt_to_abs(mqpcb),    /* r7 */
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lli", ret);

	return ret;
}

u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       virt_to_abs(qqpcb),    /* r6 */
				       0, 0, 0, 0);
}

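/*
 * QP teardown: release the galpa mappings first, disable the QP via
 * H_DISABLE_AND_GETC (function code 1), then free the firmware resource;
 * H_RESOURCE from the free indicates the QP is still in use.
 */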
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,    /* r4 */
				/* function code */
				1,                        /* r5 */
				qp->ipz_qp_handle.handle, /* r6 */
				0, 0, 0, 0, 0, 0);
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%lli", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,    /* r4 */
				      qp->ipz_qp_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%lli", ret);

	return ret;
}

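/*
 * H_DEFINE_AQP0/H_DEFINE_AQP1 bind the given QP as alias QP0/QP1 of a port;
 * AQP1 additionally returns the PMA and BMA QP numbers (presumably the
 * performance- and baseboard-management agent QPs) in outs[0] and outs[1].
 */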
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       port,                  /* r6 */
				       0, 0, 0, 0);
}

u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 *pma_qp_nr,
		       u32 *bma_qp_nr)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				port,                  /* r6 */
				0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lli", ret);

	return ret;
}

u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				      adapter_handle.handle, /* r4 */
				      qp_handle.handle,      /* r5 */
				      mcg_dlid,              /* r6 */
				      interface_id,          /* r7 */
				      subnet_prefix,         /* r8 */
				      0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       mcg_dlid,              /* r6 */
				       interface_id,          /* r7 */
				       subnet_prefix,         /* r8 */
				       0, 0);
}

u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,       /* r4 */
				      cq->ipz_cq_handle.handle,    /* r5 */
				      force_flag != 0 ? 1L : 0L,   /* r6 */
				      0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli", ret);

	return ret;
}

u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 ret;

	ret = hcp_galpas_dtor(&eq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,    /* r4 */
				      eq->ipz_eq_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%lli", ret);

	return ret;
}

u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,          /* r4 */
				5,                              /* r5 */
				vaddr,                          /* r6 */
				length,                         /* r7 */
				(((u64)access_ctrl) << 32ULL),  /* r8 */
				pd.value,                       /* r9 */
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 3)) {
		if (count > 1) {
			u64 *kpage;
			int i;
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%d]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}

	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%llx mr=%p mr_handle=%llx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%llx count=%llx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}

u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle,     /* r4 */
				mr->ipz_mr_handle.handle,  /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl = outs[4] >> 32;
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}

u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,    /* r4 */
				       mr->ipz_mr_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle,    /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in,                 /* r6 */
				length,                   /* r7 */
				/* r8 */
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb,               /* r9 */
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle,          /* r4 */
				orig_mr->ipz_mr_handle.handle,  /* r5 */
				vaddr_in,                       /* r6 */
				(((u64)access_ctrl) << 32ULL),  /* r7 */
				pd.value,                       /* r8 */
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				6,                     /* r5 */
				pd.value,              /* r6 */
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
				adapter_handle.handle,    /* r4 */
				mw->ipz_mw_handle.handle, /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,    /* r4 */
				       mw->ipz_mw_handle.handle, /* r5 */
				       0, 0, 0, 0, 0);
}

u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 resource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       resource_handle,
				       r_cb,
				       0, 0, 0, 0);
}

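/*
 * The xirr value written by H_EOI below appears to follow the XICS XIRR
 * layout: the most significant byte holds the CPPR (0xff) and the low
 * 24 bits hold the interrupt source number.
 */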
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr;

	iosync();
	xirr = (0xffULL << 24) | irq;

	return plpar_hcall_norets(H_EOI, xirr);
}