Linux 2.6.20.7: drivers/net/ehea/ehea_phyp.c
/*
 *  linux/drivers/net/ehea/ehea_phyp.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "ehea_phyp.h"
static inline u16 get_order_of_qentries(u16 queue_entries)
{
	u8 ld = 1;	/* logarithmus dualis */

	while (((1U << ld) - 1) < queue_entries)
		ld++;
	return ld - 1;
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6
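
/*
 * Wrapper around plpar_hcall_norets(): retries the hcall up to five times
 * while the hypervisor reports a "long busy" return code (sleeping for the
 * suggested interval in between), logs any other failure, and returns
 * H_BUSY if the call never completes.
 */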
static long ehea_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehea_error("opcode=%lx ret=%lx"
				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
				   " arg5=%lx arg6=%lx arg7=%lx",
				   opcode, ret,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7);

		return ret;
	}

	return H_BUSY;
}
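
/*
 * Wrapper around plpar_hcall9(): same long-busy retry logic as above, with
 * the nine hypervisor return values copied into outs[].  An H_AUTHORITY
 * result for H_MODIFY_HEA_PORT on the CB4 jumbo-frame/speed or CB7 DUCQPN
 * attributes is not logged as an error.
 */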
static long ehea_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	u8 cb_cat;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);

		if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
		     && (opcode == H_MODIFY_HEA_PORT))
		     && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
		     || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
		     && (arg3 == H_PORT_CB7_DUCQPN)))))
			ehea_error("opcode=%lx ret=%lx"
				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
				   " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
				   " arg9=%lx"
				   " out1=%lx out2=%lx out3=%lx out4=%lx"
				   " out5=%lx out6=%lx out7=%lx out8=%lx"
				   " out9=%lx",
				   opcode, ret,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9,
				   outs[0], outs[1], outs[2], outs[3],
				   outs[4], outs[5], outs[6], outs[7],
				   outs[8]);

		return ret;
	}

	return H_BUSY;
}
u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
				       adapter_handle,		/* R4 */
				       qp_category,		/* R5 */
				       qp_handle,		/* R6 */
				       sel_mask,		/* R7 */
				       virt_to_abs(cb_addr),	/* R8 */
				       0, 0);			/* R9-R10 */
}
/* input param R5 */
#define H_ALL_RES_QP_EQPO		EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP		EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR		EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG		EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP		EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128		EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM		EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT		EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE		EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP		EHEA_BMASK_IBM(56, 63)

/* input param R9 */
#define H_ALL_RES_QP_TOKEN		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD			EHEA_BMASK_IBM(32, 63)

/* input param R10 */
#define H_ALL_RES_QP_MAX_SWQE		EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE		EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE		EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE		EHEA_BMASK_IBM(28, 31)
#define H_ALL_RES_QP_MAX_SSGE		EHEA_BMASK_IBM(37, 39)	/* max send SG elements */
#define H_ALL_RES_QP_MAX_R1SGE		EHEA_BMASK_IBM(45, 47)	/* max receive SG elements RQ1 */
#define H_ALL_RES_QP_MAX_R2SGE		EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE		EHEA_BMASK_IBM(61, 63)

/* input param R11 */
#define H_ALL_RES_QP_SWQE_IDL		EHEA_BMASK_IBM(0, 7)	/* max SWQE immediate data length */
#define H_ALL_RES_QP_PORT_NUM		EHEA_BMASK_IBM(48, 63)

/* input param R12 */
#define H_ALL_RES_QP_TH_RQ2		EHEA_BMASK_IBM(0, 15)	/* threshold RQ2 */
#define H_ALL_RES_QP_TH_RQ3		EHEA_BMASK_IBM(16, 31)	/* threshold RQ3 */

/* output param R6 */
#define H_ALL_RES_QP_ACT_SWQE		EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE		EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE		EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE		EHEA_BMASK_IBM(48, 63)

/* output param R7 */
#define H_ALL_RES_QP_ACT_SSGE		EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE		EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE		EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE		EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL	EHEA_BMASK_IBM(32, 39)

/* output param R8,R9 */
#define H_ALL_RES_QP_SIZE_SQ		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1		EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3		EHEA_BMASK_IBM(32, 63)

/* output param R11,R12 */
#define H_ALL_RES_QP_LIOBN_SQ		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1		EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3		EHEA_BMASK_IBM(32, 63)
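
/*
 * Allocate a queue pair via H_ALLOC_HEA_RESOURCE.  The requested SQ/RQ
 * depths are passed as log2-encoded values (see get_order_of_qentries())
 * packed into R10; the control word, PD/token, port number and RQ
 * thresholds go into R5, R9, R11 and R12.  The outputs are unpacked into
 * *qp_handle and init_attr (QP number, actual WQE counts, page counts per
 * queue and LIOBNs); on success the EPA addresses are set up via
 * hcp_epas_ctor().
 */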
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
			     struct ehea_qp_init_attr *init_attr, const u32 pd,
			     u64 *qp_handle, struct h_epas *h_epas)
{
	u64 hret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	u64 allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6)	/* rq1 & rq2 & rq3 */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0)	/* EQE gen. disabled */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);

	u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);

	u64 max_r10_reg =
	    EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
			   get_order_of_qentries(init_attr->max_nr_send_wqes))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
			     init_attr->wqe_size_enc_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
			     init_attr->wqe_size_enc_rq2)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
			     init_attr->wqe_size_enc_rq3);

	u64 r11_in =
	    EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
	u64 threshold =
	    EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 init_attr->send_cq_handle,	/* R6 */
				 init_attr->recv_cq_handle,	/* R7 */
				 init_attr->aff_eq_handle,	/* R8 */
				 r9_reg,			/* R9 */
				 max_r10_reg,			/* R10 */
				 r11_in,			/* R11 */
				 threshold);			/* R12 */

	*qp_handle = outs[0];
	init_attr->qp_nr = (u32)outs[1];

	init_attr->act_nr_send_wqes =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
	init_attr->act_nr_rwqes_rq1 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
	init_attr->act_nr_rwqes_rq2 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
	init_attr->act_nr_rwqes_rq3 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);

	init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
	init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
	init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
	init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;

	init_attr->nr_sq_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
	init_attr->nr_rq1_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
	init_attr->nr_rq2_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
	init_attr->nr_rq3_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);

	init_attr->liobn_sq =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
	init_attr->liobn_rq1 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
	init_attr->liobn_rq2 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
	init_attr->liobn_rq3 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);

	if (!hret)
		hcp_epas_ctor(h_epas, outs[6], outs[6]);

	return hret;
}
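
/*
 * Allocate a completion queue via H_ALLOC_HEA_RESOURCE.  Returns the CQ
 * handle, the actual number of CQEs granted and the number of pages to
 * register in cq_attr; on success the EPA addresses are set up via
 * hcp_epas_ctor().
 */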
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
			     struct ehea_cq_attr *cq_attr,
			     u64 *cq_handle, struct h_epas *epas)
{
	u64 hret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 H_ALL_RES_TYPE_CQ,		/* R5 */
				 cq_attr->eq_handle,		/* R6 */
				 cq_attr->cq_token,		/* R7 */
				 cq_attr->max_nr_of_cqes,	/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*cq_handle = outs[0];
	cq_attr->act_nr_of_cqes = outs[3];
	cq_attr->nr_pages = outs[4];

	if (!hret)
		hcp_epas_ctor(epas, outs[5], outs[6]);

	return hret;
}
/* input param R5 */
#define H_ALL_RES_EQ_NEQ		EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN	EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN	EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE		EHEA_BMASK_IBM(56, 63)

/* input param R6 */
#define H_ALL_RES_EQ_MAX_EQE		EHEA_BMASK_IBM(32, 63)

/* output param R6 */
#define H_ALL_RES_EQ_LIOBN		EHEA_BMASK_IBM(32, 63)

/* output param R7 */
#define H_ALL_RES_EQ_ACT_EQE		EHEA_BMASK_IBM(32, 63)

/* output param R8 */
#define H_ALL_RES_EQ_ACT_PS		EHEA_BMASK_IBM(32, 63)

/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C	EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1	EHEA_BMASK_IBM(40, 63)

/* output param R10 */
#define H_ALL_RES_EQ_ACT_EQ_IST_2	EHEA_BMASK_IBM(40, 63)

/* output param R11 */
#define H_ALL_RES_EQ_ACT_EQ_IST_3	EHEA_BMASK_IBM(40, 63)

/* output param R12 */
#define H_ALL_RES_EQ_ACT_EQ_IST_4	EHEA_BMASK_IBM(40, 63)
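
/*
 * Allocate an event queue via H_ALLOC_HEA_RESOURCE.  The control word in
 * R5 selects NEQ vs. normal EQ, EQE generation and the ISN; the outputs
 * provide the EQ handle, actual EQE count, page count and the four
 * interrupt source tokens (ist1..ist4).
 */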
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
	u64 hret, allocate_controls;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	/* resource type */
	allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 eq_attr->max_nr_of_eqes,	/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */

	*eq_handle = outs[0];
	eq_attr->act_nr_of_eqes = outs[3];
	eq_attr->nr_pages = outs[4];
	eq_attr->ist1 = outs[5];
	eq_attr->ist2 = outs[6];
	eq_attr->ist3 = outs[7];
	eq_attr->ist4 = outs[8];

	return hret;
}
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
			  const u64 qp_handle, const u64 sel_mask,
			  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
			  u16 *out_swr, u16 *out_rwr)
{
	u64 hret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
				 outs,
				 adapter_handle,	/* R4 */
				 (u64) cat,		/* R5 */
				 qp_handle,		/* R6 */
				 sel_mask,		/* R7 */
				 virt_to_abs(cb_addr),	/* R8 */
				 0, 0, 0, 0);		/* R9-R12 */

	*inv_attr_id = outs[0];
	*out_swr = outs[3];
	*out_rwr = outs[4];
	*proc_mask = outs[5];

	return hret;
}
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
			  const u8 queue_type, const u64 resource_handle,
			  const u64 log_pageaddr, u64 count)
{
	u64 reg_control;

	reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
		    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);

	return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
				       adapter_handle,		/* R4 */
				       reg_control,		/* R5 */
				       resource_handle,		/* R6 */
				       log_pageaddr,		/* R7 */
				       count,			/* R8 */
				       0, 0);			/* R9-R10 */
}
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
			struct ehea_mr *mr)
{
	u64 hret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
				 outs,
				 adapter_handle,		/* R4 */
				 orig_mr_handle,		/* R5 */
				 vaddr_in,			/* R6 */
				 (((u64)access_ctrl) << 32ULL),	/* R7 */
				 pd,				/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	mr->handle = outs[0];
	mr->lkey = (u32)outs[2];

	return hret;
}
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
				 outs,
				 adapter_handle,		/* R4 */
				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
				 qp_handle,			/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
{
	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle,	/* R4 */
				       res_handle,	/* R5 */
				       0, 0, 0, 0, 0);	/* R6-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
			     const u64 length, const u32 access_ctrl,
			     const u32 pd, u64 *mr_handle, u32 *lkey)
{
	u64 hret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		 /* R4 */
				 H_ALL_RES_TYPE_MR,		 /* R5 */
				 vaddr,				 /* R6 */
				 length,			 /* R7 */
				 (((u64) access_ctrl) << 32ULL), /* R8 */
				 pd,				 /* R9 */
				 0, 0, 0);			 /* R10-R12 */

	*mr_handle = outs[0];
	*lkey = (u32)outs[2];
	return hret;
}
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
			     const u8 pagesize, const u8 queue_type,
			     const u64 log_pageaddr, const u64 count)
{
	if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
		ehea_error("not on page boundary");
		return H_PARAMETER;
	}

	return ehea_h_register_rpage(adapter_handle, pagesize,
				     queue_type, mr_handle,
				     log_pageaddr, count);
}
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
	u64 hret, cb_logaddr;

	cb_logaddr = virt_to_abs(cb_addr);

	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
				       adapter_handle,	/* R4 */
				       cb_logaddr,	/* R5 */
				       0, 0, 0, 0, 0);	/* R6-R10 */
#ifdef DEBUG
	ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
	return hret;
}
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
			   const u8 cb_cat, const u64 select_mask,
			   void *cb_addr)
{
	u64 port_info;
	u64 cb_logaddr = virt_to_abs(cb_addr);
	u64 arr_index = 0;

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);

	return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
				       adapter_handle,	/* R4 */
				       port_info,	/* R5 */
				       select_mask,	/* R6 */
				       arr_index,	/* R7 */
				       cb_logaddr,	/* R8 */
				       0, 0);		/* R9-R10 */
}
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
			    const u8 cb_cat, const u64 select_mask,
			    void *cb_addr)
{
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	u64 port_info;
	u64 arr_index = 0;
	u64 cb_logaddr = virt_to_abs(cb_addr);

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
	return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
				 outs,
				 adapter_handle,	/* R4 */
				 port_info,		/* R5 */
				 select_mask,		/* R6 */
				 arr_index,		/* R7 */
				 cb_logaddr,		/* R8 */
				 0, 0, 0, 0);		/* R9-R12 */
}
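
/*
 * Register or deregister a broadcast/multicast MAC address for a port.
 * The same parameter packing serves both operations; hcall_id selects
 * which hcall is issued.  Note that the caller's mc_mac_addr is shifted
 * right by 16 bits before being packed into the R7 MAC address field.
 */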
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
			  const u8 reg_type, const u64 mc_mac_addr,
			  const u16 vlan_id, const u32 hcall_id)
{
	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
	u64 mac_addr = mc_mac_addr >> 16;

	r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
	r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
	r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
	r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);

	return ehea_plpar_hcall_norets(hcall_id,
				       adapter_handle,	/* R4 */
				       r5_port_num,	/* R5 */
				       r6_reg_type,	/* R6 */
				       r7_mc_mac_addr,	/* R7 */
				       r8_vlan_id,	/* R8 */
				       0, 0);		/* R9-R10 */
}
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
			const u64 event_mask)
{
	return ehea_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle,	/* R4 */
				       neq_handle,	/* R5 */
				       event_mask,	/* R6 */
				       0, 0, 0, 0);	/* R7-R10 */
}