/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"
struct ehea_bmap *ehea_bmap = NULL;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE - 1)) {
		ehea_error("not on pageboundary");
		retvalue = NULL;
	}
	return retvalue;
}
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		ehea_error("pagesize conflict! kernel pagesize=%d, "
			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		ehea_error("no mem for queue_pages");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
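/*
 * Worked example (illustrative, values assumed): with PAGE_SIZE = 4096
 * and an EHEA queue page size of 4096, pages_per_kpage is 1 and every
 * queue page is backed by its own zeroed kernel page.  With a
 * hypothetical queue page size of 2048, pages_per_kpage would be 2 and
 * the inner loop above would hand out both halves of each kernel page:
 *
 *     queue_pages[i]     = kpage;
 *     queue_pages[i + 1] = kpage + 2048;
 */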
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	/* dereference queue only after the NULL check above */
	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ehea_error("no mem for cq");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_cq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_cq failed ehea_cq=%p "
				   "hret=%lx counter=%i act_pages=%i",
				   cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				ehea_error("registration of pages not "
					   "complete hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
				ehea_error("CQ: registration of page failed "
					   "hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}
u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previous registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}
int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret;
	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy CQ failed");
		return -EIO;
	}

	return 0;
}
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		ehea_error("no mem for eq");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_eq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		ehea_error("can't allocate eq pages");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;

		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage))
				goto out_kill_hwq;

		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}
u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}
int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret;
	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy EQ failed");
		return -EIO;
	}

	return 0;
}
/*
 * allocates memory for a queue and registers pages in phyp
 */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}
		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_qp failed");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
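/*
 * Worked example (illustrative): the encoded WQE size is a shift count
 * applied to a 128 byte base, so
 *
 *     map_wqe_size(0) == 128,  map_wqe_size(1) == 256,
 *     map_wqe_size(2) == 512,  map_wqe_size(3) == 1024
 */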
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		ehea_error("no mem for qp");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		ehea_error("ehea_h_alloc_resource_qp failed");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		ehea_error("can't register for sq ret=%x", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		ehea_error("can't register for rq1 ret=%x", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			ehea_error("can't register for rq2 ret=%x", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			ehea_error("can't register for rq3 ret=%x", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}
u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}
int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret;
	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy QP failed");
		return -EIO;
	}

	return 0;
}
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}
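/*
 * Worked example (illustrative; the shift and mask values live in the
 * driver headers and are assumed here to be EHEA_DIR_INDEX_SHIFT = 9,
 * EHEA_TOP_INDEX_SHIFT = 18 and a 9 bit EHEA_INDEX_MASK): a memory
 * section number i is decomposed into the three radix-tree levels as
 *
 *     top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);   bits 18..26
 *     dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);   bits  9..17
 *     idx = i & EHEA_INDEX_MASK;                        bits  0..8
 */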
static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}
static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
static int ehea_create_busmap_callback(unsigned long pfn,
				       unsigned long nr_pages, void *arg)
{
	unsigned long i, mr_len, start_section, end_section;
	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	mr_len = *(unsigned long *)arg;

	if (!ehea_bmap)
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
	if (!ehea_bmap)
		return -ENOMEM;

	for (i = start_section; i < end_section; i++) {
		int ret;
		int top, dir, idx;
		u64 vaddr;

		top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);

		ret = ehea_init_bmap(ehea_bmap, top, dir);
		if (ret)
			return ret;

		idx = i & EHEA_INDEX_MASK;
		vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;

		ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
	}

	mr_len += nr_pages * PAGE_SIZE;
	*(unsigned long *)arg = mr_len;

	return 0;
}
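/*
 * Note (illustrative summary, not from the original file): this callback
 * runs once per present memory range; *arg carries the accumulated
 * region length, which ehea_reg_kernel_mr() later uses (via ehea_mr_len)
 * as the length of the memory region registered with the hypervisor.
 */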
static unsigned long ehea_mr_len;

static DEFINE_MUTEX(ehea_busmap_mutex);
int ehea_create_busmap(void)
{
	int ret;
	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
				   ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
void ehea_destroy_busmap(void)
{
	int top, dir;
	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}
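/*
 * Worked example (illustrative, assuming EHEA_SECTSIZE equals the
 * powerpc section size 1UL << SECTION_SIZE_BITS): for a kernel address
 * caddr, the section index selects the busmap entry and the low bits
 * pass through unchanged, i.e.
 *
 *     ehea_map_vaddr(caddr) == ent[idx] | (caddr & (EHEA_SECTSIZE - 1))
 */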
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = virt_to_abs(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = virt_to_abs(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS)
		    && (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			ehea_error("register_rpage_mr failed");
			return hret;
		}
	}
	return hret;
}
static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pt) {
		ehea_error("no mem");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_mr failed");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		ehea_error("no busmap available");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		ehea_error("registering mr failed");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	kfree(pt);
	return ret;
}
int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		ehea_error("destroy MR failed");
		return -EIO;
	}

	return 0;
}
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}
void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == 0x8) /* Queue Pair */
		ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, "
			   "port=%lX", resource, data[6], data[12], data[22]);

	if (type == 0x4) /* Completion Queue */
		ehea_error("CQ (resource=%lX) state: AER=0x%lX", resource,
			   data[6]);

	if (type == 0x3) /* Event Queue */
		ehea_error("EQ (resource=%lX) state: AER=0x%lX", resource,
			   data[6]);

	ehea_dump(data, length, "error data");
}
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
{
	unsigned long ret;
	u64 *rblock;

	rblock = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rblock) {
		ehea_error("Cannot allocate rblock memory.");
		return;
	}

	ret = ehea_h_error_data(adapter->handle,
				res_handle,
				rblock);

	if (ret == H_R_STATE)
		ehea_error("No error data is available: %lX.", res_handle);
	else if (ret == H_SUCCESS)
		print_error_data(rblock);
	else
		ehea_error("Error data could not be fetched: %lX", res_handle);

	kfree(rblock);
}