// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;
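/*
 * Return the current queue page and advance the queue offset by one
 * hardware page. Returns NULL once the end of the queue is reached or
 * when the entry is not aligned to EHEA_PAGESIZE.
 */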
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}
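/*
 * Allocate the software backing for a hardware queue: nr_of_pages queue
 * pages of pagesize bytes each, carved out of zeroed kernel pages. Several
 * queue pages share one kernel page when pagesize is smaller than PAGE_SIZE.
 */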
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
					   GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
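/* Free the kernel pages backing a hardware queue and its page pointer array. */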
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;

	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
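/*
 * Allocate a completion queue resource from the hypervisor, construct its
 * hardware queue and register every queue page with firmware via
 * ehea_h_register_rpage().
 */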
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	u64 hret, rpage;
	u32 counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previous registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}
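/*
 * Destroy a completion queue: free the firmware resource, retrying with
 * FORCE_FREE after fetching error data when the first attempt returns
 * H_R_STATE, then release the hardware queue.
 */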
int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}
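/*
 * Allocate an event queue from the hypervisor and register its pages,
 * mirroring the completion queue setup above.
 */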
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;

		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;

		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}
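/*
 * Free the firmware EQ resource under the EQ spinlock and tear down the
 * hardware queue once the hypervisor call succeeds.
 */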
static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
				  int nr_pages, int wqe_size, int act_nr_sges,
				  struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}

	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
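/*
 * WQE sizes are handed out by firmware in an encoded form; the size in
 * bytes is 128 << enc (e.g. enc=0 -> 128 bytes, enc=2 -> 512 bytes).
 */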
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
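/*
 * Create a queue pair: allocate the firmware QP resource, then build and
 * register the send queue and up to three receive queues described by
 * init_attr.
 */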
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
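/*
 * The busmap is a three-level table (top/dir/ent, EHEA_MAP_ENTRIES entries
 * per level) that maps each EHEA_SECTSIZE section of system memory to a bus
 * address inside the memory region starting at EHEA_BUSMAP_START. Rebuilding
 * assigns contiguous addresses to all sections currently marked valid and
 * prunes directories that have become empty.
 */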
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}
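/*
 * Mark the sections covering [pfn, pfn + nr_pages) as valid or invalid in
 * the busmap and rebuild the contiguous bus address assignment afterwards.
 */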
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

static int ehea_is_hugepage(unsigned long pfn)
{
	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}
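/*
 * walk_system_ram_range() callback: small chunks are added to the busmap
 * directly; chunks of at least EHEA_HUGEPAGE_SIZE are scanned so that
 * hugepages themselves are skipped and only the surrounding memory is added.
 */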
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
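/*
 * Translate a kernel virtual address into the bus address registered with
 * firmware, or EHEA_INVAL_ADDR if the section is not part of the busmap.
 */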
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}
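/*
 * Register one EHEA_SECTSIZE section with the memory region: its pages are
 * registered in batches of EHEA_MAX_RPAGE physical addresses collected in
 * the page table pt.
 */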
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
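/*
 * Register all valid busmap sections as one kernel memory region of
 * ehea_mr_len bytes starting at EHEA_BUSMAP_START.
 */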
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}
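/*
 * Decode and log an error data block fetched from firmware; the resource
 * type selects the summary line printed before the raw dump.
 */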
static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);

	free_page((unsigned long)rblock);
out:
	return type;
}