/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

struct ehea_bmap *ehea_bmap = NULL;
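
/*
 * Return the queue page at the current offset and advance the offset by one
 * queue page.  Returns NULL when the offset would run past the end of the
 * queue or when the entry is not aligned to an EHEA page boundary.
 */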
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64)retvalue) & (EHEA_PAGESIZE - 1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}
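
/*
 * Set up the software view of a hardware queue: allocate the queue_pages
 * pointer array and back it with zeroed kernel pages, each of which is
 * carved into one or more HEA queue pages of 'pagesize' bytes.
 */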
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		pr_err("no mem for queue_pages\n");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;

		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;

out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_err("no mem for cq\n");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}
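
/*
 * Create an event queue.  The flow mirrors ehea_create_cq(): allocate the EQ
 * resource, build the backing hw_queue and register its pages, expecting
 * H_PAGE_REGISTERED for every page except the last one, which must complete
 * with H_SUCCESS.
 */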
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		pr_err("no mem for eq\n");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/*
 * Allocates memory for a queue and registers the pages with phyp
 * (the hypervisor).
 */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		pr_err("no mem for qp\n");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}
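
/*
 * Memory section bookkeeping for the kernel memory region: ehea_bmap is a
 * three level lookup table.  ehea_bmap->top[] holds ehea_top_bmap entries,
 * each of which holds dir[] pointers to ehea_dir_bmap entries, whose ent[]
 * slots carry the bus address assigned to one EHEA_SECTSIZE memory section.
 * A section index is split into the three levels with EHEA_TOP_INDEX_SHIFT,
 * EHEA_DIR_INDEX_SHIFT and EHEA_INDEX_MASK.
 */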
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
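
/*
 * Walk the whole busmap and assign contiguous bus addresses, starting at
 * EHEA_BUSMAP_START, to every section that is marked valid.  Directory and
 * top entries that end up without any valid section are freed again.
 */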
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}
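
/*
 * Add or remove the sections covering [pfn, pfn + nr_pages) to or from the
 * busmap.  Entries are only marked valid or invalid here; the actual bus
 * addresses are assigned afterwards by ehea_rebuild_busmap().
 */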
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
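
/*
 * A pfn starts a huge page if it is aligned according to
 * EHEA_HUGEPAGE_PFN_MASK and the compound page order matches
 * EHEA_HUGEPAGESHIFT (a 16GB hugepage, see ehea_create_busmap_callback()).
 */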
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
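
/*
 * Translate a kernel virtual address into the bus address the HEA uses for
 * the kernel memory region: look up the section in the busmap and add the
 * offset within the section.  Returns EHEA_INVAL_ADDR for unmapped memory.
 */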
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}
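
/*
 * Register one EHEA_SECTSIZE memory section with the hypervisor: fill the
 * page table 'pt' with EHEA_MAX_RPAGE physical page addresses at a time and
 * hand each batch to ehea_h_register_rpage_mr().
 */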
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = virt_to_abs(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = virt_to_abs(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
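
/*
 * Register the whole kernel memory region, i.e. all sections recorded in the
 * busmap, as one MR of ehea_mr_len bytes starting at EHEA_BUSMAP_START.
 */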
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}
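
/*
 * Fetch the error data block for a resource from the hypervisor, extract the
 * AER/AERR registers and log the block.  Returns the resource type found in
 * the block, or 0 when no error data was available.
 */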
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);

	free_page((unsigned long)rblock);
out:
	return type;
}