/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"
struct ehea_busmap ehea_bmap = { 0, 0, NULL };
extern u64 ehea_driver_flags;
extern struct work_struct ehea_rereg_mr_task;
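
/*
 * Advance the queue iterator by one hardware queue page and return the
 * page just consumed, or NULL once the end of the queue is reached or
 * the returned pointer is not EHEA_PAGESIZE aligned.
 */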
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                ehea_error("not on pageboundary");
                retvalue = NULL;
        }
        return retvalue;
}
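
/*
 * Build a hardware queue from whole kernel pages: each kernel page is
 * carved into pages_per_kpage queue pages of 'pagesize' bytes, so
 * pagesize must not exceed PAGE_SIZE.
 */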
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                         const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                ehea_error("pagesize conflict! kernel pagesize=%d, "
                           "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
                ehea_error("no mem for queue_pages");
                return -ENOMEM;
        }

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;

out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}
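
/*
 * Tear down a hardware queue. Backing memory was allocated in whole
 * kernel pages, so only every pages_per_kpage-th entry of queue_pages
 * points at the start of a kernel page that must be freed.
 */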
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        pages_per_kpage = PAGE_SIZE / queue->pagesize;
        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}
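
/*
 * Allocate a completion queue: reserve the firmware resource, build the
 * backing hw_queue, then register every queue page with the hypervisor.
 * After the last page the iterator must return NULL, proving that
 * exactly attr.nr_pages pages were consumed.
 */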
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        struct h_epa epa;
        u64 *cq_handle_ref, hret, rpage;
        u32 act_nr_of_entries, act_pages, counter;
        int ret = -ENOMEM;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ehea_error("no mem for cq");
                goto out_nomem;
        }

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        cq_handle_ref = &cq->fw_handle;
        act_nr_of_entries = 0;
        act_pages = 0;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                ehea_error("alloc_resource_cq failed");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        ehea_error("hw_qpageit_get_inc failed");
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        ehea_error("register_rpage_cq failed ehea_cq=%p "
                                   "hret=%lx counter=%i act_pages=%i",
                                   cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                ehea_error("registration of pages not "
                                           "complete hret=%lx\n", hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
                                ehea_error("CQ: registration of page failed "
                                           "hret=%lx\n", hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        epa = cq->epas.kernel;
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}
u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previous registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}
int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret;

        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);

        if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                ehea_error("destroy CQ failed");
                return -EIO;
        }

        return 0;
}
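
/*
 * Event queue creation mirrors ehea_create_cq(): allocate the firmware
 * resource, construct the backing hw_queue and register its pages, with
 * the same end-of-queue check on the last page.
 */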
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq) {
                ehea_error("no mem for eq");
                return NULL;
        }

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                ehea_error("alloc_resource_eq failed");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                ehea_error("can't allocate eq pages");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        ehea_error("hw_qpageit_get_inc failed");
                        hret = H_RESOURCE;
                        goto out_kill_hwq;
                }

                rpage = virt_to_abs(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if ((hret != H_PAGE_REGISTERED) || (!vpage))
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}
u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);

        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}
int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret;

        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                ehea_error("destroy EQ failed");
                return -EIO;
        }

        return 0;
}
/* allocates memory for a queue and registers pages in phyp */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                           int nr_pages, int wqe_size, int act_nr_sges,
                           struct ehea_adapter *adapter, int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        ehea_error("hw_qpageit_get_inc failed");
                        goto out_kill_hwq;
                }
                rpage = virt_to_abs(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        ehea_error("register_rpage_qp failed");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}
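
/*
 * WQE sizes are exchanged with the hypervisor as an encoded shift:
 * size_in_bytes = 128 << enc, e.g. enc 0 -> 128 bytes, enc 2 -> 512 bytes.
 */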
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}
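
/*
 * Create a queue pair: one send queue and up to three receive queues,
 * each registered through ehea_qp_alloc_register() with its own queue
 * selector (assumed here to be 0 for the SQ and 1..3 for RQ1..RQ3,
 * matching the h_call_q_selector argument below).
 */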
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                ehea_error("no mem for qp");
                return NULL;
        }

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                ehea_error("ehea_h_alloc_resource_qp failed");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                ehea_error("can't register for sq ret=%x", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                ehea_error("can't register for rq1 ret=%x", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        ehea_error("can't register for rq2 ret=%x", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        ehea_error("can't register for rq3 ret=%x", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}
u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}
int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret;

        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                ehea_error("destroy QP failed");
                return -EIO;
        }

        return 0;
}
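
/*
 * The busmap translates valid kernel memory sections into a flat,
 * gap-free address space starting at EHEA_BUSMAP_START, so that one
 * contiguous region can be registered with the hypervisor as a single
 * memory region.
 */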
int ehea_create_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        unsigned long high_section_index = 0;
        int i;

        /*
         * Sections are not in ascending order -> Loop over all sections and
         * find the highest PFN to compute the required map size.
         */
        ehea_bmap.valid_sections = 0;

        for (i = 0; i < NR_MEM_SECTIONS; i++)
                if (valid_section_nr(i))
                        high_section_index = i;

        ehea_bmap.entries = high_section_index + 1;
        ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));

        if (!ehea_bmap.vaddr)
                return -ENOMEM;

        for (i = 0 ; i < ehea_bmap.entries; i++) {
                unsigned long pfn = section_nr_to_pfn(i);

                if (pfn_valid(pfn)) {
                        ehea_bmap.vaddr[i] = vaddr;
                        vaddr += EHEA_SECTSIZE;
                        ehea_bmap.valid_sections++;
                } else
                        ehea_bmap.vaddr[i] = 0;
        }

        return 0;
}
void ehea_destroy_busmap(void)
{
        vfree(ehea_bmap.vaddr);
}
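
/*
 * Translate a kernel virtual address into the busmap address space.
 * Returns -1 for unmapped addresses; in that case all transfers are
 * stopped and the MR re-registration task is scheduled once.
 */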
u64 ehea_map_vaddr(void *caddr)
{
        u64 mapped_addr;
        unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;

        if (likely(index < ehea_bmap.entries)) {
                mapped_addr = ehea_bmap.vaddr[index];
                if (likely(mapped_addr))
                        mapped_addr |= (((unsigned long)caddr)
                                        & (EHEA_SECTSIZE - 1));
                else
                        mapped_addr = -1;
        } else
                mapped_addr = -1;

        if (unlikely(mapped_addr == -1))
                if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
                        schedule_work(&ehea_rereg_mr_task);

        return mapped_addr;
}
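
/*
 * Register all valid memory sections as one memory region with the
 * hypervisor. Pages are handed over in batches of EHEA_MAX_RPAGE
 * entries through a page-sized table of bus addresses (pt).
 */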
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        void *pg;
        u64 hret, pt_abs, i, j, m, mr_len;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;

        mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;

        pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!pt) {
                ehea_error("no mem");
                ret = -ENOMEM;
                goto out;
        }
        pt_abs = virt_to_abs(pt);

        hret = ehea_h_alloc_resource_mr(adapter->handle,
                                        EHEA_BUSMAP_START, mr_len,
                                        acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
        if (hret != H_SUCCESS) {
                ehea_error("alloc_resource_mr failed");
                ret = -EIO;
                goto out;
        }

        for (i = 0 ; i < ehea_bmap.entries; i++)
                if (ehea_bmap.vaddr[i]) {
                        void *sectbase = __va(i << SECTION_SIZE_BITS);
                        unsigned long k = 0;

                        for (j = 0; j < (EHEA_PAGES_PER_SECTION /
                                         EHEA_MAX_RPAGE); j++) {

                                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                                        pt[m] = virt_to_abs(pg);
                                }

                                hret = ehea_h_register_rpage_mr(adapter->handle,
                                                                mr->handle, 0,
                                                                0, pt_abs,
                                                                EHEA_MAX_RPAGE);
                                if ((hret != H_SUCCESS)
                                    && (hret != H_PAGE_REGISTERED)) {
                                        ehea_h_free_resource(adapter->handle,
                                                             mr->handle,
                                                             FORCE_FREE);
                                        ehea_error("register_rpage_mr failed");
                                        ret = -EIO;
                                        goto out;
                                }
                        }
                }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                ehea_error("registering mr failed");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        kfree(pt);
        return ret;
}
int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                ehea_error("destroy MR failed");
                return -EIO;
        }

        return 0;
}
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}
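
/*
 * Decode a hypervisor error data block: data[0] carries the length,
 * data[1] the resource handle and data[2] the resource type (0x8 QP,
 * 0x4 CQ, 0x3 EQ); the state words that follow are printed per type
 * before the whole block is dumped.
 */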
void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == 0x8) /* Queue Pair */
                ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, "
                           "port=%lX", resource, data[6], data[12], data[22]);

        if (type == 0x4) /* Completion Queue */
                ehea_error("CQ (resource=%lX) state: AER=0x%lX", resource,
                           data[6]);

        if (type == 0x3) /* Event Queue */
                ehea_error("EQ (resource=%lX) state: AER=0x%lX", resource,
                           data[6]);

        ehea_dump(data, length, "error data");
}
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
{
        unsigned long ret;
        u64 *rblock;

        rblock = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!rblock) {
                ehea_error("Cannot allocate rblock memory.");
                return;
        }

        ret = ehea_h_error_data(adapter->handle,
                                res_handle,
                                rblock);

        if (ret == H_R_STATE)
                ehea_error("No error data is available: %lX.", res_handle);
        else if (ret == H_SUCCESS)
                print_error_data(rblock);
        else
                ehea_error("Error data could not be fetched: %lX", res_handle);

        kfree(rblock);
}