/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		pr_err("no mem for queue_pages\n");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	/* compute only after the NULL check above: queue must not be
	 * dereferenced before it has been validated */
	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
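
/*
 * Usage sketch (illustrative only, not called like this anywhere in the
 * driver): hw_queue_ctor() and hw_queue_dtor() are always used as a pair,
 * with the entry size chosen by the caller. A hypothetical queue of four
 * EHEA pages holding CQE-sized entries would be handled roughly as:
 *
 *	struct hw_queue q;
 *
 *	if (!hw_queue_ctor(&q, 4, EHEA_PAGESIZE, sizeof(struct ehea_cqe))) {
 *		... walk the queue page by page via hw_qpageit_get_inc(&q) ...
 *		hw_queue_dtor(&q);
 *	}
 */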
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_err("no mem for cq\n");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}
static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}
int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;
	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		pr_err("no mem for eq\n");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}
static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}
int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;
	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}
/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
				  int nr_pages, int wqe_size, int act_nr_sges,
				  struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
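
/*
 * Decoding example (derived from the formula above): the firmware encodes
 * WQE sizes as a power-of-two exponent relative to 128 bytes, so an
 * encoded size of 0 means 128-byte WQEs, 1 means 256, 2 means 512 and
 * 3 means 1024.
 */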
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		pr_err("no mem for qp\n");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}
static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}
int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;
	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}
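
/*
 * A section index i is split into three fields for the three-level
 * ehea_bmap lookup below (shift values come from ehea_qmr.h):
 *
 *	top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
 *	dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
 *	idx = i & EHEA_INDEX_MASK;
 *
 * ehea_update_busmap() and ehea_map_vaddr() both walk the top/dir/ent
 * levels with exactly this decomposition.
 */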
static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}
static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}
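
/*
 * Example of the section arithmetic above (illustrative; assumes
 * EHEA_SECTSIZE is 16MB, i.e. 1UL << SECTION_SIZE_BITS on this platform):
 * adding a 1GB range starting at physical address 0 yields
 * start_section = 0 and end_section = 64, so sections 0..63 are flagged
 * valid and ehea_mr_len grows by 64 * EHEA_SECTSIZE.
 */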
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
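
/*
 * These two wrappers are the mutex-protected entry points used from
 * outside this file. As an illustration (assuming the memory hotplug
 * notifier in ehea_main.c as the caller), an onlined memory range would
 * be added via
 *
 *	ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages);
 *
 * where arg is the struct memory_notify passed to the notifier;
 * offlining would use ehea_rem_sect_bmap() the same way.
 */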
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}
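
/*
 * Worked check (illustrative; assumes EHEA_HUGEPAGESHIFT == 34, i.e. the
 * 16GB hugepages mentioned in the callback below): on a kernel with 64K
 * pages (PAGE_SHIFT == 16), only a compound page of order 18
 * (16 + 18 == 34) whose first pfn is 16GB-aligned passes both tests
 * above.
 */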
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}
int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
void ehea_destroy_busmap(void)
{
	int top, dir;
	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}
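
/*
 * Worked example (derived from the code above): if the section holding
 * caddr was the n-th valid section when ehea_rebuild_busmap() last ran,
 * its ent[] slot holds EHEA_BUSMAP_START + n * EHEA_SECTSIZE, so
 * ehea_map_vaddr() returns
 *
 *	EHEA_BUSMAP_START + n * EHEA_SECTSIZE
 *		+ ((unsigned long)caddr & (EHEA_SECTSIZE - 1))
 *
 * i.e. a flat, densely packed bus address space over all registered
 * sections.
 */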
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = virt_to_abs(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = virt_to_abs(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}
static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}
int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}
static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);

	free_page((unsigned long)rblock);
out:
	return type;
}