/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}

static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
					   GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}

static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;

	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}

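/*
 * Illustrative sketch only (not called anywhere in this file): the
 * hw_queue helpers above are always used as a ctor / page-iterate /
 * dtor triple, roughly like this (the page count is an assumed
 * example value):
 *
 *	struct hw_queue q;
 *	void *vpage;
 *
 *	if (hw_queue_ctor(&q, 4, EHEA_PAGESIZE, sizeof(struct ehea_cqe)))
 *		return -ENOMEM;
 *	while ((vpage = hw_qpageit_get_inc(&q)))
 *		;		- hand each page-aligned queue page to firmware
 *	hw_qeit_reset(&q);	- rewind before consuming entries
 *	hw_queue_dtor(&q);	- frees the kernel pages and the page array
 */
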
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previous registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;
	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}

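/*
 * Usage sketch (assumed values, for illustration only): a completion
 * queue is created against an existing event queue handle and must be
 * paired with ehea_destroy_cq() on teardown:
 *
 *	struct ehea_cq *cq;
 *
 *	cq = ehea_create_cq(adapter, 4096, eq->fw_handle, cq_token);
 *	if (!cq)
 *		return -ENOMEM;
 *	...
 *	ehea_destroy_cq(cq);
 */
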
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;

		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;

		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

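/*
 * Illustrative polling loop (not part of this file): callers drain an
 * event queue by calling ehea_poll_eq() until it returns NULL, e.g.:
 *
 *	struct ehea_eqe *eqe;
 *
 *	while ((eqe = ehea_poll_eq(eq)))
 *		handle_eqe(eqe);	- handle_eqe() is hypothetical
 */
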
static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;
	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
				  int nr_pages, int wqe_size, int act_nr_sges,
				  struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

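/*
 * WQE sizes are encoded as a power-of-two multiple of 128 bytes, so an
 * encoded size of 0 maps to 128 bytes and, for example, an encoded size
 * of 2 maps to 128 << 2 = 512 bytes.
 */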
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;
	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

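/*
 * The busmap below is a three-level table over memory sections: a
 * section index is split into top/dir/idx fields, each EHEA_INDEX_MASK
 * wide, using the shifts applied by ehea_calc_index().
 */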
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}

static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

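/*
 * Usage note: ehea_add_sect_bmap()/ehea_rem_sect_bmap() are intended as
 * the entry points for memory hotplug events (the memory notifier lives
 * in ehea_main.c), keeping the busmap in sync with onlined and offlined
 * sections.
 */
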
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

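/*
 * ehea_is_hugepage() checks both alignment (the pfn must sit on a
 * hugepage boundary) and the compound page order: page_order +
 * PAGE_SHIFT must equal EHEA_HUGEPAGESHIFT, i.e. the page must really
 * be a 16GB hugepage. The callback below uses it to carve hugepages
 * out of large RAM chunks.
 */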
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

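/*
 * ehea_create_busmap() walks all system RAM once; the callback above
 * registers ordinary sections in the busmap and skips 16GB hugepages,
 * adding only the memory found before and after them.
 */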
int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;
	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}

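/*
 * Worked example for ehea_map_vaddr() (assumed values): with
 * SECTION_SIZE_BITS = 24 and EHEA_SECTSIZE = 16MB, a caddr whose
 * physical address is 0x5000100 falls into section index 5; the busmap
 * lookup returns that section's assigned bus address, and the low 24
 * bits of caddr (here 0x100) are kept as the offset within the section.
 */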
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}

static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

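/*
 * Usage sketch (illustrative only): the driver registers one
 * kernel-wide memory region after the busmap exists, and releases it
 * with ehea_rem_mr() before tearing the busmap down:
 *
 *	if (ehea_create_busmap())
 *		goto err;
 *	if (ehea_reg_kernel_mr(adapter, &adapter->mr))
 *		goto err;
 *	...
 *	ehea_rem_mr(&adapter->mr);
 *	ehea_destroy_busmap();
 */
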
int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

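/*
 * ehea_error_data() fetches the firmware error data for a resource
 * handle into a zeroed page, reports AER/AERR through the out
 * parameters and returns the resource type (or 0 if nothing could be
 * read). The destroy paths above call it when a NORMAL_FREE fails with
 * H_R_STATE, before retrying with FORCE_FREE.
 */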
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);

	free_page((unsigned long)rblock);
out:
	return type;
}