// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <asm/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"
/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        64-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}
/**
 * __genwqe_readq() - Read 64-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: value from register
 */
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}
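/*
 * Usage sketch (illustrative only): both accessors take and return
 * CPU-endian values and hide the card's big-endian register layout,
 * e.g.:
 *
 *	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
 *	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
 */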
/**
 * __genwqe_writel() - Write 32-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        32-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}
/**
 * __genwqe_readl() - Read 32-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: Value from register
 */
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}
/**
 * genwqe_read_app_id() - Extract app_id
 *
 * app_unitcfg needs to be filled with valid data first.
 */
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}
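/*
 * Usage sketch (illustrative only): a 5 byte buffer leaves room for
 * the up-to-4 printable id characters plus the zero termination
 * provided by the memset() above:
 *
 *	char app_name[5];
 *
 *	genwqe_read_app_id(cd, app_name, sizeof(app_name));
 */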
/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * Existing kernel functions seem to use a different polynomial,
 * therefore we could not use them here.
 *
 * Genwqe's Polynomial = 0x20044009
 */
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}
/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff:       pointer to data buffer
 * @len:        length of data for calculation
 * @init:       initial crc (0xffffffff at start)
 *
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
 * result in a crc32 of 0xf33cb7d3.
 *
 * The existing kernel crc functions did not cover this polynomial yet.
 *
 * Return: crc32 checksum.
 */
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}
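/*
 * Self-test sketch for the documented test vector above; this helper
 * is illustrative only and not part of the driver. It assumes that
 * genwqe_init_crc32() has already been called.
 */
#ifdef GENWQE_CRC32_SELFTEST
static void genwqe_crc32_selftest(void)
{
	static u8 buf[] = { 0x01, 0x02, 0x03, 0x04 };

	/* expected result taken from the kernel-doc example above */
	WARN_ON(genwqe_crc32(buf, sizeof(buf), 0xffffffff) != 0xf33cb7d3);
}
#endif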
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
			       dma_addr_t *dma_handle)
{
	if (get_order(size) >= MAX_ORDER)
		return NULL;

	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
				  GFP_KERNEL);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			     void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}
static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			       int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}
static int genwqe_map_pages(struct genwqe_dev *cd,
			   struct page **page_list, int num_pages,
			   dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = pci_map_page(pci_dev, page_list[i],
				     0,	 /* map_offs */
				     PAGE_SIZE,
				     PCI_DMA_BIDIRECTIONAL);  /* FIXME rd/rw */

		if (pci_dma_mapping_error(pci_dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}
static int genwqe_sgl_size(int num_pages)
{
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}
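/*
 * Sizing example (sketch): each block of 8 sg entries carries 7 data
 * entries plus 1 chaining entry, hence the num_pages/7 extra slots,
 * plus one more slot for the terminating entry. Assuming
 * sizeof(struct sg_entry) == 16, num_pages = 14 gives num_tlb = 2 and
 * len = 16 * (14 + 2 + 1) = 272 bytes, rounded up to one full page.
 */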
/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are being cached for DMAs,
 * such that we do not run into synchronization issues. Data is copied
 * from user-space into the cached pages.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write)
{
	int ret = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->write = write;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) > MAX_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return ret;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return ret;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			ret = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			ret = -EFAULT;
			goto err_out2;
		}
	}
	return 0;

 err_out2:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
				 sgl->lpage_dma_addr);
	sgl->lpage = NULL;
	sgl->lpage_dma_addr = 0;
 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
	sgl->fpage = NULL;
	sgl->fpage_dma_addr = 0;
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0;
	sgl->sgl_size = 0;

	return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;		/* next block if needed/dma_offset */
	map_offs = sgl->fpage_offs; /* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len	 = cpu_to_be32(128);
		s[j].flags	 = cpu_to_be32(SG_CHAINED);
		j = 1;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/* pr_info("daddr combining: "
					"%016llx/%08x -> %016llx\n",
					prev_daddr, prev_len, daddr); */

				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len	 = cpu_to_be32(size_to_map);
			s[j].flags	 = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++;	/* process next page */
			if (p == sgl->nr_pages)
				goto fixup;	/* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len	 = cpu_to_be32(0);
	s[i].flags	 = cpu_to_be32(SG_END_LIST);
	return 0;
}
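/*
 * Resulting layout (sketch, assuming no address combining): every
 * 128-byte block starts with a chaining entry pointing to the next
 * block, followed by up to 7 data entries. The fixup code above
 * drops the superfluous chaining entry of the final block and
 * terminates the list with an SG_END_LIST entry, e.g. for 10 pages:
 *
 *	block 0: [chain -> block 1][data 0][data 1] ... [data 6]
 *	block 1: [data 7][data 8][data 9] ... [SG_END_LIST]
 */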
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	size_t offset;
	unsigned long res;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (sgl->write) {
			res = copy_to_user(sgl->user_addr,
				sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying fpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (sgl->write) {
			offset = sgl->user_size - sgl->lpage_size;
			res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
					   sgl->lpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying lpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}
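/*
 * Usage sketch (illustrative only, not driver code): the three
 * functions above are paired around the actual DMA transfer; the
 * dma_list is assumed to come from a prior genwqe_user_vmap() of the
 * same user buffer:
 *
 *	struct genwqe_sgl sgl = { 0 };
 *
 *	rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size, 1);
 *	if (rc == 0) {
 *		rc = genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *		... hand sgl.sgl_dma_addr to the hardware ...
 *		rc = genwqe_free_sync_sgl(cd, &sgl);
 *	}
 */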
/**
 * genwqe_free_user_pages() - Give pinned pages back
 *
 * Documentation of get_user_pages is in mm/gup.c:
 *
 * If the page is written to, set_page_dirty (or set_page_dirty_lock,
 * as appropriate) must be called after the page is finished with, and
 * before put_page is called.
 */
static int genwqe_free_user_pages(struct page **page_list,
			unsigned int nr_pages, int dirty)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i] != NULL) {
			if (dirty)
				set_page_dirty_lock(page_list[i]);
			put_page(page_list[i]);
		}
	}
	return 0;
}
/**
 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 * @uaddr:      user virtual address
 * @size:       size of memory to be mapped
 *
 * We need to think about how we could speed this up. Of course it is
 * not a good idea to do this over and over again, like we are
 * currently doing it. Nevertheless, I am curious where on the path
 * the performance is spent. Most probably within the memory
 * allocation functions, but maybe also in the DMA mapping code.
 *
 * Restrictions: The maximum size of the possible mapping currently depends
 *               on the amount of memory we can get using kzalloc() for the
 *               page_list and pci_alloc_consistent for the sg_list.
 *               The sg_list is currently itself not scattered, which could
 *               be fixed with some effort. The page_list must be split into
 *               PAGE_SIZE chunks too. All that will make the complicated
 *               code more complicated.
 *
 * Return: 0 if success
 */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size    = size;

	/* determine space needed for page_list. */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	if (size > ULONG_MAX - PAGE_SIZE - offs) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 m->write ? FOLL_WRITE : 0, /* readable/writable */
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_get_user_pages;

	/* assumption: get_user_pages can be killed by signals. */
	if (rc < m->nr_pages) {
		genwqe_free_user_pages(m->page_list, rc, m->write);
		rc = -EFAULT;
		goto fail_get_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);

 fail_get_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;		/* mark unused and not added */
	return rc;
}
/**
 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
 *                        memory
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 */
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);

		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;		/* mark as unused and not added */
	return 0;
}
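/*
 * Pairing sketch (illustrative only): every successful
 * genwqe_user_vmap() must be matched by genwqe_user_vunmap() once the
 * hardware is done with the buffer; "m" must be initialized (e.g.
 * zeroed) first so dma_mapping_used() reports it as unused:
 *
 *	struct dma_mapping m;
 *
 *	memset(&m, 0, sizeof(m));
 *	rc = genwqe_user_vmap(cd, &m, uaddr, size);
 *	if (rc == 0) {
 *		...
 *		genwqe_user_vunmap(cd, &m);
 *	}
 */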
/**
 * genwqe_card_type() - Get chip type SLU Configuration Register
 * @cd:         pointer to the genwqe device descriptor
 * Return: 0: Altera Stratix-IV 230
 *         1: Altera Stratix-IV 530
 *         2: Altera Stratix-V A4
 *         3: Altera Stratix-V A7
 */
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}
/**
 * genwqe_card_reset() - Reset the card
 * @cd:         pointer to the genwqe device descriptor
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* new SL */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits
	 *
	 * For SL >= 039, Stealth WE bit allows removing
	 * the read-modify-write.
	 * r-m-w may require a mask 0x3C to avoid hitting hard
	 * reset again for error reset (should be 0, chicken).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}
int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}
/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd:         pointer to the device
 * Return: 0 if no error
 */
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;

	rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
	if (rc < 0)
		return rc;
	return 0;
}

/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd:         pointer to the device
 */
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	pci_free_irq_vectors(cd->pci_dev);
}
/**
 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
 * @cd:         card device
 * @r:          debug register array
 * @i:          index to desired entry
 * @m:          maximum possible entries
 * @addr:       addr which is read
 * @idx:        index in debug array
 * @val:        read value
 */
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	*i += 1;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/* wherever there is a primary 1, read the 2ndary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}
/**
 * genwqe_ffdc_buff_size() - Calculates the number of dump registers
 */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
	int entries = 0, ring, traps, traces, trace_entries;
	u32 eevptr_addr, l_addr, d_len, d_type;
	u64 eevptr, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != -1ull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;

		while (1) {
			val = __genwqe_readq(cd, l_addr);

			if ((val == 0x0) || (val == -1ull))
				break;

			/* 38:24 */
			d_len = (val & 0x0000007fff000000ull) >> 24;

			/* 39 */
			d_type = (val & 0x0000008000000000ull) >> 36;

			if (d_type) {	/* repeat */
				entries += d_len;
			} else {	/* size in bytes! */
				entries += d_len >> 3;
			}

			l_addr += 8;
		}
	}

	for (ring = 0; ring < 8; ring++) {
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;
		traces = (val >> 16) & 0xff;
		trace_entries = val & 0xffff;

		entries += traps + (traces * trace_entries);
	}
	return entries;
}
/**
 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
 */
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
			  struct genwqe_reg *regs, unsigned int max_regs)
{
	int i, traps, traces, trace, trace_entries, trace_entry, ring;
	unsigned int idx = 0;
	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
	u64 eevptr, e, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
		while (1) {
			e = __genwqe_readq(cd, l_addr);
			if ((e == 0x0) || (e == 0xffffffffffffffffull))
				break;

			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
			d_addr |= GENWQE_UID_OFFS(uid);

			if (d_type) {
				for (i = 0; i < (int)d_len; i++) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, i, val);
				}
			} else {
				d_len >>= 3; /* Size in bytes! */
				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, 0, val);
				}
			}
			l_addr += 8;
		}
	}

	/*
	 * To save time, there are only 6 traces populated on Uid=2,
	 * Ring=1, each with iters=512.
	 */
	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
					      2...7 are ASI rings */
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;	/* Number of Traps */
		traces = (val >> 16) & 0xff;	/* Number of Traces */
		trace_entries = val & 0xffff;	/* Entries per trace */

		/* Note: This is a combined loop that dumps both the traps */
		/* (for the trace == 0 case) as well as the traces 1 to */
		/* traces. */
		for (trace = 0; trace <= traces; trace++) {
			u32 diag_sel =
				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

			addr = (GENWQE_UID_OFFS(uid) |
				IO_EXTENDED_DIAG_SELECTOR);
			__genwqe_writeq(cd, addr, diag_sel);

			for (trace_entry = 0;
			     trace_entry < (trace ? trace_entries : traps);
			     trace_entry++) {
				addr = (GENWQE_UID_OFFS(uid) |
					IO_EXTENDED_DIAG_READ_MBX);
				val = __genwqe_readq(cd, addr);
				set_reg_idx(cd, regs, &idx, max_regs, addr,
					    (diag_sel<<16) | trace_entry, val);
			}
		}
	}
	return 0;
}
/**
 * genwqe_write_vreg() - Write register in virtual window
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access.
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	__genwqe_writeq(cd, reg, val);
	return 0;
}

/**
 * genwqe_read_vreg() - Read register in virtual window
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access.
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	return __genwqe_readq(cd, reg);
}
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem. Better is to measure out the value or put the
 * speed/frequency directly into a register which is always a valid
 * value for old as well as for new software.
 *
 * Return: Card clock in MHz
 */
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
	u16 speed;			/*   MHz  MHz  MHz  MHz */
	static const int speed_grade[] = { 250, 200, 166, 175 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(speed_grade))
		return 0;	/* illegal value */

	return speed_grade[speed];
}
/**
 * genwqe_stop_traps() - Stop traps
 *
 * Before reading out the analysis data, we need to stop the traps.
 */
void genwqe_stop_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}

/**
 * genwqe_start_traps() - Start traps
 *
 * After having read the data, we can/must enable the traps again.
 */
void genwqe_start_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}