// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/numa.h>

#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>

#include "pci_impl.h"
#define DRIVER_NAME	"fire"
#define PFX		DRIVER_NAME ": "

#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size. */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses. */
	iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */ |
		    0x00000300 /* Cache mode */ |
		    0x00000002 /* Bypass enable */ |
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}
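
/*
 * MSI support.  Fire reports MSI/MSI-X arrivals through per-PBM event
 * queues: each queue is a ring of 64-byte pci_msiq_entry records in a
 * contiguous memory block, with head/tail pointers kept in the
 * EVENT_QUEUE_* registers defined below.  The sparc64_msiq_ops hooks
 * further down are handed to the common sparc64 MSI layer via
 * sparc64_pbm_msi_init().
 */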
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR		0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID			0x8000000000000000UL
#define MSI_MAP_EQWR_N			0x4000000000000000UL
#define MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL

static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}
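
/*
 * Pull one record off an event queue.  Returns 1 with the decoded MSI
 * number when a valid MSI32/MSI64 entry is present, 0 when the slot at
 * *head is still empty (FMT_TYPE clear), or -EINVAL for an unexpected
 * record type.  The head index is advanced in place, wrapping at
 * pbm->msiq_ent_count; the updated head is presumably written back by
 * the caller through the .set_head hook.
 */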
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry. */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring. */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));

	val &= ~MSI_MAP_VALID;

	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}
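
/*
 * The event queues live in one 512KB physically contiguous allocation;
 * its physical base is programmed into EVENT_QUEUE_BASE_ADDR_REG and
 * the per-queue head/tail registers are cleared.  The MSI_32BIT_ADDR
 * and MSI_64BIT_ADDR registers are pointed at pbm->msi32_start and
 * pbm->msi64_start, which are presumably reserved beforehand by the
 * common sparc64 MSI code.
 */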
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!irq)
		return -ENOMEM;

	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return irq;
}

static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	= pci_fire_get_head,
	.dequeue_msi	= pci_fire_dequeue_msi,
	.set_head	= pci_fire_set_head,
	.msi_setup	= pci_fire_msi_setup,
	.msi_teardown	= pci_fire_msi_teardown,
	.msiq_alloc	= pci_fire_msiq_alloc,
	.msiq_free	= pci_fire_msiq_free,
	.msiq_build_irq	= pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL
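
/*
 * One-time controller bring-up: enable parity checking, select the
 * error sources that force a fatal reset (going by the register and
 * bit names above), unmask the core, DMC and PEC interrupts, and
 * program the TLU and LPU link registers.
 */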
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
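
/*
 * Per-PBM setup: fill in the generic pci_pbm_info fields, take the
 * register bases from the "reg" property of the device tree node, run
 * the hardware, IOMMU and MSI initialization above, scan the bus and
 * link the PBM into the global pci_pbm_root list.
 */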
static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
			     struct platform_device *op, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = NUMA_NO_NODE;

	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->op = op;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

	/* XXX register error interrupt handlers XXX */

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}
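
/*
 * Platform driver probe: allocate the pci_pbm_info and its iommu, read
 * the "portid" property from the device tree node and hand everything
 * to pci_fire_pbm_init().
 */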
static int fire_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	u32 portid;
	int err;

	portid = of_getintprop_default(dp, "portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_fire_pbm_init(pbm, op, portid);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};

static struct platform_driver fire_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fire_match,
	},
	.probe		= fire_probe,
};

static int __init fire_init(void)
{
	return platform_driver_register(&fire_driver);
}

subsys_initcall(fire_init);