/* $Id: pci_iommu.c,v 1.1 1999/08/30 10:00:47 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/scatterlist.h>
#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})

#define pci_iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

/* Find a range of iommu mappings of size NPAGES in page
 * table PGT.  Return pointer to first iopte.
 */
static iopte_t *iommu_find_range(unsigned long npages, iopte_t *pgt, int pgt_size)
{
        int i;

        pgt_size -= npages;
        for (i = 0; i < pgt_size; i++) {
                if (!(iopte_val(pgt[i]) & IOPTE_VALID)) {
                        int scan;

                        for (scan = 1; scan < npages; scan++) {
                                if (iopte_val(pgt[i + scan]) & IOPTE_VALID) {
                                        i += scan;
                                        goto do_next;
                                }
                        }
                        return &pgt[i];
                }
        do_next:
                ;
        }
        return NULL;
}
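
/* Note: this is a simple first-fit linear scan of the IOPTE table which
 * skips forward past any in-use entry it runs into, so allocation cost
 * grows with table size and fragmentation.  All callers hold iommu->lock,
 * so the scan itself needs no further locking.
 */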

#define IOPTE_CONSISTANT(CTX, PADDR) \
        (IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE | \
         (((CTX) << 47) & IOPTE_CONTEXT) | \
         ((PADDR) & IOPTE_PAGE))

#define IOPTE_STREAMING(CTX, PADDR) \
        (IOPTE_CONSISTANT(CTX, PADDR) | IOPTE_STBUF)

#define IOPTE_INVALID   0UL
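
/* An IOPTE_STREAMING entry differs from an IOPTE_CONSISTANT one only in
 * the IOPTE_STBUF bit: streaming translations route their data through
 * the PBM's streaming cache (stc), which is why the streaming unmap/sync
 * paths below must flush that cache, while pci_unmap_consistant() only
 * has to invalidate IOMMU TLB entries.
 */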

/* Map kernel buffer at ADDR of size SZ using consistent mode
 * DMA for PCI device PDEV.  Return 32-bit PCI DMA address.
 */
u32 pci_map_consistant(struct pci_dev *pdev, void *addr, int sz)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        u32 ret;

        spin_lock_irqsave(&iommu->lock, flags);
        oaddr = (unsigned long)addr;
        npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        base = iommu_find_range(npages,
                                iommu->page_table, iommu->page_table_sz);
        ret = 0;
        if (base != NULL) {
                unsigned long i, base_paddr, ctx;

                ret = (iommu->page_table_map_base +
                       ((base - iommu->page_table) << PAGE_SHIFT));
                ret |= (oaddr & ~PAGE_MASK);
                base_paddr = __pa(oaddr & PAGE_MASK);
                ctx = 0;
                if (iommu->iommu_has_ctx_flush)
                        ctx = iommu->iommu_cur_ctx++;
                for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
                        iopte_val(*base) = IOPTE_CONSISTANT(ctx, base_paddr);
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}

/* Unmap a consistent DMA translation. */
void pci_unmap_consistant(struct pci_dev *pdev, u32 bus_addr, int sz)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        iopte_t *base;
        unsigned long flags, npages, i, ctx;

        spin_lock_irqsave(&iommu->lock, flags);
        npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);

        /* Data for consistent mappings cannot enter the streaming
         * buffers, so we only need to update the TSB and flush
         * those entries from the IOMMU's TLB.
         */

        /* Step 1: Clear out the TSB entries.  Save away
         * the context if necessary.
         */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        for (i = 0; i < npages; i++, base++)
                iopte_val(*base) = IOPTE_INVALID;

        /* Step 2: Flush from IOMMU TLB. */
        if (iommu->iommu_has_ctx_flush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                bus_addr &= PAGE_MASK;
                for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
                        pci_iommu_write(iommu->iommu_flush, bus_addr);
        }

        /* Step 3: Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
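
#if 0   /* Illustrative only: a minimal sketch of how a driver might use
         * the consistant-mode interface above.  "example_consistant_usage",
         * "my_dev", "my_ring" and "ring_bytes" are hypothetical names,
         * not part of this file.
         */
static void example_consistant_usage(struct pci_dev *my_dev)
{
        int ring_bytes = 4096;                  /* hypothetical buffer size */
        void *my_ring = kmalloc(ring_bytes, GFP_KERNEL);
        u32 dvma;

        if (!my_ring)
                return;

        /* Hand the kernel buffer to the IOMMU; the return value is the
         * 32-bit bus address the device should be programmed with, or
         * zero if no IOPTE range was available.
         */
        dvma = pci_map_consistant(my_dev, my_ring, ring_bytes);
        if (dvma) {
                /* ... point the device's descriptor base at "dvma",
                 * run DMA, then tear the translation down ...
                 */
                pci_unmap_consistant(my_dev, dvma, ring_bytes);
        }
        kfree(my_ring);
}
#endif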

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
u32 pci_map_single(struct pci_dev *pdev, void *ptr, int sz)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        u32 ret;

        spin_lock_irqsave(&iommu->lock, flags);
        oaddr = (unsigned long)ptr;
        npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        base = iommu_find_range(npages,
                                iommu->page_table, iommu->page_table_sz);
        ret = 0;
        if (base != NULL) {
                unsigned long i, base_paddr, ctx;

                ret = (iommu->page_table_map_base +
                       ((base - iommu->page_table) << PAGE_SHIFT));
                ret |= (oaddr & ~PAGE_MASK);
                base_paddr = __pa(oaddr & PAGE_MASK);
                ctx = 0;
                if (iommu->iommu_has_ctx_flush)
                        ctx = iommu->iommu_cur_ctx++;
                for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
                        iopte_val(*base) = IOPTE_STREAMING(ctx, base_paddr);
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
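
/* pci_map_single() mirrors pci_map_consistant() exactly, except that the
 * IOPTEs are built with IOPTE_STREAMING, i.e. with IOPTE_STBUF set, so
 * the transfer goes through the streaming cache and must be flushed out
 * again by pci_unmap_single() or pci_dma_sync_single().
 */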

/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, u32 bus_addr, int sz)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        struct pci_strbuf *strbuf = &pcp->pbm->stc;
        iopte_t *base;
        unsigned long flags, npages, i, ctx;

        spin_lock_irqsave(&iommu->lock, flags);
        npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
        bus_addr &= PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 2: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                u32 vaddr = bus_addr;

                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_has_ctx_flush &&
                    iommu->iommu_has_ctx_flush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while (((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 3: Clear out TSB entries. */
        for (i = 0; i < npages; i++, base++)
                iopte_val(*base) = IOPTE_INVALID;

        /* Step 4: Flush the IOMMU TLB. */
        if (iommu->iommu_has_ctx_flush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
                        pci_iommu_write(iommu->iommu_flush, bus_addr);
        }

        /* Step 5: Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
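
/* The streaming-buffer flush in Step 2 above works in two phases: dirty
 * lines are first targeted either per context (repeated ctxflush writes,
 * polled via the context-match register) or per page (pflush writes for
 * each page of the mapping); then a flush-sync is issued against
 * strbuf_flushflag_pa and the code spins until the hardware writes that
 * flag back to memory, which proves the flushed data has reached main
 * memory before the translation is torn down.
 */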

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 */
void pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        unsigned long flags, ctx, i;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Choose a context if necessary. */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush)
                ctx = iommu->iommu_cur_ctx++;

        /* Step 2: Create the mappings. */
        for (i = 0; i < nelems; i++) {
                unsigned long oaddr, npages;
                iopte_t *base;

                oaddr = (unsigned long)sglist[i].address;
                npages = PAGE_ALIGN(oaddr + sglist[i].length) - (oaddr & PAGE_MASK);
                npages >>= PAGE_SHIFT;
                base = iommu_find_range(npages,
                                        iommu->page_table, iommu->page_table_sz);
                if (base != NULL) {
                        unsigned long j, base_paddr;
                        u32 dvma_addr;

                        dvma_addr = (iommu->page_table_map_base +
                                     ((base - iommu->page_table) << PAGE_SHIFT));
                        dvma_addr |= (oaddr & ~PAGE_MASK);
                        sglist[i].dvma_address = dvma_addr;
                        sglist[i].dvma_length = sglist[i].length;
                        base_paddr = __pa(oaddr & PAGE_MASK);
                        for (j = 0; j < npages; j++, base++, base_paddr += PAGE_SIZE)
                                iopte_val(*base) = IOPTE_STREAMING(ctx, base_paddr);
                } else {
                        sglist[i].dvma_address = 0;
                        sglist[i].dvma_length = 0;
                }
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}
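
/* All elements of one scatterlist share a single context value, which is
 * what lets the unmap/sync paths below flush the whole list with one
 * context flush.  An element that cannot be mapped gets dvma_address ==
 * dvma_length == 0, and the "if (!j) break" tests in those paths treat
 * such an element as the end of the list.
 */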

/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        struct pci_strbuf *strbuf = &pcp->pbm->stc;
        unsigned long flags, ctx, i;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dvma_address - iommu->page_table_map_base) >> PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_has_ctx_flush &&
                    iommu->iommu_has_ctx_flush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while (((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < nelems; i++) {
                                unsigned long j, npages;
                                u32 vaddr;

                                j = sglist[i].dvma_length;
                                if (!j)
                                        break;
                                vaddr = sglist[i].dvma_address;
                                npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
                                npages >>= PAGE_SHIFT;
                                vaddr &= PAGE_MASK;
                                for (j = 0; j < npages; j++, vaddr += PAGE_SIZE)
                                        pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                        }
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 3: Clear out TSB entries. */
        for (i = 0; i < nelems; i++) {
                unsigned long j, npages;
                iopte_t *base;
                u32 vaddr;

                j = sglist[i].dvma_length;
                if (!j)
                        break;
                vaddr = sglist[i].dvma_address;
                npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
                npages >>= PAGE_SHIFT;
                base = iommu->page_table +
                        ((vaddr - iommu->page_table_map_base) >> PAGE_SHIFT);
                for (j = 0; j < npages; j++, base++)
                        iopte_val(*base) = IOPTE_INVALID;
        }

        /* Step 4: Flush the IOMMU TLB. */
        if (iommu->iommu_has_ctx_flush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                for (i = 0; i < nelems; i++) {
                        unsigned long j, npages;
                        u32 vaddr;

                        j = sglist[i].dvma_length;
                        if (!j)
                                break;
                        vaddr = sglist[i].dvma_address;
                        npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
                        npages >>= PAGE_SHIFT;
                        for (j = 0; j < npages; j++, vaddr += PAGE_SIZE)
                                pci_iommu_write(iommu->iommu_flush, vaddr);
                }
        }

        /* Step 5: Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
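
#if 0   /* Illustrative only: a minimal sketch of a streaming scatterlist
         * transfer using the interface above.  "example_sg_usage",
         * "my_dev", "my_sg" and "my_sg_ents" are hypothetical names,
         * not part of this file.
         */
static void example_sg_usage(struct pci_dev *my_dev,
                             struct scatterlist *my_sg, int my_sg_ents)
{
        int i;

        /* Build the IOMMU translations; each element's bus address and
         * length come back in dvma_address/dvma_length.
         */
        pci_map_sg(my_dev, my_sg, my_sg_ents);

        for (i = 0; i < my_sg_ents; i++) {
                if (!my_sg[i].dvma_length)
                        break;          /* this element could not be mapped */
                /* ... program one device descriptor with
                 * my_sg[i].dvma_address / my_sg[i].dvma_length ...
                 */
        }

        /* ... start the device, wait for completion, then tear down ... */
        pci_unmap_sg(my_dev, my_sg, my_sg_ents);
}
#endif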

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single(struct pci_dev *pdev, u32 bus_addr, int sz)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        struct pci_strbuf *strbuf = &pcp->pbm->stc;
        unsigned long flags, ctx, npages;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        bus_addr &= PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush &&
            strbuf->strbuf_has_ctx_flush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_has_ctx_flush &&
            strbuf->strbuf_has_ctx_flush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while (((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i;

                for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}
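
/* pci_dma_sync_single() only has work to do when the streaming buffer is
 * enabled: it flushes any lines the streaming cache may still hold for
 * this mapping so the CPU reads the data the device actually wrote.  The
 * IOPTEs themselves are left intact because the mapping stays live.
 */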

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
        struct pci_strbuf *strbuf = &pcp->pbm->stc;
        unsigned long flags, ctx;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_has_ctx_flush &&
            strbuf->strbuf_has_ctx_flush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dvma_address - iommu->page_table_map_base) >> PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_has_ctx_flush &&
            strbuf->strbuf_has_ctx_flush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while (((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i;

                for (i = 0; i < nelems; i++) {
                        unsigned long bus_addr, npages, j;

                        j = sglist[i].dvma_length;
                        if (!j)
                                break;
                        bus_addr = sglist[i].dvma_address;
                        npages = PAGE_ALIGN(bus_addr + j) - (bus_addr & PAGE_MASK);
                        npages >>= PAGE_SHIFT;
                        bus_addr &= PAGE_MASK;
                        for (j = 0; j < npages; j++, bus_addr += PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
                }
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}