// SPDX-License-Identifier: GPL-2.0-only
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif
enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
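
/*
 * For example, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) packs the
 * two page shifts into the top bytes of the value the HV expects:
 * (24 << 56) | (16 << 48) = 0x1810000000000000.
 */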

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 * @destroy: flag if region should be destroyed upon shutdown
 */

struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
	int destroy;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 * @rm: real mode (bootmem) region
 * @r1: highmem region(s)
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};
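
/*
 * Rough sketch of the layout (sizes here are hypothetical): Linux sees one
 * contiguous physical space [0, map.total), assembled from the real mode
 * region [0, rm.size) plus the highmem region.  In the HV lpar space the
 * highmem region sits at r1.base, which in general differs from rm.size,
 * and r1.offset = r1.base - rm.size records that gap:
 *
 *   phys: [0 ........ rm.size ........ total)
 *   lpar: [0 ........ rm.size)  ...gap...  [r1.base .. r1.base + r1.size)
 */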
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
}

static struct map map;
/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
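
/*
 * Worked example with hypothetical values: if map.rm.size = 0x8000000,
 * map.total = 0x10000000 and map.r1.offset = 0x70000000, then phys
 * 0x4000000 (in the real mode region) is returned unchanged, while phys
 * 0xc000000 (in the highmem region) becomes 0xc000000 + 0x70000000 =
 * 0x7c000000 in the lpar space.
 */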

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	u64 start_address;
	u64 size;
	u64 access_right;
	u64 max_page_size;
	u64 flags;

	result = lv1_query_logical_partition_address_region_info(0,
			&start_address, &size, &access_right, &max_page_size,
			&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 *
 * called during kexec sequence with MMU off.
 */

notrace void ps3_mm_vas_destroy(void)
{
	int result;

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		result += lv1_destruct_virtual_address_space(map.vas_id);

		if (result)
			lv1_panic(0);

		map.vas_id = 0;
	}
}
static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
{
	int result;

	/* Assume a single highmem region. */

	result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

	if (result)
		goto zero_region;

	if (!r->base || !r->size) {
		result = -1;
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;

	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
	    __func__, __LINE__, r->base, r->size);

	return 0;

zero_region:
	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

	r->size = r->base = r->offset = 0;
	return result;
}
static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
	/* Assume a single highmem region. */

	return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
		ps3_repository_write_highmem_info(0, 0, 0);
}
/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
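
/*
 * Example of the rounding above: a request of size = 0x1900000 (25 MB)
 * yields r->size = ALIGN_DOWN(0x1900000, 1 << PAGE_SHIFT_16M) =
 * 0x1000000 (16 MB), a reported difference of 0x900000 (9 MB).
 */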

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	if (!r->destroy)
		return;

	if (r->base) {
		result = lv1_release_memory(r->base);

		if (result)
			lv1_panic(0);

		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}

	ps3_mm_set_repository_highmem(NULL);
}
/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}
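
/*
 * The translation above is the inverse of ps3_mm_phys_to_lpar() followed
 * by a rebase: highmem lpar addresses are first folded back into the
 * contiguous [0, map.total) view, then shifted from the region offset
 * into the ioc bus window starting at r->bus_addr.
 */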

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev        %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len        %lxh\n", func, line, r->len);
	DBG("%s:%d: offset     %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};
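
/*
 * Chunks are reference counted: a map request that falls inside an
 * existing chunk just bumps usage_count, and an unmap decrements it;
 * the chunk and its ioc mapping are only torn down when the count
 * drops to zero.
 */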

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev        %llu:%llu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset     %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + bus_addr - aligned_bus,
					  1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
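
/*
 * Alignment example (hypothetical 4K dma pages, r->page_size = 12):
 * bus_addr = 0x1234, len = 0x100 gives aligned_bus = 0x1000 and
 * aligned_len = ALIGN(0x100 + 0x1234 - 0x1000, 0x1000) = 0x1000, so the
 * lookup always covers whole dma pages around the request.
 */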

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
					  1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
			else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr) {
			continue;
		}
		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar) {
			continue;
		}
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr)
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);

	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
		    c->bus_addr + offset,
		    c->lpar_addr + offset,
		    r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
			    __LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
					   c->region->dev->dev_id, c->lpar_addr,
					   c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
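
/*
 * The BUG_ON above pins iopte_flag to 0xf800000000000000UL which,
 * assuming the CBE_IOPTE_* definitions in <asm/cell-regs.h>, is
 * CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M:
 * a read/write, strongly ordered, coherent mapping.
 */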

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
			      unsigned long len, struct dma_chunk **c_out,
			      u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
				  struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
		    last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
	    r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       iopte_flag);
		if (result) {
			pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
				__func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
		    r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
		/* recompute the per-page offset when unwinding */
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
	    __LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
					 r->len,
					 r->page_size,
					 &bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
	    r->len, r->page_size, r->bus_addr);
	return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
					  1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
			     unsigned long len, dma_addr_t *bus_addr,
			     u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
					  1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
	    virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
	    phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* FIXME */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
	    virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = ALIGN(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
			       dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
						       1 << r->page_size);
		unsigned long aligned_len = ALIGN(len + bus_addr
						  - aligned_bus,
						  1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = ALIGN(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}
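
/*
 * Two dma_sb_map_area() calls are needed above because lpar space is
 * not contiguous: RAM is split between the real mode region at lpar 0
 * and the highmem region at map.r1.base, so each piece intersecting
 * the dma window is mapped separately.
 */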

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;
	int result;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : ALIGN(map.total, 1 << r->page_size);

	dev->core.dma_mask = &r->dma_mask;

	result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));

	if (result < 0) {
		dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
			__func__, __LINE__, result);
		return result;
	}

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;

	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;

	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);
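
/*
 * Typical use from a bus driver (a minimal sketch; the device and
 * region variables here are hypothetical):
 *
 *	static struct ps3_dma_region dma_region;
 *
 *	result = ps3_dma_region_init(dev, &dma_region, PS3_DMA_64K,
 *				     PS3_DMA_OTHER, NULL, 0);
 *	if (!result)
 *		result = ps3_dma_region_create(&dma_region);
 *
 * ps3_dma_region_init() only fills in the region and selects the ops;
 * ps3_dma_region_create() below makes the HV calls (and, for the linear
 * ops, premaps RAM).
 */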

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* Check if we got the highmem region from an earlier boot step */

	if (ps3_mm_get_repository_highmem(&map.r1)) {
		result = ps3_mm_region_create(&map.r1,
			map.total - map.rm.size);

		if (!result)
			ps3_mm_set_repository_highmem(&map.r1);
	}

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
			__func__, __LINE__, map.rm.size,
			map.total - map.rm.size);
		memblock_add(map.rm.size, map.total - map.rm.size);
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 *
 * called during kexec sequence with MMU off.
 */

notrace void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}