/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, this is why this
 * function has to be callable from assembly. It can also be called by
 * a primary CPU from C code during its boot.
 */
#include <linux/linkage.h>
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>	/* CR_M, tested in ll_get_coherency_base() */

	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
	tst	r1, #CR_M		@ Check MMU bit enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address. However, if the coherency fabric isn't mapped
	 * (i.e its virtual address is zero), it means coherency is
	 * not enabled, so we return 0.
	 */
	ldr	r1, =coherency_base
	cmp	r1, #0
	beq	2f
	/* PC-relative load of coherency_phys_base (see "3:" below) */
	adr	r1, 3f
	ldr	r3, [r1]
	ldr	r1, [r1, r3]
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)
/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers.
 */
ENTRY(ll_get_coherency_cpumask)
	mrc	15, 0, r3, cr0, cr0, 5	@ read MPIDR
	and	r3, r3, #15		@ keep CPU id (affinity level 0)
	mov	r2, #(1 << 24)		@ fabric per-CPU bits start at bit 24
	lsl	r3, r2, r3		@ r3 = 1 << (24 + cpuid)
ARM_BE8(rev	r3, r3)			@ pre-swap mask for BE8 kernels
	ret	lr
ENDPROC(ll_get_coherency_cpumask)
/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the strex/ldrex instructions while the
 * MMU can be disabled. The Armada XP SoC has an exclusive monitor
 * that tracks transactions to Device and/or SO memory and thanks to
 * that, exclusive transactions are functional even when the MMU is
 * disabled.
 */
ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
	/* Atomically set this CPU's bit in the fabric CFG register */
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed, retry
	bne	1b
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)
ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	/* Atomically set this CPU's bit in the fabric CTL register */
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed, retry
	bne	1b
	dsb				@ ensure the write is visible
	mov	r0, #0			@ return 0 (success)
	ret	lr
ENDPROC(ll_enable_coherency)
ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	/* Atomically clear this CPU's bit in the fabric CTL register */
	ldrex	r2, [r0]
	bic	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed, retry
	bne	1b
	dsb				@ ensure the write is visible
	ret	lr
ENDPROC(ll_disable_coherency)
166 .long coherency_phys_base - .