2 * General Purpose functions for the global management of the
3 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
6 * In addition to the individual control of the communication
7 * channels, there are a few functions that globally affect the
8 * communication processor.
10 * Buffer descriptors must be allocated from the dual ported memory
11 * space. The allocator for that is here. When the communication
12 * process is reset, we reclaim the memory available. There is
13 * currently no deallocator for this memory.
14 * The amount of space available is platform dependent. On the
15 * MBX, the EPPC software loads additional microcode into the
16 * communication processor, and uses some of the DP ram for this
 * purpose. Currently, the first 512 bytes and the last 256 bytes of
18 * memory are used. Right now I am conservative and only use the
19 * memory that can never be used for microcode. If there are
20 * applications that require more DP ram, we can expand the boundaries
21 * but then we have to be careful of any downloaded microcode.
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25 #include <linux/kernel.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/param.h>
28 #include <linux/string.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/module.h>
33 #include <linux/spinlock.h>
34 #include <linux/slab.h>
36 #include <asm/pgtable.h>
37 #include <asm/8xx_immap.h>
40 #include <asm/tlbflush.h>
41 #include <asm/rheap.h>
45 #include <asm/fs_pd.h>
47 #ifdef CONFIG_8xx_GPIO
48 #include <linux/of_gpio.h>
51 #define CPM_MAP_SIZE (0x4000)
53 cpm8xx_t __iomem
*cpmp
; /* Pointer to comm processor space */
54 immap_t __iomem
*mpc8xx_immr
;
55 static cpic8xx_t __iomem
*cpic_reg
;
57 static struct irq_domain
*cpm_pic_host
;
59 static void cpm_mask_irq(struct irq_data
*d
)
61 unsigned int cpm_vec
= (unsigned int)irqd_to_hwirq(d
);
63 clrbits32(&cpic_reg
->cpic_cimr
, (1 << cpm_vec
));
66 static void cpm_unmask_irq(struct irq_data
*d
)
68 unsigned int cpm_vec
= (unsigned int)irqd_to_hwirq(d
);
70 setbits32(&cpic_reg
->cpic_cimr
, (1 << cpm_vec
));
73 static void cpm_end_irq(struct irq_data
*d
)
75 unsigned int cpm_vec
= (unsigned int)irqd_to_hwirq(d
);
77 out_be32(&cpic_reg
->cpic_cisr
, (1 << cpm_vec
));
80 static struct irq_chip cpm_pic
= {
82 .irq_mask
= cpm_mask_irq
,
83 .irq_unmask
= cpm_unmask_irq
,
84 .irq_eoi
= cpm_end_irq
,
91 /* Get the vector by setting the ACK bit and then reading
94 out_be16(&cpic_reg
->cpic_civr
, 1);
95 cpm_vec
= in_be16(&cpic_reg
->cpic_civr
);
98 return irq_linear_revmap(cpm_pic_host
, cpm_vec
);
101 static int cpm_pic_host_map(struct irq_domain
*h
, unsigned int virq
,
104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq
, hw
);
106 irq_set_status_flags(virq
, IRQ_LEVEL
);
107 irq_set_chip_and_handler(virq
, &cpm_pic
, handle_fasteoi_irq
);
111 /* The CPM can generate the error interrupt when there is a race condition
112 * between generating and masking interrupts. All we have to do is ACK it
113 * and return. This is a no-op function so we don't need any special
114 * tests in the interrupt handler.
116 static irqreturn_t
cpm_error_interrupt(int irq
, void *dev
)
121 static struct irqaction cpm_error_irqaction
= {
122 .handler
= cpm_error_interrupt
,
123 .flags
= IRQF_NO_THREAD
,
127 static const struct irq_domain_ops cpm_pic_host_ops
= {
128 .map
= cpm_pic_host_map
,
131 unsigned int cpm_pic_init(void)
133 struct device_node
*np
= NULL
;
135 unsigned int sirq
= NO_IRQ
, hwirq
, eirq
;
138 pr_debug("cpm_pic_init\n");
140 np
= of_find_compatible_node(NULL
, NULL
, "fsl,cpm1-pic");
142 np
= of_find_compatible_node(NULL
, "cpm-pic", "CPM");
144 printk(KERN_ERR
"CPM PIC init: can not find cpm-pic node\n");
148 ret
= of_address_to_resource(np
, 0, &res
);
152 cpic_reg
= ioremap(res
.start
, resource_size(&res
));
153 if (cpic_reg
== NULL
)
156 sirq
= irq_of_parse_and_map(np
, 0);
160 /* Initialize the CPM interrupt controller. */
161 hwirq
= (unsigned int)virq_to_hw(sirq
);
162 out_be32(&cpic_reg
->cpic_cicr
,
163 (CICR_SCD_SCC4
| CICR_SCC_SCC3
| CICR_SCB_SCC2
| CICR_SCA_SCC1
) |
164 ((hwirq
/2) << 13) | CICR_HP_MASK
);
166 out_be32(&cpic_reg
->cpic_cimr
, 0);
168 cpm_pic_host
= irq_domain_add_linear(np
, 64, &cpm_pic_host_ops
, NULL
);
169 if (cpm_pic_host
== NULL
) {
170 printk(KERN_ERR
"CPM2 PIC: failed to allocate irq host!\n");
175 /* Install our own error handler. */
176 np
= of_find_compatible_node(NULL
, NULL
, "fsl,cpm1");
178 np
= of_find_node_by_type(NULL
, "cpm");
180 printk(KERN_ERR
"CPM PIC init: can not find cpm node\n");
184 eirq
= irq_of_parse_and_map(np
, 0);
188 if (setup_irq(eirq
, &cpm_error_irqaction
))
189 printk(KERN_ERR
"Could not allocate CPM error IRQ!");
191 setbits32(&cpic_reg
->cpic_cicr
, CICR_IEN
);
198 void __init
cpm_reset(void)
200 sysconf8xx_t __iomem
*siu_conf
;
202 mpc8xx_immr
= ioremap(get_immrbase(), 0x4000);
204 printk(KERN_CRIT
"Could not map IMMR\n");
208 cpmp
= &mpc8xx_immr
->im_cpm
;
210 #ifndef CONFIG_PPC_EARLY_DEBUG_CPM
213 out_be16(&cpmp
->cp_cpcr
, CPM_CR_RST
| CPM_CR_FLG
);
217 while (in_be16(&cpmp
->cp_cpcr
) & CPM_CR_FLG
);
220 #ifdef CONFIG_UCODE_PATCH
221 cpm_load_patch(cpmp
);
224 /* Set SDMA Bus Request priority 5.
225 * On 860T, this also enables FEC priority 6. I am not sure
226 * this is what we really want for some applications, but the
227 * manual recommends it.
228 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
230 siu_conf
= immr_map(im_siu_conf
);
231 if ((mfspr(SPRN_IMMR
) & 0xffff) == 0x0900) /* MPC885 */
232 out_be32(&siu_conf
->sc_sdcr
, 0x40);
234 out_be32(&siu_conf
->sc_sdcr
, 1);
235 immr_unmap(siu_conf
);
240 static DEFINE_SPINLOCK(cmd_lock
);
242 #define MAX_CR_CMD_LOOPS 10000
244 int cpm_command(u32 command
, u8 opcode
)
249 if (command
& 0xffffff0f)
252 spin_lock_irqsave(&cmd_lock
, flags
);
255 out_be16(&cpmp
->cp_cpcr
, command
| CPM_CR_FLG
| (opcode
<< 8));
256 for (i
= 0; i
< MAX_CR_CMD_LOOPS
; i
++)
257 if ((in_be16(&cpmp
->cp_cpcr
) & CPM_CR_FLG
) == 0)
260 printk(KERN_ERR
"%s(): Not able to issue CPM command\n", __func__
);
263 spin_unlock_irqrestore(&cmd_lock
, flags
);
266 EXPORT_SYMBOL(cpm_command
);
268 /* Set a baud rate generator. This needs lots of work. There are
269 * four BRGs, any of which can be wired to any channel.
270 * The internal baud rate clock is the system clock divided by 16.
271 * This assumes the baudrate is 16x oversampled by the uart.
273 #define BRG_INT_CLK (get_brgfreq())
274 #define BRG_UART_CLK (BRG_INT_CLK/16)
275 #define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16)
278 cpm_setbrg(uint brg
, uint rate
)
282 /* This is good enough to get SMCs running.....
284 bp
= &cpmp
->cp_brgc1
;
286 /* The BRG has a 12-bit counter. For really slow baud rates (or
287 * really fast processors), we may have to further divide by 16.
289 if (((BRG_UART_CLK
/ rate
) - 1) < 4096)
290 out_be32(bp
, (((BRG_UART_CLK
/ rate
) - 1) << 1) | CPM_BRG_EN
);
292 out_be32(bp
, (((BRG_UART_CLK_DIV16
/ rate
) - 1) << 1) |
293 CPM_BRG_EN
| CPM_BRG_DIV16
);
296 struct cpm_ioport16
{
297 __be16 dir
, par
, odr_sor
, dat
, intr
;
301 struct cpm_ioport32b
{
302 __be32 dir
, par
, odr
, dat
;
305 struct cpm_ioport32e
{
306 __be32 dir
, par
, sor
, odr
, dat
;
309 static void cpm1_set_pin32(int port
, int pin
, int flags
)
311 struct cpm_ioport32e __iomem
*iop
;
312 pin
= 1 << (31 - pin
);
314 if (port
== CPM_PORTB
)
315 iop
= (struct cpm_ioport32e __iomem
*)
316 &mpc8xx_immr
->im_cpm
.cp_pbdir
;
318 iop
= (struct cpm_ioport32e __iomem
*)
319 &mpc8xx_immr
->im_cpm
.cp_pedir
;
321 if (flags
& CPM_PIN_OUTPUT
)
322 setbits32(&iop
->dir
, pin
);
324 clrbits32(&iop
->dir
, pin
);
326 if (!(flags
& CPM_PIN_GPIO
))
327 setbits32(&iop
->par
, pin
);
329 clrbits32(&iop
->par
, pin
);
331 if (port
== CPM_PORTB
) {
332 if (flags
& CPM_PIN_OPENDRAIN
)
333 setbits16(&mpc8xx_immr
->im_cpm
.cp_pbodr
, pin
);
335 clrbits16(&mpc8xx_immr
->im_cpm
.cp_pbodr
, pin
);
338 if (port
== CPM_PORTE
) {
339 if (flags
& CPM_PIN_SECONDARY
)
340 setbits32(&iop
->sor
, pin
);
342 clrbits32(&iop
->sor
, pin
);
344 if (flags
& CPM_PIN_OPENDRAIN
)
345 setbits32(&mpc8xx_immr
->im_cpm
.cp_peodr
, pin
);
347 clrbits32(&mpc8xx_immr
->im_cpm
.cp_peodr
, pin
);
351 static void cpm1_set_pin16(int port
, int pin
, int flags
)
353 struct cpm_ioport16 __iomem
*iop
=
354 (struct cpm_ioport16 __iomem
*)&mpc8xx_immr
->im_ioport
;
356 pin
= 1 << (15 - pin
);
361 if (flags
& CPM_PIN_OUTPUT
)
362 setbits16(&iop
->dir
, pin
);
364 clrbits16(&iop
->dir
, pin
);
366 if (!(flags
& CPM_PIN_GPIO
))
367 setbits16(&iop
->par
, pin
);
369 clrbits16(&iop
->par
, pin
);
371 if (port
== CPM_PORTA
) {
372 if (flags
& CPM_PIN_OPENDRAIN
)
373 setbits16(&iop
->odr_sor
, pin
);
375 clrbits16(&iop
->odr_sor
, pin
);
377 if (port
== CPM_PORTC
) {
378 if (flags
& CPM_PIN_SECONDARY
)
379 setbits16(&iop
->odr_sor
, pin
);
381 clrbits16(&iop
->odr_sor
, pin
);
385 void cpm1_set_pin(enum cpm_port port
, int pin
, int flags
)
387 if (port
== CPM_PORTB
|| port
== CPM_PORTE
)
388 cpm1_set_pin32(port
, pin
, flags
);
390 cpm1_set_pin16(port
, pin
, flags
);
393 int cpm1_clk_setup(enum cpm_clk_target target
, int clock
, int mode
)
401 {CPM_CLK_SCC1
, CPM_BRG1
, 0},
402 {CPM_CLK_SCC1
, CPM_BRG2
, 1},
403 {CPM_CLK_SCC1
, CPM_BRG3
, 2},
404 {CPM_CLK_SCC1
, CPM_BRG4
, 3},
405 {CPM_CLK_SCC1
, CPM_CLK1
, 4},
406 {CPM_CLK_SCC1
, CPM_CLK2
, 5},
407 {CPM_CLK_SCC1
, CPM_CLK3
, 6},
408 {CPM_CLK_SCC1
, CPM_CLK4
, 7},
410 {CPM_CLK_SCC2
, CPM_BRG1
, 0},
411 {CPM_CLK_SCC2
, CPM_BRG2
, 1},
412 {CPM_CLK_SCC2
, CPM_BRG3
, 2},
413 {CPM_CLK_SCC2
, CPM_BRG4
, 3},
414 {CPM_CLK_SCC2
, CPM_CLK1
, 4},
415 {CPM_CLK_SCC2
, CPM_CLK2
, 5},
416 {CPM_CLK_SCC2
, CPM_CLK3
, 6},
417 {CPM_CLK_SCC2
, CPM_CLK4
, 7},
419 {CPM_CLK_SCC3
, CPM_BRG1
, 0},
420 {CPM_CLK_SCC3
, CPM_BRG2
, 1},
421 {CPM_CLK_SCC3
, CPM_BRG3
, 2},
422 {CPM_CLK_SCC3
, CPM_BRG4
, 3},
423 {CPM_CLK_SCC3
, CPM_CLK5
, 4},
424 {CPM_CLK_SCC3
, CPM_CLK6
, 5},
425 {CPM_CLK_SCC3
, CPM_CLK7
, 6},
426 {CPM_CLK_SCC3
, CPM_CLK8
, 7},
428 {CPM_CLK_SCC4
, CPM_BRG1
, 0},
429 {CPM_CLK_SCC4
, CPM_BRG2
, 1},
430 {CPM_CLK_SCC4
, CPM_BRG3
, 2},
431 {CPM_CLK_SCC4
, CPM_BRG4
, 3},
432 {CPM_CLK_SCC4
, CPM_CLK5
, 4},
433 {CPM_CLK_SCC4
, CPM_CLK6
, 5},
434 {CPM_CLK_SCC4
, CPM_CLK7
, 6},
435 {CPM_CLK_SCC4
, CPM_CLK8
, 7},
437 {CPM_CLK_SMC1
, CPM_BRG1
, 0},
438 {CPM_CLK_SMC1
, CPM_BRG2
, 1},
439 {CPM_CLK_SMC1
, CPM_BRG3
, 2},
440 {CPM_CLK_SMC1
, CPM_BRG4
, 3},
441 {CPM_CLK_SMC1
, CPM_CLK1
, 4},
442 {CPM_CLK_SMC1
, CPM_CLK2
, 5},
443 {CPM_CLK_SMC1
, CPM_CLK3
, 6},
444 {CPM_CLK_SMC1
, CPM_CLK4
, 7},
446 {CPM_CLK_SMC2
, CPM_BRG1
, 0},
447 {CPM_CLK_SMC2
, CPM_BRG2
, 1},
448 {CPM_CLK_SMC2
, CPM_BRG3
, 2},
449 {CPM_CLK_SMC2
, CPM_BRG4
, 3},
450 {CPM_CLK_SMC2
, CPM_CLK5
, 4},
451 {CPM_CLK_SMC2
, CPM_CLK6
, 5},
452 {CPM_CLK_SMC2
, CPM_CLK7
, 6},
453 {CPM_CLK_SMC2
, CPM_CLK8
, 7},
458 reg
= &mpc8xx_immr
->im_cpm
.cp_sicr
;
463 reg
= &mpc8xx_immr
->im_cpm
.cp_sicr
;
468 reg
= &mpc8xx_immr
->im_cpm
.cp_sicr
;
473 reg
= &mpc8xx_immr
->im_cpm
.cp_sicr
;
478 reg
= &mpc8xx_immr
->im_cpm
.cp_simode
;
483 reg
= &mpc8xx_immr
->im_cpm
.cp_simode
;
488 printk(KERN_ERR
"cpm1_clock_setup: invalid clock target\n");
492 for (i
= 0; i
< ARRAY_SIZE(clk_map
); i
++) {
493 if (clk_map
[i
][0] == target
&& clk_map
[i
][1] == clock
) {
494 bits
= clk_map
[i
][2];
499 if (i
== ARRAY_SIZE(clk_map
)) {
500 printk(KERN_ERR
"cpm1_clock_setup: invalid clock combination\n");
507 if (reg
== &mpc8xx_immr
->im_cpm
.cp_sicr
) {
508 if (mode
== CPM_CLK_RTX
) {
511 } else if (mode
== CPM_CLK_RX
) {
517 out_be32(reg
, (in_be32(reg
) & ~mask
) | bits
);
523 * GPIO LIB API implementation
525 #ifdef CONFIG_8xx_GPIO
527 struct cpm1_gpio16_chip
{
528 struct of_mm_gpio_chip mm_gc
;
531 /* shadowed data register to clear/set bits safely */
535 static inline struct cpm1_gpio16_chip
*
536 to_cpm1_gpio16_chip(struct of_mm_gpio_chip
*mm_gc
)
538 return container_of(mm_gc
, struct cpm1_gpio16_chip
, mm_gc
);
541 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip
*mm_gc
)
543 struct cpm1_gpio16_chip
*cpm1_gc
= to_cpm1_gpio16_chip(mm_gc
);
544 struct cpm_ioport16 __iomem
*iop
= mm_gc
->regs
;
546 cpm1_gc
->cpdata
= in_be16(&iop
->dat
);
549 static int cpm1_gpio16_get(struct gpio_chip
*gc
, unsigned int gpio
)
551 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
552 struct cpm_ioport16 __iomem
*iop
= mm_gc
->regs
;
555 pin_mask
= 1 << (15 - gpio
);
557 return !!(in_be16(&iop
->dat
) & pin_mask
);
560 static void __cpm1_gpio16_set(struct of_mm_gpio_chip
*mm_gc
, u16 pin_mask
,
563 struct cpm1_gpio16_chip
*cpm1_gc
= to_cpm1_gpio16_chip(mm_gc
);
564 struct cpm_ioport16 __iomem
*iop
= mm_gc
->regs
;
567 cpm1_gc
->cpdata
|= pin_mask
;
569 cpm1_gc
->cpdata
&= ~pin_mask
;
571 out_be16(&iop
->dat
, cpm1_gc
->cpdata
);
574 static void cpm1_gpio16_set(struct gpio_chip
*gc
, unsigned int gpio
, int value
)
576 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
577 struct cpm1_gpio16_chip
*cpm1_gc
= to_cpm1_gpio16_chip(mm_gc
);
579 u16 pin_mask
= 1 << (15 - gpio
);
581 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
583 __cpm1_gpio16_set(mm_gc
, pin_mask
, value
);
585 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
588 static int cpm1_gpio16_dir_out(struct gpio_chip
*gc
, unsigned int gpio
, int val
)
590 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
591 struct cpm1_gpio16_chip
*cpm1_gc
= to_cpm1_gpio16_chip(mm_gc
);
592 struct cpm_ioport16 __iomem
*iop
= mm_gc
->regs
;
594 u16 pin_mask
= 1 << (15 - gpio
);
596 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
598 setbits16(&iop
->dir
, pin_mask
);
599 __cpm1_gpio16_set(mm_gc
, pin_mask
, val
);
601 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
606 static int cpm1_gpio16_dir_in(struct gpio_chip
*gc
, unsigned int gpio
)
608 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
609 struct cpm1_gpio16_chip
*cpm1_gc
= to_cpm1_gpio16_chip(mm_gc
);
610 struct cpm_ioport16 __iomem
*iop
= mm_gc
->regs
;
612 u16 pin_mask
= 1 << (15 - gpio
);
614 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
616 clrbits16(&iop
->dir
, pin_mask
);
618 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
623 int cpm1_gpiochip_add16(struct device_node
*np
)
625 struct cpm1_gpio16_chip
*cpm1_gc
;
626 struct of_mm_gpio_chip
*mm_gc
;
627 struct gpio_chip
*gc
;
629 cpm1_gc
= kzalloc(sizeof(*cpm1_gc
), GFP_KERNEL
);
633 spin_lock_init(&cpm1_gc
->lock
);
635 mm_gc
= &cpm1_gc
->mm_gc
;
638 mm_gc
->save_regs
= cpm1_gpio16_save_regs
;
640 gc
->direction_input
= cpm1_gpio16_dir_in
;
641 gc
->direction_output
= cpm1_gpio16_dir_out
;
642 gc
->get
= cpm1_gpio16_get
;
643 gc
->set
= cpm1_gpio16_set
;
645 return of_mm_gpiochip_add(np
, mm_gc
);
648 struct cpm1_gpio32_chip
{
649 struct of_mm_gpio_chip mm_gc
;
652 /* shadowed data register to clear/set bits safely */
656 static inline struct cpm1_gpio32_chip
*
657 to_cpm1_gpio32_chip(struct of_mm_gpio_chip
*mm_gc
)
659 return container_of(mm_gc
, struct cpm1_gpio32_chip
, mm_gc
);
662 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip
*mm_gc
)
664 struct cpm1_gpio32_chip
*cpm1_gc
= to_cpm1_gpio32_chip(mm_gc
);
665 struct cpm_ioport32b __iomem
*iop
= mm_gc
->regs
;
667 cpm1_gc
->cpdata
= in_be32(&iop
->dat
);
670 static int cpm1_gpio32_get(struct gpio_chip
*gc
, unsigned int gpio
)
672 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
673 struct cpm_ioport32b __iomem
*iop
= mm_gc
->regs
;
676 pin_mask
= 1 << (31 - gpio
);
678 return !!(in_be32(&iop
->dat
) & pin_mask
);
681 static void __cpm1_gpio32_set(struct of_mm_gpio_chip
*mm_gc
, u32 pin_mask
,
684 struct cpm1_gpio32_chip
*cpm1_gc
= to_cpm1_gpio32_chip(mm_gc
);
685 struct cpm_ioport32b __iomem
*iop
= mm_gc
->regs
;
688 cpm1_gc
->cpdata
|= pin_mask
;
690 cpm1_gc
->cpdata
&= ~pin_mask
;
692 out_be32(&iop
->dat
, cpm1_gc
->cpdata
);
695 static void cpm1_gpio32_set(struct gpio_chip
*gc
, unsigned int gpio
, int value
)
697 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
698 struct cpm1_gpio32_chip
*cpm1_gc
= to_cpm1_gpio32_chip(mm_gc
);
700 u32 pin_mask
= 1 << (31 - gpio
);
702 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
704 __cpm1_gpio32_set(mm_gc
, pin_mask
, value
);
706 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
709 static int cpm1_gpio32_dir_out(struct gpio_chip
*gc
, unsigned int gpio
, int val
)
711 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
712 struct cpm1_gpio32_chip
*cpm1_gc
= to_cpm1_gpio32_chip(mm_gc
);
713 struct cpm_ioport32b __iomem
*iop
= mm_gc
->regs
;
715 u32 pin_mask
= 1 << (31 - gpio
);
717 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
719 setbits32(&iop
->dir
, pin_mask
);
720 __cpm1_gpio32_set(mm_gc
, pin_mask
, val
);
722 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
727 static int cpm1_gpio32_dir_in(struct gpio_chip
*gc
, unsigned int gpio
)
729 struct of_mm_gpio_chip
*mm_gc
= to_of_mm_gpio_chip(gc
);
730 struct cpm1_gpio32_chip
*cpm1_gc
= to_cpm1_gpio32_chip(mm_gc
);
731 struct cpm_ioport32b __iomem
*iop
= mm_gc
->regs
;
733 u32 pin_mask
= 1 << (31 - gpio
);
735 spin_lock_irqsave(&cpm1_gc
->lock
, flags
);
737 clrbits32(&iop
->dir
, pin_mask
);
739 spin_unlock_irqrestore(&cpm1_gc
->lock
, flags
);
744 int cpm1_gpiochip_add32(struct device_node
*np
)
746 struct cpm1_gpio32_chip
*cpm1_gc
;
747 struct of_mm_gpio_chip
*mm_gc
;
748 struct gpio_chip
*gc
;
750 cpm1_gc
= kzalloc(sizeof(*cpm1_gc
), GFP_KERNEL
);
754 spin_lock_init(&cpm1_gc
->lock
);
756 mm_gc
= &cpm1_gc
->mm_gc
;
759 mm_gc
->save_regs
= cpm1_gpio32_save_regs
;
761 gc
->direction_input
= cpm1_gpio32_dir_in
;
762 gc
->direction_output
= cpm1_gpio32_dir_out
;
763 gc
->get
= cpm1_gpio32_get
;
764 gc
->set
= cpm1_gpio32_set
;
766 return of_mm_gpiochip_add(np
, mm_gc
);
769 static int cpm_init_par_io(void)
771 struct device_node
*np
;
773 for_each_compatible_node(np
, NULL
, "fsl,cpm1-pario-bank-a")
774 cpm1_gpiochip_add16(np
);
776 for_each_compatible_node(np
, NULL
, "fsl,cpm1-pario-bank-b")
777 cpm1_gpiochip_add32(np
);
779 for_each_compatible_node(np
, NULL
, "fsl,cpm1-pario-bank-c")
780 cpm1_gpiochip_add16(np
);
782 for_each_compatible_node(np
, NULL
, "fsl,cpm1-pario-bank-d")
783 cpm1_gpiochip_add16(np
);
785 /* Port E uses CPM2 layout */
786 for_each_compatible_node(np
, NULL
, "fsl,cpm1-pario-bank-e")
787 cpm2_gpiochip_add32(np
);
790 arch_initcall(cpm_init_par_io
);
792 #endif /* CONFIG_8xx_GPIO */