/*
 * Intel MID GPIO driver
 *
 * Copyright (c) 2008-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/* Supports:
 * Moorestown platform Langwell chip.
 * Medfield platform Penwell chip.
 * Clovertrail platform Cloverview chip.
 * Merrifield platform Tangier chip.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/irqdomain.h>

#define INTEL_MID_IRQ_TYPE_EDGE		(1 << 0)
#define INTEL_MID_IRQ_TYPE_LEVEL	(1 << 1)

/*
 * Langwell chip has 64 pins and thus there are 2 32bit registers to control
 * each feature, while Penwell chip has 96 pins for each block, and needs 3
 * 32bit registers to control them, so we only define the order here instead
 * of a structure, to get a bit offset for a pin (use GPDR as an example):
 *
 * nreg = ngpio / 32;
 * reg = offset / 32;
 * bit = offset % 32;
 * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
 *
 * so the bit of reg_addr is to control pin offset's GPDR feature
 */
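
/*
 * Illustrative arithmetic only (added example, not from the original source):
 * for a Penwell block with ngpio = 96, pin offset 40 and the GPDR feature
 * (GPDR == 1 in the enum below):
 *
 *   nreg     = 96 / 32                  = 3
 *   reg      = 40 / 32                  = 1
 *   bit      = 40 % 32                  = 8
 *   reg_addr = reg_base + 1*3*4 + 1*4   = reg_base + 16
 *
 * so bit 8 of the register at reg_base + 16 controls pin 40's direction.
 */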

enum GPIO_REG {
	GPLR = 0,	/* pin level read-only */
	GPDR,		/* pin direction */
	GPSR,		/* pin set */
	GPCR,		/* pin clear */
	GRER,		/* rising edge detect */
	GFER,		/* falling edge detect */
	GEDR,		/* edge detect result */
	GAFR,		/* alt function */
};

/* intel_mid gpio driver data */
struct intel_mid_gpio_ddata {
	u16 ngpio;		/* number of gpio pins */
	u32 gplr_offset;	/* offset of first GPLR register from base */
	u32 flis_base;		/* base address of FLIS registers */
	u32 flis_len;		/* length of FLIS registers */
	u32 (*get_flis_offset)(int gpio);
	u32 chip_irq_type;	/* chip interrupt type */
};

struct intel_mid_gpio {
	struct gpio_chip	chip;
	void __iomem		*reg_base;
	spinlock_t		lock;
	struct pci_dev		*pdev;
	struct irq_domain	*domain;
};

#define to_intel_gpio_priv(chip) container_of(chip, struct intel_mid_gpio, chip)

static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
			      enum GPIO_REG reg_type)
{
	struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
	unsigned nreg = chip->ngpio / 32;
	u8 reg = offset / 32;

	return priv->reg_base + reg_type * nreg * 4 + reg * 4;
}

static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
				   enum GPIO_REG reg_type)
{
	struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
	unsigned nreg = chip->ngpio / 32;
	u8 reg = offset / 16;

	return priv->reg_base + reg_type * nreg * 4 + reg * 4;
}

static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
	u32 value = readl(gafr);
	int shift = (offset % 16) << 1, af = (value >> shift) & 3;

	/* Switch the pin to GPIO mode if it is muxed to an alt function */
	if (af) {
		value &= ~(3 << shift);
		writel(value, gafr);
	}
	return 0;
}

static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *gplr = gpio_reg(chip, offset, GPLR);

	return readl(gplr) & BIT(offset % 32);
}

static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	void __iomem *gpsr, *gpcr;

	if (value) {
		/* Writing a 1 to GPSR drives the pin high */
		gpsr = gpio_reg(chip, offset, GPSR);
		writel(BIT(offset % 32), gpsr);
	} else {
		/* Writing a 1 to GPCR drives the pin low */
		gpcr = gpio_reg(chip, offset, GPCR);
		writel(BIT(offset % 32), gpcr);
	}
}

static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
	void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
	unsigned long flags;
	u32 value;

	pm_runtime_get(&priv->pdev->dev);

	spin_lock_irqsave(&priv->lock, flags);
	value = readl(gpdr);
	value &= ~BIT(offset % 32);
	writel(value, gpdr);
	spin_unlock_irqrestore(&priv->lock, flags);

	pm_runtime_put(&priv->pdev->dev);

	return 0;
}

static int intel_gpio_direction_output(struct gpio_chip *chip,
				       unsigned offset, int value)
{
	struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
	void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
	unsigned long flags;

	/* Set the level first so the pin does not glitch when switched */
	intel_gpio_set(chip, offset, value);

	pm_runtime_get(&priv->pdev->dev);

	spin_lock_irqsave(&priv->lock, flags);
	value = readl(gpdr);
	value |= BIT(offset % 32);
	writel(value, gpdr);
	spin_unlock_irqrestore(&priv->lock, flags);

	pm_runtime_put(&priv->pdev->dev);

	return 0;
}

static int intel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);

	return irq_create_mapping(priv->domain, offset);
}

static int intel_mid_irq_type(struct irq_data *d, unsigned type)
{
	struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
	u32 gpio = irqd_to_hwirq(d);
	unsigned long flags;
	u32 value;
	void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
	void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);

	if (gpio >= priv->chip.ngpio)
		return -EINVAL;

	pm_runtime_get(&priv->pdev->dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (type & IRQ_TYPE_EDGE_RISING)
		value = readl(grer) | BIT(gpio % 32);
	else
		value = readl(grer) & (~BIT(gpio % 32));
	writel(value, grer);

	if (type & IRQ_TYPE_EDGE_FALLING)
		value = readl(gfer) | BIT(gpio % 32);
	else
		value = readl(gfer) & (~BIT(gpio % 32));
	writel(value, gfer);
	spin_unlock_irqrestore(&priv->lock, flags);

	pm_runtime_put(&priv->pdev->dev);

	return 0;
}

static void intel_mid_irq_unmask(struct irq_data *d)
{
}

static void intel_mid_irq_mask(struct irq_data *d)
{
}

static int intel_mid_irq_reqres(struct irq_data *d)
{
	struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);

	if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d))) {
		dev_err(priv->chip.dev,
			"unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return -EINVAL;
	}
	return 0;
}

static void intel_mid_irq_relres(struct irq_data *d)
{
	struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);

	gpio_unlock_as_irq(&priv->chip, irqd_to_hwirq(d));
}

static struct irq_chip intel_mid_irqchip = {
	.name			= "INTEL_MID-GPIO",
	.irq_mask		= intel_mid_irq_mask,
	.irq_unmask		= intel_mid_irq_unmask,
	.irq_set_type		= intel_mid_irq_type,
	.irq_request_resources	= intel_mid_irq_reqres,
	.irq_release_resources	= intel_mid_irq_relres,
};

static const struct intel_mid_gpio_ddata gpio_lincroft = {
	.ngpio = 64,
};

static const struct intel_mid_gpio_ddata gpio_penwell_aon = {
	.ngpio = 96,
	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};

static const struct intel_mid_gpio_ddata gpio_penwell_core = {
	.ngpio = 96,
	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};

static const struct intel_mid_gpio_ddata gpio_cloverview_aon = {
	.ngpio = 96,
	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE | INTEL_MID_IRQ_TYPE_LEVEL,
};

static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
	.ngpio = 96,
	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};

static const struct intel_mid_gpio_ddata gpio_tangier = {
	.ngpio = 192,
	.gplr_offset = 4,
	.flis_base = 0xff0c0000,
	.flis_len = 0x8000,
	.get_flis_offset = NULL,
	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};

static const struct pci_device_id intel_gpio_ids[] = {
	{
		/* Lincroft */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
		.driver_data = (kernel_ulong_t)&gpio_lincroft,
	},
	{
		/* Penwell AON */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
		.driver_data = (kernel_ulong_t)&gpio_penwell_aon,
	},
	{
		/* Penwell Core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
		.driver_data = (kernel_ulong_t)&gpio_penwell_core,
	},
	{
		/* Cloverview Aon */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
		.driver_data = (kernel_ulong_t)&gpio_cloverview_aon,
	},
	{
		/* Cloverview Core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
		.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
	},
	{
		/* Tangier */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
		.driver_data = (kernel_ulong_t)&gpio_tangier,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
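
/*
 * Overview comment (added for clarity, not in the original source): the
 * controller's PCI interrupt is installed as a chained handler below. On
 * each parent interrupt the handler walks every 32-pin bank, reads its GEDR
 * status register, clears the pending bits before handling them, and
 * dispatches the per-pin virtual IRQs mapped through priv->domain.
 */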

static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct intel_mid_gpio *priv = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, gpio, mask;
	unsigned long pending;
	void __iomem *gedr;

	/* check GPIO controller to check which pin triggered the interrupt */
	for (base = 0; base < priv->chip.ngpio; base += 32) {
		gedr = gpio_reg(&priv->chip, base, GEDR);
		while ((pending = readl(gedr))) {
			gpio = __ffs(pending);
			mask = BIT(gpio);
			/* Clear before handling so we can't lose an edge */
			writel(mask, gedr);
			generic_handle_irq(irq_find_mapping(priv->domain,
							    base + gpio));
		}
	}

	chip->irq_eoi(data);
}

static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
{
	void __iomem *reg;
	unsigned base;

	for (base = 0; base < priv->chip.ngpio; base += 32) {
		/* Clear the rising-edge detect register */
		reg = gpio_reg(&priv->chip, base, GRER);
		writel(0, reg);
		/* Clear the falling-edge detect register */
		reg = gpio_reg(&priv->chip, base, GFER);
		writel(0, reg);
		/* Clear the edge detect status register */
		reg = gpio_reg(&priv->chip, base, GEDR);
		writel(~0, reg);
	}
}

static int intel_gpio_irq_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct intel_mid_gpio *priv = d->host_data;

	irq_set_chip_and_handler(irq, &intel_mid_irqchip, handle_simple_irq);
	irq_set_chip_data(irq, priv);
	irq_set_irq_type(irq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops intel_gpio_irq_ops = {
	.map = intel_gpio_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int intel_gpio_runtime_idle(struct device *dev)
{
	int err = pm_schedule_suspend(dev, 500);

	return err ?: -EBUSY;
}

static const struct dev_pm_ops intel_gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(NULL, NULL, intel_gpio_runtime_idle)
};

static int intel_gpio_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	void __iomem *base;
	struct intel_mid_gpio *priv;
	u32 gpio_base;
	u32 irq_base;
	int retval;
	struct intel_mid_gpio_ddata *ddata =
				(struct intel_mid_gpio_ddata *)id->driver_data;

	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
	if (retval) {
		dev_err(&pdev->dev, "I/O memory mapping error\n");
		return retval;
	}

	base = pcim_iomap_table(pdev)[1];

	/* BAR1 holds the IRQ base and the GPIO number base, in that order */
	irq_base = readl(base);
	gpio_base = readl(sizeof(u32) + base);

	/* release the IO mapping, since we already get the info from bar1 */
	pcim_iounmap_regions(pdev, 1 << 1);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "can't allocate chip data\n");
		return -ENOMEM;
	}

	priv->reg_base = pcim_iomap_table(pdev)[0];
	priv->chip.label = dev_name(&pdev->dev);
	priv->chip.dev = &pdev->dev;
	priv->chip.request = intel_gpio_request;
	priv->chip.direction_input = intel_gpio_direction_input;
	priv->chip.direction_output = intel_gpio_direction_output;
	priv->chip.get = intel_gpio_get;
	priv->chip.set = intel_gpio_set;
	priv->chip.to_irq = intel_gpio_to_irq;
	priv->chip.base = gpio_base;
	priv->chip.ngpio = ddata->ngpio;
	priv->chip.can_sleep = false;
	priv->pdev = pdev;

	spin_lock_init(&priv->lock);

	priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio,
					     irq_base, &intel_gpio_irq_ops,
					     priv);
	if (!priv->domain)
		return -ENOMEM;

	pci_set_drvdata(pdev, priv);

	retval = gpiochip_add(&priv->chip);
	if (retval) {
		dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
		return retval;
	}

	intel_mid_irq_init_hw(priv);

	irq_set_handler_data(pdev->irq, priv);
	irq_set_chained_handler(pdev->irq, intel_mid_irq_handler);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

static struct pci_driver intel_gpio_driver = {
	.name		= "intel_mid_gpio",
	.id_table	= intel_gpio_ids,
	.probe		= intel_gpio_probe,
	.driver		= {
		.pm	= &intel_gpio_pm_ops,
	},
};

static int __init intel_gpio_init(void)
{
	return pci_register_driver(&intel_gpio_driver);
}

device_initcall(intel_gpio_init);