/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include <linux/irq.h>

#include "rmi_driver.h"
#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT	32
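/*
 * Example of RMI_SPI_PAGE(): RMI_SPI_PAGE(0x8123) = (0x81 & 0x80) = 0x80,
 * while RMI_SPI_PAGE(0x0123) = 0x00. The transport caches the value last
 * written to RMI_PAGE_SELECT_REGISTER and issues a new page select write
 * only when a transfer targets the other page; see rmi_set_page() below.
 */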
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};
struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	int irq;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
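/*
 * rx_buf and tx_buf share one DMA-safe allocation (rx in the first half,
 * tx in the second); rx_xfers/tx_xfers are pools of spi_transfer structs,
 * one per byte when an inter-byte delay is configured, otherwise one each.
 * Both are sized by rmi_spi_manage_pools() below.
 */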
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}
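/*
 * Design note: spi_transfer.delay_usecs is applied by the SPI core after
 * each transfer, so an inter-byte delay can only be realized by queueing
 * one single-byte spi_transfer per byte; that is what the pools above are
 * for. Devices without a configured delay get by with one transfer in each
 * direction.
 */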
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	/* v1 commands carry a 2-byte header, v2 commands a 4-byte header. */
	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len)
		rmi_spi_manage_pools(rmi_spi, len);

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		/* Bit 7 of the first header byte marks a read. */
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		/* Use per byte transfers + delays */
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			/* Use per byte transfers + delays */
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}
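/*
 * Wire format recap for the function above: v1 commands use a two-byte
 * header carrying the register address, with bit 7 of the first byte set
 * for reads; v2 commands use a four-byte header that includes an explicit
 * length byte. All payloads are bounced through the DMA-safe tx_buf/rx_buf
 * pair sized by rmi_spi_manage_pools().
 */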
/**
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page address at 0xff of every page so we can reliably address
 * every 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	if (!ret)
		rmi_spi->page = page;

	return ret;
}
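/*
 * Example: reading register 0x8004 while rmi_spi->page is 0x00 first makes
 * the block helpers below call rmi_set_page(rmi_spi, 0x80), i.e. a one-byte
 * write of 0x80 to RMI_PAGE_SELECT_REGISTER, before the data transfer
 * itself is issued.
 */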
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};
static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
{
	struct rmi_spi_xport *rmi_spi = dev_id;
	struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
	int ret;

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	return IRQ_HANDLED;
}
static int rmi_spi_init_irq(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
			rmi_spi_irq, irq_flags | IRQF_ONESHOT,
			dev_name(&spi->dev), rmi_spi);
	if (ret)
		dev_warn(&spi->dev, "Failed to register interrupt %d\n",
			rmi_spi->irq);

	return ret;
}
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
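/*
 * Sketch of a devicetree node consumed by rmi_spi_of_probe() above. The
 * delay properties are the ones this driver reads; the remaining
 * properties and all values are illustrative assumptions, not taken from
 * this file:
 *
 *	touchscreen@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */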
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int retval;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		retval = rmi_spi_of_probe(spi, pdata);
		if (retval)
			return retval;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	rmi_spi->spi = spi;
	rmi_spi->irq = spi->irq;

	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (retval)
		return retval;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	retval = rmi_set_page(rmi_spi, 0);
	if (retval) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return retval;
	}

	retval = rmi_register_transport_device(&rmi_spi->xport);
	if (retval) {
		dev_err(&spi->dev, "failed to register transport.\n");
		return retval;
	}

	retval = rmi_spi_init_irq(spi);
	if (retval < 0)
		return retval;

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;
}
static int rmi_spi_remove(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);

	rmi_unregister_transport_device(&rmi_spi->xport);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = enable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to enable irq for wake: %d\n",
				ret);
	}

	return ret;
}
static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = disable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to disable irq for wake: %d\n",
				ret);
	}

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);

	return 0;
}
static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};
static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
	.remove		= rmi_spi_remove,
};

module_spi_driver(rmi_spi_driver);
585 MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
586 MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
587 MODULE_DESCRIPTION("RMI SPI driver");
588 MODULE_LICENSE("GPL");
589 MODULE_VERSION(RMI_DRIVER_VERSION
);