/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>

#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT	32
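
/*
 * Opcodes and command descriptor used by rmi_spi_xfer(). V1 read/write
 * commands are prefixed with a 2-byte address header; the V2 write opcode
 * uses a 4-byte header that also encodes the transfer length.
 */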
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
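
/*
 * Grow the DMA-safe rx/tx byte buffers and the spi_transfer pool so that a
 * transfer of up to 'len' bytes fits. When the platform data requests
 * per-byte read or write delays, one spi_transfer per byte is preallocated
 * so every byte of the transfer can carry its own delay.
 */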
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kcalloc(&spi->dev, buf_size, 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per-byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kcalloc(&spi->dev,
		rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
		sizeof(struct spi_transfer),
		GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}
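
/*
 * Build and run a single SPI message for one RMI command: stage the
 * protocol header (2-byte V1 header or 4-byte V2 header) plus any payload
 * in tx_buf, queue the transfers, run spi_sync() and copy the response
 * back into the caller's buffer.
 */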
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);
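
	/*
	 * With a per-byte write delay configured, queue one single-byte
	 * spi_transfer per outgoing byte so each byte can carry its own
	 * inter-byte delay; otherwise send header and payload as a single
	 * transfer.
	 */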
	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);
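
	/*
	 * Same approach on the receive side: one single-byte transfer per
	 * byte when a read delay is configured, otherwise one transfer for
	 * the whole response.
	 */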
	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/**
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page, which lets us address the
 * full register space reliably in 256-register pages.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
	if (!ret)
		rmi_spi->page = page;

	return ret;
}
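
/*
 * Block write for the RMI core. If RMI_SPI_PAGE(addr) differs from the
 * cached page, the page select register is rewritten first, all under
 * page_mutex so the page cannot change mid-transaction.
 */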
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
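
/* Block read for the RMI core; same page handling as rmi_spi_write_block(). */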
static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};
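
/*
 * Optional devicetree properties "spi-rx-delay-us" and "spi-tx-delay-us"
 * request a per-byte inter-byte delay (in microseconds) for reads and
 * writes; when set, transfers are split into single-byte spi_transfers
 * (see rmi_spi_manage_pools()).
 */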
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}
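
/*
 * Probe flow: reject half-duplex-only controllers, size the transfer
 * buffers, select page 0 once to make sure the device responds, then
 * register the RMI transport. The devm action added at the end unregisters
 * the transport automatically when the SPI device goes away.
 */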
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}
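
/*
 * System sleep and runtime PM both delegate to the RMI core; the boolean
 * argument to rmi_driver_suspend()/rmi_driver_resume() distinguishes a
 * system-wide transition (true) from a runtime one (false).
 */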
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");