/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT	32
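
/*
 * All transfers are staged through a single DMA-safe scratch buffer.  It
 * starts at RMI_SPI_DEFAULT_XFER_BUF_SIZE bytes, is grown on demand by
 * rmi_spi_manage_pools(), and is capped at RMI_SPI_XFER_SIZE_LIMIT bytes;
 * rmi_spi_xfer() rejects any request larger than that cap.
 */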
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};
struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
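
/*
 * Grow (or initially allocate) the shared rx/tx scratch buffer and the pool
 * of spi_transfer structures.  The rx and tx halves share one devm
 * allocation; the transfer pool holds one entry per byte when per-byte
 * read/write delays are configured, otherwise one entry per direction.
 */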
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per-byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}
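
/*
 * Build and run one SPI message for the given command.  For the v1 opcodes
 * the two-byte header carries the register address, with the top bit of the
 * first byte set for reads; the v2 write opcode uses a four-byte header that
 * also carries the payload length.  When write/read delays are configured,
 * the payload is split into single-byte transfers so each byte gets its own
 * delay; otherwise a single transfer per direction is queued.
 */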
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len)
		rmi_spi_manage_pools(rmi_spi, len);

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}
/**
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page so that we can reliably
 * address pages every 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	if (!ret)
		rmi_spi->page = page;

	return ret;
}
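
/*
 * The block accessors below cache the currently selected page and rewrite
 * the page select register only when RMI_SPI_PAGE(addr) differs from the
 * cached value.  page_mutex keeps the page select and the transfer that
 * follows it atomic with respect to other callers.
 */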
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};
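
/*
 * The optional "spi-rx-delay-us" and "spi-tx-delay-us" device tree
 * properties populate read_delay_us/write_delay_us, which switch
 * rmi_spi_manage_pools() and rmi_spi_xfer() to per-byte transfers with the
 * requested inter-byte delay.  A purely illustrative node might look like
 * the sketch below (property values are hypothetical, and the full binding
 * defines additional function sub-nodes):
 *
 *	touchscreen@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio1>;
 *		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */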
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
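
/*
 * devm teardown hook: registered from probe via devm_add_action_or_reset()
 * so the transport device is unregistered automatically when the SPI device
 * goes away, before its devm-allocated buffers are freed.
 */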
static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}
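
/*
 * System sleep and runtime PM both defer to the RMI core; the boolean
 * passed to rmi_driver_suspend()/rmi_driver_resume() is true for system
 * sleep and false for runtime PM.
 */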
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif
#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif
static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);
MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);