1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright 2014-2016 Google Inc.
6 * Copyright 2014-2016 Linaro Ltd.
9 #include <linux/bitops.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/greybus.h>
14 #include <linux/spi/spi.h>
/*
 * Per-connection SPI controller state.
 *
 * Tracks the spi_message currently being chunked into Greybus operations:
 * [first_xfer, last_xfer] delimit the transfers packed into the operation
 * in flight, and the *_xfer_offset/last_xfer_size fields record how far
 * into a partially-sent transfer we are.
 */
struct gb_spilib {
	struct gb_connection	*connection;	/* Greybus SPI connection */
	struct device		*parent;	/* host device, used for logging */
	struct spi_transfer	*first_xfer;	/* first transfer in current operation */
	struct spi_transfer	*last_xfer;	/* last transfer in current operation */
	struct spilib_ops	*ops;		/* optional host (un)prepare hooks */
	u32			rx_xfer_offset;	/* rx bytes of last_xfer already done */
	u32			tx_xfer_offset;	/* tx bytes of last_xfer already done */
	u32			last_xfer_size;	/* bytes of last_xfer in this operation */
	unsigned int		op_timeout;	/* timeout for the pending operation */
	u16			mode;		/* SPI mode bits from master config */
	u16			flags;		/* SPI flags from master config */
	u32			bits_per_word_mask;	/* supported word sizes */
	u8			num_chipselect;	/* chip selects reported by remote */
	u32			min_speed_hz;	/* remote minimum clock rate */
	u32			max_speed_hz;	/* remote maximum clock rate */
};
/*
 * spi_message::state values driving the transfer state machine.
 * MSG_* track the whole message; OP_* track the Greybus operation that
 * carries the current chunk of it.
 */
#define GB_SPI_STATE_MSG_DONE		((void *)0)
#define GB_SPI_STATE_MSG_IDLE		((void *)1)
#define GB_SPI_STATE_MSG_RUNNING	((void *)2)
#define GB_SPI_STATE_OP_READY		((void *)3)
#define GB_SPI_STATE_OP_DONE		((void *)4)
#define GB_SPI_STATE_MSG_ERROR		((void *)-1)

/* NOTE(review): not referenced in the visible code — presumably extra
 * timeout slack in ms; confirm against callers before removing. */
#define XFER_TIMEOUT_TOLERANCE		200
45 static struct spi_master
*get_master_from_spi(struct gb_spilib
*spi
)
47 return gb_connection_get_data(spi
->connection
);
50 static int tx_header_fit_operation(u32 tx_size
, u32 count
, size_t data_max
)
54 data_max
-= sizeof(struct gb_spi_transfer_request
);
55 headers_size
= (count
+ 1) * sizeof(struct gb_spi_transfer
);
57 return tx_size
+ headers_size
> data_max
? 0 : 1;
/*
 * Compute how many bytes of an rx transfer fit in the response payload.
 *
 * @rx_size: rx bytes already claimed by earlier transfers
 * @tx_xfer_size: in/out tx chunk size of a full-duplex (write_read) transfer
 * @len: remaining bytes of this transfer
 * @data_max: maximum response payload size
 *
 * For full-duplex transfers the tx and rx chunk sizes are forced equal so
 * both directions advance in lockstep; *tx_xfer_size may be reduced.
 */
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
				size_t data_max)
{
	size_t rx_xfer_size;

	/* the response header precedes the rx data */
	data_max -= sizeof(struct gb_spi_transfer_response);

	/* clamp to the payload space left after earlier transfers */
	if (rx_size + len > data_max)
		rx_xfer_size = data_max - rx_size;
	else
		rx_xfer_size = len;

	/* if this is a write_read, for symmetry read the same as write */
	if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
		rx_xfer_size = *tx_xfer_size;
	if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
		*tx_xfer_size = rx_xfer_size;

	return rx_xfer_size;
}
/*
 * Compute how many bytes of a tx transfer fit in the request payload.
 *
 * @tx_size: tx bytes already claimed by earlier transfers
 * @count: transfers already packed into this operation
 * @len: remaining bytes of this transfer
 * @data_max: maximum request payload size
 */
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
				size_t data_max)
{
	size_t headers_size;

	/* the request header precedes the per-transfer descriptors */
	data_max -= sizeof(struct gb_spi_transfer_request);
	/* one descriptor per packed transfer, plus one for this transfer */
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	/* truncate to whatever payload space remains */
	if (tx_size + headers_size + len > data_max)
		return data_max - (tx_size + sizeof(struct gb_spi_transfer));

	return len;
}
/* Reset all per-message transfer tracking state after a message finishes. */
static void clean_xfer_state(struct gb_spilib *spi)
{
	spi->first_xfer = NULL;
	spi->last_xfer = NULL;
	spi->rx_xfer_offset = 0;
	spi->tx_xfer_offset = 0;
	spi->last_xfer_size = 0;
	spi->op_timeout = 0;
}
/*
 * Return true when the chunk scheduled for the message's last packed
 * transfer reaches the end of that transfer's buffer (tx or rx side).
 */
static bool is_last_xfer_done(struct gb_spilib *spi)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
	    (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
		return true;

	return false;
}
/*
 * Advance the transfer window after an operation completed: either step
 * to the next transfer in the message, or bump the offsets so the next
 * operation continues the partially-sent transfer.
 */
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	/* nothing to advance unless the previous operation fully completed */
	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * if we transferred all content of the last transfer, reset values and
	 * check if this was the last transfer in the message
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	/* last transfer was only partially sent: resume it at new offsets */
	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}
/*
 * Return the transfer following @xfer in the message, or NULL if @xfer
 * is the last one.
 */
static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
					  struct spi_message *msg)
{
	if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
				    transfer_list))
		return NULL;

	return list_next_entry(xfer, transfer_list);
}
/* Routines to transfer data */

/*
 * Build a Greybus SPI transfer operation covering as many transfers of
 * @msg as fit in one operation payload, starting at spi->first_xfer.
 *
 * Side effects: updates spi->last_xfer/last_xfer_size, raises
 * spi->op_timeout for slow transfers, advances msg->state and adds the
 * packed byte count to msg->actual_length.
 *
 * Returns the operation, or NULL on a bufferless transfer (msg->state is
 * then GB_SPI_STATE_MSG_ERROR) or on allocation failure.
 */
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */

	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		/* a transfer with neither buffer is a caller bug */
		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			/* stop packing once another descriptor can't fit */
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		/* the last packed transfer may be a partial chunk */
		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			/* a partially-sent transfer is flagged in-progress so
			 * the remote side keeps it open for the next chunk */
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}
/*
 * Scatter the rx payload of a completed operation back into the
 * spi_transfer rx buffers covered by [first_xfer, last_xfer].
 */
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			/* first/last transfers may be partial chunks */
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		/* stop at the last transfer packed into this operation */
		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}
/*
 * spi_master transfer_one_message hook: split @msg into one or more
 * Greybus operations, send each synchronously, and scatter the responses
 * back into the rx buffers. Always finalizes the message.
 */
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	/* loop until the whole message is sent or an operation fails */
	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -ENOMEM;
			continue;
		}

		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}
375 static int gb_spi_prepare_transfer_hardware(struct spi_master
*master
)
377 struct gb_spilib
*spi
= spi_master_get_devdata(master
);
379 return spi
->ops
->prepare_transfer_hardware(spi
->parent
);
/* spi_master hook: forward hardware unprepare to the host driver;
 * always reports success. */
static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	spi->ops->unprepare_transfer_hardware(spi->parent);

	return 0;
}
/* spi_master hook: per-device setup. */
static int gb_spi_setup(struct spi_device *spi)
{
	/* Nothing to do for now */
	return 0;
}
/* spi_master hook: per-device cleanup. */
static void gb_spi_cleanup(struct spi_device *spi)
{
	/* Nothing to do for now */
}
402 /* Routines to get controller information */
405 * Map Greybus spi mode bits/flags/bpw into Linux ones.
406 * All bits are the same for now and so these macros return the same values.
408 #define gb_spi_mode_map(mode) mode
409 #define gb_spi_flags_map(flags) flags
/*
 * Fetch the remote controller configuration over Greybus and cache it
 * in @spi. Returns 0 on success or a negative errno.
 */
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
	struct gb_spi_master_config_response response;
	u16 mode, flags;
	int ret;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	/* translate Greybus mode/flag bits into Linux SPI ones */
	mode = le16_to_cpu(response.mode);
	spi->mode = gb_spi_mode_map(mode);

	flags = le16_to_cpu(response.flags);
	spi->flags = gb_spi_flags_map(flags);

	spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
	spi->num_chipselect = response.num_chipselect;

	spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
	spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

	return 0;
}
/*
 * Query the device configuration for chip-select @cs and register a
 * matching spi_device with the SPI core.
 * Returns 0 on success or a negative errno.
 */
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
	struct spi_master *master = get_master_from_spi(spi);
	struct gb_spi_device_config_request request;
	struct gb_spi_device_config_response response;
	struct spi_board_info spi_board = { {0} };
	struct spi_device *spidev;
	int ret;
	u8 dev_type;

	request.chip_select = cs;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	dev_type = response.device_type;

	/* pick the Linux driver binding based on the reported device type */
	if (dev_type == GB_SPI_SPI_DEV)
		strlcpy(spi_board.modalias, "spidev",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_NOR)
		strlcpy(spi_board.modalias, "spi-nor",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_MODALIAS)
		memcpy(spi_board.modalias, response.name,
		       sizeof(spi_board.modalias));
	else
		return -EINVAL;

	spi_board.mode = le16_to_cpu(response.mode);
	spi_board.bus_num = master->bus_num;
	spi_board.chip_select = cs;
	spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);

	spidev = spi_new_device(master, &spi_board);
	if (!spidev)
		return -EINVAL;

	return 0;
}
/*
 * gb_spilib_master_init() - create and register a SPI master for a
 * Greybus SPI connection
 * @connection: the Greybus SPI connection
 * @dev: parent device for the new master (also used for logging)
 * @ops: optional host hooks for (un)prepare_transfer_hardware
 *
 * Returns 0 on success or a negative errno.
 */
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* get master configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	/* only install the hardware hooks the host actually provides */
	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* now, fetch the devices configuration */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	/* drop the reference taken by spi_alloc_master() */
	spi_master_put(master);

	return ret;

exit_spi_unregister:
	/* once registered, unregistering also releases the reference */
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
/*
 * gb_spilib_master_exit() - tear down the SPI master bound to @connection
 *
 * Unregisters the spi_master that was stored as the connection's user
 * data during initialization.
 */
void gb_spilib_master_exit(struct gb_connection *connection)
{
	spi_unregister_master(gb_connection_get_data(connection));
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
566 MODULE_LICENSE("GPL v2");