/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}
static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
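
/*
 * Example (illustrative only, not part of this file): a minimal protocol
 * driver registration built on the call above.  The "foo" names are
 * hypothetical; module_spi_driver() expands to the module_init/module_exit
 * boilerplate that calls spi_register_driver()/spi_unregister_driver().
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */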
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes in files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
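
/*
 * Example (illustrative only): the two-step alternative to spi_new_device()
 * for callers that want to fill in the spi_device themselves before it
 * becomes visible on the bus.  All values and names below are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */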
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
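
/*
 * Example (illustrative only): a board file might declare its hard-wired
 * SPI devices like this during early init; names and numbers below are
 * hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1200000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */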
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
		    spi->master->set_cs)
			spi->master->set_cs(spi, !enable);
	} else if (spi->master->set_cs) {
		spi->master->set_cs(spi, !enable);
	}
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}
static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
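
/*
 * Example (illustrative only): a controller driver whose transfer_one()
 * returned a positive value to signal an asynchronous transfer would
 * typically call spi_finalize_current_transfer() from its completion
 * interrupt.  The "foo" handler below is hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		// hypothetical: acknowledge the controller interrupt here
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */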
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mutex_lock(&master->io_mutex);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&master->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}
static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	kthread_init_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	kthread_queue_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret)
		dev_warn(&master->dev,
			 "could not stop message queue\n");
	return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		kthread_queue_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
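
/*
 * Example (illustrative only): a device tree child node that this parser
 * would accept.  The compatible string and property values are hypothetical
 * for the board at hand.
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <20000000>;
 *	};
 */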
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	rc = of_spi_parse_dt(master, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi)) {
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_register_spi_device(struct spi_master *master,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(master, adev);
}
static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	pm_suspend_ignore_children(&master->dev, true);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
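
/*
 * Example (illustrative only): the usual controller-driver pattern combining
 * spi_alloc_master() with the managed registration above.  The "foo"
 * structures and callbacks are hypothetical.
 *
 *	struct spi_master *master;
 *	struct foo_priv *priv;
 *	int ret;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	master->num_chipselect = 4;
 *	master->transfer_one = foo_transfer_one;
 *	master->set_cs = foo_set_cs;
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		spi_master_put(master);
 */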
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}
/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
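
/*
 * Example (illustrative only): looking up bus 0 and dropping the reference
 * that class_find_device() took on the caller's behalf.
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		// hypothetical: use the master here
 *		spi_master_put(master);
 *	}
 */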
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_master to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 *
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);
/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
/**
 * spi_res_release - release all spi resources for this message
 * @master:  the @spi_master
 * @message: the @spi_message
 */
void spi_res_release(struct spi_master *master,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(master, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_master *master,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(master, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copy of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about the fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacement spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @master:  the @spi_master for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length a transfer may have before it is split
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_master *master,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(
				master, msg, &xfer, maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
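
/*
 * Example (illustrative sketch): a controller driver with a hardware FIFO
 * limit can split oversized transfers from its prepare_message() hook.
 * EXAMPLE_MAX_XFER and the function name are hypothetical.
 */
#define EXAMPLE_MAX_XFER	65535

static int __maybe_unused example_prepare_message(struct spi_master *master,
						  struct spi_message *msg)
{
	/* any transfer longer than the FIFO limit gets split up */
	return spi_split_transfers_maxsize(master, msg, EXAMPLE_MAX_XFER,
					   GFP_KERNEL);
}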

/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
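
/*
 * Example (illustrative sketch, hypothetical function): a controller driver
 * declaring which word sizes it supports; this is the mask that
 * __spi_validate_bits_per_word() checks against.
 */
static void __maybe_unused example_declare_bpw(struct spi_master *master)
{
	/* e.g. a controller handling any width from 4 to 16 bits */
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
}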

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent DUAL and QUAD being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: cannot select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
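
/*
 * Example (illustrative sketch): a protocol driver tightening the device
 * configuration, typically from its probe().  The values shown are
 * hypothetical.
 */
static int __maybe_unused example_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* 1 MHz */

	return spi_setup(spi);		/* may sleep; checks master support */
}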

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
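
/*
 * Example (illustrative sketch, hypothetical names): submitting a message
 * asynchronously.  The completion callback runs in a context that cannot
 * sleep, so it only inspects message->status.  The message, transfer and
 * buffers must stay valid (and should be DMA-safe) until completion.
 */
static void __maybe_unused example_async_complete(void *context)
{
	struct spi_message *m = context;

	if (m->status)
		pr_err("example: async transfer failed: %d\n", m->status);
}

static int __maybe_unused example_async_submit(struct spi_device *spi,
					       struct spi_transfer *xfer,
					       struct spi_message *msg)
{
	spi_message_init_with_transfers(msg, xfer, 1);
	msg->complete = example_async_complete;
	msg->context = msg;

	return spi_async(spi, msg);	/* callable from atomic context */
}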

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_master *master = spi->master;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
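
/*
 * Example (illustrative sketch, hypothetical names and opcode values): an
 * m25p80-style flash driver issuing an accelerated read.  Real users fill
 * the message from their flash parameters; the 0x03 NOR READ opcode and
 * 3-byte addressing here are assumptions for illustration only.
 */
static int __maybe_unused example_flash_read(struct spi_device *spi,
					     void *to, loff_t from, size_t len)
{
	struct spi_flash_read_message msg = {
		.buf = to,
		.from = from,
		.len = len,
		.read_opcode = 0x03,	/* hypothetical NOR "READ" */
		.addr_width = 3,
		.dummy_bytes = 0,
	};
	int ret;

	if (!spi_flash_read_supported(spi))
		return -EOPNOTSUPP;	/* master has no spi_flash_read hook */

	ret = spi_flash_read(spi, &msg);
	if (ret)
		return ret;

	return msg.retlen == len ? 0 : -EIO;
}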

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->master->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->master->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
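
/*
 * Example (illustrative sketch, hypothetical name): a simple full-duplex
 * synchronous exchange.  Both buffers are assumed DMA-safe (e.g. kmalloc'd).
 */
static int __maybe_unused example_sync_xfer(struct spi_device *spi,
					    const void *tx, void *rx,
					    size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync(spi, &msg);	/* sleeps until msg.status is final */
}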

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
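
/*
 * Example (illustrative sketch, hypothetical name): grouping two messages
 * so no other client can slip transfers onto the bus in between.  Only the
 * _locked variants may be used while the bus lock is held.
 */
static int __maybe_unused example_atomic_sequence(struct spi_device *spi,
						  struct spi_message *first,
						  struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(master);

	return ret;
}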

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
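
/*
 * Example (illustrative sketch, hypothetical chip and opcode encoding):
 * reading one register of a device that expects a one-byte command.
 * Stack buffers are fine here because spi_write_then_read() copies
 * through its own DMA-safe buffer.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi,
					   u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;	/* hypothetical "read" opcode encoding */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}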

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters are not on the spi_bus, so we must find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_master_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_master, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);