Linux 4.19.133 - drivers/base/regmap/regmap.c
/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

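/*
 * Example (illustrative sketch, not part of this file; the FOO_* names
 * are hypothetical): a driver typically feeds regmap_check_range_table()
 * by pointing e.g. regmap_config->rd_table at an access table:
 *
 *	static const struct regmap_range foo_readable_ranges[] = {
 *		regmap_reg_range(FOO_STATUS, FOO_DATA),
 *	};
 *
 *	static const struct regmap_access_table foo_readable_table = {
 *		.yes_ranges = foo_readable_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_readable_ranges),
 *	};
 */
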
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

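/*
 * Example (illustrative sketch; the my_driver_* names are hypothetical):
 * a driver that already serialises its bus traffic with its own lock can
 * route regmap through it via the config->lock/config->unlock callbacks
 * instead of letting regmap create a mutex or spinlock of its own:
 *
 *	static void my_driver_lock(void *arg)
 *	{
 *		struct my_driver *priv = arg;
 *
 *		mutex_lock(&priv->io_lock);
 *	}
 *
 *	static void my_driver_unlock(void *arg)
 *	{
 *		struct my_driver *priv = arg;
 *
 *		mutex_unlock(&priv->io_lock);
 *	}
 *
 *	config.lock = my_driver_lock;
 *	config.unlock = my_driver_unlock;
 *	config.lock_arg = priv;
 */
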
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

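/*
 * Example (illustrative sketch): the lookup order above means a
 * regmap_config can force a value endianness regardless of the bus
 * default, while a device tree node can do the same with the
 * "big-endian", "little-endian" or "native-endian" properties:
 *
 *	static const struct regmap_config foo_config = {
 *		...
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */
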
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

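/*
 * Example (illustrative sketch; assumes an I2C client and hypothetical
 * FOO_* constants): drivers normally reach __regmap_init() through a
 * bus-specific wrapper such as devm_regmap_init_i2c(), passing only a
 * regmap_config:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = FOO_MAX_REGISTER,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */
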
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

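/*
 * Example (illustrative sketch; the FOO_* names are hypothetical): a
 * field covering bits 4..7 of register FOO_CTRL is described with
 * REG_FIELD() and then allocated against an existing regmap:
 *
 *	static const struct reg_field foo_ctrl_mode = REG_FIELD(FOO_CTRL, 4, 7);
 *	struct regmap_field *fld;
 *
 *	fld = devm_regmap_field_alloc(dev, map, foo_ctrl_mode);
 *	if (IS_ERR(fld))
 *		return PTR_ERR(fld);
 */
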
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user, once finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

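/*
 * Example (illustrative sketch; the FOO_* names are hypothetical): the
 * paging handled by _regmap_select_page() is described to regmap with a
 * regmap_range_cfg.  Here registers 0x100-0x4ff are reached through a
 * 0x100-register window at 0x10, selected by the low bits of
 * FOO_PAGE_SEL:
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.name = "pages",
 *			.range_min = 0x100,
 *			.range_max = 0x4ff,
 *			.selector_reg = FOO_PAGE_SEL,
 *			.selector_mask = 0x3,
 *			.selector_shift = 0,
 *			.window_start = 0x10,
 *			.window_len = 0x100,
 *		},
 *	};
 *
 * with config.ranges = foo_ranges and
 * config.num_ranges = ARRAY_SIZE(foo_ranges).
 */
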
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

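/*
 * Example (illustrative sketch; the FOO_* names are hypothetical):
 * typical single-register access from driver code:
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = regmap_write(map, FOO_RESET, FOO_RESET_MAGIC);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_read(map, FOO_STATUS, &val);
 *	if (ret)
 *		return ret;
 */
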
/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

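/*
 * Example (illustrative sketch; assumes a map with 16-bit big-endian
 * values and a hypothetical FOO_FW_BASE register): the caller lays the
 * data out exactly as it goes on the wire, so byte order is the caller's
 * responsibility and val_len must be a multiple of
 * map->format.val_bytes:
 *
 *	__be16 fw_chunk[4] = {
 *		cpu_to_be16(0x1234), cpu_to_be16(0x5678),
 *		cpu_to_be16(0x9abc), cpu_to_be16(0xdef0),
 *	};
 *
 *	ret = regmap_raw_write(map, FOO_FW_BASE, fw_chunk, sizeof(fw_chunk));
 */
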
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force option.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

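/*
 * Example (illustrative sketch; assumes a regmap_field "fld" allocated
 * as shown earlier): the regmap_field_write() and
 * regmap_field_update_bits() helpers in <linux/regmap.h> wrap
 * regmap_field_update_bits_base(), so a field update from driver code
 * is simply:
 *
 *	ret = regmap_field_write(fld, 0x5);
 */
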
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

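/*
 * Example (illustrative sketch; the FOO_COEF_BASE name is hypothetical,
 * assuming a map with 16-bit values): unlike regmap_raw_write(), the
 * buffer is in native register size rather than wire order - the
 * parse_inplace() pass above handles any byte swapping:
 *
 *	u16 coeffs[3] = { 0x0001, 0x0203, 0x0405 };
 *
 *	ret = regmap_bulk_write(map, FOO_COEF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 */
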
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers is not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
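
/*
 * Usage sketch (illustrative only, not part of the regmap core): a
 * client driver applying an out-of-order init sequence through
 * regmap_multi_reg_write().  The register addresses, values and the
 * function name are hypothetical.
 */
static int __maybe_unused example_apply_init_seq(struct regmap *map)
{
	/* The last entry requests a 10us delay after its write */
	static const struct reg_sequence init_seq[] = {
		{ 0x30, 0x01 },
		{ 0x10, 0xa5 },
		{ 0x31, 0x80, 10 },
	};

	return regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
}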
/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register,value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
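
/*
 * Usage sketch (illustrative only): pushing a wake-up write straight
 * to the hardware without touching the register cache, for example
 * ahead of a regcache_sync().  Register and value are hypothetical.
 */
static int __maybe_unused example_wake_device(struct regmap *map)
{
	static const struct reg_sequence wake_seq[] = {
		{ 0x01, 0x01 },	/* hypothetical power-on bit */
	};

	return regmap_multi_reg_write_bypassed(map, wake_seq,
					       ARRAY_SIZE(wake_seq));
}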
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI.  regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
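
/*
 * Usage sketch (illustrative only): firmware download over the
 * asynchronous raw write path.  The caller must keep @fw valid until
 * regmap_async_complete() returns; the start register 0x100 and the
 * function name are hypothetical, and @len must be a multiple of the
 * map's value size.
 */
static int __maybe_unused example_fw_download(struct regmap *map,
					      const void *fw, size_t len)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x100, fw, len);
	if (ret != 0)
		return ret;

	/* Wait for the scheduled transfer(s) to finish */
	return regmap_async_complete(map);
}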
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
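
/*
 * Usage sketch (illustrative only): checking a single status
 * register.  The register address and ready bit are hypothetical.
 */
static int __maybe_unused example_is_ready(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, 0x04, &val);
	if (ret != 0)
		return ret;

	return !!(val & BIT(0));
}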
/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
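
/*
 * Usage sketch (illustrative only): reading a block of raw,
 * device-formatted data.  The start register 0x40 is hypothetical,
 * must be stride-aligned, and @buf must have room for 16 register
 * values.
 */
static int __maybe_unused example_dump_block(struct regmap *map, void *buf)
{
	int val_bytes = regmap_get_val_bytes(map);

	if (val_bytes < 0)
		return val_bytes;

	/* 16 values, laid out exactly as the device transmits them */
	return regmap_raw_read(map, 0x40, buf, 16 * val_bytes);
}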
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
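
/*
 * Usage sketch (illustrative only): draining a FIFO that is exposed
 * through a single register.  Register 0x50 is hypothetical and must
 * be marked volatile and noinc-readable in the regmap config.
 */
static int __maybe_unused example_drain_fifo(struct regmap *map,
					     u8 *buf, size_t len)
{
	/* Every byte is read from 0x50; the address never increments */
	return regmap_noinc_read(map, 0x50, buf, len);
}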
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
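
/*
 * Usage sketch (illustrative only): extracting a bitfield with the
 * regmap_field helpers.  REG_FIELD() here describes bits [3:1] of a
 * hypothetical register 0x08; a real driver would allocate the field
 * once at probe time rather than per call.
 */
static int __maybe_unused example_read_mode(struct device *dev,
					    struct regmap *map,
					    unsigned int *mode)
{
	static const struct reg_field mode_field = REG_FIELD(0x08, 1, 3);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, mode_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* The result is already masked and shifted down */
	return regmap_field_read(field, mode);
}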
/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
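
/*
 * Usage sketch (illustrative only): reading the same field out of a
 * per-port register bank.  This assumes the regmap_field was created
 * from a reg_field with id_size and id_offset describing the bank
 * layout; all names are hypothetical.
 */
static int __maybe_unused example_read_port_state(struct regmap_field *field,
						  unsigned int port,
						  unsigned int *state)
{
	/* Reads from field->reg + (field->id_offset * port) */
	return regmap_fields_read(field, port, state);
}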
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
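
/*
 * Usage sketch (illustrative only): reading four consecutive
 * registers into native-endian values.  Unlike regmap_raw_read(),
 * the results land in CPU byte order.  The start register 0x20 is
 * hypothetical and the map is assumed to use 16-bit values.
 */
static int __maybe_unused example_bulk_read(struct regmap *map, u16 *vals)
{
	/* @vals must have room for four entries */
	return regmap_bulk_read(map, 0x20, vals, 4);
}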
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
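
/*
 * Usage sketch (illustrative only): the common regmap_update_bits()
 * wrapper expands to regmap_update_bits_base(map, reg, mask, val,
 * NULL, false, false).  The register and enable bit are hypothetical.
 */
static int __maybe_unused example_set_enable(struct regmap *map, bool on)
{
	/* Touch only bit 0, preserving the rest of the register */
	return regmap_update_bits(map, 0x00, BIT(0), on ? BIT(0) : 0);
}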
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}
/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
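
/*
 * Usage sketch (illustrative only): registering a vendor-supplied
 * erratum fix so it is re-applied on every cache sync.  Registers
 * and values are hypothetical.
 */
static int __maybe_unused example_apply_errata(struct regmap *map)
{
	static const struct reg_sequence errata[] = {
		{ 0x7f, 0x01 },	/* hypothetical undocumented trim register */
		{ 0x7e, 0x3c },
	};

	return regmap_register_patch(map, errata, ARRAY_SIZE(errata));
}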
/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);