// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access them.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps, a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */

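/*
 * Typical client usage, as an illustrative sketch only (not part of this
 * driver): a client first reserves an item with qcom_smem_alloc() and then
 * resolves a pointer to it with qcom_smem_get(). The item number (42) and the
 * use of QCOM_SMEM_HOST_ANY below are made up for the example.
 *
 *	static int example_smem_client(struct device *dev)
 *	{
 *		size_t size;
 *		void *ptr;
 *		int ret;
 *
 *		// Reserve the item; -EEXIST just means it is already there.
 *		ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, 42, 64);
 *		if (ret < 0 && ret != -EEXIST)
 *			return ret;
 *
 *		// Resolve a pointer to the item and its size.
 *		ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, 42, &size);
 *		if (IS_ERR(ptr))
 *			return PTR_ERR(ptr);
 *
 *		dev_dbg(dev, "smem item of %zu bytes at %p\n", size, ptr);
 *		return 0;
 *	}
 */
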
/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		20

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

/**
 * struct smem_partition - describes smem partition
 * @virt_base:	starting virtual address of partition
 * @phys_base:	starting physical address of partition
 * @cacheline:	alignment for "cached" entries
 * @size:	size of partition
 */
struct smem_partition {
	void __iomem *virt_base;
	phys_addr_t phys_base;
	size_t cacheline;
	size_t size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @ptable:	virtual base of partition table
 * @global_partition:	describes the global partition when in use
 * @partitions:	list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[] __counted_by(num_regions);
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

/* The qcom hwspinlock id is always plus one from the smem host id */
#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x)	((__x) + 1)

/**
 * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
 * @host:	remote processor id
 *
 * Busts the hwspin_lock for the given smem host id. This helper is intended
 * for remoteproc drivers that manage remoteprocs with an equivalent smem
 * driver instance in the remote firmware. Drivers can force a release of the
 * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, otherwise negative errno.
 */
int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
{
	/* This function is for remote procs, so ignore SMEM_HOST_APPS */
	if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
		return -EINVAL;

	return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
}
EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);

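/*
 * Illustrative sketch only of how a remoteproc driver might use the bust
 * helper when it detects that a remote processor has crashed while possibly
 * holding the smem hwspinlock. The struct example_rproc and its host_id
 * field are hypothetical names for this example, not part of any real driver.
 *
 *	static void example_rproc_crash_handler(struct example_rproc *ddata)
 *	{
 *		int ret;
 *
 *		// Force-release the lock on behalf of the dead remote host.
 *		ret = qcom_smem_bust_hwspin_lock_by_host(ddata->host_id);
 *		if (ret)
 *			dev_warn(ddata->dev,
 *				 "failed to bust smem hwspinlock: %d\n", ret);
 *	}
 */
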
/**
 * qcom_smem_is_available() - Check if SMEM is available
 *
 * Return: true if SMEM is available, false otherwise.
 */
bool qcom_smem_is_available(void)
{
	return !!__smem;
}
EXPORT_SYMBOL_GPL(qcom_smem_is_available);

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	void *cached;
	void *p_end;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)end > p_end || cached > p_end))
		return -EINVAL;

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	if (WARN_ON((void *)hdr > p_end))
		return -EINVAL;

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u64 entry_offset;
	u32 e_size;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			if (WARN_ON(e_size + entry_offset > region->size))
				return ERR_PTR(-EINVAL);

			if (size != NULL)
				*size = e_size;

			return region->virt_base + entry_offset;
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size ||
					    padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size ||
					    padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 *
 * Return: a pointer to an SMEM item on success, ERR_PTR() on failure.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition *part;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	return ptr;
}
EXPORT_SYMBOL_GPL(qcom_smem_get);

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 *
 * Return: number of available bytes on success, negative errno on failure.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition *part;
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > le32_to_cpu(part->size))
			return -EINVAL;
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		phdr = part->virt_base;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > le32_to_cpu(part->size))
			return -EINVAL;
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);

		if (ret > __smem->regions[0].size)
			return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);

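/*
 * Illustrative sketch only: a client can compare successive readings of the
 * free space in a partition to notice that new items have been allocated
 * since the last check. The host id (1) below is made up for the example.
 *
 *	int before = qcom_smem_get_free_space(1);
 *	// ... later, e.g. after an interrupt from the remote side ...
 *	int after = qcom_smem_get_free_space(1);
 *
 *	if (before >= 0 && after >= 0 && after < before)
 *		pr_debug("new smem allocations were made in the partition\n");
 */
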
static bool addr_in_range(void __iomem *base, size_t size, void *addr)
{
	return base && ((void __iomem *)addr >= base &&
			(void __iomem *)addr < base + size);
}

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Return: physical address of the SMEM item (if found), 0 otherwise
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	struct smem_partition *part;
	struct smem_region *area;
	u64 offset;
	u32 i;

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		part = &__smem->partitions[i];

		if (addr_in_range(part->virt_base, part->size, p)) {
			offset = p - part->virt_base;

			return (phys_addr_t)part->phys_base + offset;
		}
	}

	part = &__smem->global_partition;

	if (addr_in_range(part->virt_base, part->size, p)) {
		offset = p - part->virt_base;

		return (phys_addr_t)part->phys_base + offset;
	}

	for (i = 0; i < __smem->num_regions; i++) {
		area = &__smem->regions[i];

		if (addr_in_range(area->virt_base, area->size, p)) {
			offset = p - area->virt_base;

			return (phys_addr_t)area->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);

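/*
 * Illustrative sketch only: after looking up an item with qcom_smem_get(), a
 * client that needs to hand the buffer's location to hardware or to a remote
 * processor can translate the pointer back to a physical address. The item
 * number (42) below is made up for the example.
 *
 *	void *ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, 42, NULL);
 *	phys_addr_t phys;
 *
 *	if (!IS_ERR(ptr)) {
 *		phys = qcom_smem_virt_to_phys(ptr);
 *		if (!phys)
 *			pr_warn("smem item not backed by a known region\n");
 *	}
 */
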
/**
 * qcom_smem_get_soc_id() - return the SoC ID
 * @id:	On success, we return the SoC ID here.
 *
 * Look up SoC ID from HW/SW build ID and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_soc_id(u32 *id)
{
	struct socinfo *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	*id = __le32_to_cpu(info->id);

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);

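/*
 * Illustrative sketch only: a caller that needs to key behaviour off the SoC
 * can use the lookup above directly; example_check_soc() is a made-up name
 * for this example.
 *
 *	static int example_check_soc(void)
 *	{
 *		u32 soc_id;
 *		int ret;
 *
 *		ret = qcom_smem_get_soc_id(&soc_id);
 *		if (ret)
 *			return ret;
 *
 *		pr_info("running on SoC id %u\n", soc_id);
 *		return 0;
 *	}
 */
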
/**
 * qcom_smem_get_feature_code() - return the feature code
 * @code: On success, return the feature code here.
 *
 * Look up the feature code identifier from SMEM and return it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_smem_get_feature_code(u32 *code)
{
	struct socinfo *info;
	u32 raw_code;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* This only makes sense for socinfo >= 16 */
	if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
		return -EOPNOTSUPP;

	raw_code = __le32_to_cpu(info->feature_code);

	/* Ensure the value makes sense */
	if (raw_code > SOCINFO_FC_INT_MAX)
		raw_code = SOCINFO_FC_UNKNOWN;

	*code = raw_code;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->ptable;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied. Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 phys_addr;
	u32 size;

	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

	if (!header)
		return NULL;

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition.virt_base = (void __iomem *)header;
	smem->global_partition.phys_base = smem->regions[0].aux_base +
						le32_to_cpu(entry->offset);
	smem->global_partition.size = le32_to_cpu(entry->size);
	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u16 remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host].virt_base) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host].virt_base = (void __iomem *)header;
		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
						le32_to_cpu(entry->offset);
		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
{
	u32 ptable_start;

	/* map starting 4K for smem header */
	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
	ptable_start = region->aux_base + region->size - SZ_4K;
	/* map last 4k for toc */
	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);

	if (!region->virt_base || !smem->ptable)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
	u32 phys_addr;

	phys_addr = smem->regions[0].aux_base;

	smem->regions[0].size = size;
	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);

	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
				 struct smem_region *region)
{
	struct device *dev = smem->dev;
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	region->aux_base = r.start;
	region->size = resource_size(&r);

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	unsigned long flags;
	int num_regions;
	int hwlock_id;
	u32 version;
	u32 size;
	int ret;
	int i;

	num_regions = 1;
	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
		num_regions++;

	smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
			    GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference, if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}

	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
	if (ret)
		return ret;

	for (i = 1; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0)
		return dev_err_probe(&pdev->dev, hwlock_id,
				     "failed to retrieve hwlock\n");

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
	if (ret)
		return ret;
	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
	hwspin_unlock_irqrestore(smem->hwlock, &flags);

	version = qcom_smem_get_sbl_version(smem);
	/*
	 * smem header mapping is required only in heap version scheme, so unmap
	 * it here. It will be remapped in qcom_smem_map_global() when whole
	 * partition is mapped again.
	 */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		qcom_smem_map_global(smem, size);
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static void qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");