// SPDX-License-Identifier: GPL-2.0-only
/*
 * The On Chip Memory (OCMEM) allocator allows various clients to allocate
 * memory from OCMEM based on performance, latency and power requirements.
 * This is typically used by the GPU, camera/video, and audio components on
 * some Snapdragon SoCs.
 *
 * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
 * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/ocmem.h>
/*
 * Interleaving mode of an OCMEM region: WIDE uses the full port width,
 * THIN halves it. The hardware powers up in wide mode, hence the default.
 */
enum region_mode {
	WIDE_MODE = 0x0,
	THIN_MODE,
	MODE_DEFAULT = WIDE_MODE,
};
/*
 * Power state of a single OCMEM macro (bank). The numeric values are the
 * encodings programmed into the PSGSC_CTL register fields — do not renumber.
 */
enum ocmem_macro_state {
	PASSTHROUGH = 0,
	PERI_ON = 1,
	CORE_ON = 2,
	CLK_OFF = 4,
};
42 enum region_mode mode
;
43 unsigned int num_macros
;
44 enum ocmem_macro_state macro_state
[4];
45 unsigned long macro_size
;
46 unsigned long region_size
;
51 unsigned long macro_size
;
56 const struct ocmem_config
*config
;
57 struct resource
*memory
;
60 struct clk
*iface_clk
;
61 unsigned int num_ports
;
62 unsigned int num_macros
;
64 struct ocmem_region
*regions
;
65 unsigned long active_allocations
;
/* Allocation granularity and alignment enforced by ocmem_allocate(). */
#define OCMEM_MIN_ALIGN				SZ_64K
#define OCMEM_MIN_ALLOC				SZ_64K

#define OCMEM_REG_HW_VERSION			0x00000000
#define OCMEM_REG_HW_PROFILE			0x00000004

/* One THIN-mode enable bit per region. */
#define OCMEM_REG_REGION_MODE_CTL		0x00001000
#define OCMEM_REGION_MODE_CTL_REG0_THIN		0x00000001
#define OCMEM_REGION_MODE_CTL_REG1_THIN		0x00000002
#define OCMEM_REGION_MODE_CTL_REG2_THIN		0x00000004
#define OCMEM_REGION_MODE_CTL_REG3_THIN		0x00000008

/* GPU memory-protection window, used when no secure (SCM) lock exists. */
#define OCMEM_REG_GFX_MPU_START			0x00001004
#define OCMEM_REG_GFX_MPU_END			0x00001008

#define OCMEM_HW_VERSION_MAJOR(val)		FIELD_GET(GENMASK(31, 28), val)
#define OCMEM_HW_VERSION_MINOR(val)		FIELD_GET(GENMASK(27, 16), val)
#define OCMEM_HW_VERSION_STEP(val)		FIELD_GET(GENMASK(15, 0), val)

#define OCMEM_HW_PROFILE_NUM_PORTS(val)		FIELD_GET(0x0000000f, (val))
#define OCMEM_HW_PROFILE_NUM_MACROS(val)	FIELD_GET(0x00003f00, (val))

#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE	0x00010000
#define OCMEM_HW_PROFILE_INTERLEAVING		0x00020000
#define OCMEM_REG_GEN_STATUS			0x0000000c

#define OCMEM_REG_PSGSC_STATUS			0x00000038
#define OCMEM_REG_PSGSC_CTL(i0)			(0x0000003c + 0x1*(i0))

/* Per-macro power-state fields of a region's PSGSC_CTL register. */
#define OCMEM_PSGSC_CTL_MACRO0_MODE(val)	FIELD_PREP(0x00000007, (val))
#define OCMEM_PSGSC_CTL_MACRO1_MODE(val)	FIELD_PREP(0x00000070, (val))
#define OCMEM_PSGSC_CTL_MACRO2_MODE(val)	FIELD_PREP(0x00000700, (val))
#define OCMEM_PSGSC_CTL_MACRO3_MODE(val)	FIELD_PREP(0x00007000, (val))
102 static inline void ocmem_write(struct ocmem
*ocmem
, u32 reg
, u32 data
)
104 writel(data
, ocmem
->mmio
+ reg
);
107 static inline u32
ocmem_read(struct ocmem
*ocmem
, u32 reg
)
109 return readl(ocmem
->mmio
+ reg
);
112 static void update_ocmem(struct ocmem
*ocmem
)
114 uint32_t region_mode_ctrl
= 0x0;
117 if (!qcom_scm_ocmem_lock_available()) {
118 for (i
= 0; i
< ocmem
->config
->num_regions
; i
++) {
119 struct ocmem_region
*region
= &ocmem
->regions
[i
];
121 if (region
->mode
== THIN_MODE
)
122 region_mode_ctrl
|= BIT(i
);
125 dev_dbg(ocmem
->dev
, "ocmem_region_mode_control %x\n",
127 ocmem_write(ocmem
, OCMEM_REG_REGION_MODE_CTL
, region_mode_ctrl
);
130 for (i
= 0; i
< ocmem
->config
->num_regions
; i
++) {
131 struct ocmem_region
*region
= &ocmem
->regions
[i
];
134 data
= OCMEM_PSGSC_CTL_MACRO0_MODE(region
->macro_state
[0]) |
135 OCMEM_PSGSC_CTL_MACRO1_MODE(region
->macro_state
[1]) |
136 OCMEM_PSGSC_CTL_MACRO2_MODE(region
->macro_state
[2]) |
137 OCMEM_PSGSC_CTL_MACRO3_MODE(region
->macro_state
[3]);
139 ocmem_write(ocmem
, OCMEM_REG_PSGSC_CTL(i
), data
);
143 static unsigned long phys_to_offset(struct ocmem
*ocmem
,
146 if (addr
< ocmem
->memory
->start
|| addr
>= ocmem
->memory
->end
)
149 return addr
- ocmem
->memory
->start
;
152 static unsigned long device_address(struct ocmem
*ocmem
,
153 enum ocmem_client client
,
156 WARN_ON(client
!= OCMEM_GRAPHICS
);
158 /* TODO: gpu uses phys_to_offset, but others do not.. */
159 return phys_to_offset(ocmem
, addr
);
162 static void update_range(struct ocmem
*ocmem
, struct ocmem_buf
*buf
,
163 enum ocmem_macro_state mstate
, enum region_mode rmode
)
165 unsigned long offset
= 0;
168 for (i
= 0; i
< ocmem
->config
->num_regions
; i
++) {
169 struct ocmem_region
*region
= &ocmem
->regions
[i
];
171 if (buf
->offset
<= offset
&& offset
< buf
->offset
+ buf
->len
)
172 region
->mode
= rmode
;
174 for (j
= 0; j
< region
->num_macros
; j
++) {
175 if (buf
->offset
<= offset
&&
176 offset
< buf
->offset
+ buf
->len
)
177 region
->macro_state
[j
] = mstate
;
179 offset
+= region
->macro_size
;
186 struct ocmem
*of_get_ocmem(struct device
*dev
)
188 struct platform_device
*pdev
;
191 struct device_node
*devnode
__free(device_node
) = of_parse_phandle(dev
->of_node
,
193 if (!devnode
|| !devnode
->parent
) {
194 dev_err(dev
, "Cannot look up sram phandle\n");
195 return ERR_PTR(-ENODEV
);
198 pdev
= of_find_device_by_node(devnode
->parent
);
200 dev_err(dev
, "Cannot find device node %s\n", devnode
->name
);
201 return ERR_PTR(-EPROBE_DEFER
);
204 ocmem
= platform_get_drvdata(pdev
);
206 dev_err(dev
, "Cannot get ocmem\n");
207 put_device(&pdev
->dev
);
208 return ERR_PTR(-ENODEV
);
212 EXPORT_SYMBOL_GPL(of_get_ocmem
);
214 struct ocmem_buf
*ocmem_allocate(struct ocmem
*ocmem
, enum ocmem_client client
,
219 /* TODO: add support for other clients... */
220 if (WARN_ON(client
!= OCMEM_GRAPHICS
))
221 return ERR_PTR(-ENODEV
);
223 if (size
< OCMEM_MIN_ALLOC
|| !IS_ALIGNED(size
, OCMEM_MIN_ALIGN
))
224 return ERR_PTR(-EINVAL
);
226 if (test_and_set_bit_lock(BIT(client
), &ocmem
->active_allocations
))
227 return ERR_PTR(-EBUSY
);
229 struct ocmem_buf
*buf
__free(kfree
) = kzalloc(sizeof(*buf
), GFP_KERNEL
);
236 buf
->addr
= device_address(ocmem
, client
, buf
->offset
);
239 update_range(ocmem
, buf
, CORE_ON
, WIDE_MODE
);
241 if (qcom_scm_ocmem_lock_available()) {
242 ret
= qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID
,
243 buf
->offset
, buf
->len
, WIDE_MODE
);
245 dev_err(ocmem
->dev
, "could not lock: %d\n", ret
);
250 ocmem_write(ocmem
, OCMEM_REG_GFX_MPU_START
, buf
->offset
);
251 ocmem_write(ocmem
, OCMEM_REG_GFX_MPU_END
,
252 buf
->offset
+ buf
->len
);
255 dev_dbg(ocmem
->dev
, "using %ldK of OCMEM at 0x%08lx for client %d\n",
256 size
/ 1024, buf
->addr
, client
);
261 clear_bit_unlock(BIT(client
), &ocmem
->active_allocations
);
265 EXPORT_SYMBOL_GPL(ocmem_allocate
);
267 void ocmem_free(struct ocmem
*ocmem
, enum ocmem_client client
,
268 struct ocmem_buf
*buf
)
270 /* TODO: add support for other clients... */
271 if (WARN_ON(client
!= OCMEM_GRAPHICS
))
274 update_range(ocmem
, buf
, CLK_OFF
, MODE_DEFAULT
);
276 if (qcom_scm_ocmem_lock_available()) {
279 ret
= qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID
,
280 buf
->offset
, buf
->len
);
282 dev_err(ocmem
->dev
, "could not unlock: %d\n", ret
);
284 ocmem_write(ocmem
, OCMEM_REG_GFX_MPU_START
, 0x0);
285 ocmem_write(ocmem
, OCMEM_REG_GFX_MPU_END
, 0x0);
290 clear_bit_unlock(BIT(client
), &ocmem
->active_allocations
);
292 EXPORT_SYMBOL_GPL(ocmem_free
);
294 static int ocmem_dev_probe(struct platform_device
*pdev
)
296 struct device
*dev
= &pdev
->dev
;
297 unsigned long reg
, region_size
;
298 int i
, j
, ret
, num_banks
;
301 if (!qcom_scm_is_available())
302 return -EPROBE_DEFER
;
304 ocmem
= devm_kzalloc(dev
, sizeof(*ocmem
), GFP_KERNEL
);
309 ocmem
->config
= device_get_match_data(dev
);
311 ocmem
->core_clk
= devm_clk_get(dev
, "core");
312 if (IS_ERR(ocmem
->core_clk
))
313 return dev_err_probe(dev
, PTR_ERR(ocmem
->core_clk
),
314 "Unable to get core clock\n");
316 ocmem
->iface_clk
= devm_clk_get_optional(dev
, "iface");
317 if (IS_ERR(ocmem
->iface_clk
))
318 return dev_err_probe(dev
, PTR_ERR(ocmem
->iface_clk
),
319 "Unable to get iface clock\n");
321 ocmem
->mmio
= devm_platform_ioremap_resource_byname(pdev
, "ctrl");
322 if (IS_ERR(ocmem
->mmio
))
323 return dev_err_probe(&pdev
->dev
, PTR_ERR(ocmem
->mmio
),
324 "Failed to ioremap ocmem_ctrl resource\n");
326 ocmem
->memory
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
328 if (!ocmem
->memory
) {
329 dev_err(dev
, "Could not get mem region\n");
333 /* The core clock is synchronous with graphics */
334 WARN_ON(clk_set_rate(ocmem
->core_clk
, 1000) < 0);
336 ret
= clk_prepare_enable(ocmem
->core_clk
);
338 return dev_err_probe(ocmem
->dev
, ret
, "Failed to enable core clock\n");
340 ret
= clk_prepare_enable(ocmem
->iface_clk
);
342 clk_disable_unprepare(ocmem
->core_clk
);
343 return dev_err_probe(ocmem
->dev
, ret
, "Failed to enable iface clock\n");
346 if (qcom_scm_restore_sec_cfg_available()) {
347 dev_dbg(dev
, "configuring scm\n");
348 ret
= qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID
, 0);
350 dev_err_probe(dev
, ret
, "Could not enable secure configuration\n");
351 goto err_clk_disable
;
355 reg
= ocmem_read(ocmem
, OCMEM_REG_HW_VERSION
);
356 dev_dbg(dev
, "OCMEM hardware version: %lu.%lu.%lu\n",
357 OCMEM_HW_VERSION_MAJOR(reg
),
358 OCMEM_HW_VERSION_MINOR(reg
),
359 OCMEM_HW_VERSION_STEP(reg
));
361 reg
= ocmem_read(ocmem
, OCMEM_REG_HW_PROFILE
);
362 ocmem
->num_ports
= OCMEM_HW_PROFILE_NUM_PORTS(reg
);
363 ocmem
->num_macros
= OCMEM_HW_PROFILE_NUM_MACROS(reg
);
364 ocmem
->interleaved
= !!(reg
& OCMEM_HW_PROFILE_INTERLEAVING
);
366 num_banks
= ocmem
->num_ports
/ 2;
367 region_size
= ocmem
->config
->macro_size
* num_banks
;
369 dev_info(dev
, "%u ports, %u regions, %u macros, %sinterleaved\n",
370 ocmem
->num_ports
, ocmem
->config
->num_regions
,
371 ocmem
->num_macros
, ocmem
->interleaved
? "" : "not ");
373 ocmem
->regions
= devm_kcalloc(dev
, ocmem
->config
->num_regions
,
374 sizeof(struct ocmem_region
), GFP_KERNEL
);
375 if (!ocmem
->regions
) {
377 goto err_clk_disable
;
380 for (i
= 0; i
< ocmem
->config
->num_regions
; i
++) {
381 struct ocmem_region
*region
= &ocmem
->regions
[i
];
383 if (WARN_ON(num_banks
> ARRAY_SIZE(region
->macro_state
))) {
385 goto err_clk_disable
;
388 region
->mode
= MODE_DEFAULT
;
389 region
->num_macros
= num_banks
;
391 if (i
== (ocmem
->config
->num_regions
- 1) &&
392 reg
& OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE
) {
393 region
->macro_size
= ocmem
->config
->macro_size
/ 2;
394 region
->region_size
= region_size
/ 2;
396 region
->macro_size
= ocmem
->config
->macro_size
;
397 region
->region_size
= region_size
;
400 for (j
= 0; j
< ARRAY_SIZE(region
->macro_state
); j
++)
401 region
->macro_state
[j
] = CLK_OFF
;
404 platform_set_drvdata(pdev
, ocmem
);
409 clk_disable_unprepare(ocmem
->core_clk
);
410 clk_disable_unprepare(ocmem
->iface_clk
);
414 static void ocmem_dev_remove(struct platform_device
*pdev
)
416 struct ocmem
*ocmem
= platform_get_drvdata(pdev
);
418 clk_disable_unprepare(ocmem
->core_clk
);
419 clk_disable_unprepare(ocmem
->iface_clk
);
422 static const struct ocmem_config ocmem_8226_config
= {
424 .macro_size
= SZ_128K
,
427 static const struct ocmem_config ocmem_8974_config
= {
429 .macro_size
= SZ_128K
,
432 static const struct of_device_id ocmem_of_match
[] = {
433 { .compatible
= "qcom,msm8226-ocmem", .data
= &ocmem_8226_config
},
434 { .compatible
= "qcom,msm8974-ocmem", .data
= &ocmem_8974_config
},
438 MODULE_DEVICE_TABLE(of
, ocmem_of_match
);
440 static struct platform_driver ocmem_driver
= {
441 .probe
= ocmem_dev_probe
,
442 .remove
= ocmem_dev_remove
,
445 .of_match_table
= ocmem_of_match
,
449 module_platform_driver(ocmem_driver
);
451 MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
452 MODULE_LICENSE("GPL v2");