/*
 * Copyright (C) STMicroelectronics SA 2015
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#include <linux/seq_file.h>
#endif

#include "hva.h"
#include "hva-hw.h"

/* HVA register offsets */
#define HVA_HIF_REG_RST                 0x0100U
#define HVA_HIF_REG_RST_ACK             0x0104U
#define HVA_HIF_REG_MIF_CFG             0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG         0x010CU
#define HVA_HIF_REG_CFL                 0x0110U
#define HVA_HIF_FIFO_CMD                0x0114U
#define HVA_HIF_FIFO_STS                0x0118U
#define HVA_HIF_REG_SFL                 0x011CU
#define HVA_HIF_REG_IT_ACK              0x0120U
#define HVA_HIF_REG_ERR_IT_ACK          0x0124U
#define HVA_HIF_REG_LMI_ERR             0x0128U
#define HVA_HIF_REG_EMI_ERR             0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR         0x0130U
#define HVA_HIF_REG_HEC_STS             0x0134U
#define HVA_HIF_REG_HVC_STS             0x0138U
#define HVA_HIF_REG_HJE_STS             0x013CU
#define HVA_HIF_REG_CNT                 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS      0x0144U
#define HVA_HIF_REG_CLK_GATING          0x0148U
#define HVA_HIF_REG_VERSION             0x014CU
#define HVA_HIF_REG_BSM                 0x0150U

/* define value for version id register (HVA_HIF_REG_VERSION) */
#define VERSION_ID_MASK                 0x0000FFFF

/* define values for BSM register (HVA_HIF_REG_BSM) */
#define BSM_CFG_VAL1                    0x0003F000
#define BSM_CFG_VAL2                    0x003F0000

/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
#define MIF_CFG_VAL1                    0x04460446
#define MIF_CFG_VAL2                    0x04460806
#define MIF_CFG_VAL3                    0x00000000

/* define value for HEC memory interface register (HVA_HIF_REG_HEC_MIF_CFG) */
#define HEC_MIF_CFG_VAL                 0x000000C4

/* bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
#define CLK_GATING_HVC                  BIT(0)
#define CLK_GATING_HEC                  BIT(1)
#define CLK_GATING_HJE                  BIT(2)

/* fixed HVA clock rate */
#define CLK_RATE                        300000000

/* delay for runtime PM autosuspend */
#define AUTOSUSPEND_DELAY_MS            3

/*
 * hw encode error values
 * NO_ERROR: Success, Task OK
 * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
 * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
 * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
 * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
 * H264_SLICE_READY: VECH264 Slice ready
 * TASK_LIST_FULL: HVA/FPC task list full
 *                 (discard latest transform command)
 * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
 * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
 * NO_INT_COMPLETION: Time-out on interrupt completion
 * LMI_ERR: Local Memory Interface Error
 * EMI_ERR: External Memory Interface Error
 * HECMI_ERR: HEC Memory Interface Error
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};
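
/*
 * hva_hw_its_interrupt - hard IRQ handler for the status interrupt line:
 * latches the status and FIFO level registers, acknowledges the interrupt
 * and defers the decoding of the status word to the threaded handler.
 */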
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
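
/*
 * hva_hw_its_irq_thread - threaded handler for the status interrupt:
 * extracts the client (context) identifier from the latched status word,
 * maps it to the matching context and records whether the reported status
 * is an error before waking up the task completion.
 */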
static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u32 status = hva->sts_reg & 0xFF;
	u8 ctx_id = 0;
	struct hva_ctx *ctx = NULL;

	dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s %s: bad context identifier: %d\n",
			HVA_PREFIX, __func__, ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	switch (status) {
	case NO_ERROR:
		dev_dbg(dev, "%s %s: no error\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_SLICE_READY:
		dev_dbg(dev, "%s %s: h264 slice ready\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_FRAME_SKIPPED:
		dev_dbg(dev, "%s %s: h264 frame skipped\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_BITSTREAM_OVERSIZE:
		dev_err(dev, "%s %s: h264 bitstream oversize\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_SLICE_LIMIT_SIZE:
		dev_err(dev, "%s %s: h264 slice limit size is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_MAX_SLICE_NUMBER:
		dev_err(dev, "%s %s: h264 max slice number is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case TASK_LIST_FULL:
		dev_err(dev, "%s %s: task list full\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case UNKNOWN_COMMAND:
		dev_err(dev, "%s %s: command not known\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case WRONG_CODEC_OR_RESOLUTION:
		dev_err(dev, "%s %s: wrong codec or resolution\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	default:
		dev_err(dev, "%s %s: status not recognized\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	}

out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}
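
/*
 * hva_hw_err_interrupt - hard IRQ handler for the error interrupt line:
 * latches the status, FIFO level and memory interface error registers,
 * acknowledges the interrupt and defers the error reporting to the
 * threaded handler.
 */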
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* read error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
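
/*
 * hva_hw_err_irq_thread - threaded handler for the error interrupt:
 * resolves the faulting context from the latched status word, logs any
 * pending local/external/HEC memory interface error and flags the context
 * as being in hardware error before waking up the task completion.
 */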
static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u8 ctx_id = 0;
	struct hva_ctx *ctx;

	dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
			ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	if (hva->lmi_err_reg) {
		dev_err(dev, "%s local memory interface error: 0x%08x\n",
			ctx->name, hva->lmi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->emi_err_reg) {
		dev_err(dev, "%s external memory interface error: 0x%08x\n",
			ctx->name, hva->emi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->hec_mif_err_reg) {
		dev_err(dev, "%s hec memory interface error: 0x%08x\n",
			ctx->name, hva->hec_mif_err_reg);
		ctx->hw_err = true;
	}

out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}
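
/*
 * hva_hw_get_ip_version - read the version register under a runtime PM
 * reference and check it against the supported hardware revisions; returns
 * HVA_VERSION_UNKNOWN when the revision is not recognized.
 */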
static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);
	unsigned long int version;

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
		return -EFAULT;
	}

	version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
		  VERSION_ID_MASK;

	pm_runtime_put_autosuspend(dev);

	switch (version) {
	case HVA_VERSION_V400:
		dev_dbg(dev, "%s IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		break;
	default:
		dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		version = HVA_VERSION_UNKNOWN;
		break;
	}

	return version;
}
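
/*
 * hva_hw_probe - map the HVA registers, record the eSRAM region, get the
 * clock and the status/error interrupt lines, set up runtime PM and check
 * that the IP hardware version is supported.
 */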
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
	struct device *dev = &pdev->dev;
	struct resource *regs;
	struct resource *esram;
	int ret;

	WARN_ON(!hva);

	/* get memory for registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hva->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(hva->regs)) {
		dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
		return PTR_ERR(hva->regs);
	}

	/* get memory for esram */
	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!esram) {
		dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
		return -ENODEV;
	}
	hva->esram_addr = esram->start;
	hva->esram_size = resource_size(esram);

	dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
		 HVA_PREFIX, hva->esram_addr, hva->esram_size);

	/* get clock resource */
	hva->clk = devm_clk_get(dev, "clk_hva");
	if (IS_ERR(hva->clk)) {
		dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
		return PTR_ERR(hva->clk);
	}

	ret = clk_prepare(hva->clk);
	if (ret < 0) {
		dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
		hva->clk = ERR_PTR(-EINVAL);
		return ret;
	}

	/* get status interrupt resource */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "%s failed to get status IRQ\n", HVA_PREFIX);
		goto err_clk;
	}
	hva->irq_its = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
					hva_hw_its_irq_thread,
					IRQF_ONESHOT,
					"hva_its_irq", hva);
	if (ret) {
		dev_err(dev, "%s failed to install status IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_its);
		goto err_clk;
	}
	disable_irq(hva->irq_its);

	/* get error interrupt resource */
	ret = platform_get_irq(pdev, 1);
	if (ret < 0) {
		dev_err(dev, "%s failed to get error IRQ\n", HVA_PREFIX);
		goto err_clk;
	}
	hva->irq_err = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
					hva_hw_err_irq_thread,
					IRQF_ONESHOT,
					"hva_err_irq", hva);
	if (ret) {
		dev_err(dev, "%s failed to install error IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_err);
		goto err_clk;
	}
	disable_irq(hva->irq_err);

	/* initialise protection mutex */
	mutex_init(&hva->protect_mutex);

	/* initialise completion signal */
	init_completion(&hva->interrupt);

	/* initialise runtime power management */
	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
		goto err_clk;
	}

	/* check IP hardware version */
	hva->ip_version = hva_hw_get_ip_version(hva);

	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
		ret = -EINVAL;
		goto err_pm;
	}

	dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
		 hva->ip_version);

	return 0;

err_pm:
	pm_runtime_put(dev);
err_clk:
	if (hva->clk)
		clk_unprepare(hva->clk);

	return ret;
}
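
/*
 * hva_hw_remove - mirror of hva_hw_probe: mask both interrupt lines and
 * release the runtime PM reference taken at probe time.
 */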
void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}
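
/*
 * hva_hw_runtime_suspend - runtime PM suspend callback: gate the HVA clock.
 */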
int hva_hw_runtime_suspend(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	clk_disable_unprepare(hva->clk);

	return 0;
}
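
/*
 * hva_hw_runtime_resume - runtime PM resume callback: ungate the HVA clock
 * and restore its nominal rate.
 */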
int hva_hw_runtime_resume(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	if (clk_prepare_enable(hva->clk)) {
		dev_err(hva->dev, "%s failed to prepare hva clk\n",
			HVA_PREFIX);
		return -EINVAL;
	}

	if (clk_set_rate(hva->clk, CLK_RATE)) {
		dev_err(dev, "%s failed to set clock frequency\n",
			HVA_PREFIX);
		clk_disable_unprepare(hva->clk);
		return -EINVAL;
	}

	return 0;
}
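
/*
 * hva_hw_execute_task - serialize access to the hardware (protect_mutex),
 * ungate the clock of the codec block needed by the command, program the
 * byte-swap and memory interface configuration, push the task descriptor
 * into the command FIFO and wait (with a 2 s timeout) for the interrupt
 * threaded handlers to signal completion through hva->interrupt.
 */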
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;

	mutex_lock(&hva->protect_mutex);

	/* enable irqs */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}

	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
		__func__);

	/* byte swap config */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/* define Max Opcode Size and Max Message Size for LMI and EMI */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
	 * the context identifier is provided as client identifier to the
	 * hardware, and is retrieved in the interrupt functions from the
	 * status register
	 */
	dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* get encoding status */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
	}

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n", \
			     #reg, readl_relaxed(hva->regs + reg))
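
/*
 * hva_hw_dump_regs - debugfs helper: wake up the IP through runtime PM and
 * dump the main HVA interface registers into the given seq_file.
 */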
void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_get_sync(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
#endif