// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci-ats.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/ipu-bridge.h>
#include <media/ipu6-pci-table.h>

#include "ipu6-buttress.h"
#include "ipu6-isys.h"
#include "ipu6-platform-buttress-regs.h"
#include "ipu6-platform-isys-csi2-reg.h"
#include "ipu6-platform-regs.h"

#define IPU6_PCI_BAR 0
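
/* Cell program descriptor as laid out in the CPD firmware image */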
struct ipu6_cell_program {
        u32 cell_pmem_data_bus_address;
        u32 cell_dmem_data_bus_address;
        u32 cell_pmem_control_bus_address;
        u32 cell_dmem_control_bus_address;
};
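
/*
 * Default ISYS platform data; ipu6_internal_pdata_init() adjusts these
 * values for the detected hardware generation.
 */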
static struct ipu6_isys_internal_pdata isys_ipdata = {
        .offset = IPU6_UNIFIED_OFFSET,
        .offset = IPU6_ISYS_IOMMU0_OFFSET,
        .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
        3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        .insert_read_before_invalidate = false,
        .l1_stream_id_reg_offset = IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
        .l2_stream_id_reg_offset = IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
        .offset = IPU6_ISYS_IOMMU1_OFFSET,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        .insert_read_before_invalidate = false,
        .l1_stream_id_reg_offset = IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
        .l2_stream_id_reg_offset = IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
        .offset = IPU6_ISYS_IOMMUI_OFFSET,
        .insert_read_before_invalidate = false,
        .cdc_fifo_threshold = {6, 8, 2},
        .dmem_offset = IPU6_ISYS_DMEM_OFFSET,
        .spc_offset = IPU6_ISYS_SPC_OFFSET,
        .isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
};
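
/*
 * Default PSYS platform data, likewise adjusted for the hardware
 * generation in ipu6_internal_pdata_init().
 */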
static struct ipu6_psys_internal_pdata psys_ipdata = {
        .offset = IPU6_UNIFIED_OFFSET,
        .offset = IPU6_PSYS_IOMMU0_OFFSET,
        .info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        .insert_read_before_invalidate = false,
        .l1_stream_id_reg_offset = IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
        .l2_stream_id_reg_offset = IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
        .offset = IPU6_PSYS_IOMMU1_OFFSET,
        1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        5, 4, 14, 6, 4, 14, 6, 4, 8,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        .insert_read_before_invalidate = false,
        .l1_stream_id_reg_offset = IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
        .l2_stream_id_reg_offset = IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
        .offset = IPU6_PSYS_IOMMU1R_OFFSET,
        1, 4, 4, 4, 4, 16, 8, 4, 32,
        16, 16, 2, 2, 2, 1, 12,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        .insert_read_before_invalidate = false,
        .l1_stream_id_reg_offset = IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
        .l2_stream_id_reg_offset = IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
        .offset = IPU6_PSYS_IOMMUI_OFFSET,
        .insert_read_before_invalidate = false,
        .dmem_offset = IPU6_PSYS_DMEM_OFFSET,
};
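
/*
 * Buttress power and frequency control parameters for the IS and PS
 * power domains.
 */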
static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
        .ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
        .qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
        .freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
        .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
        .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
        .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
        .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};

static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
        .ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
        .qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
        .freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
        .pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
        .pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
        .pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
        .pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};
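
/*
 * Locate the server cell program referenced by the given package directory
 * entry in the CPD firmware image and program the SPC: icache base, info
 * bits for icache master transactions, start PC, and the package directory
 * address in DMEM.
 */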
static void
ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
                           const struct ipu6_hw_variants *hw_variant,
                           int pkg_dir_idx, void __iomem *base,
                           u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
{
        struct ipu6_cell_program *prog;
        void __iomem *spc_base;
        u32 server_fw_addr;
        dma_addr_t dma_addr;
        u32 pg_offset;

        server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
        if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
                dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
        else
                dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);

        pg_offset = server_fw_addr - dma_addr;
        prog = (struct ipu6_cell_program *)((uintptr_t)isp->cpd_fw->data +
                                            pg_offset);
        spc_base = base + prog->regs_addr;
        if (spc_base != (base + hw_variant->spc_offset))
                dev_warn(&isp->pdev->dev,
                         "SPC reg addr %p not matching value from CPD %p\n",
                         base + hw_variant->spc_offset, spc_base);
        writel(server_fw_addr + prog->blob_offset +
               prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
        writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
               spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
        writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
        writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
}
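
/*
 * Set up the SPC to run the firmware: invalidate the SPC icache, then
 * either point DMEM at the IMR-resident package directory (secure mode)
 * or program the SPC directly from the CPD image.
 */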
void ipu6_configure_spc(struct ipu6_device *isp,
                        const struct ipu6_hw_variants *hw_variant,
                        int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
                        dma_addr_t pkg_dir_dma_addr)
{
        void __iomem *dmem_base = base + hw_variant->dmem_offset;
        void __iomem *spc_regs_base = base + hw_variant->spc_offset;
        u32 val;

        val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
        val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
        writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);

        if (isp->secure_mode)
                writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
        else
                ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
                                           pkg_dir, pkg_dir_dma_addr);
}
EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6");
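
/* Number of CSI-2 receiver ports per IPU6 hardware variant */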
#define IPU6_ISYS_CSI2_NPORTS 4
#define IPU6SE_ISYS_CSI2_NPORTS 4
#define IPU6_TGL_ISYS_CSI2_NPORTS 8
#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6
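
/*
 * Adjust the static isys/psys platform data for the hardware generation in
 * isp->hw_ver: CSI-2 port count, SRAM layout, IRQ register layout, LTR and
 * memopen thresholds differ between IPU6, TGL, EP, MTL and SE variants.
 */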
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
        u8 hw_ver = isp->hw_ver;

        isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
        isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
        isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
        isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
        isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
        isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
        isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
        isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
        isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
        isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
        isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
        isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
        isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
        isys_ipdata.csi2.ctrl0_irq_clear =
                IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
        isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
        isys_ipdata.csi2.ctrl0_irq_enable =
                IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
        isys_ipdata.csi2.ctrl0_irq_status =
                IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
        isys_ipdata.csi2.ctrl0_irq_lnp =
                IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
        isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
        psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
        isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;

        if (is_ipu6ep(hw_ver)) {
                isys_ipdata.ltr = IPU6EP_LTR_VALUE;
                isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
        }

        if (is_ipu6_tgl(hw_ver))
                isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;

        if (is_ipu6ep_mtl(hw_ver)) {
                isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;

                isys_ipdata.csi2.ctrl0_irq_edge =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
                isys_ipdata.csi2.ctrl0_irq_clear =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
                isys_ipdata.csi2.ctrl0_irq_mask =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
                isys_ipdata.csi2.ctrl0_irq_enable =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
                isys_ipdata.csi2.ctrl0_irq_lnp =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
                isys_ipdata.csi2.ctrl0_irq_status =
                        IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
                isys_ipdata.csi2.fw_access_port_ofs =
                        CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
                isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
                isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
        }

        if (is_ipu6se(hw_ver)) {
                isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
                isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
                isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
                isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
                isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
                isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
                isys_ipdata.sensor_type_start =
                        IPU6SE_FW_ISYS_SENSOR_TYPE_START;
                isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
                isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
                isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
                isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
                isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
                psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
        }
}
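
/*
 * Create and register the ISYS auxiliary bus device: run the IPU bridge to
 * pick up sensor connection data, allocate the ISYS platform data and
 * initialize its MMU.
 */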
static struct ipu6_bus_device *
ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
               struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
               const struct ipu6_isys_internal_pdata *ipdata)
{
        struct device *dev = &pdev->dev;
        struct ipu6_bus_device *isys_adev;
        struct ipu6_isys_pdata *pdata;
        int ret;

        ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
        if (ret) {
                dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
                return ERR_PTR(ret);
        }

        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return ERR_PTR(-ENOMEM);

        pdata->ipdata = ipdata;

        isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
                                               IPU6_ISYS_NAME);
        if (IS_ERR(isys_adev)) {
                return dev_err_cast_probe(dev, isys_adev,
                                          "ipu6_bus_initialize_device isys failed\n");
        }

        isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
                                       &ipdata->hw_variant);
        if (IS_ERR(isys_adev->mmu)) {
                put_device(&isys_adev->auxdev.dev);
                return dev_err_cast_probe(dev, isys_adev->mmu,
                                          "ipu6_mmu_init(isys_adev->mmu) failed\n");
        }

        isys_adev->mmu->dev = &isys_adev->auxdev.dev;

        ret = ipu6_bus_add_device(isys_adev);
        if (ret)
                return ERR_PTR(ret);

        return isys_adev;
}
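
/*
 * Create and register the PSYS auxiliary bus device (probed as a child of
 * the ISYS device) and initialize its MMU.
 */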
static struct ipu6_bus_device *
ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
               struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
               const struct ipu6_psys_internal_pdata *ipdata)
{
        struct ipu6_bus_device *psys_adev;
        struct ipu6_psys_pdata *pdata;
        int ret;

        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return ERR_PTR(-ENOMEM);

        pdata->ipdata = ipdata;

        psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
                                               IPU6_PSYS_NAME);
        if (IS_ERR(psys_adev)) {
                return dev_err_cast_probe(&pdev->dev, psys_adev,
                                          "ipu6_bus_initialize_device psys failed\n");
        }

        psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
                                       &ipdata->hw_variant);
        if (IS_ERR(psys_adev->mmu)) {
                put_device(&psys_adev->auxdev.dev);
                return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
                                          "ipu6_mmu_init(psys_adev->mmu) failed\n");
        }

        psys_adev->mmu->dev = &psys_adev->auxdev.dev;

        ret = ipu6_bus_add_device(psys_adev);
        if (ret)
                return ERR_PTR(ret);

        return psys_adev;
}
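
/*
 * PCI configuration quirks: ATS is disabled on MTL ES2 steppings and MSI
 * vectors are only requested on variants that expose an MSI capability.
 */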
static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
        int ret;

        /* disable IPU6 PCI ATS on mtl ES2 */
        if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
            pci_ats_supported(dev))
                pci_disable_ats(dev);

        /* No PCI msi capability for IPU6EP */
        if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
                /* likely do nothing as msi not enabled by default */
                pci_disable_msi(dev);
                return 0;
        }

        ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
        if (ret < 0)
                return dev_err_probe(&dev->dev, ret, "Request msi failed");

        return 0;
}
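
/*
 * Select stall or no-stall arbitration for the two buttress virtual
 * channels according to the IPU6_BTRS_ARB_STALL_MODE_VC0/VC1 settings.
 */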
static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
{
        u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);

        if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
                val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
        else
                val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;

        if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
                val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
        else
                val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;

        writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
}
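
/*
 * Main probe: map BAR0, pick the firmware and hardware version from the
 * PCI device ID, initialize the buttress, create the isys/psys auxiliary
 * devices, load and map the CPD firmware, set up the package directory and
 * buttress IRQ, then authenticate the firmware.
 */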
static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
        struct device *dev = &pdev->dev;
        void __iomem *isys_base = NULL;
        void __iomem *psys_base = NULL;
        struct ipu6_device *isp;
        phys_addr_t phys;
        u32 val, version, sku_id;
        int ret;

        isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
        if (!isp)
                return -ENOMEM;

        isp->pdev = pdev;
        INIT_LIST_HEAD(&isp->devices);

        ret = pcim_enable_device(pdev);
        if (ret)
                return dev_err_probe(dev, ret, "Enable PCI device failed\n");

        phys = pci_resource_start(pdev, IPU6_PCI_BAR);
        dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);

        ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
        if (ret)
                return dev_err_probe(dev, ret, "Failed to remap I/O memory\n");

        isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
        pci_set_drvdata(pdev, isp);
        pci_set_master(pdev);

        isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
        switch (id->device) {
        case PCI_DEVICE_ID_INTEL_IPU6:
                isp->hw_ver = IPU6_VER_6;
                isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
                break;
        case PCI_DEVICE_ID_INTEL_IPU6SE:
                isp->hw_ver = IPU6_VER_6SE;
                isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
                isp->cpd_metadata_cmpnt_size =
                        sizeof(struct ipu6se_cpd_metadata_cmpnt);
                break;
        case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
        case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
                isp->hw_ver = IPU6_VER_6EP;
                isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
                break;
        case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
                isp->hw_ver = IPU6_VER_6EP;
                isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
                break;
        case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
                isp->hw_ver = IPU6_VER_6EP_MTL;
                isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
                break;
        default:
                return dev_err_probe(dev, -ENODEV,
                                     "Unsupported IPU6 device %x\n",
                                     id->device);
        }

        ipu6_internal_pdata_init(isp);

        isys_base = isp->base + isys_ipdata.hw_variant.offset;
        psys_base = isp->base + psys_ipdata.hw_variant.offset;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
        if (ret)
                return dev_err_probe(dev, ret, "Failed to set DMA mask\n");

        dma_set_max_seg_size(dev, UINT_MAX);

        ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
        if (ret)
                return ret;

        ret = ipu6_buttress_init(isp);
        if (ret)
                return ret;

        ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
        if (ret)
                dev_err_probe(&isp->pdev->dev, ret,
                              "Requesting signed firmware %s failed\n",
                              isp->cpd_fw_name);

        ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
                                         isp->cpd_fw->size);
        if (ret) {
                dev_err_probe(&isp->pdev->dev, ret,
                              "Failed to validate cpd\n");
                goto out_ipu6_bus_del_devices;
        }

        isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
                                 sizeof(isys_buttress_ctrl), GFP_KERNEL);
        if (!isys_ctrl) {
                ret = -ENOMEM;
                goto out_ipu6_bus_del_devices;
        }

        isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
                                   &isys_ipdata);
        if (IS_ERR(isp->isys)) {
                ret = PTR_ERR(isp->isys);
                goto out_ipu6_bus_del_devices;
        }

        psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
                                 sizeof(psys_buttress_ctrl), GFP_KERNEL);
        if (!psys_ctrl) {
                ret = -ENOMEM;
                goto out_ipu6_bus_del_devices;
        }

        isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
                                   psys_base, &psys_ipdata);
        if (IS_ERR(isp->psys)) {
                ret = PTR_ERR(isp->psys);
                goto out_ipu6_bus_del_devices;
        }

        ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
        if (ret < 0)
                goto out_ipu6_bus_del_devices;

        ret = ipu6_mmu_hw_init(isp->psys->mmu);
        if (ret) {
                dev_err_probe(&isp->pdev->dev, ret,
                              "Failed to set MMU hardware\n");
                goto out_ipu6_bus_del_devices;
        }

        ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
                                         &isp->psys->fw_sgt);
        if (ret) {
                dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
                goto out_ipu6_bus_del_devices;
        }

        ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
        if (ret) {
                dev_err_probe(&isp->pdev->dev, ret,
                              "failed to create pkg dir\n");
                goto out_ipu6_bus_del_devices;
        }

        ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
                                        ipu6_buttress_isr_threaded,
                                        IRQF_SHARED, IPU6_NAME, isp);
        if (ret) {
                dev_err_probe(dev, ret, "Requesting irq failed\n");
                goto out_ipu6_bus_del_devices;
        }

        ret = ipu6_buttress_authenticate(isp);
        if (ret) {
                dev_err_probe(&isp->pdev->dev, ret,
                              "FW authentication failed\n");
                goto out_free_irq;
        }

        ipu6_mmu_hw_cleanup(isp->psys->mmu);
        pm_runtime_put(&isp->psys->auxdev.dev);

        /* Configure the arbitration mechanisms for VC requests */
        ipu6_configure_vc_mechanism(isp);

        val = readl(isp->base + BUTTRESS_REG_SKU);
        sku_id = FIELD_GET(GENMASK(6, 4), val);
        version = FIELD_GET(GENMASK(3, 0), val);
        dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
                 pdev->device, isp->hw_ver);

        pm_runtime_put_noidle(dev);
        pm_runtime_allow(dev);

        isp->bus_ready_to_probe = true;

        return 0;

out_free_irq:
        devm_free_irq(dev, pdev->irq, isp);
out_ipu6_bus_del_devices:
        ipu6_cpd_free_pkg_dir(isp->psys);
        ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
        if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
                ipu6_mmu_cleanup(isp->psys->mmu);
        if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
                ipu6_mmu_cleanup(isp->isys->mmu);
        ipu6_bus_del_devices(pdev);
        release_firmware(isp->cpd_fw);
        ipu6_buttress_exit(isp);

        return ret;
}
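
/*
 * Undo ipu6_pci_probe(): free the buttress IRQ, the package directory and
 * the firmware mapping, remove the auxiliary bus devices and clean up both
 * MMUs.
 */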
static void ipu6_pci_remove(struct pci_dev *pdev)
{
        struct ipu6_device *isp = pci_get_drvdata(pdev);
        struct ipu6_mmu *isys_mmu = isp->isys->mmu;
        struct ipu6_mmu *psys_mmu = isp->psys->mmu;

        devm_free_irq(&pdev->dev, pdev->irq, isp);
        ipu6_cpd_free_pkg_dir(isp->psys);

        ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
        ipu6_buttress_exit(isp);

        ipu6_bus_del_devices(pdev);

        pm_runtime_forbid(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);

        release_firmware(isp->cpd_fw);

        ipu6_mmu_cleanup(psys_mmu);
        ipu6_mmu_cleanup(isys_mmu);
}
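
/*
 * PCI function-level reset hooks: block runtime PM while the reset is in
 * progress and restore buttress state (resetting authentication in secure
 * mode) once it completes.
 */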
static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
{
        struct ipu6_device *isp = pci_get_drvdata(pdev);

        pm_runtime_forbid(&isp->pdev->dev);
}

static void ipu6_pci_reset_done(struct pci_dev *pdev)
{
        struct ipu6_device *isp = pci_get_drvdata(pdev);

        ipu6_buttress_restore(isp);
        if (isp->secure_mode)
                ipu6_buttress_reset_authentication(isp);

        isp->need_ipc_reset = true;
        pm_runtime_allow(&isp->pdev->dev);
}

/*
 * PCI base driver code requires driver to provide these to enable
 * PCI device level PM state transitions (D0<->D3)
 */
static int ipu6_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        synchronize_irq(pdev->irq);
        return 0;
}

static int ipu6_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct ipu6_device *isp = pci_get_drvdata(pdev);
        struct ipu6_buttress *b = &isp->buttress;
        int ret;

        /* Configure the arbitration mechanisms for VC requests */
        ipu6_configure_vc_mechanism(isp);

        isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
        dev_info(dev, "IPU6 in %s mode\n",
                 isp->secure_mode ? "secure" : "non-secure");

        ipu6_buttress_restore(isp);

        ret = ipu6_buttress_ipc_reset(isp, &b->cse);
        if (ret)
                dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");

        ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
        if (ret < 0) {
                dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
                return 0;
        }

        ret = ipu6_buttress_authenticate(isp);
        if (ret)
                dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);

        pm_runtime_put(&isp->psys->auxdev.dev);

        return 0;
}
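
/*
 * Runtime resume: restore buttress state and redo the CSE IPC reset
 * handshake if a PCI reset happened while the device was suspended.
 */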
static int ipu6_runtime_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct ipu6_device *isp = pci_get_drvdata(pdev);
        int ret;

        ipu6_configure_vc_mechanism(isp);
        ipu6_buttress_restore(isp);

        if (isp->need_ipc_reset) {
                struct ipu6_buttress *b = &isp->buttress;

                isp->need_ipc_reset = false;
                ret = ipu6_buttress_ipc_reset(isp, &b->cse);
                if (ret)
                        dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
        }

        return 0;
}

static const struct dev_pm_ops ipu6_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
        RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
};

MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);

static const struct pci_error_handlers pci_err_handlers = {
        .reset_prepare = ipu6_pci_reset_prepare,
        .reset_done = ipu6_pci_reset_done,
};

static struct pci_driver ipu6_pci_driver = {
        .id_table = ipu6_pci_tbl,
        .probe = ipu6_pci_probe,
        .remove = ipu6_pci_remove,
        .driver = {
                .pm = pm_ptr(&ipu6_pm_ops),
        },
        .err_handler = &pci_err_handlers,
};

module_pci_driver(ipu6_pci_driver);

MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel IPU6 PCI driver");