// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Interface Linux driver
 */

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/cleanup.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mei.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

#include <asm-generic/bug.h>
#include <linux/unaligned.h>

#include "mei_dev.h"
#include "vsc-tp.h"

#define MEI_VSC_DRV_NAME		"intel_vsc"

#define MEI_VSC_MAX_MSG_SIZE		512

#define MEI_VSC_POLL_DELAY_US		(100 * USEC_PER_MSEC)
#define MEI_VSC_POLL_TIMEOUT_US		(400 * USEC_PER_MSEC)

#define mei_dev_to_vsc_hw(dev)		((struct mei_vsc_hw *)((dev)->hw))

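/*
 * The transport moves whole MEI messages: a single transfer carries at most
 * MEI_VSC_MAX_MSG_SIZE bytes of payload plus one struct mei_msg_hdr, and
 * firmware readiness is polled in MEI_VSC_POLL_DELAY_US steps up to
 * MEI_VSC_POLL_TIMEOUT_US.
 */
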
struct mei_vsc_host_timestamp {
	u64 realtime;
	u64 boottime;
};

struct mei_vsc_hw {
	struct vsc_tp *tp;

	bool fw_ready;
	bool host_ready;

	atomic_t write_lock_cnt;

	u32 rx_len;
	u32 rx_hdr;

	/* buffer for tx */
	char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
	/* buffer for rx */
	char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
};

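/*
 * Transport helpers: a read hands the firmware the current host realtime and
 * boottime timestamps along with the read command; a write sends the raw
 * message and receives a status byte back from the transport.
 */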
static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf,
			       u32 max_len)
{
	struct mei_vsc_host_timestamp ts = {
		.realtime = ktime_to_ns(ktime_get_real()),
		.boottime = ktime_to_ns(ktime_get_boottime()),
	};

	return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts),
			   buf, max_len);
}

static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len)
{
	u8 status;

	return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status,
			   sizeof(status));
}

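/*
 * The VSC exposes no firmware status registers and no MEI power gating;
 * the ops below are minimal stubs that keep the MEI core state machine
 * satisfied.
 */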
static int mei_vsc_fw_status(struct mei_device *mei_dev,
			     struct mei_fw_status *fw_status)
{
	if (!fw_status)
		return -EINVAL;

	fw_status->count = 0;

	return 0;
}

static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev)
{
	return MEI_PG_OFF;
}

static void mei_vsc_intr_enable(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	vsc_tp_intr_enable(hw->tp);
}

static void mei_vsc_intr_disable(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	vsc_tp_intr_disable(hw->tp);
}

/* mei framework requires this op */
static void mei_vsc_intr_clear(struct mei_device *mei_dev)
{
}

/* wait for pending irq handler */
static void mei_vsc_synchronize_irq(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	vsc_tp_intr_synchronize(hw->tp);
}

static int mei_vsc_hw_config(struct mei_device *mei_dev)
{
	return 0;
}

static bool mei_vsc_host_is_ready(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	return hw->host_ready;
}

static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	return hw->fw_ready;
}

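/*
 * Starting the hardware marks the host as ready, enables transport
 * interrupts and then polls the transport until a read succeeds, which is
 * taken as the firmware-ready indication.
 */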
static int mei_vsc_hw_start(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	int ret, rlen;
	u8 buf;

	hw->host_ready = true;

	vsc_tp_intr_enable(hw->tp);

	ret = read_poll_timeout(mei_vsc_read_helper, rlen,
				rlen >= 0, MEI_VSC_POLL_DELAY_US,
				MEI_VSC_POLL_TIMEOUT_US, true,
				hw, &buf, sizeof(buf));
	if (ret) {
		dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
		return ret;
	}

	hw->fw_ready = true;

	return 0;
}

static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	return atomic_read(&hw->write_lock_cnt) == 0;
}

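/*
 * The transport always has room for one complete message, so the host
 * buffer depth and free-slot count are the constant
 * MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE.
 */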
static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}

static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}

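/*
 * write_lock_cnt is non-zero while a transfer is in flight;
 * mei_vsc_hbuf_is_ready() reports the host buffer as busy until it drops
 * back to zero.
 */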
static int mei_vsc_write(struct mei_device *mei_dev,
			 const void *hdr, size_t hdr_len,
			 const void *data, size_t data_len)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	char *buf = hw->tx_buf;
	int ret;

	if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4)))
		return -EINVAL;

	if (!data || data_len > MEI_VSC_MAX_MSG_SIZE)
		return -EINVAL;

	atomic_inc(&hw->write_lock_cnt);

	memcpy(buf, hdr, hdr_len);
	memcpy(buf + hdr_len, data, data_len);

	ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len);

	atomic_dec_if_positive(&hw->write_lock_cnt);

	return ret < 0 ? ret : 0;
}

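/*
 * A read pulls one complete message from the transport into rx_buf and
 * caches the little-endian MEI header; mei_vsc_read_slots() later checks
 * the requested length against the cached header and total receive length.
 */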
static inline u32 mei_vsc_read(const struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	int ret;

	ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf));
	if (ret < 0 || ret < sizeof(u32))
		return 0;
	hw->rx_len = ret;

	hw->rx_hdr = get_unaligned_le32(hw->rx_buf);

	return hw->rx_hdr;
}

static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}

static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf,
			      unsigned long len)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	struct mei_msg_hdr *hdr;

	hdr = (struct mei_msg_hdr *)&hw->rx_hdr;
	if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len)
		return -EINVAL;

	memcpy(buf, hw->rx_buf + sizeof(*hdr), len);

	return 0;
}

static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev)
{
	return mei_dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev)
{
	return false;
}

static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	vsc_tp_reset(hw->tp);

	return vsc_tp_init(hw->tp, mei_dev->dev);
}

static const struct mei_hw_ops mei_vsc_hw_ops = {
	.fw_status = mei_vsc_fw_status,
	.pg_state = mei_vsc_pg_state,

	.host_is_ready = mei_vsc_host_is_ready,
	.hw_is_ready = mei_vsc_hw_is_ready,
	.hw_reset = mei_vsc_hw_reset,
	.hw_config = mei_vsc_hw_config,
	.hw_start = mei_vsc_hw_start,

	.pg_in_transition = mei_vsc_pg_in_transition,
	.pg_is_enabled = mei_vsc_pg_is_enabled,

	.intr_clear = mei_vsc_intr_clear,
	.intr_enable = mei_vsc_intr_enable,
	.intr_disable = mei_vsc_intr_disable,
	.synchronize_irq = mei_vsc_synchronize_irq,

	.hbuf_free_slots = mei_vsc_hbuf_empty_slots,
	.hbuf_is_ready = mei_vsc_hbuf_is_ready,
	.hbuf_depth = mei_vsc_hbuf_depth,
	.write = mei_vsc_write,

	.rdbuf_full_slots = mei_vsc_count_full_read_slots,
	.read_hdr = mei_vsc_read,
	.read = mei_vsc_read_slots,
};

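/*
 * Transport event callback: under device_lock, drain the transport while it
 * has data pending, then dispatch queued writes and their completions.
 */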
static void mei_vsc_event_cb(void *context)
{
	struct mei_device *mei_dev = context;
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	struct list_head cmpl_list;
	s32 slots;
	int ret;

	if (mei_dev->dev_state == MEI_DEV_RESETTING ||
	    mei_dev->dev_state == MEI_DEV_INITIALIZING)
		return;

	INIT_LIST_HEAD(&cmpl_list);

	guard(mutex)(&mei_dev->device_lock);

	while (vsc_tp_need_read(hw->tp)) {
		/* check slots available for reading */
		slots = mei_count_full_read_slots(mei_dev);

		ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots);
		if (ret) {
			if (ret != -ENODATA) {
				if (mei_dev->dev_state != MEI_DEV_RESETTING &&
				    mei_dev->dev_state != MEI_DEV_POWER_DOWN)
					schedule_work(&mei_dev->reset_work);
			}

			return;
		}
	}

	mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
	ret = mei_irq_write_handler(mei_dev, &cmpl_list);
	if (ret)
		dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);

	mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
	mei_irq_compl_handler(mei_dev, &cmpl_list);
}

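/*
 * Probe: the vsc-tp transport driver passes its handle through platform
 * data; allocate the mei_device together with the hardware context, wire up
 * the event callback and start the MEI state machine.
 */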
static int mei_vsc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mei_device *mei_dev;
	struct mei_vsc_hw *hw;
	struct vsc_tp *tp;
	int ret;

	tp = *(struct vsc_tp **)dev_get_platdata(dev);
	if (!tp)
		return dev_err_probe(dev, -ENODEV, "no platform data\n");

	mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
			       GFP_KERNEL);
	if (!mei_dev)
		return -ENOMEM;

	mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
	mei_dev->fw_f_fw_ver_supported = 0;
	mei_dev->kind = "ivsc";

	hw = mei_dev_to_vsc_hw(mei_dev);
	atomic_set(&hw->write_lock_cnt, 0);
	hw->tp = tp;

	platform_set_drvdata(pdev, mei_dev);

	vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);

	ret = mei_start(mei_dev);
	if (ret) {
		dev_err_probe(dev, ret, "init hw failed\n");
		goto err_cancel;
	}

	ret = mei_register(mei_dev, dev);
	if (ret)
		goto err_stop;

	pm_runtime_enable(mei_dev->dev);

	return 0;

err_stop:
	mei_stop(mei_dev);

err_cancel:
	mei_cancel_work(mei_dev);

	mei_disable_interrupts(mei_dev);

	return ret;
}

static void mei_vsc_remove(struct platform_device *pdev)
{
	struct mei_device *mei_dev = platform_get_drvdata(pdev);

	pm_runtime_disable(mei_dev->dev);

	mei_stop(mei_dev);

	mei_disable_interrupts(mei_dev);

	mei_deregister(mei_dev);
}

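/*
 * System sleep: suspend is refused while a MEI write is still in flight;
 * resume only needs the device handle back from drvdata.
 */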
static int mei_vsc_suspend(struct device *dev)
{
	struct mei_device *mei_dev;
	int ret = 0;

	mei_dev = dev_get_drvdata(dev);
	if (!mei_dev)
		return -ENODEV;

	mutex_lock(&mei_dev->device_lock);

	if (!mei_write_is_idle(mei_dev))
		ret = -EAGAIN;

	mutex_unlock(&mei_dev->device_lock);

	return ret;
}

static int mei_vsc_resume(struct device *dev)
{
	struct mei_device *mei_dev;

	mei_dev = dev_get_drvdata(dev);
	if (!mei_dev)
		return -ENODEV;

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);

static const struct platform_device_id mei_vsc_id_table[] = {
	{ MEI_VSC_DRV_NAME },
	{}
};
MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);

static struct platform_driver mei_vsc_drv = {
	.probe = mei_vsc_probe,
	.remove = mei_vsc_remove,
	.id_table = mei_vsc_id_table,
	.driver = {
		.name = MEI_VSC_DRV_NAME,
		.pm = &mei_vsc_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(mei_vsc_drv);

448 MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
449 MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
450 MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface");
451 MODULE_LICENSE("GPL");
452 MODULE_IMPORT_NS("VSC_TP");