// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @cmpl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing call back.\n");
		mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return  mei_cl_host_addr(cl) == mei_hdr->host_addr &&
		mei_cl_me_id(cl) == mei_hdr->me_addr;
}
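
/*
 * Client messages may arrive fragmented: mei_cl_irq_read_msg() appends each
 * fragment at cb->buf_idx, and only a header with msg_complete set moves the
 * callback to the completion list. When the DMA ring is in use, the payload
 * size travels in hdr->extension[0] and the data is pulled from the ring
 * instead of the read message registers. Anything that cannot be delivered
 * is dropped through mei_irq_discard_msg() below.
 */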
/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	if (hdr->dma_ring)
		mei_dma_ring_read(dev, NULL, hdr->extension[0]);
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}
/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	size_t buf_sz;
	u32 length;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	length = mei_hdr->dma_ring ? mei_hdr->extension[0] : mei_hdr->length;

	buf_sz = length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (mei_hdr->dma_ring)
		mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);

	/* for DMA read 0 length to generate an interrupt to the device */
	mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);

	cb->buf_idx += length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr);
	return 0;
}
/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
	list_move_tail(&cb->list, cmpl_list);

	return ret;
}
/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	if (!list_empty(&cl->rd_pending))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &cl->rd_pending);

	return 0;
}
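
/*
 * Both helpers above first check that the host buffer has room for the HBM
 * message (mei_hbm2slots() vs. mei_hbuf_empty_slots()) and bail out with
 * -EMSGSIZE otherwise; the callback then stays on ctrl_wr_list and the
 * request is retried on a later pass of the write handler.
 */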
static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}

static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}

static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr;

	mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
		return -EBADMSG;

	return 0;
}
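
/*
 * The read bottom half below handles one message per call: it latches the
 * header, validates it, routes HBM traffic (host/me address 0/0) to
 * mei_hbm_dispatch() and everything else to the matching client, then clears
 * the cached header and refreshes the slot count.
 */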
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr[0]) {
		dev->rd_msg_hdr[0] = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr[0]);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr[0]);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	if (mei_hdr->dma_ring) {
		dev->rd_msg_hdr[1] = mei_read_hdr(dev);
		(*slots)--;
		mei_hdr->length = 0;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for not connected fixed address clients
		 * should be silently discarded
		 * On power down client may be force cleaned,
		 * silently discard such messages
		 */
		if (hdr_is_fixed(mei_hdr) ||
		    dev->dev_state == MEI_DEV_POWER_DOWN) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr[0]);
		ret = -EBADMSG;
		goto end;
	}

	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);

reset_slots:
	/* reset the number of slots and header */
	memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
	*slots = mei_count_full_read_slots(dev);
	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
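
/*
 * The write bottom half below drains three queues in order: callbacks waiting
 * for a write completion, control requests from ctrl_wr_list (dispatched by
 * fop_type: connect, disconnect, flow control, notify), and finally the data
 * writes on write_list. It returns early when a helper fails, typically
 * because the host buffer cannot hold the next message, leaving the remaining
 * callbacks queued for the next pass.
 */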
/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if (slots == 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}
	}

	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
/**
 * mei_connect_timeout - connect/disconnect timeouts
 *
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_CONNECTING) {
		if (dev->hbm_f_dot_supported) {
			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
			wake_up(&cl->wait);
			return;
		}
	}
	mei_reset(dev);
}
#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm stall_timer work
 *
 * Schedule stall timer
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
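
/*
 * mei_timer() below runs from timer_work: it counts down the HBM init
 * handshake timer and the per-client connect/disconnect timers, resetting the
 * device (or marking the client for disconnect) when one expires, and re-arms
 * itself via mei_schedule_stall_timer() only while a timeout is still pending.
 */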
/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					      struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

out:
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}