drivers/misc/mei/interrupt.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @cmpl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing call back.\n");
		mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
	       mei_cl_me_id(cl) == mei_hdr->me_addr;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	size_t buf_sz;

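	/*
	 * Regular clients must already have a read callback queued;
	 * fixed address clients may receive unsolicited messages, so a
	 * callback is allocated for them on the fly below.
	 */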
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	buf_sz = mei_hdr->length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr);
	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
	list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

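	/*
	 * MEI reads are credit based: the firmware may send data to a
	 * client only after the host has granted it a credit with a
	 * flow control message. The request itself has to fit into the
	 * free host buffer slots checked below.
	 */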
	if (!list_empty(&cl->rd_pending))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &cl->rd_pending);

	return 0;
}
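
/**
 * hdr_is_hbm - check whether the header addresses the host bus
 *	message (HBM) protocol, i.e. both host and ME address are zero
 *
 * @mei_hdr: mei message header
 *
 * Return: true if the message is a host bus message
 */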
static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}
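
/**
 * hdr_is_fixed - check whether the header addresses a fixed address
 *	client, i.e. the host address is zero and the ME address is set
 *
 * @mei_hdr: mei message header
 *
 * Return: true if the message targets a fixed address client
 */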
static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}
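
/**
 * hdr_is_valid - run a basic sanity check on a raw message header
 *
 * @msg_hdr: the header dword as read from the hardware
 *
 * Return: 0 if the header is non-zero and has no reserved bits set,
 *	-EBADMSG otherwise
 */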
static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr;

	mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing
 *
 * @dev: the device structure
 * @cmpl_list: list of completed cbs
 * @slots: slots to read
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

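	/*
	 * The header dword is cached in dev->rd_msg_hdr: if the payload
	 * is not yet fully available (-ENODATA below), the cached header
	 * is reused on the next interrupt instead of being read again.
	 */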
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for not connected fixed address clients
		 * should be silently discarded.
		 * On power down the client may be force cleaned,
		 * silently discard such messages as well.
		 */
		if (hdr_is_fixed(mei_hdr) ||
		    dev->dev_state == MEI_DEV_POWER_DOWN) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

/**
 * mei_irq_write_handler - dispatch write requests after irq received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed cbs
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

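	/* nothing can be written until the host buffer is acquired */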
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if (slots == 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}
	}

	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);

/**
 * mei_connect_timeout - connect/disconnect timeouts
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

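	/*
	 * On a connect timeout, firmware that supports disconnect on
	 * timeout (dot) only requires the stalled client to disconnect;
	 * otherwise the whole device has to be reset.
	 */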
	if (cl->state == MEI_FILE_CONNECTING) {
		if (dev->hbm_f_dot_supported) {
			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
			wake_up(&cl->wait);
			return;
		}
	}
	mei_reset(dev);
}

#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm the stall timer work
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					      struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

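	/*
	 * The counters below are decremented once per timer pass, i.e.
	 * roughly every MEI_STALL_TIMER_FREQ jiffies as long as the
	 * timer work keeps being rescheduled.
	 */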
	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

out:
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}