/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
        int i, res = -ENOENT;

        for (i = 0; i < dev->me_clients_num; ++i)
                if (uuid_le_cmp(*uuid,
                                dev->me_clients[i].props.protocol_name) == 0) {
                        res = i;
                        break;
                }

        return res;
}

/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
        int i;

        for (i = 0; i < dev->me_clients_num; i++)
                if (dev->me_clients[i].client_id == client_id)
                        break;

        /* check bounds before touching the slot the loop stopped on */
        if (i == dev->me_clients_num)
                return -ENOENT;

        if (WARN_ON(dev->me_clients[i].client_id != client_id))
                return -ENOENT;

        return i;
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true  - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
                                 const struct mei_cl *cl2)
{
        return cl1 && cl2 &&
                (cl1->host_client_id == cl2->host_client_id) &&
                (cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes and optionally frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl:   host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
                                struct mei_cl *cl, bool free)
{
        struct mei_cl_cb *cb;
        struct mei_cl_cb *next;

        /* enable removing everything if no cl is specified */
        list_for_each_entry_safe(cb, next, &list->list, list) {
                if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
                        list_del(&cb->list);
                        if (free)
                                mei_io_cb_free(cb);
                }
        }
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
        if (cb == NULL)
                return;

        kfree(cb->request_buffer.data);
        kfree(cb->response_buffer.data);
        kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        mei_io_list_init(cb);

        cb->file_object = fp;
        cb->cl = cl;
        cb->buf_idx = 0;
        return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->request_buffer.data)
                return -ENOMEM;
        cb->request_buffer.size = length;
        return 0;
}

/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->response_buffer.data)
                return -ENOMEM;
        cb->response_buffer.size = length;
        return 0;
}

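/*
 * Illustrative sketch of the io callback lifecycle built from the helpers
 * above. The variable names here are hypothetical; the actual callers are
 * the read/write paths further down in this file:
 *
 *      struct mei_cl_cb *cb = mei_io_cb_init(cl, fp);
 *      if (!cb)
 *              return -ENOMEM;
 *      if (mei_io_cb_alloc_resp_buf(cb, length)) {
 *              mei_io_cb_free(cb);     (frees both buffers and the cb itself)
 *              return -ENOMEM;
 *      }
 *      cb->fop_type = MEI_FOP_READ;
 *      list_add_tail(&cb->list, &dev->read_list.list);
 */
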
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
        mei_io_list_flush(&cl->dev->read_list, cl);
        mei_io_list_free(&cl->dev->write_list, cl);
        mei_io_list_free(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
        return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
        memset(cl, 0, sizeof(struct mei_cl));
        init_waitqueue_head(&cl->wait);
        init_waitqueue_head(&cl->rx_wait);
        init_waitqueue_head(&cl->tx_wait);
        INIT_LIST_HEAD(&cl->link);
        INIT_LIST_HEAD(&cl->device_link);
        cl->reading_state = MEI_IDLE;
        cl->writing_state = MEI_IDLE;
        cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 *
 * returns the allocated client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
        struct mei_cl *cl;

        cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        mei_cl_init(cl, dev);

        return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb = NULL;
        struct mei_cl_cb *next = NULL;

        list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
                if (mei_cl_cmp_id(cl, cb->cl))
                        return cb;
        return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generating one
 *
 * returns 0 on success
 *      -EINVAL on incorrect values
 *      -ENOENT if client not found
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
        struct mei_device *dev;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        /* If id is not assigned, get one */
        if (id == MEI_HOST_CLIENT_ID_ANY)
                id = find_first_zero_bit(dev->host_clients_map,
                                         MEI_CLIENTS_MAX);

        if (id >= MEI_CLIENTS_MAX) {
                dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
                return -ENOENT;
        }

        dev->open_handle_count++;

        cl->host_client_id = id;
        list_add_tail(&cl->link, &dev->file_list);

        set_bit(id, dev->host_clients_map);

        cl->state = MEI_FILE_INITIALIZING;

        dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
        return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 */
int mei_cl_unlink(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl *pos, *next;

        /* don't shout on error exit path */
        if (!cl)
                return 0;

        /* wd and amthif might not be initialized */
        if (!cl->dev)
                return 0;

        dev = cl->dev;

        list_for_each_entry_safe(pos, next, &dev->file_list, link) {
                if (cl->host_client_id == pos->host_client_id) {
                        dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
                                pos->host_client_id, pos->me_client_id);
                        list_del_init(&pos->link);
                        break;
                }
        }
        return 0;
}

void mei_host_client_init(struct work_struct *work)
{
        struct mei_device *dev = container_of(work,
                                              struct mei_device, init_work);
        struct mei_client_properties *client_props;
        int i;

        mutex_lock(&dev->device_lock);

        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;

        /*
         * Reserving the first three client IDs
         * 0: Reserved for MEI Bus Message communications
         * 1: Reserved for Watchdog
         * 2: Reserved for AMTHI
         */
        bitmap_set(dev->host_clients_map, 0, 3);

        for (i = 0; i < dev->me_clients_num; i++) {
                client_props = &dev->me_clients[i].props;

                if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
                        mei_amthif_host_init(dev);
                else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
                        mei_wd_host_init(dev);
                else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
                        mei_nfc_host_init(dev);
        }

        dev->dev_state = MEI_DEV_ENABLED;

        mutex_unlock(&dev->device_lock);
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets, err;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (cl->state != MEI_FILE_DISCONNECTING)
                return 0;

        cb = mei_io_cb_init(cl, NULL);
        if (!cb)
                return -ENOMEM;

        cb->fop_type = MEI_FOP_CLOSE;
        if (dev->hbuf_is_ready) {
                dev->hbuf_is_ready = false;
                if (mei_hbm_cl_disconnect_req(dev, cl)) {
                        rets = -ENODEV;
                        dev_err(&dev->pdev->dev, "failed to disconnect.\n");
                        goto free;
                }
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                mdelay(10); /* Wait for hardware disconnection ready */
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }
        mutex_unlock(&dev->device_lock);

        err = wait_event_timeout(dev->wait_recvd_msg,
                        MEI_FILE_DISCONNECTED == cl->state,
                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

        mutex_lock(&dev->device_lock);
        if (MEI_FILE_DISCONNECTED == cl->state) {
                rets = 0;
                dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
        } else {
                rets = -ENODEV;
                if (MEI_FILE_DISCONNECTED != cl->state)
                        dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");

                if (err)
                        dev_dbg(&dev->pdev->dev,
                                "wait failed disconnect err=%08x\n", err);

                dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
        }

        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
        mei_io_cb_free(cb);
        return rets;
}

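/*
 * Descriptive note (derived from mei_cl_disconnect() above and
 * mei_cl_connect() below): control requests follow a two-list handshake.
 * If the host buffer is ready the request is sent immediately and the cb
 * waits on dev->ctrl_rd_list for the firmware response; otherwise the cb is
 * parked on dev->ctrl_wr_list and the interrupt thread sends it later.
 * Both lists are flushed for the client when the wait times out.
 */
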
/**
 * mei_cl_is_other_connecting - checks if another
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl *pos;
        struct mei_cl *next;

        if (WARN_ON(!cl || !cl->dev))
                return false;

        dev = cl->dev;

        list_for_each_entry_safe(pos, next, &dev->file_list, link) {
                if ((pos->state == MEI_FILE_CONNECTING) &&
                    (pos != cl) && cl->me_client_id == pos->me_client_id)
                        return true;
        }

        return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        cb = mei_io_cb_init(cl, file);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        cb->fop_type = MEI_FOP_IOCTL;

        if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
                dev->hbuf_is_ready = false;

                if (mei_hbm_cl_connect_req(dev, cl)) {
                        rets = -ENODEV;
                        goto out;
                }
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }

        mutex_unlock(&dev->device_lock);
        rets = wait_event_timeout(dev->wait_recvd_msg,
                                  (cl->state == MEI_FILE_CONNECTED ||
                                   cl->state == MEI_FILE_DISCONNECTED),
                                  mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);

        if (cl->state != MEI_FILE_CONNECTED) {
                rets = -EFAULT;

                mei_io_list_flush(&dev->ctrl_rd_list, cl);
                mei_io_list_flush(&dev->ctrl_wr_list, cl);
                goto out;
        }

        rets = cl->status;

out:
        mei_io_cb_free(cb);
        return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise.
 *      -ENOENT if mei_cl is not present
 *      -EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
        struct mei_device *dev;
        int i;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        if (!dev->me_clients_num)
                return 0;

        if (cl->mei_flow_ctrl_creds > 0)
                return 1;

        for (i = 0; i < dev->me_clients_num; i++) {
                struct mei_me_client *me_cl = &dev->me_clients[i];
                if (me_cl->client_id == cl->me_client_id) {
                        if (me_cl->mei_flow_ctrl_creds) {
                                if (WARN_ON(me_cl->props.single_recv_buf == 0))
                                        return -EINVAL;
                                return 1;
                        } else {
                                return 0;
                        }
                }
        }
        return -ENOENT;
}

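/*
 * Note on the credit scheme (descriptive, derived from the helper above and
 * mei_cl_flow_ctrl_reduce() below): a client may place message data on the
 * host buffer only while it holds at least one flow-control credit granted
 * by the firmware, either per client or from the shared single_recv_buf
 * pool. The usual pattern in this file is:
 *
 *      rets = mei_cl_flow_ctrl_creds(cl);      (1: may send, 0: must queue)
 *      if (rets < 0)
 *              return rets;                    (no such me client / bad state)
 *      ...
 *      if (msg_complete)
 *              mei_cl_flow_ctrl_reduce(cl);    (consume one credit per message)
 */
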
/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns 0 on success
 *      -ENOENT when me client is not found
 *      -EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
        struct mei_device *dev;
        int i;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        if (!dev->me_clients_num)
                return -ENOENT;

        for (i = 0; i < dev->me_clients_num; i++) {
                struct mei_me_client *me_cl = &dev->me_clients[i];
                if (me_cl->client_id == cl->me_client_id) {
                        if (me_cl->props.single_recv_buf != 0) {
                                if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
                                        return -EINVAL;
                                dev->me_clients[i].mei_flow_ctrl_creds--;
                        } else {
                                if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
                                        return -EINVAL;
                                cl->mei_flow_ctrl_creds--;
                        }
                        return 0;
                }
        }
        return -ENOENT;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read length, at least the client max message is used
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;
        int i;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        if (cl->read_cb) {
                dev_dbg(&dev->pdev->dev, "read is pending.\n");
                return -EBUSY;
        }
        i = mei_me_cl_by_id(dev, cl->me_client_id);
        if (i < 0) {
                dev_err(&dev->pdev->dev, "no such me client %d\n",
                        cl->me_client_id);
                return -ENODEV;
        }

        cb = mei_io_cb_init(cl, NULL);
        if (!cb)
                return -ENOMEM;

        /* always allocate at least client max message */
        length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
        rets = mei_io_cb_alloc_resp_buf(cb, length);
        if (rets)
                goto err;

        cb->fop_type = MEI_FOP_READ;
        if (dev->hbuf_is_ready) {
                dev->hbuf_is_ready = false;
                if (mei_hbm_cl_flow_control_req(dev, cl)) {
                        rets = -ENODEV;
                        goto err;
                }
                list_add_tail(&cb->list, &dev->read_list.list);
        } else {
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }
        cl->read_cb = cb;
        return rets;
err:
        mei_io_cb_free(cb);
        return rets;
}

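/*
 * Sketch of a read consumer (assumption: this mirrors the character-device
 * read path outside this file, shown only to illustrate how the pieces fit):
 * after mei_cl_read_start() the caller drops device_lock and sleeps on
 * cl->rx_wait until the interrupt path flips cl->reading_state:
 *
 *      if (wait_event_interruptible(cl->rx_wait,
 *                      cl->reading_state == MEI_READ_COMPLETE ||
 *                      !mei_cl_is_connected(cl)))
 *              return -ERESTARTSYS;
 *      cb = cl->read_cb;
 *      (copy out cb->response_buffer.data, then mei_io_cb_free(cb))
 */
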
/**
 * mei_cl_irq_write_complete - write a message to device
 *      from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @slots: free slots.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
                              s32 *slots, struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        struct mei_msg_hdr mei_hdr;
        size_t len = cb->request_buffer.size - cb->buf_idx;
        u32 msg_slots = mei_data2slots(len);

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;

        if (*slots >= msg_slots) {
                mei_hdr.length = len;
                mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (*slots == dev->hbuf_depth) {
                msg_slots = *slots;
                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                mei_hdr.length = len;
                mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }

        dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
                cb->request_buffer.size, cb->buf_idx);
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

        *slots -= msg_slots;
        if (mei_write_message(dev, &mei_hdr,
                              cb->request_buffer.data + cb->buf_idx)) {
                cl->status = -ENODEV;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -ENODEV;
        }

        cl->status = 0;
        cl->writing_state = MEI_WRITING;
        cb->buf_idx += mei_hdr.length;

        if (mei_hdr.msg_complete) {
                if (mei_cl_flow_ctrl_reduce(cl))
                        return -ENODEV;
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
        }

        return 0;
}

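/*
 * Slot arithmetic note (descriptive, based on the function above): the host
 * buffer is accounted in 4-byte slots and mei_data2slots() converts the
 * remaining payload length to the number of slots it needs including the
 * message header. A complete message is emitted only when *slots covers
 * msg_slots; otherwise the message is split only when the host buffer is
 * completely empty (*slots == dev->hbuf_depth), so each fragment is as large
 * as the hardware allows and the remainder stays queued until the next
 * empty-buffer interrupt.
 */
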
/**
 * mei_cl_write - submit a write cb to mei device,
 *      assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: whether to wait for the write to complete
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
        struct mei_msg_hdr mei_hdr;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        if (WARN_ON(!cb))
                return -EINVAL;

        dev = cl->dev;

        buf = &cb->request_buffer;

        dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);

        cb->fop_type = MEI_FOP_WRITE;

        rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)
                goto err;

        /* Host buffer is not ready, we queue the request */
        if (rets == 0 || !dev->hbuf_is_ready) {
                cb->buf_idx = 0;
                /* unsetting complete will enqueue the cb for write */
                mei_hdr.msg_complete = 0;
                rets = buf->size;
                goto out;
        }

        dev->hbuf_is_ready = false;

        /* Check for a maximum length */
        if (buf->size > mei_hbuf_max_len(dev)) {
                mei_hdr.length = mei_hbuf_max_len(dev);
                mei_hdr.msg_complete = 0;
        } else {
                mei_hdr.length = buf->size;
                mei_hdr.msg_complete = 1;
        }

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;

        dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
                MEI_HDR_PRM(&mei_hdr));

        if (mei_write_message(dev, &mei_hdr, buf->data)) {
                rets = -EIO;
                goto err;
        }

        cl->writing_state = MEI_WRITING;
        cb->buf_idx = mei_hdr.length;

        rets = buf->size;
out:
        if (mei_hdr.msg_complete) {
                if (mei_cl_flow_ctrl_reduce(cl)) {
                        rets = -ENODEV;
                        goto err;
                }
                list_add_tail(&cb->list, &dev->write_waiting_list.list);
        } else {
                list_add_tail(&cb->list, &dev->write_list.list);
        }

        if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

                mutex_unlock(&dev->device_lock);
                if (wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE)) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        else
                                rets = -ERESTARTSYS;
                }
                mutex_lock(&dev->device_lock);
        }
err:
        return rets;
}

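/*
 * Sketch of a producer calling mei_cl_write() (hypothetical caller, shown
 * only to tie the helpers together; the real user is the character-device
 * write path outside this file, and ubuf_copy is a placeholder name):
 *
 *      cb = mei_io_cb_init(cl, file);
 *      if (!cb || mei_io_cb_alloc_req_buf(cb, length)) {
 *              mei_io_cb_free(cb);     (safe on NULL)
 *              return -ENOMEM;
 *      }
 *      memcpy(cb->request_buffer.data, ubuf_copy, length);
 *      rets = mei_cl_write(cl, cb, blocking);  (bytes queued/sent or <0)
 */
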
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        if (cb->fop_type == MEI_FOP_WRITE) {
                mei_io_cb_free(cb);
                cb = NULL;
                cl->writing_state = MEI_WRITE_COMPLETE;
                if (waitqueue_active(&cl->tx_wait))
                        wake_up_interruptible(&cl->tx_wait);

        } else if (cb->fop_type == MEI_FOP_READ &&
                   MEI_READING == cl->reading_state) {
                cl->reading_state = MEI_READ_COMPLETE;
                if (waitqueue_active(&cl->rx_wait))
                        wake_up_interruptible(&cl->rx_wait);
                else
                        mei_cl_bus_rx_event(cl);
        }
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
        struct mei_cl *cl, *next;

        list_for_each_entry_safe(cl, next, &dev->file_list, link) {
                cl->state = MEI_FILE_DISCONNECTED;
                cl->mei_flow_ctrl_creds = 0;
                cl->read_cb = NULL;
                cl->timer_count = 0;
        }
}

/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
        struct mei_cl *cl, *next;

        list_for_each_entry_safe(cl, next, &dev->file_list, link) {
                if (waitqueue_active(&cl->rx_wait)) {
                        dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
                        wake_up_interruptible(&cl->rx_wait);
                }
                if (waitqueue_active(&cl->tx_wait)) {
                        dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
                        wake_up_interruptible(&cl->tx_wait);
                }
        }
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
        mei_io_list_free(&dev->write_list, NULL);
        mei_io_list_free(&dev->write_waiting_list, NULL);
}