/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
17 #include <linux/sched.h>
18 #include <linux/wait.h>
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/pm_runtime.h>
23 #include <linux/mei.h>
30 * mei_me_cl_by_uuid - locate me client by uuid
33 * @uuid: me client uuid
35 * Locking: called under "dev->device_lock" lock
37 * Return: me client or NULL if not found
39 struct mei_me_client
*mei_me_cl_by_uuid(const struct mei_device
*dev
,
42 struct mei_me_client
*me_cl
;
44 list_for_each_entry(me_cl
, &dev
->me_clients
, list
)
45 if (uuid_le_cmp(*uuid
, me_cl
->props
.protocol_name
) == 0)
52 * mei_me_cl_by_id - locate me client by client id
54 * @dev: the device structure
55 * @client_id: me client id
57 * Locking: called under "dev->device_lock" lock
59 * Return: me client or NULL if not found
61 struct mei_me_client
*mei_me_cl_by_id(struct mei_device
*dev
, u8 client_id
)
64 struct mei_me_client
*me_cl
;
66 list_for_each_entry(me_cl
, &dev
->me_clients
, list
)
67 if (me_cl
->client_id
== client_id
)
73 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
75 * @dev: the device structure
76 * @uuid: me client uuid
77 * @client_id: me client id
79 * Locking: called under "dev->device_lock" lock
81 * Return: me client or NULL if not found
83 struct mei_me_client
*mei_me_cl_by_uuid_id(struct mei_device
*dev
,
84 const uuid_le
*uuid
, u8 client_id
)
86 struct mei_me_client
*me_cl
;
88 list_for_each_entry(me_cl
, &dev
->me_clients
, list
)
89 if (uuid_le_cmp(*uuid
, me_cl
->props
.protocol_name
) == 0 &&
90 me_cl
->client_id
== client_id
)
96 * mei_me_cl_remove - remove me client matching uuid and client_id
98 * @dev: the device structure
99 * @uuid: me client uuid
100 * @client_id: me client address
102 void mei_me_cl_remove(struct mei_device
*dev
, const uuid_le
*uuid
, u8 client_id
)
104 struct mei_me_client
*me_cl
, *next
;
106 list_for_each_entry_safe(me_cl
, next
, &dev
->me_clients
, list
) {
107 if (uuid_le_cmp(*uuid
, me_cl
->props
.protocol_name
) == 0 &&
108 me_cl
->client_id
== client_id
) {
109 list_del(&me_cl
->list
);
118 * mei_cl_cmp_id - tells if the clients are the same
120 * @cl1: host client 1
121 * @cl2: host client 2
123 * Return: true - if the clients has same host and me ids
126 static inline bool mei_cl_cmp_id(const struct mei_cl
*cl1
,
127 const struct mei_cl
*cl2
)
130 (cl1
->host_client_id
== cl2
->host_client_id
) &&
131 (cl1
->me_client_id
== cl2
->me_client_id
);
135 * mei_io_list_flush - removes cbs belonging to cl.
137 * @list: an instance of our list structure
138 * @cl: host client, can be NULL for flushing the whole list
139 * @free: whether to free the cbs
141 static void __mei_io_list_flush(struct mei_cl_cb
*list
,
142 struct mei_cl
*cl
, bool free
)
144 struct mei_cl_cb
*cb
;
145 struct mei_cl_cb
*next
;
147 /* enable removing everything if no cl is specified */
148 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
149 if (!cl
|| mei_cl_cmp_id(cl
, cb
->cl
)) {
158 * mei_io_list_flush - removes list entry belonging to cl.
160 * @list: An instance of our list structure
163 void mei_io_list_flush(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
165 __mei_io_list_flush(list
, cl
, false);
170 * mei_io_list_free - removes cb belonging to cl and free them
172 * @list: An instance of our list structure
175 static inline void mei_io_list_free(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
177 __mei_io_list_flush(list
, cl
, true);
181 * mei_io_cb_free - free mei_cb_private related memory
183 * @cb: mei callback struct
185 void mei_io_cb_free(struct mei_cl_cb
*cb
)
190 kfree(cb
->request_buffer
.data
);
191 kfree(cb
->response_buffer
.data
);
196 * mei_io_cb_init - allocate and initialize io callback
199 * @fp: pointer to file structure
201 * Return: mei_cl_cb pointer or NULL;
203 struct mei_cl_cb
*mei_io_cb_init(struct mei_cl
*cl
, struct file
*fp
)
205 struct mei_cl_cb
*cb
;
207 cb
= kzalloc(sizeof(struct mei_cl_cb
), GFP_KERNEL
);
211 mei_io_list_init(cb
);
213 cb
->file_object
= fp
;
220 * mei_io_cb_alloc_req_buf - allocate request buffer
222 * @cb: io callback structure
223 * @length: size of the buffer
225 * Return: 0 on success
226 * -EINVAL if cb is NULL
227 * -ENOMEM if allocation failed
229 int mei_io_cb_alloc_req_buf(struct mei_cl_cb
*cb
, size_t length
)
237 cb
->request_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
238 if (!cb
->request_buffer
.data
)
240 cb
->request_buffer
.size
= length
;
244 * mei_io_cb_alloc_resp_buf - allocate response buffer
246 * @cb: io callback structure
247 * @length: size of the buffer
249 * Return: 0 on success
250 * -EINVAL if cb is NULL
251 * -ENOMEM if allocation failed
253 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb
*cb
, size_t length
)
261 cb
->response_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
262 if (!cb
->response_buffer
.data
)
264 cb
->response_buffer
.size
= length
;
271 * mei_cl_flush_queues - flushes queue lists belonging to cl.
275 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
277 int mei_cl_flush_queues(struct mei_cl
*cl
)
279 struct mei_device
*dev
;
281 if (WARN_ON(!cl
|| !cl
->dev
))
286 cl_dbg(dev
, cl
, "remove list entry belonging to cl\n");
287 mei_io_list_flush(&cl
->dev
->read_list
, cl
);
288 mei_io_list_free(&cl
->dev
->write_list
, cl
);
289 mei_io_list_free(&cl
->dev
->write_waiting_list
, cl
);
290 mei_io_list_flush(&cl
->dev
->ctrl_wr_list
, cl
);
291 mei_io_list_flush(&cl
->dev
->ctrl_rd_list
, cl
);
292 mei_io_list_flush(&cl
->dev
->amthif_cmd_list
, cl
);
293 mei_io_list_flush(&cl
->dev
->amthif_rd_complete_list
, cl
);
299 * mei_cl_init - initializes cl.
301 * @cl: host client to be initialized
304 void mei_cl_init(struct mei_cl
*cl
, struct mei_device
*dev
)
306 memset(cl
, 0, sizeof(struct mei_cl
));
307 init_waitqueue_head(&cl
->wait
);
308 init_waitqueue_head(&cl
->rx_wait
);
309 init_waitqueue_head(&cl
->tx_wait
);
310 INIT_LIST_HEAD(&cl
->link
);
311 INIT_LIST_HEAD(&cl
->device_link
);
312 cl
->reading_state
= MEI_IDLE
;
313 cl
->writing_state
= MEI_IDLE
;
318 * mei_cl_allocate - allocates cl structure and sets it up.
321 * Return: The allocated file or NULL on failure
323 struct mei_cl
*mei_cl_allocate(struct mei_device
*dev
)
327 cl
= kmalloc(sizeof(struct mei_cl
), GFP_KERNEL
);
331 mei_cl_init(cl
, dev
);
337 * mei_cl_find_read_cb - find this cl's callback in the read list
341 * Return: cb on success, NULL on error
343 struct mei_cl_cb
*mei_cl_find_read_cb(struct mei_cl
*cl
)
345 struct mei_device
*dev
= cl
->dev
;
346 struct mei_cl_cb
*cb
;
348 list_for_each_entry(cb
, &dev
->read_list
.list
, list
)
349 if (mei_cl_cmp_id(cl
, cb
->cl
))
354 /** mei_cl_link: allocate host id in the host map
357 * @id - fixed host id or -1 for generic one
359 * Return: 0 on success
360 * -EINVAL on incorrect values
361 * -ENONET if client not found
363 int mei_cl_link(struct mei_cl
*cl
, int id
)
365 struct mei_device
*dev
;
366 long open_handle_count
;
368 if (WARN_ON(!cl
|| !cl
->dev
))
373 /* If Id is not assigned get one*/
374 if (id
== MEI_HOST_CLIENT_ID_ANY
)
375 id
= find_first_zero_bit(dev
->host_clients_map
,
378 if (id
>= MEI_CLIENTS_MAX
) {
379 dev_err(dev
->dev
, "id exceeded %d", MEI_CLIENTS_MAX
);
383 open_handle_count
= dev
->open_handle_count
+ dev
->iamthif_open_count
;
384 if (open_handle_count
>= MEI_MAX_OPEN_HANDLE_COUNT
) {
385 dev_err(dev
->dev
, "open_handle_count exceeded %d",
386 MEI_MAX_OPEN_HANDLE_COUNT
);
390 dev
->open_handle_count
++;
392 cl
->host_client_id
= id
;
393 list_add_tail(&cl
->link
, &dev
->file_list
);
395 set_bit(id
, dev
->host_clients_map
);
397 cl
->state
= MEI_FILE_INITIALIZING
;
399 cl_dbg(dev
, cl
, "link cl\n");
404 * mei_cl_unlink - remove me_cl from the list
410 int mei_cl_unlink(struct mei_cl
*cl
)
412 struct mei_device
*dev
;
414 /* don't shout on error exit path */
418 /* wd and amthif might not be initialized */
424 cl_dbg(dev
, cl
, "unlink client");
426 if (dev
->open_handle_count
> 0)
427 dev
->open_handle_count
--;
429 /* never clear the 0 bit */
430 if (cl
->host_client_id
)
431 clear_bit(cl
->host_client_id
, dev
->host_clients_map
);
433 list_del_init(&cl
->link
);
435 cl
->state
= MEI_FILE_INITIALIZING
;
441 void mei_host_client_init(struct work_struct
*work
)
443 struct mei_device
*dev
= container_of(work
,
444 struct mei_device
, init_work
);
445 struct mei_me_client
*me_cl
;
446 struct mei_client_properties
*props
;
448 mutex_lock(&dev
->device_lock
);
450 list_for_each_entry(me_cl
, &dev
->me_clients
, list
) {
451 props
= &me_cl
->props
;
453 if (!uuid_le_cmp(props
->protocol_name
, mei_amthif_guid
))
454 mei_amthif_host_init(dev
);
455 else if (!uuid_le_cmp(props
->protocol_name
, mei_wd_guid
))
456 mei_wd_host_init(dev
);
457 else if (!uuid_le_cmp(props
->protocol_name
, mei_nfc_guid
))
458 mei_nfc_host_init(dev
);
462 dev
->dev_state
= MEI_DEV_ENABLED
;
463 dev
->reset_count
= 0;
465 mutex_unlock(&dev
->device_lock
);
467 pm_runtime_mark_last_busy(dev
->dev
);
468 dev_dbg(dev
->dev
, "rpm: autosuspend\n");
469 pm_runtime_autosuspend(dev
->dev
);
473 * mei_hbuf_acquire - try to acquire host buffer
475 * @dev: the device structure
476 * Return: true if host buffer was acquired
478 bool mei_hbuf_acquire(struct mei_device
*dev
)
480 if (mei_pg_state(dev
) == MEI_PG_ON
||
481 dev
->pg_event
== MEI_PG_EVENT_WAIT
) {
482 dev_dbg(dev
->dev
, "device is in pg\n");
486 if (!dev
->hbuf_is_ready
) {
487 dev_dbg(dev
->dev
, "hbuf is not ready\n");
491 dev
->hbuf_is_ready
= false;
497 * mei_cl_disconnect - disconnect host client from the me one
501 * Locking: called under "dev->device_lock" lock
503 * Return: 0 on success, <0 on failure.
505 int mei_cl_disconnect(struct mei_cl
*cl
)
507 struct mei_device
*dev
;
508 struct mei_cl_cb
*cb
;
511 if (WARN_ON(!cl
|| !cl
->dev
))
516 cl_dbg(dev
, cl
, "disconnecting");
518 if (cl
->state
!= MEI_FILE_DISCONNECTING
)
521 rets
= pm_runtime_get(dev
->dev
);
522 if (rets
< 0 && rets
!= -EINPROGRESS
) {
523 pm_runtime_put_noidle(dev
->dev
);
524 cl_err(dev
, cl
, "rpm: get failed %d\n", rets
);
528 cb
= mei_io_cb_init(cl
, NULL
);
534 cb
->fop_type
= MEI_FOP_DISCONNECT
;
536 if (mei_hbuf_acquire(dev
)) {
537 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
539 cl_err(dev
, cl
, "failed to disconnect.\n");
542 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
543 mdelay(10); /* Wait for hardware disconnection ready */
544 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
546 cl_dbg(dev
, cl
, "add disconnect cb to control write list\n");
547 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
550 mutex_unlock(&dev
->device_lock
);
552 wait_event_timeout(cl
->wait
,
553 MEI_FILE_DISCONNECTED
== cl
->state
,
554 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
556 mutex_lock(&dev
->device_lock
);
558 if (MEI_FILE_DISCONNECTED
== cl
->state
) {
560 cl_dbg(dev
, cl
, "successfully disconnected from FW client.\n");
562 cl_dbg(dev
, cl
, "timeout on disconnect from FW client.\n");
566 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
567 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
569 cl_dbg(dev
, cl
, "rpm: autosuspend\n");
570 pm_runtime_mark_last_busy(dev
->dev
);
571 pm_runtime_put_autosuspend(dev
->dev
);
579 * mei_cl_is_other_connecting - checks if other
580 * client with the same me client id is connecting
582 * @cl: private data of the file object
584 * Return: true if other client is connected, false - otherwise.
586 bool mei_cl_is_other_connecting(struct mei_cl
*cl
)
588 struct mei_device
*dev
;
589 struct mei_cl
*ocl
; /* the other client */
591 if (WARN_ON(!cl
|| !cl
->dev
))
596 list_for_each_entry(ocl
, &dev
->file_list
, link
) {
597 if (ocl
->state
== MEI_FILE_CONNECTING
&&
599 cl
->me_client_id
== ocl
->me_client_id
)
608 * mei_cl_connect - connect host client to the me one
611 * @file: pointer to file structure
613 * Locking: called under "dev->device_lock" lock
615 * Return: 0 on success, <0 on failure.
617 int mei_cl_connect(struct mei_cl
*cl
, struct file
*file
)
619 struct mei_device
*dev
;
620 struct mei_cl_cb
*cb
;
623 if (WARN_ON(!cl
|| !cl
->dev
))
628 rets
= pm_runtime_get(dev
->dev
);
629 if (rets
< 0 && rets
!= -EINPROGRESS
) {
630 pm_runtime_put_noidle(dev
->dev
);
631 cl_err(dev
, cl
, "rpm: get failed %d\n", rets
);
635 cb
= mei_io_cb_init(cl
, file
);
641 cb
->fop_type
= MEI_FOP_CONNECT
;
643 /* run hbuf acquire last so we don't have to undo */
644 if (!mei_cl_is_other_connecting(cl
) && mei_hbuf_acquire(dev
)) {
645 cl
->state
= MEI_FILE_CONNECTING
;
646 if (mei_hbm_cl_connect_req(dev
, cl
)) {
650 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
651 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
653 cl
->state
= MEI_FILE_INITIALIZING
;
654 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
657 mutex_unlock(&dev
->device_lock
);
658 wait_event_timeout(cl
->wait
,
659 (cl
->state
== MEI_FILE_CONNECTED
||
660 cl
->state
== MEI_FILE_DISCONNECTED
),
661 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
662 mutex_lock(&dev
->device_lock
);
664 if (cl
->state
!= MEI_FILE_CONNECTED
) {
665 cl
->state
= MEI_FILE_DISCONNECTED
;
666 /* something went really wrong */
668 cl
->status
= -EFAULT
;
670 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
671 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
677 cl_dbg(dev
, cl
, "rpm: autosuspend\n");
678 pm_runtime_mark_last_busy(dev
->dev
);
679 pm_runtime_put_autosuspend(dev
->dev
);
686 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
688 * @cl: private data of the file object
690 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
691 * -ENOENT if mei_cl is not present
692 * -EINVAL if single_recv_buf == 0
694 int mei_cl_flow_ctrl_creds(struct mei_cl
*cl
)
696 struct mei_device
*dev
;
697 struct mei_me_client
*me_cl
;
699 if (WARN_ON(!cl
|| !cl
->dev
))
704 if (cl
->mei_flow_ctrl_creds
> 0)
707 me_cl
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
709 cl_err(dev
, cl
, "no such me client %d\n", cl
->me_client_id
);
713 if (me_cl
->mei_flow_ctrl_creds
) {
714 if (WARN_ON(me_cl
->props
.single_recv_buf
== 0))
722 * mei_cl_flow_ctrl_reduce - reduces flow_control.
724 * @cl: private data of the file object
728 * -ENOENT when me client is not found
729 * -EINVAL when ctrl credits are <= 0
731 int mei_cl_flow_ctrl_reduce(struct mei_cl
*cl
)
733 struct mei_device
*dev
;
734 struct mei_me_client
*me_cl
;
736 if (WARN_ON(!cl
|| !cl
->dev
))
741 me_cl
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
743 cl_err(dev
, cl
, "no such me client %d\n", cl
->me_client_id
);
747 if (me_cl
->props
.single_recv_buf
) {
748 if (WARN_ON(me_cl
->mei_flow_ctrl_creds
<= 0))
750 me_cl
->mei_flow_ctrl_creds
--;
752 if (WARN_ON(cl
->mei_flow_ctrl_creds
<= 0))
754 cl
->mei_flow_ctrl_creds
--;
760 * mei_cl_read_start - the start read client message function.
763 * @length: number of bytes to read
765 * Return: 0 on success, <0 on failure.
767 int mei_cl_read_start(struct mei_cl
*cl
, size_t length
)
769 struct mei_device
*dev
;
770 struct mei_cl_cb
*cb
;
771 struct mei_me_client
*me_cl
;
774 if (WARN_ON(!cl
|| !cl
->dev
))
779 if (!mei_cl_is_connected(cl
))
783 cl_dbg(dev
, cl
, "read is pending.\n");
786 me_cl
= mei_me_cl_by_uuid_id(dev
, &cl
->cl_uuid
, cl
->me_client_id
);
788 cl_err(dev
, cl
, "no such me client %d\n", cl
->me_client_id
);
792 rets
= pm_runtime_get(dev
->dev
);
793 if (rets
< 0 && rets
!= -EINPROGRESS
) {
794 pm_runtime_put_noidle(dev
->dev
);
795 cl_err(dev
, cl
, "rpm: get failed %d\n", rets
);
799 cb
= mei_io_cb_init(cl
, NULL
);
805 /* always allocate at least client max message */
806 length
= max_t(size_t, length
, me_cl
->props
.max_msg_length
);
807 rets
= mei_io_cb_alloc_resp_buf(cb
, length
);
811 cb
->fop_type
= MEI_FOP_READ
;
812 if (mei_hbuf_acquire(dev
)) {
813 rets
= mei_hbm_cl_flow_control_req(dev
, cl
);
817 list_add_tail(&cb
->list
, &dev
->read_list
.list
);
819 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
825 cl_dbg(dev
, cl
, "rpm: autosuspend\n");
826 pm_runtime_mark_last_busy(dev
->dev
);
827 pm_runtime_put_autosuspend(dev
->dev
);
836 * mei_cl_irq_write - write a message to device
837 * from the interrupt thread context
840 * @cb: callback block.
841 * @cmpl_list: complete list.
843 * Return: 0, OK; otherwise error.
845 int mei_cl_irq_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
846 struct mei_cl_cb
*cmpl_list
)
848 struct mei_device
*dev
;
849 struct mei_msg_data
*buf
;
850 struct mei_msg_hdr mei_hdr
;
856 if (WARN_ON(!cl
|| !cl
->dev
))
861 buf
= &cb
->request_buffer
;
863 rets
= mei_cl_flow_ctrl_creds(cl
);
868 cl_dbg(dev
, cl
, "No flow control credentials: not sending.\n");
872 slots
= mei_hbuf_empty_slots(dev
);
873 len
= buf
->size
- cb
->buf_idx
;
874 msg_slots
= mei_data2slots(len
);
876 mei_hdr
.host_addr
= cl
->host_client_id
;
877 mei_hdr
.me_addr
= cl
->me_client_id
;
878 mei_hdr
.reserved
= 0;
879 mei_hdr
.internal
= cb
->internal
;
881 if (slots
>= msg_slots
) {
882 mei_hdr
.length
= len
;
883 mei_hdr
.msg_complete
= 1;
884 /* Split the message only if we can write the whole host buffer */
885 } else if (slots
== dev
->hbuf_depth
) {
887 len
= (slots
* sizeof(u32
)) - sizeof(struct mei_msg_hdr
);
888 mei_hdr
.length
= len
;
889 mei_hdr
.msg_complete
= 0;
891 /* wait for next time the host buffer is empty */
895 cl_dbg(dev
, cl
, "buf: size = %d idx = %lu\n",
896 cb
->request_buffer
.size
, cb
->buf_idx
);
898 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
+ cb
->buf_idx
);
901 list_move_tail(&cb
->list
, &cmpl_list
->list
);
906 cl
->writing_state
= MEI_WRITING
;
907 cb
->buf_idx
+= mei_hdr
.length
;
909 if (mei_hdr
.msg_complete
) {
910 if (mei_cl_flow_ctrl_reduce(cl
))
912 list_move_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
919 * mei_cl_write - submit a write cb to mei device
920 * assumes device_lock is locked
923 * @cb: write callback with filled data
924 * @blocking: block until completed
926 * Return: number of bytes sent on success, <0 on failure.
928 int mei_cl_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
, bool blocking
)
930 struct mei_device
*dev
;
931 struct mei_msg_data
*buf
;
932 struct mei_msg_hdr mei_hdr
;
936 if (WARN_ON(!cl
|| !cl
->dev
))
945 buf
= &cb
->request_buffer
;
947 cl_dbg(dev
, cl
, "size=%d\n", buf
->size
);
949 rets
= pm_runtime_get(dev
->dev
);
950 if (rets
< 0 && rets
!= -EINPROGRESS
) {
951 pm_runtime_put_noidle(dev
->dev
);
952 cl_err(dev
, cl
, "rpm: get failed %d\n", rets
);
956 cb
->fop_type
= MEI_FOP_WRITE
;
958 cl
->writing_state
= MEI_IDLE
;
960 mei_hdr
.host_addr
= cl
->host_client_id
;
961 mei_hdr
.me_addr
= cl
->me_client_id
;
962 mei_hdr
.reserved
= 0;
963 mei_hdr
.msg_complete
= 0;
964 mei_hdr
.internal
= cb
->internal
;
966 rets
= mei_cl_flow_ctrl_creds(cl
);
971 cl_dbg(dev
, cl
, "No flow control credentials: not sending.\n");
975 if (!mei_hbuf_acquire(dev
)) {
976 cl_dbg(dev
, cl
, "Cannot acquire the host buffer: not sending.\n");
981 /* Check for a maximum length */
982 if (buf
->size
> mei_hbuf_max_len(dev
)) {
983 mei_hdr
.length
= mei_hbuf_max_len(dev
);
984 mei_hdr
.msg_complete
= 0;
986 mei_hdr
.length
= buf
->size
;
987 mei_hdr
.msg_complete
= 1;
990 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
);
994 cl
->writing_state
= MEI_WRITING
;
995 cb
->buf_idx
= mei_hdr
.length
;
998 if (mei_hdr
.msg_complete
) {
999 rets
= mei_cl_flow_ctrl_reduce(cl
);
1003 list_add_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
1005 list_add_tail(&cb
->list
, &dev
->write_list
.list
);
1009 if (blocking
&& cl
->writing_state
!= MEI_WRITE_COMPLETE
) {
1011 mutex_unlock(&dev
->device_lock
);
1012 rets
= wait_event_interruptible(cl
->tx_wait
,
1013 cl
->writing_state
== MEI_WRITE_COMPLETE
);
1014 mutex_lock(&dev
->device_lock
);
1015 /* wait_event_interruptible returns -ERESTARTSYS */
1017 if (signal_pending(current
))
1025 cl_dbg(dev
, cl
, "rpm: autosuspend\n");
1026 pm_runtime_mark_last_busy(dev
->dev
);
1027 pm_runtime_put_autosuspend(dev
->dev
);
1034 * mei_cl_complete - processes completed operation for a client
1036 * @cl: private data of the file object.
1037 * @cb: callback block.
1039 void mei_cl_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
)
1041 if (cb
->fop_type
== MEI_FOP_WRITE
) {
1044 cl
->writing_state
= MEI_WRITE_COMPLETE
;
1045 if (waitqueue_active(&cl
->tx_wait
))
1046 wake_up_interruptible(&cl
->tx_wait
);
1048 } else if (cb
->fop_type
== MEI_FOP_READ
&&
1049 MEI_READING
== cl
->reading_state
) {
1050 cl
->reading_state
= MEI_READ_COMPLETE
;
1051 if (waitqueue_active(&cl
->rx_wait
))
1052 wake_up_interruptible(&cl
->rx_wait
);
1054 mei_cl_bus_rx_event(cl
);
1061 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1066 void mei_cl_all_disconnect(struct mei_device
*dev
)
1070 list_for_each_entry(cl
, &dev
->file_list
, link
) {
1071 cl
->state
= MEI_FILE_DISCONNECTED
;
1072 cl
->mei_flow_ctrl_creds
= 0;
1073 cl
->timer_count
= 0;
1079 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
1083 void mei_cl_all_wakeup(struct mei_device
*dev
)
1087 list_for_each_entry(cl
, &dev
->file_list
, link
) {
1088 if (waitqueue_active(&cl
->rx_wait
)) {
1089 cl_dbg(dev
, cl
, "Waking up reading client!\n");
1090 wake_up_interruptible(&cl
->rx_wait
);
1092 if (waitqueue_active(&cl
->tx_wait
)) {
1093 cl_dbg(dev
, cl
, "Waking up writing client!\n");
1094 wake_up_interruptible(&cl
->tx_wait
);
1100 * mei_cl_all_write_clear - clear all pending writes
1104 void mei_cl_all_write_clear(struct mei_device
*dev
)
1106 mei_io_list_free(&dev
->write_list
, NULL
);
1107 mei_io_list_free(&dev
->write_waiting_list
, NULL
);