/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>
29 * mei_me_cl_by_uuid - locate index of me client
32 * returns me client index or -ENOENT if not found
34 int mei_me_cl_by_uuid(const struct mei_device
*dev
, const uuid_le
*uuid
)
38 for (i
= 0; i
< dev
->me_clients_num
; ++i
)
39 if (uuid_le_cmp(*uuid
,
40 dev
->me_clients
[i
].props
.protocol_name
) == 0) {
50 * mei_me_cl_by_id return index to me_clients for client_id
52 * @dev: the device structure
53 * @client_id: me client id
55 * Locking: called under "dev->device_lock" lock
57 * returns index on success, -ENOENT on failure.
60 int mei_me_cl_by_id(struct mei_device
*dev
, u8 client_id
)
63 for (i
= 0; i
< dev
->me_clients_num
; i
++)
64 if (dev
->me_clients
[i
].client_id
== client_id
)
66 if (WARN_ON(dev
->me_clients
[i
].client_id
!= client_id
))
69 if (i
== dev
->me_clients_num
)
77 * mei_io_list_flush - removes list entry belonging to cl.
79 * @list: An instance of our list structure
82 void mei_io_list_flush(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
85 struct mei_cl_cb
*next
;
87 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
88 if (cb
->cl
&& mei_cl_cmp_id(cl
, cb
->cl
))
94 * mei_io_cb_free - free mei_cb_private related memory
96 * @cb: mei callback struct
98 void mei_io_cb_free(struct mei_cl_cb
*cb
)
103 kfree(cb
->request_buffer
.data
);
104 kfree(cb
->response_buffer
.data
);
109 * mei_io_cb_init - allocate and initialize io callback
112 * @fp: pointer to file structure
114 * returns mei_cl_cb pointer or NULL;
116 struct mei_cl_cb
*mei_io_cb_init(struct mei_cl
*cl
, struct file
*fp
)
118 struct mei_cl_cb
*cb
;
120 cb
= kzalloc(sizeof(struct mei_cl_cb
), GFP_KERNEL
);
124 mei_io_list_init(cb
);
126 cb
->file_object
= fp
;
133 * mei_io_cb_alloc_req_buf - allocate request buffer
135 * @cb: io callback structure
136 * @length: size of the buffer
138 * returns 0 on success
139 * -EINVAL if cb is NULL
140 * -ENOMEM if allocation failed
142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb
*cb
, size_t length
)
150 cb
->request_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
151 if (!cb
->request_buffer
.data
)
153 cb
->request_buffer
.size
= length
;
157 * mei_io_cb_alloc_resp_buf - allocate response buffer
159 * @cb: io callback structure
160 * @length: size of the buffer
162 * returns 0 on success
163 * -EINVAL if cb is NULL
164 * -ENOMEM if allocation failed
166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb
*cb
, size_t length
)
174 cb
->response_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
175 if (!cb
->response_buffer
.data
)
177 cb
->response_buffer
.size
= length
;
184 * mei_cl_flush_queues - flushes queue lists belonging to cl.
188 int mei_cl_flush_queues(struct mei_cl
*cl
)
190 struct mei_device
*dev
;
192 if (WARN_ON(!cl
|| !cl
->dev
))
197 cl_dbg(dev
, cl
, "remove list entry belonging to cl\n");
198 mei_io_list_flush(&cl
->dev
->read_list
, cl
);
199 mei_io_list_flush(&cl
->dev
->write_list
, cl
);
200 mei_io_list_flush(&cl
->dev
->write_waiting_list
, cl
);
201 mei_io_list_flush(&cl
->dev
->ctrl_wr_list
, cl
);
202 mei_io_list_flush(&cl
->dev
->ctrl_rd_list
, cl
);
203 mei_io_list_flush(&cl
->dev
->amthif_cmd_list
, cl
);
204 mei_io_list_flush(&cl
->dev
->amthif_rd_complete_list
, cl
);
210 * mei_cl_init - initializes cl.
212 * @cl: host client to be initialized
215 void mei_cl_init(struct mei_cl
*cl
, struct mei_device
*dev
)
217 memset(cl
, 0, sizeof(struct mei_cl
));
218 init_waitqueue_head(&cl
->wait
);
219 init_waitqueue_head(&cl
->rx_wait
);
220 init_waitqueue_head(&cl
->tx_wait
);
221 INIT_LIST_HEAD(&cl
->link
);
222 INIT_LIST_HEAD(&cl
->device_link
);
223 cl
->reading_state
= MEI_IDLE
;
224 cl
->writing_state
= MEI_IDLE
;
229 * mei_cl_allocate - allocates cl structure and sets it up.
232 * returns The allocated file or NULL on failure
234 struct mei_cl
*mei_cl_allocate(struct mei_device
*dev
)
238 cl
= kmalloc(sizeof(struct mei_cl
), GFP_KERNEL
);
242 mei_cl_init(cl
, dev
);
248 * mei_cl_find_read_cb - find this cl's callback in the read list
252 * returns cb on success, NULL on error
254 struct mei_cl_cb
*mei_cl_find_read_cb(struct mei_cl
*cl
)
256 struct mei_device
*dev
= cl
->dev
;
257 struct mei_cl_cb
*cb
= NULL
;
258 struct mei_cl_cb
*next
= NULL
;
260 list_for_each_entry_safe(cb
, next
, &dev
->read_list
.list
, list
)
261 if (mei_cl_cmp_id(cl
, cb
->cl
))
266 /** mei_cl_link: allocate host id in the host map
269 * @id - fixed host id or -1 for generic one
271 * returns 0 on success
272 * -EINVAL on incorrect values
273 * -ENONET if client not found
275 int mei_cl_link(struct mei_cl
*cl
, int id
)
277 struct mei_device
*dev
;
278 long open_handle_count
;
280 if (WARN_ON(!cl
|| !cl
->dev
))
285 /* If Id is not assigned get one*/
286 if (id
== MEI_HOST_CLIENT_ID_ANY
)
287 id
= find_first_zero_bit(dev
->host_clients_map
,
290 if (id
>= MEI_CLIENTS_MAX
) {
291 dev_err(&dev
->pdev
->dev
, "id exceeded %d", MEI_CLIENTS_MAX
);
295 open_handle_count
= dev
->open_handle_count
+ dev
->iamthif_open_count
;
296 if (open_handle_count
>= MEI_MAX_OPEN_HANDLE_COUNT
) {
297 dev_err(&dev
->pdev
->dev
, "open_handle_count exceeded %d",
298 MEI_MAX_OPEN_HANDLE_COUNT
);
302 dev
->open_handle_count
++;
304 cl
->host_client_id
= id
;
305 list_add_tail(&cl
->link
, &dev
->file_list
);
307 set_bit(id
, dev
->host_clients_map
);
309 cl
->state
= MEI_FILE_INITIALIZING
;
311 cl_dbg(dev
, cl
, "link cl\n");
316 * mei_cl_unlink - remove me_cl from the list
320 int mei_cl_unlink(struct mei_cl
*cl
)
322 struct mei_device
*dev
;
324 /* don't shout on error exit path */
328 /* wd and amthif might not be initialized */
334 cl_dbg(dev
, cl
, "unlink client");
336 if (dev
->open_handle_count
> 0)
337 dev
->open_handle_count
--;
339 /* never clear the 0 bit */
340 if (cl
->host_client_id
)
341 clear_bit(cl
->host_client_id
, dev
->host_clients_map
);
343 list_del_init(&cl
->link
);
345 cl
->state
= MEI_FILE_INITIALIZING
;
351 void mei_host_client_init(struct work_struct
*work
)
353 struct mei_device
*dev
= container_of(work
,
354 struct mei_device
, init_work
);
355 struct mei_client_properties
*client_props
;
358 mutex_lock(&dev
->device_lock
);
360 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
361 client_props
= &dev
->me_clients
[i
].props
;
363 if (!uuid_le_cmp(client_props
->protocol_name
, mei_amthif_guid
))
364 mei_amthif_host_init(dev
);
365 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_wd_guid
))
366 mei_wd_host_init(dev
);
367 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_nfc_guid
))
368 mei_nfc_host_init(dev
);
372 dev
->dev_state
= MEI_DEV_ENABLED
;
373 dev
->reset_count
= 0;
375 mutex_unlock(&dev
->device_lock
);
380 * mei_cl_disconnect - disconnect host client from the me one
384 * Locking: called under "dev->device_lock" lock
386 * returns 0 on success, <0 on failure.
388 int mei_cl_disconnect(struct mei_cl
*cl
)
390 struct mei_device
*dev
;
391 struct mei_cl_cb
*cb
;
394 if (WARN_ON(!cl
|| !cl
->dev
))
399 cl_dbg(dev
, cl
, "disconnecting");
401 if (cl
->state
!= MEI_FILE_DISCONNECTING
)
404 cb
= mei_io_cb_init(cl
, NULL
);
408 cb
->fop_type
= MEI_FOP_CLOSE
;
409 if (dev
->hbuf_is_ready
) {
410 dev
->hbuf_is_ready
= false;
411 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
413 cl_err(dev
, cl
, "failed to disconnect.\n");
416 mdelay(10); /* Wait for hardware disconnection ready */
417 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
419 cl_dbg(dev
, cl
, "add disconnect cb to control write list\n");
420 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
423 mutex_unlock(&dev
->device_lock
);
425 err
= wait_event_timeout(dev
->wait_recvd_msg
,
426 MEI_FILE_DISCONNECTED
== cl
->state
,
427 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
429 mutex_lock(&dev
->device_lock
);
430 if (MEI_FILE_DISCONNECTED
== cl
->state
) {
432 cl_dbg(dev
, cl
, "successfully disconnected from FW client.\n");
435 if (MEI_FILE_DISCONNECTED
!= cl
->state
)
436 cl_err(dev
, cl
, "wrong status client disconnect.\n");
439 cl_dbg(dev
, cl
, "wait failed disconnect err=%08x\n",
442 cl_err(dev
, cl
, "failed to disconnect from FW client.\n");
445 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
446 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
454 * mei_cl_is_other_connecting - checks if other
455 * client with the same me client id is connecting
457 * @cl: private data of the file object
459 * returns true if other client is connected, false - otherwise.
461 bool mei_cl_is_other_connecting(struct mei_cl
*cl
)
463 struct mei_device
*dev
;
467 if (WARN_ON(!cl
|| !cl
->dev
))
472 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
473 if ((pos
->state
== MEI_FILE_CONNECTING
) &&
474 (pos
!= cl
) && cl
->me_client_id
== pos
->me_client_id
)
483 * mei_cl_connect - connect host client to the me one
487 * Locking: called under "dev->device_lock" lock
489 * returns 0 on success, <0 on failure.
491 int mei_cl_connect(struct mei_cl
*cl
, struct file
*file
)
493 struct mei_device
*dev
;
494 struct mei_cl_cb
*cb
;
497 if (WARN_ON(!cl
|| !cl
->dev
))
502 cb
= mei_io_cb_init(cl
, file
);
508 cb
->fop_type
= MEI_FOP_IOCTL
;
510 if (dev
->hbuf_is_ready
&& !mei_cl_is_other_connecting(cl
)) {
511 dev
->hbuf_is_ready
= false;
513 if (mei_hbm_cl_connect_req(dev
, cl
)) {
517 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
518 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
520 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
523 mutex_unlock(&dev
->device_lock
);
524 rets
= wait_event_timeout(dev
->wait_recvd_msg
,
525 (cl
->state
== MEI_FILE_CONNECTED
||
526 cl
->state
== MEI_FILE_DISCONNECTED
),
527 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
528 mutex_lock(&dev
->device_lock
);
530 if (cl
->state
!= MEI_FILE_CONNECTED
) {
533 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
534 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
546 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
548 * @cl: private data of the file object
550 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
551 * -ENOENT if mei_cl is not present
552 * -EINVAL if single_recv_buf == 0
554 int mei_cl_flow_ctrl_creds(struct mei_cl
*cl
)
556 struct mei_device
*dev
;
559 if (WARN_ON(!cl
|| !cl
->dev
))
564 if (!dev
->me_clients_num
)
567 if (cl
->mei_flow_ctrl_creds
> 0)
570 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
571 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
572 if (me_cl
->client_id
== cl
->me_client_id
) {
573 if (me_cl
->mei_flow_ctrl_creds
) {
574 if (WARN_ON(me_cl
->props
.single_recv_buf
== 0))
586 * mei_cl_flow_ctrl_reduce - reduces flow_control.
588 * @cl: private data of the file object
592 * -ENOENT when me client is not found
593 * -EINVAL when ctrl credits are <= 0
595 int mei_cl_flow_ctrl_reduce(struct mei_cl
*cl
)
597 struct mei_device
*dev
;
600 if (WARN_ON(!cl
|| !cl
->dev
))
605 if (!dev
->me_clients_num
)
608 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
609 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
610 if (me_cl
->client_id
== cl
->me_client_id
) {
611 if (me_cl
->props
.single_recv_buf
!= 0) {
612 if (WARN_ON(me_cl
->mei_flow_ctrl_creds
<= 0))
614 dev
->me_clients
[i
].mei_flow_ctrl_creds
--;
616 if (WARN_ON(cl
->mei_flow_ctrl_creds
<= 0))
618 cl
->mei_flow_ctrl_creds
--;
627 * mei_cl_read_start - the start read client message function.
631 * returns 0 on success, <0 on failure.
633 int mei_cl_read_start(struct mei_cl
*cl
, size_t length
)
635 struct mei_device
*dev
;
636 struct mei_cl_cb
*cb
;
640 if (WARN_ON(!cl
|| !cl
->dev
))
645 if (!mei_cl_is_connected(cl
))
649 cl_dbg(dev
, cl
, "read is pending.\n");
652 i
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
654 cl_err(dev
, cl
, "no such me client %d\n", cl
->me_client_id
);
658 cb
= mei_io_cb_init(cl
, NULL
);
662 /* always allocate at least client max message */
663 length
= max_t(size_t, length
, dev
->me_clients
[i
].props
.max_msg_length
);
664 rets
= mei_io_cb_alloc_resp_buf(cb
, length
);
668 cb
->fop_type
= MEI_FOP_READ
;
670 if (dev
->hbuf_is_ready
) {
671 dev
->hbuf_is_ready
= false;
672 if (mei_hbm_cl_flow_control_req(dev
, cl
)) {
673 cl_err(dev
, cl
, "flow control send failed\n");
677 list_add_tail(&cb
->list
, &dev
->read_list
.list
);
679 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
688 * mei_cl_irq_write_complete - write a message to device
689 * from the interrupt thread context
692 * @cb: callback block.
693 * @slots: free slots.
694 * @cmpl_list: complete list.
696 * returns 0, OK; otherwise error.
698 int mei_cl_irq_write_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
699 s32
*slots
, struct mei_cl_cb
*cmpl_list
)
701 struct mei_device
*dev
;
702 struct mei_msg_data
*buf
;
703 struct mei_msg_hdr mei_hdr
;
709 if (WARN_ON(!cl
|| !cl
->dev
))
714 buf
= &cb
->request_buffer
;
716 rets
= mei_cl_flow_ctrl_creds(cl
);
721 cl_dbg(dev
, cl
, "No flow control credentials: not sending.\n");
725 len
= buf
->size
- cb
->buf_idx
;
726 msg_slots
= mei_data2slots(len
);
728 mei_hdr
.host_addr
= cl
->host_client_id
;
729 mei_hdr
.me_addr
= cl
->me_client_id
;
730 mei_hdr
.reserved
= 0;
731 mei_hdr
.internal
= cb
->internal
;
733 if (*slots
>= msg_slots
) {
734 mei_hdr
.length
= len
;
735 mei_hdr
.msg_complete
= 1;
736 /* Split the message only if we can write the whole host buffer */
737 } else if (*slots
== dev
->hbuf_depth
) {
739 len
= (*slots
* sizeof(u32
)) - sizeof(struct mei_msg_hdr
);
740 mei_hdr
.length
= len
;
741 mei_hdr
.msg_complete
= 0;
743 /* wait for next time the host buffer is empty */
747 cl_dbg(dev
, cl
, "buf: size = %d idx = %lu\n",
748 cb
->request_buffer
.size
, cb
->buf_idx
);
751 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
+ cb
->buf_idx
);
754 list_move_tail(&cb
->list
, &cmpl_list
->list
);
759 cl
->writing_state
= MEI_WRITING
;
760 cb
->buf_idx
+= mei_hdr
.length
;
762 if (mei_hdr
.msg_complete
) {
763 if (mei_cl_flow_ctrl_reduce(cl
))
765 list_move_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
772 * mei_cl_write - submit a write cb to mei device
773 assumes device_lock is locked
776 * @cl: write callback with filled data
778 * returns number of bytes sent on success, <0 on failure.
780 int mei_cl_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
, bool blocking
)
782 struct mei_device
*dev
;
783 struct mei_msg_data
*buf
;
784 struct mei_msg_hdr mei_hdr
;
788 if (WARN_ON(!cl
|| !cl
->dev
))
797 buf
= &cb
->request_buffer
;
799 cl_dbg(dev
, cl
, "mei_cl_write %d\n", buf
->size
);
802 cb
->fop_type
= MEI_FOP_WRITE
;
804 rets
= mei_cl_flow_ctrl_creds(cl
);
808 /* Host buffer is not ready, we queue the request */
809 if (rets
== 0 || !dev
->hbuf_is_ready
) {
811 /* unseting complete will enqueue the cb for write */
812 mei_hdr
.msg_complete
= 0;
817 dev
->hbuf_is_ready
= false;
819 /* Check for a maximum length */
820 if (buf
->size
> mei_hbuf_max_len(dev
)) {
821 mei_hdr
.length
= mei_hbuf_max_len(dev
);
822 mei_hdr
.msg_complete
= 0;
824 mei_hdr
.length
= buf
->size
;
825 mei_hdr
.msg_complete
= 1;
828 mei_hdr
.host_addr
= cl
->host_client_id
;
829 mei_hdr
.me_addr
= cl
->me_client_id
;
830 mei_hdr
.reserved
= 0;
831 mei_hdr
.internal
= cb
->internal
;
834 rets
= mei_write_message(dev
, &mei_hdr
, buf
->data
);
838 cl
->writing_state
= MEI_WRITING
;
839 cb
->buf_idx
= mei_hdr
.length
;
843 if (mei_hdr
.msg_complete
) {
844 if (mei_cl_flow_ctrl_reduce(cl
)) {
848 list_add_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
850 list_add_tail(&cb
->list
, &dev
->write_list
.list
);
854 if (blocking
&& cl
->writing_state
!= MEI_WRITE_COMPLETE
) {
856 mutex_unlock(&dev
->device_lock
);
857 if (wait_event_interruptible(cl
->tx_wait
,
858 cl
->writing_state
== MEI_WRITE_COMPLETE
)) {
859 if (signal_pending(current
))
864 mutex_lock(&dev
->device_lock
);
872 * mei_cl_complete - processes completed operation for a client
874 * @cl: private data of the file object.
875 * @cb: callback block.
877 void mei_cl_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
)
879 if (cb
->fop_type
== MEI_FOP_WRITE
) {
882 cl
->writing_state
= MEI_WRITE_COMPLETE
;
883 if (waitqueue_active(&cl
->tx_wait
))
884 wake_up_interruptible(&cl
->tx_wait
);
886 } else if (cb
->fop_type
== MEI_FOP_READ
&&
887 MEI_READING
== cl
->reading_state
) {
888 cl
->reading_state
= MEI_READ_COMPLETE
;
889 if (waitqueue_active(&cl
->rx_wait
))
890 wake_up_interruptible(&cl
->rx_wait
);
892 mei_cl_bus_rx_event(cl
);
899 * mei_cl_all_disconnect - disconnect forcefully all connected clients
904 void mei_cl_all_disconnect(struct mei_device
*dev
)
906 struct mei_cl
*cl
, *next
;
908 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
909 cl
->state
= MEI_FILE_DISCONNECTED
;
910 cl
->mei_flow_ctrl_creds
= 0;
918 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
922 void mei_cl_all_wakeup(struct mei_device
*dev
)
924 struct mei_cl
*cl
, *next
;
925 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
926 if (waitqueue_active(&cl
->rx_wait
)) {
927 cl_dbg(dev
, cl
, "Waking up reading client!\n");
928 wake_up_interruptible(&cl
->rx_wait
);
930 if (waitqueue_active(&cl
->tx_wait
)) {
931 cl_dbg(dev
, cl
, "Waking up writing client!\n");
932 wake_up_interruptible(&cl
->tx_wait
);
938 * mei_cl_all_write_clear - clear all pending writes
942 void mei_cl_all_write_clear(struct mei_device
*dev
)
944 struct mei_cl_cb
*cb
, *next
;
946 list_for_each_entry_safe(cb
, next
, &dev
->write_list
.list
, list
) {