// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * @ref: me_client refcount
 *
 * Locking: called under "dev->device_lock" lock
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

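/*
 * Example (editorial sketch, not part of the driver): the helpers above
 * implement standard kref reference counting. A caller that wants to pin
 * the object takes a reference and releases it with mei_me_cl_put(); the
 * last put runs mei_me_cl_release() and frees the client. "me_cl" below
 * is assumed to be a valid pointer obtained elsewhere.
 *
 *	struct mei_me_client *ref = mei_me_cl_get(me_cl);
 *
 *	if (ref) {
 *		... use ref->props safely; the refcount pins the object ...
 *		mei_me_cl_put(ref);
 *	}
 */
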
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

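/*
 * Example (editorial sketch, not part of the driver): the lookup returns
 * with the refcount already elevated, so the caller owns one reference
 * and must drop it when done. The "uuid" variable is hypothetical.
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	... use me_cl->props ...
 *	mei_me_cl_put(me_cl);
 */
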
/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb->ext_hdr);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 *
 * Locking: called under "dev->device_lock" lock
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * @cb: mei callback struct to dequeue and free
 *
 * Locking: called under "dev->device_lock" lock
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 */
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
				  const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->fp == fp) {
			cl_vtag->pending_read = true;
			return;
		}
	}
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;
	cb->ext_hdr = NULL;

	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

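/*
 * Example (editorial sketch, not part of the driver): a typical caller
 * allocates a zero-length control cb and lets this helper queue it on
 * dev->ctrl_wr_list for the interrupt thread to pick up; all names used
 * here are defined in this file.
 *
 *	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
 *	if (!cb)
 *		return -ENOMEM;
 */
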
/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (cl->state == MEI_FILE_UNINITIALIZED)
		return 0;

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - initialize client devices
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

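/*
 * Example (editorial sketch, not part of the driver): the usual
 * acquire-then-send pattern used throughout this file; when the host
 * buffer cannot be acquired the cb stays queued and is sent later from
 * the interrupt thread.
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		rets = mei_cl_send_connect(cl, cb);
 *		if (rets)
 *			goto out;
 *	}
 */
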
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up_interruptible(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/*
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connected, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

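/*
 * Example (editorial sketch, not part of the driver): failure is
 * reported via ERR_PTR, not NULL, so callers must use IS_ERR()/PTR_ERR().
 *
 *	struct mei_cl *cl = mei_cl_alloc_linked(dev);
 *
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */
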
/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

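/*
 * Example (editorial sketch, not part of the driver): the credit pair
 * above brackets every first chunk of a write; check before sending,
 * reduce after a successful send.
 *
 *	if (mei_cl_tx_flow_ctrl_creds(cl) > 0) {
 *		rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
 *		if (!rets)
 *			rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
 *	}
 */
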
/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}

/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		/* The client on bus has one fixed vtag map */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *                           in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}

/**
 * mei_cl_vt_support_check - check if client support vtags
 *
 * @cl: host client
 *
 * Return:
 * * 0 - supported, or not connected at all
 * * -EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

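/*
 * Example (editorial sketch, not part of the driver): vtag users are
 * expected to gate on this check before touching the vtag map.
 *
 *	if (mei_cl_vt_support_check(cl))
 *		return -EOPNOTSUPP;	// device or FW client lacks vtags
 */
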
/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *                           and vtag check
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
	struct mei_ext_hdr_vtag *vtag_hdr = ext;

	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
	vtag_hdr->vtag = vtag;
	vtag_hdr->reserved = 0;
	return vtag_hdr->hdr.length;
}

static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
{
	return ext && ext->type == MEI_EXT_HDR_GSC;
}

static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
{
	memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
	return ext->length;
}

/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_hbm, is_gsc, is_vtag;
	struct mei_ext_hdr *next_ext;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_hbm = cb->cl->me_cl->client_id == 0;
	is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
	is_ext = is_vtag || is_gsc;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

	if (is_gsc)
		hdr_len += mei_ext_hdr_len(cb->ext_hdr);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	meta->size = 0;
	next_ext = (struct mei_ext_hdr *)meta->hdrs;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
		next_ext = mei_ext_next(next_ext);
	}

	if (is_gsc) {
		meta->count++;
		meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
		next_ext = mei_ext_next(next_ext);
	}

out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

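/*
 * Example (editorial sketch, not part of the driver): for a first
 * fragment carrying only a vtag extension the computed size is
 *
 *	hdr_len = sizeof(struct mei_msg_hdr)
 *		+ sizeof(struct mei_ext_meta_hdr)
 *		+ sizeof(struct mei_ext_hdr_vtag);
 *
 * and mei_hdr->length ends up covering everything past the base header.
 */
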
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data = NULL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @timeout: send timeout in milliseconds.
 *	effective only for blocking writes: the cb->blocking is set.
 *	set timeout to the MAX_SCHEDULE_TIMEOUT for maximum allowed wait.
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		buf_len = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible_timeout(cl->tx_wait,
							cl->writing_state == MEI_WRITE_COMPLETE ||
							(!mei_cl_is_connected(cl)),
							msecs_to_jiffies(timeout));
		mutex_lock(&dev->device_lock);
		/* clean all queue on timeout as something fatal happened */
		if (rets == 0) {
			rets = -ETIME;
			mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
			mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
		}
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets > 0)
			rets = 0;
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}

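/*
 * Example (editorial sketch, not part of the driver): a blocking write;
 * the cb is consumed by mei_cl_write() on both success and failure.
 * "data" and "len" are hypothetical.
 *
 *	cb = mei_cl_alloc_cb(cl, len, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, data, len);
 *	cb->blocking = 1;
 *	ret = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
 */
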
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);

static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}

/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       cl->dma.buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

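/*
 * Example (editorial sketch, not part of the driver): map and unmap both
 * require the client to be disconnected; "buffer_id" is a hypothetical
 * non-zero value agreed with the firmware client.
 *
 *	rets = mei_cl_dma_alloc_and_map(cl, fp, buffer_id, size);
 *	...
 *	rets = mei_cl_dma_unmap(cl, fp);
 */
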
/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}