// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"
/**
 * ishtp_cl_get_tx_free_buffer_size() - Get free TX buffer size in bytes
 * @cl: ishtp client instance
 *
 * Return: number of free TX ring entries multiplied by the firmware
 * client's maximum message length
 */
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
	unsigned long tx_free_flags;
	int size;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

	return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

/**
 * ishtp_cl_get_tx_free_rings() - Get number of free TX ring entries
 * @cl: ishtp client instance
 *
 * Return: current number of free TX ring buffers
 */
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
	return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
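
/*
 * Usage note (illustrative sketch, not part of this file): a client driver
 * can check the reported free TX space before queuing a payload.  The names
 * 'my_cl', 'payload' and 'payload_len' below are hypothetical.
 *
 *	if (ishtp_cl_get_tx_free_buffer_size(my_cl) < payload_len)
 *		return -ENOSPC;
 *	return ishtp_cl_send(my_cl, payload, payload_len);
 */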
/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from the read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);
/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes a client device fields: Init spinlocks, init queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* DMA-path bookkeeping */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}
/**
 * ishtp_cl_allocate() - allocates client structure and sets it up.
 * @cl_device: ishtp client device
 *
 * Allocate memory for new client device and call to initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, cl_device->ishtp_dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
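
/*
 * Usage note (illustrative sketch, not part of this file): a client driver
 * typically pairs ishtp_cl_allocate() in its probe path with ishtp_cl_free()
 * in its remove/error path.  'cl_device' is the probed ishtp_cl_device and
 * 'my_cl' is a hypothetical driver-private pointer.
 *
 *	my_cl = ishtp_cl_allocate(cl_device);
 *	if (!my_cl)
 *		return -ENOMEM;
 *	...
 *	ishtp_cl_free(my_cl);
 */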
/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);
/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * This allocates a single bit in the hostmap. This function will make sure
 * that not too many client sessions are opened at the same time. Once
 * allocated, the client device instance is added to the ishtp device's
 * client list
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int id, ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);
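
/*
 * Usage note (illustrative sketch, not part of this file): after allocation,
 * a client is linked to reserve a host client id; on teardown it is unlinked
 * again.  'my_cl' is hypothetical and error handling is abbreviated.
 *
 *	ret = ishtp_cl_link(my_cl);
 *	if (ret)
 *		return ret;
 *	...
 *	ishtp_cl_unlink(my_cl);
 */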
/**
 * ishtp_cl_unlink() - remove fw_cl from the client device list
 * @cl: client device instance
 *
 * Remove a previously linked client from the ishtp device list
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into the device's
	 * structure, before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);
/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 on a successful disconnect response from the firmware, or an
 * error code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			 cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
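
/*
 * Usage note (illustrative sketch, not part of this file): the usual teardown
 * order in a client driver is to mark the client as disconnecting, send the
 * disconnect request, then drop it from the device lists and free it.
 * 'my_cl' is hypothetical.
 *
 *	ishtp_set_connection_state(my_cl, ISHTP_CL_DISCONNECTING);
 *	ishtp_cl_disconnect(my_cl);
 *	ishtp_cl_unlink(my_cl);
 *	ishtp_cl_flush_queues(my_cl);
 *	ishtp_cl_free(my_cl);
 */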
/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is currently connecting
 *
 * Return: true if another such client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}
/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful it will also
 * allocate RX and TX ring buffers
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
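
/*
 * Usage note (illustrative sketch, not part of this file): a typical bring-up
 * sequence in a client driver, using only functions exported from this file.
 * Ring sizes, the fw client id lookup and error handling are driver specific;
 * 'my_cl' and 'fw_id' are hypothetical.
 *
 *	ishtp_set_tx_ring_size(my_cl, CL_DEF_TX_RING_SIZE);
 *	ishtp_set_rx_ring_size(my_cl, CL_DEF_RX_RING_SIZE);
 *	ishtp_cl_set_fw_client_id(my_cl, fw_id);
 *	ishtp_set_connection_state(my_cl, ISHTP_CL_CONNECTING);
 *	ret = ishtp_cl_connect(my_cl);
 */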
/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers, add it to the device
 * read buffer list, then send a flow control request to the firmware so it
 * may send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}
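
/*
 * Note (illustrative, not from this file): each successful
 * ishtp_cl_read_start() arms exactly one receive buffer and grants the
 * firmware one flow-control credit.  A client driver's RX event handler is
 * therefore typically expected to return the consumed buffer (e.g. via
 * ishtp_cl_io_rb_recycle(), defined elsewhere in this layer) so a new credit
 * can be issued for the next incoming message.
 */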
/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function gets
 * a buffer from the tx ring buffers, copies the message data into it and
 * calls ishtp_cl_send_msg() to send the message
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free TX ring buffers - cannot send */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		return -EIO;
		/* Should not happen, as free list is pre-allocated */
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
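
/*
 * Usage note (illustrative sketch, not part of this file): ishtp_cl_send()
 * only queues the message; a return value of 0 means the data was copied into
 * a TX ring buffer, not that the firmware has already consumed it.  Callers
 * should expect -ENOMEM when the TX ring is exhausted and retry once
 * ishtp_cl_get_tx_free_rings() reports free entries again.  'my_cl' and
 * 'report' below are hypothetical.
 *
 *	ret = ishtp_cl_send(my_cl, report, sizeof(report));
 *	if (ret == -ENOMEM)
 *		ret = -EAGAIN;
 */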
/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received, call ishtp_cl_bus_rx_event()
 * to process the message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}
/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send message over IPC, either for the first time or from the completion
 * callback of the previous fragment
 */
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		/* Last (or only) fragment of the message */
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);	/* Must be before write */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* Submit to IPC queue with no callback */
		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
	} else {
		/* Send IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}
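
/*
 * Fragmentation example (illustrative, based on the branch above): with a
 * hypothetical IPC MTU of 128 bytes, a 300-byte message is emitted as two
 * 128-byte fragments with msg_complete = 0 (each re-arming ipc_tx_callback)
 * followed by a final 44-byte fragment with msg_complete = 1, after which the
 * ring buffer is returned to the TX free list.
 */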
/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}
/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}
/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}
/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * or work queue context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client msg
		 * size), drop the message and return the buffer to the free
		 * pool. Do we need to disconnect such a client? (We don't
		 * send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					&dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
/**
 * recv_ishtp_cl_msg_dma() - Receive client message via DMA
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function executes
 * in ISR or work queue context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client msg
		 * size), drop the message and return the buffer to the free
		 * pool. Do we need to disconnect such a client? (We don't
		 * send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				&dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
/* Simple field accessors below are exported for use by ISHTP client drivers */

void *ishtp_get_client_data(struct ishtp_cl *cl)
{
	return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
	cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);

struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
	return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
	cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
	cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
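
/*
 * Usage note (illustrative sketch, not part of this file): the accessors
 * above let a client driver stash and retrieve its private context on the
 * ishtp_cl, much like dev_set_drvdata()/dev_get_drvdata().  'cl' and 'ctx'
 * below are hypothetical driver variables.
 *
 *	ishtp_set_client_data(cl, ctx);
 *	...
 *	ctx = ishtp_get_client_data(cl);
 */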