4 * Copyright (c) 2003-2016, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
/*
 * NOTE(review): this view of the file is garbled — each source line is
 * split and prefixed with its original line number, and several original
 * lines (opening braces, the 'flags' declaration, the loop body) are
 * missing. Only comments are added; the visible text is left untouched.
 */
26 * ishtp_read_list_flush() - Flush read queue
27 * @cl: ishtp client instance
29 * Used to remove all entries from read queue for a client
31 static void ishtp_read_list_flush(struct ishtp_cl
*cl
)
33 struct ishtp_cl_rb
*rb
;
34 struct ishtp_cl_rb
*next
;
/* Walk the device-wide read list under read_list_spinlock and pick out
 * entries owned by this client (matched via ishtp_cl_cmp_id); the action
 * taken on a match is on lines missing from this view — presumably the
 * rb is unlinked/freed. TODO confirm against the full source. */
37 spin_lock_irqsave(&cl
->dev
->read_list_spinlock
, flags
);
38 list_for_each_entry_safe(rb
, next
, &cl
->dev
->read_list
.list
, list
)
39 if (rb
->cl
&& ishtp_cl_cmp_id(cl
, rb
->cl
)) {
43 spin_unlock_irqrestore(&cl
->dev
->read_list_spinlock
, flags
);
/*
 * NOTE(review): garbled/truncated view — the return statements and braces
 * of this function are among the missing original lines. Comments only.
 */
47 * ishtp_cl_flush_queues() - Flush all queues for a client
48 * @cl: ishtp client instance
50 * Used to remove all queues for a client. This is called when a client device
51 * needs reset due to error, S3 resume or during module removal
53 * Return: 0 on success else -EINVAL if device is NULL
55 int ishtp_cl_flush_queues(struct ishtp_cl
*cl
)
/* Guard against a NULL client or detached device before flushing. */
57 if (WARN_ON(!cl
|| !cl
->dev
))
60 ishtp_read_list_flush(cl
);
64 EXPORT_SYMBOL(ishtp_cl_flush_queues
);
/*
 * NOTE(review): garbled view — line fragments carry original line numbers
 * and some lines (e.g. braces, cl->dev assignment) are missing. Comments
 * only; code text untouched.
 */
67 * ishtp_cl_init() - Initialize all fields of a client device
68 * @cl: ishtp client instance
71 * Initializes a client device fields: Init spinlocks, init queues etc.
72 * This function is called during new client creation
74 static void ishtp_cl_init(struct ishtp_cl
*cl
, struct ishtp_device
*dev
)
/* Zero the whole structure first so every field starts from a known state. */
76 memset(cl
, 0, sizeof(struct ishtp_cl
));
77 init_waitqueue_head(&cl
->wait_ctrl_res
);
/* One spinlock per queue: free/in-process/tx/tx-free/flow-control. */
78 spin_lock_init(&cl
->free_list_spinlock
);
79 spin_lock_init(&cl
->in_process_spinlock
);
80 spin_lock_init(&cl
->tx_list_spinlock
);
81 spin_lock_init(&cl
->tx_free_list_spinlock
);
82 spin_lock_init(&cl
->fc_spinlock
);
83 INIT_LIST_HEAD(&cl
->link
);
/* Empty-initialize all per-client queues. */
86 INIT_LIST_HEAD(&cl
->free_rb_list
.list
);
87 INIT_LIST_HEAD(&cl
->tx_list
.list
);
88 INIT_LIST_HEAD(&cl
->tx_free_list
.list
);
89 INIT_LIST_HEAD(&cl
->in_process_list
.list
);
/* Default ring sizes; presumably overridable before connect — confirm. */
91 cl
->rx_ring_size
= CL_DEF_RX_RING_SIZE
;
92 cl
->tx_ring_size
= CL_DEF_TX_RING_SIZE
;
/* Start on the IPC TX path with both paths considered acked. */
95 cl
->last_tx_path
= CL_TX_PATH_IPC
;
96 cl
->last_dma_acked
= 1;
97 cl
->last_dma_addr
= NULL
;
98 cl
->last_ipc_acked
= 1;
/*
 * NOTE(review): truncated view — the NULL-check after kmalloc and the
 * return statements are among the missing lines. Comments only.
 */
102 * ishtp_cl_allocate() - allocates client structure and sets it up.
105 * Allocate memory for new client device and call to initialize each field.
107 * Return: The allocated client instance or NULL on failure
109 struct ishtp_cl
*ishtp_cl_allocate(struct ishtp_device
*dev
)
113 cl
= kmalloc(sizeof(struct ishtp_cl
), GFP_KERNEL
);
/* ishtp_cl_init() memsets the struct, so plain kmalloc (not kzalloc) is fine. */
117 ishtp_cl_init(cl
, dev
);
120 EXPORT_SYMBOL(ishtp_cl_allocate
);
/*
 * NOTE(review): truncated view — the 'dev' assignment, 'flags' declaration,
 * NULL checks and the kfree of 'cl' itself are among the missing lines.
 * Comments only.
 */
123 * ishtp_cl_free() - Frees a client device
124 * @cl: client device instance
126 * Frees a client device
128 void ishtp_cl_free(struct ishtp_cl
*cl
)
130 struct ishtp_device
*dev
;
/* Release both rings under cl_list_lock so no one races with teardown. */
140 spin_lock_irqsave(&dev
->cl_list_lock
, flags
);
141 ishtp_cl_free_rx_ring(cl
);
142 ishtp_cl_free_tx_ring(cl
);
144 spin_unlock_irqrestore(&dev
->cl_list_lock
, flags
);
146 EXPORT_SYMBOL(ishtp_cl_free
);
/*
 * NOTE(review): garbled/truncated view — error-return statements and some
 * branch bodies are among the missing original lines. Comments only.
 */
149 * ishtp_cl_link() - Reserve a host id and link the client instance
150 * @cl: client device instance
151 * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
152 * id from the available can be used
155 * This allocates a single bit in the hostmap. This function will make sure
156 * that not many client sessions are opened at the same time. Once allocated
157 * the client device instance is added to the ishtp device in the current
160 * Return: 0 or error code on failure
162 int ishtp_cl_link(struct ishtp_cl
*cl
, int id
)
164 struct ishtp_device
*dev
;
165 unsigned long flags
, flags_cl
;
168 if (WARN_ON(!cl
|| !cl
->dev
))
/* device_lock protects open_handle_count and the host id bitmap. */
173 spin_lock_irqsave(&dev
->device_lock
, flags
);
/* Refuse to open more sessions than the device-wide cap allows. */
175 if (dev
->open_handle_count
>= ISHTP_MAX_OPEN_HANDLE_COUNT
) {
180 /* If Id is not assigned get one*/
181 if (id
== ISHTP_HOST_CLIENT_ID_ANY
)
182 id
= find_first_zero_bit(dev
->host_clients_map
,
185 if (id
>= ISHTP_CLIENTS_MAX
) {
186 spin_unlock_irqrestore(&dev
->device_lock
, flags
);
187 dev_err(&cl
->device
->dev
, "id exceeded %d", ISHTP_CLIENTS_MAX
);
/* Id reserved — account for it and record it on the client. */
191 dev
->open_handle_count
++;
192 cl
->host_client_id
= id
;
193 spin_lock_irqsave(&dev
->cl_list_lock
, flags_cl
);
/* If the device went down meanwhile, bail (body of the branch is missing
 * from this view — presumably undoes the reservation; confirm). */
194 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
) {
198 list_add_tail(&cl
->link
, &dev
->cl_list
);
199 set_bit(id
, dev
->host_clients_map
);
200 cl
->state
= ISHTP_CL_INITIALIZING
;
203 spin_unlock_irqrestore(&dev
->cl_list_lock
, flags_cl
);
205 spin_unlock_irqrestore(&dev
->device_lock
, flags
);
208 EXPORT_SYMBOL(ishtp_cl_link
);
/*
 * NOTE(review): garbled/truncated view — the 'flags' declaration, early
 * returns and the 'break' after list_del_init are among the missing lines.
 * Comments only.
 */
211 * ishtp_cl_unlink() - remove fw_cl from the client device list
212 * @cl: client device instance
214 * Remove a previously linked device to a ishtp device
216 void ishtp_cl_unlink(struct ishtp_cl
*cl
)
218 struct ishtp_device
*dev
;
219 struct ishtp_cl
*pos
;
222 /* don't shout on error exit path */
/* Release the reserved host id bit and handle-count under device_lock. */
228 spin_lock_irqsave(&dev
->device_lock
, flags
);
229 if (dev
->open_handle_count
> 0) {
230 clear_bit(cl
->host_client_id
, dev
->host_clients_map
);
231 dev
->open_handle_count
--;
233 spin_unlock_irqrestore(&dev
->device_lock
, flags
);
236 * This checks that 'cl' is actually linked into device's structure,
237 * before attempting 'list_del'
239 spin_lock_irqsave(&dev
->cl_list_lock
, flags
);
240 list_for_each_entry(pos
, &dev
->cl_list
, link
)
241 if (cl
->host_client_id
== pos
->host_client_id
) {
242 list_del_init(&pos
->link
);
245 spin_unlock_irqrestore(&dev
->cl_list_lock
, flags
);
247 EXPORT_SYMBOL(ishtp_cl_unlink
);
/*
 * NOTE(review): garbled/truncated view — the 'err' declaration, return
 * statements and parts of several branches are among the missing lines.
 * Comments only.
 */
250 * ishtp_cl_disconnect() - Send disconnect request to firmware
251 * @cl: client device instance
253 * Send a disconnect request for a client to firmware.
255 * Return: 0 if successful disconnect response from the firmware or error
258 int ishtp_cl_disconnect(struct ishtp_cl
*cl
)
260 struct ishtp_device
*dev
;
263 if (WARN_ON(!cl
|| !cl
->dev
))
268 dev
->print_log(dev
, "%s() state %d\n", __func__
, cl
->state
);
/* Caller is expected to have set ISHTP_CL_DISCONNECTING first. */
270 if (cl
->state
!= ISHTP_CL_DISCONNECTING
) {
271 dev
->print_log(dev
, "%s() Disconnect in progress\n", __func__
);
/* Ask firmware (via HBM) to disconnect this client. */
275 if (ishtp_hbm_cl_disconnect_req(dev
, cl
)) {
276 dev
->print_log(dev
, "%s() Failed to disconnect\n", __func__
);
277 dev_err(&cl
->device
->dev
, "failed to disconnect.\n");
/* Wait until either firmware confirms the disconnect or the device
 * itself drops out of ENABLED (e.g. FW reset), bounded by a timeout. */
281 err
= wait_event_interruptible_timeout(cl
->wait_ctrl_res
,
282 (dev
->dev_state
!= ISHTP_DEV_ENABLED
||
283 cl
->state
== ISHTP_CL_DISCONNECTED
),
284 ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT
));
287 * If FW reset arrived, this will happen. Don't check cl->,
288 * as 'cl' may be freed already
290 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
) {
291 dev
->print_log(dev
, "%s() dev_state != ISHTP_DEV_ENABLED\n",
296 if (cl
->state
== ISHTP_CL_DISCONNECTED
) {
297 dev
->print_log(dev
, "%s() successful\n", __func__
);
303 EXPORT_SYMBOL(ishtp_cl_disconnect
);
/*
 * NOTE(review): garbled/truncated view — the 'flags' declaration, the
 * return statements (true inside the match, false at the end) and braces
 * are among the missing lines. Comments only.
 */
306 * ishtp_cl_is_other_connecting() - Check other client is connecting
307 * @cl: client device instance
309 * Checks if other client with the same fw client id is connecting
311 * Return: true if other client is connected else false
313 static bool ishtp_cl_is_other_connecting(struct ishtp_cl
*cl
)
315 struct ishtp_device
*dev
;
316 struct ishtp_cl
*pos
;
319 if (WARN_ON(!cl
|| !cl
->dev
))
/* Scan the device's client list for a DIFFERENT client that targets the
 * same fw client id and is mid-connect. */
323 spin_lock_irqsave(&dev
->cl_list_lock
, flags
);
324 list_for_each_entry(pos
, &dev
->cl_list
, link
) {
325 if ((pos
->state
== ISHTP_CL_CONNECTING
) && (pos
!= cl
) &&
326 cl
->fw_client_id
== pos
->fw_client_id
) {
327 spin_unlock_irqrestore(&dev
->cl_list_lock
, flags
);
331 spin_unlock_irqrestore(&dev
->cl_list_lock
, flags
);
/*
 * NOTE(review): garbled/truncated view — the 'rets' declaration, state
 * transitions, error returns and several 'if (rets)' lines are among the
 * missing originals. Comments only; code text untouched.
 */
337 * ishtp_cl_connect() - Send connect request to firmware
338 * @cl: client device instance
340 * Send a connect request for a client to firmware. If successful it will
341 * RX and TX ring buffers
343 * Return: 0 if successful connect response from the firmware and able
344 * to bind and allocate ring buffers or error code on failure
346 int ishtp_cl_connect(struct ishtp_cl
*cl
)
348 struct ishtp_device
*dev
;
351 if (WARN_ON(!cl
|| !cl
->dev
))
356 dev
->print_log(dev
, "%s() current_state = %d\n", __func__
, cl
->state
);
/* Only one client may connect to a given fw client at a time. */
358 if (ishtp_cl_is_other_connecting(cl
)) {
359 dev
->print_log(dev
, "%s() Busy\n", __func__
);
363 if (ishtp_hbm_cl_connect_req(dev
, cl
)) {
364 dev
->print_log(dev
, "%s() HBM connect req fail\n", __func__
);
/* Wait for firmware's connect (or disconnect) response, or bail out if
 * the device leaves ENABLED state; bounded by the connect timeout. */
368 rets
= wait_event_interruptible_timeout(cl
->wait_ctrl_res
,
369 (dev
->dev_state
== ISHTP_DEV_ENABLED
&&
370 (cl
->state
== ISHTP_CL_CONNECTED
||
371 cl
->state
== ISHTP_CL_DISCONNECTED
)),
372 ishtp_secs_to_jiffies(
373 ISHTP_CL_CONNECT_TIMEOUT
));
375 * If FW reset arrived, this will happen. Don't check cl->,
376 * as 'cl' may be freed already
378 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
) {
379 dev
->print_log(dev
, "%s() dev_state != ISHTP_DEV_ENABLED\n",
384 if (cl
->state
!= ISHTP_CL_CONNECTED
) {
385 dev
->print_log(dev
, "%s() state != ISHTP_CL_CONNECTED\n",
392 dev
->print_log(dev
, "%s() Invalid status\n", __func__
);
/* Bind the client to its bus device; on failure tear the link down. */
396 rets
= ishtp_cl_device_bind(cl
);
398 dev
->print_log(dev
, "%s() Bind error\n", __func__
);
399 ishtp_cl_disconnect(cl
);
/* Allocate RX then TX rings; each failure path disconnects, and the TX
 * failure path also frees the already-allocated RX ring. */
403 rets
= ishtp_cl_alloc_rx_ring(cl
);
405 dev
->print_log(dev
, "%s() Alloc RX ring failed\n", __func__
);
406 /* if failed allocation, disconnect */
407 ishtp_cl_disconnect(cl
);
411 rets
= ishtp_cl_alloc_tx_ring(cl
);
413 dev
->print_log(dev
, "%s() Alloc TX ring failed\n", __func__
);
414 /* if failed allocation, disconnect */
415 ishtp_cl_free_rx_ring(cl
);
416 ishtp_cl_disconnect(cl
);
420 /* Upon successful connection and allocation, emit flow-control */
421 rets
= ishtp_cl_read_start(cl
);
423 dev
->print_log(dev
, "%s() successful\n", __func__
);
427 EXPORT_SYMBOL(ishtp_cl_connect
);
/*
 * NOTE(review): garbled/truncated view — declarations of 'i'/'flags'/'rets',
 * error returns, rb field setup and the rb re-lookup in the failure path
 * are among the missing lines. Comments only.
 */
430 * ishtp_cl_read_start() - Prepare to read client message
431 * @cl: client device instance
433 * Get a free buffer from pool of free read buffers and add to read buffer
434 * pool to add contents. Send a flow control request to firmware to be able
437 * Return: 0 if successful or error code on failure
439 int ishtp_cl_read_start(struct ishtp_cl
*cl
)
441 struct ishtp_device
*dev
;
442 struct ishtp_cl_rb
*rb
;
446 unsigned long dev_flags
;
448 if (WARN_ON(!cl
|| !cl
->dev
))
/* Reads only make sense on a connected client and an enabled device. */
453 if (cl
->state
!= ISHTP_CL_CONNECTED
)
456 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
)
/* Verify the fw client this client is bound to still exists. */
459 i
= ishtp_fw_cl_by_id(dev
, cl
->fw_client_id
);
461 dev_err(&cl
->device
->dev
, "no such fw client %d\n",
466 /* The current rb is the head of the free rb list */
467 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
468 if (list_empty(&cl
->free_rb_list
.list
)) {
469 dev_warn(&cl
->device
->dev
,
470 "[ishtp-ish] Rx buffers pool is empty\n");
473 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
/* Detach the head rb from the free pool under its lock. */
476 rb
= list_entry(cl
->free_rb_list
.list
.next
, struct ishtp_cl_rb
, list
);
477 list_del_init(&rb
->list
);
478 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
483 INIT_LIST_HEAD(&rb
->list
);
487 * This must be BEFORE sending flow control -
488 * response in ISR may come too fast...
490 spin_lock_irqsave(&dev
->read_list_spinlock
, dev_flags
);
491 list_add_tail(&rb
->list
, &dev
->read_list
.list
);
492 spin_unlock_irqrestore(&dev
->read_list_spinlock
, dev_flags
);
/* Tell firmware we can accept one message for this client. */
493 if (ishtp_hbm_cl_flow_control_req(dev
, cl
)) {
498 /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
500 spin_lock_irqsave(&dev
->read_list_spinlock
, dev_flags
);
502 spin_unlock_irqrestore(&dev
->read_list_spinlock
, dev_flags
);
504 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
505 list_add_tail(&rb
->list
, &cl
->free_rb_list
.list
);
506 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
/*
 * NOTE(review): garbled/truncated view — the 'id' declaration, error
 * returns (-EPIPE/-ENODEV/-EMSGSIZE etc.) and some branch bodies are among
 * the missing lines. Comments only.
 */
512 * ishtp_cl_send() - Send a message to firmware
513 * @cl: client device instance
514 * @buf: message buffer
515 * @length: length of message
517 * If the client is correct state to send message, this function gets a buffer
518 * from tx ring buffers, copy the message data and call to send the message
519 * using ishtp_cl_send_msg()
521 * Return: 0 if successful or error code on failure
523 int ishtp_cl_send(struct ishtp_cl
*cl
, uint8_t *buf
, size_t length
)
525 struct ishtp_device
*dev
;
527 struct ishtp_cl_tx_ring
*cl_msg
;
528 int have_msg_to_send
= 0;
529 unsigned long tx_flags
, tx_free_flags
;
531 if (WARN_ON(!cl
|| !cl
->dev
))
536 if (cl
->state
!= ISHTP_CL_CONNECTED
) {
541 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
) {
546 /* Check if we have fw client device */
547 id
= ishtp_fw_cl_by_id(dev
, cl
->fw_client_id
);
/* Reject messages larger than the fw client's advertised maximum. */
553 if (length
> dev
->fw_clients
[id
].props
.max_msg_length
) {
/* Take a pre-allocated TX ring entry from the free list. */
559 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, tx_free_flags
);
560 if (list_empty(&cl
->tx_free_list
.list
)) {
561 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
,
567 cl_msg
= list_first_entry(&cl
->tx_free_list
.list
,
568 struct ishtp_cl_tx_ring
, list
);
569 if (!cl_msg
->send_buf
.data
) {
570 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
,
573 /* Should not happen, as free list is pre-allocated */
576 * This is safe, as 'length' is already checked for not exceeding
577 * max ISHTP message size per client
579 list_del_init(&cl_msg
->list
);
580 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
, tx_free_flags
);
581 memcpy(cl_msg
->send_buf
.data
, buf
, length
);
582 cl_msg
->send_buf
.size
= length
;
/* Queue the message; remember whether the TX list was already busy so we
 * only kick transmission when this is the first pending message. */
583 spin_lock_irqsave(&cl
->tx_list_spinlock
, tx_flags
);
584 have_msg_to_send
= !list_empty(&cl
->tx_list
.list
);
585 list_add_tail(&cl_msg
->list
, &cl
->tx_list
.list
);
586 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
588 if (!have_msg_to_send
&& cl
->ishtp_flow_ctrl_creds
> 0)
589 ishtp_cl_send_msg(dev
, cl
);
593 EXPORT_SYMBOL(ishtp_cl_send
);
/*
 * NOTE(review): garbled/truncated view — the 'flags' declaration and
 * braces are among the missing lines. Comments only.
 */
596 * ishtp_cl_read_complete() - read complete
597 * @rb: Pointer to client request block
599 * If the message is completely received call ishtp_cl_bus_rx_event()
602 static void ishtp_cl_read_complete(struct ishtp_cl_rb
*rb
)
605 int schedule_work_flag
= 0;
606 struct ishtp_cl
*cl
= rb
->cl
;
/* Append the completed rb to the client's in-process queue. */
608 spin_lock_irqsave(&cl
->in_process_spinlock
, flags
);
610 * if in-process list is empty, then need to schedule
611 * the processing thread
613 schedule_work_flag
= list_empty(&cl
->in_process_list
.list
);
614 list_add_tail(&rb
->list
, &cl
->in_process_list
.list
);
615 spin_unlock_irqrestore(&cl
->in_process_spinlock
, flags
);
/* Only notify the bus layer when the queue transitioned from empty —
 * an already-scheduled consumer will drain the rest. */
617 if (schedule_work_flag
)
618 ishtp_cl_bus_rx_event(cl
->device
);
/*
 * NOTE(review): garbled/truncated view — the 'rem'/'pmsg' declarations,
 * cl->sending/tx_offs bookkeeping lines, the NULL-dev guard and several
 * braces are among the missing originals. Comments only; code untouched.
 */
622 * ipc_tx_callback() - IPC tx callback function
623 * @prm: Pointer to client device instance
625 * Send message over IPC either first time or on callback on previous message
628 static void ipc_tx_callback(void *prm
)
630 struct ishtp_cl
*cl
= prm
;
631 struct ishtp_cl_tx_ring
*cl_msg
;
633 struct ishtp_device
*dev
= (cl
? cl
->dev
: NULL
);
634 struct ishtp_msg_hdr ishtp_hdr
;
635 unsigned long tx_flags
, tx_free_flags
;
642 * Other conditions if some critical error has
643 * occurred before this callback is called
645 if (dev
->dev_state
!= ISHTP_DEV_ENABLED
)
648 if (cl
->state
!= ISHTP_CL_CONNECTED
)
651 spin_lock_irqsave(&cl
->tx_list_spinlock
, tx_flags
);
652 if (list_empty(&cl
->tx_list
.list
)) {
653 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
/* A fresh message needs a flow-control credit; a continuation of an
 * in-flight message (cl->sending) does not. */
657 if (cl
->ishtp_flow_ctrl_creds
!= 1 && !cl
->sending
) {
658 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
/* Starting a new message: consume the credit and mark the IPC path as
 * the one awaiting an ack. */
663 --cl
->ishtp_flow_ctrl_creds
;
664 cl
->last_ipc_acked
= 0;
665 cl
->last_tx_path
= CL_TX_PATH_IPC
;
669 cl_msg
= list_entry(cl
->tx_list
.list
.next
, struct ishtp_cl_tx_ring
,
/* Bytes of the current message still to transmit from tx_offs onward. */
671 rem
= cl_msg
->send_buf
.size
- cl
->tx_offs
;
673 ishtp_hdr
.host_addr
= cl
->host_client_id
;
674 ishtp_hdr
.fw_addr
= cl
->fw_client_id
;
675 ishtp_hdr
.reserved
= 0;
676 pmsg
= cl_msg
->send_buf
.data
+ cl
->tx_offs
;
/* If the remainder fits in one IPC MTU, send the final fragment and
 * recycle the ring entry; otherwise send an MTU-sized fragment and
 * re-arm this callback for the next one. */
678 if (rem
<= dev
->mtu
) {
679 ishtp_hdr
.length
= rem
;
680 ishtp_hdr
.msg_complete
= 1;
682 list_del_init(&cl_msg
->list
); /* Must be before write */
683 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
684 /* Submit to IPC queue with no callback */
685 ishtp_write_message(dev
, &ishtp_hdr
, pmsg
);
686 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, tx_free_flags
);
687 list_add_tail(&cl_msg
->list
, &cl
->tx_free_list
.list
);
688 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
,
691 /* Send IPC fragment */
692 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
693 cl
->tx_offs
+= dev
->mtu
;
694 ishtp_hdr
.length
= dev
->mtu
;
695 ishtp_hdr
.msg_complete
= 0;
696 ishtp_send_msg(dev
, &ishtp_hdr
, pmsg
, ipc_tx_callback
, cl
);
/*
 * NOTE(review): garbled/truncated view — the second parameter line, the
 * tx_offs/sending resets and the ipc_tx_callback(cl) call are among the
 * missing lines. Comments only.
 */
701 * ishtp_cl_send_msg_ipc() -Send message using IPC
702 * @dev: ISHTP device instance
703 * @cl: Pointer to client device instance
705 * Send message over IPC not using DMA
707 static void ishtp_cl_send_msg_ipc(struct ishtp_device
*dev
,
710 /* If last DMA message wasn't acked yet, leave this one in Tx queue */
711 if (cl
->last_tx_path
== CL_TX_PATH_DMA
&& cl
->last_dma_acked
== 0)
/* Count messages sent over the IPC path (statistics). */
716 ++cl
->send_msg_cnt_ipc
;
/*
 * NOTE(review): garbled/truncated view — the second parameter line, the
 * 'off' declaration, early returns and the fallback path after a failed
 * DMA-buffer grab are among the missing originals. Comments only.
 */
720 * ishtp_cl_send_msg_dma() -Send message using DMA
721 * @dev: ISHTP device instance
722 * @cl: Pointer to client device instance
724 * Send message using DMA
726 static void ishtp_cl_send_msg_dma(struct ishtp_device
*dev
,
729 struct ishtp_msg_hdr hdr
;
730 struct dma_xfer_hbm dma_xfer
;
731 unsigned char *msg_addr
;
733 struct ishtp_cl_tx_ring
*cl_msg
;
734 unsigned long tx_flags
, tx_free_flags
;
736 /* If last IPC message wasn't acked yet, leave this one in Tx queue */
737 if (cl
->last_tx_path
== CL_TX_PATH_IPC
&& cl
->last_ipc_acked
== 0)
740 spin_lock_irqsave(&cl
->tx_list_spinlock
, tx_flags
);
741 if (list_empty(&cl
->tx_list
.list
)) {
742 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
746 cl_msg
= list_entry(cl
->tx_list
.list
.next
, struct ishtp_cl_tx_ring
,
/* Reserve space in the shared host DMA TX buffer for this message. */
749 msg_addr
= ishtp_cl_get_dma_send_buf(dev
, cl_msg
->send_buf
.size
);
/* No DMA space available: fall back to IPC when the transfer path is
 * the default one (failure branch is partially missing from this view). */
751 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
752 if (dev
->transfer_path
== CL_TX_PATH_DEFAULT
)
753 ishtp_cl_send_msg_ipc(dev
, cl
);
757 list_del_init(&cl_msg
->list
); /* Must be before write */
758 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
/* Consume a flow-control credit and mark the DMA path as pending ack. */
760 --cl
->ishtp_flow_ctrl_creds
;
761 cl
->last_dma_acked
= 0;
762 cl
->last_dma_addr
= msg_addr
;
763 cl
->last_tx_path
= CL_TX_PATH_DMA
;
765 /* write msg to dma buf */
766 memcpy(msg_addr
, cl_msg
->send_buf
.data
, cl_msg
->send_buf
.size
);
768 /* send dma_xfer hbm msg */
769 off
= msg_addr
- (unsigned char *)dev
->ishtp_host_dma_tx_buf
;
770 ishtp_hbm_hdr(&hdr
, sizeof(struct dma_xfer_hbm
));
771 dma_xfer
.hbm
= DMA_XFER
;
772 dma_xfer
.fw_client_id
= cl
->fw_client_id
;
773 dma_xfer
.host_client_id
= cl
->host_client_id
;
774 dma_xfer
.reserved
= 0;
/* Firmware gets the physical address of the staged message. */
775 dma_xfer
.msg_addr
= dev
->ishtp_host_dma_tx_buf_phys
+ off
;
776 dma_xfer
.msg_length
= cl_msg
->send_buf
.size
;
777 dma_xfer
.reserved2
= 0;
778 ishtp_write_message(dev
, &hdr
, (unsigned char *)&dma_xfer
);
/* Return the TX ring entry to the free pool and count the DMA send. */
779 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, tx_free_flags
);
780 list_add_tail(&cl_msg
->list
, &cl
->tx_free_list
.list
);
781 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
, tx_free_flags
);
782 ++cl
->send_msg_cnt_dma
;
/*
 * NOTE(review): garbled/truncated view — the braces and the 'else' between
 * the two calls are among the missing lines. Comments only.
 */
786 * ishtp_cl_send_msg() -Send message using DMA or IPC
787 * @dev: ISHTP device instance
788 * @cl: Pointer to client device instance
790 * Send message using DMA or IPC based on transfer_path
792 void ishtp_cl_send_msg(struct ishtp_device
*dev
, struct ishtp_cl
*cl
)
/* Dispatch on the device-wide transfer path: DMA when selected,
 * otherwise the IPC path. */
794 if (dev
->transfer_path
== CL_TX_PATH_DMA
)
795 ishtp_cl_send_msg_dma(dev
, cl
);
797 ishtp_cl_send_msg_ipc(dev
, cl
);
/*
 * NOTE(review): garbled/truncated view — the 'cl'/'flags' declarations,
 * 'goto eoi' lines, the complete_rb assignment, 'continue'/'break'
 * statements and several closing braces are among the missing originals.
 * Comments only; code text untouched.
 */
801 * recv_ishtp_cl_msg() -Receive client message
802 * @dev: ISHTP device instance
803 * @ishtp_hdr: Pointer to message header
805 * Receive and dispatch ISHTP client messages. This function executes in ISR
808 void recv_ishtp_cl_msg(struct ishtp_device
*dev
,
809 struct ishtp_msg_hdr
*ishtp_hdr
)
812 struct ishtp_cl_rb
*rb
;
813 struct ishtp_cl_rb
*new_rb
;
814 unsigned char *buffer
= NULL
;
815 struct ishtp_cl_rb
*complete_rb
= NULL
;
816 unsigned long dev_flags
;
/* Sanity-check the incoming header before touching any client state. */
820 if (ishtp_hdr
->reserved
) {
821 dev_err(dev
->devc
, "corrupted message header.\n");
825 if (ishtp_hdr
->length
> IPC_PAYLOAD_SIZE
) {
827 "ISHTP message length in hdr exceeds IPC MTU\n");
/* Find the pending read buffer whose client matches the header's
 * host/fw address pair. */
831 spin_lock_irqsave(&dev
->read_list_spinlock
, dev_flags
);
833 list_for_each_entry(rb
, &dev
->read_list
.list
, list
) {
836 if (!cl
|| !(cl
->host_client_id
== ishtp_hdr
->host_addr
&&
837 cl
->fw_client_id
== ishtp_hdr
->fw_addr
) ||
838 !(cl
->state
== ISHTP_CL_CONNECTED
))
841 /* If no Rx buffer is allocated, disband the rb */
842 if (rb
->buffer
.size
== 0 || rb
->buffer
.data
== NULL
) {
843 spin_unlock_irqrestore(&dev
->read_list_spinlock
,
845 dev_err(&cl
->device
->dev
,
846 "Rx buffer is not allocated.\n");
848 ishtp_io_rb_free(rb
);
849 cl
->status
= -ENOMEM
;
854 * If message buffer overflown (exceeds max. client msg
855 * size, drop message and return to free buffer.
856 * Do we need to disconnect such a client? (We don't send
857 * back FC, so communication will be stuck anyway)
859 if (rb
->buffer
.size
< ishtp_hdr
->length
+ rb
->buf_idx
) {
860 spin_unlock_irqrestore(&dev
->read_list_spinlock
,
862 dev_err(&cl
->device
->dev
,
863 "message overflow. size %d len %d idx %ld\n",
864 rb
->buffer
.size
, ishtp_hdr
->length
,
867 ishtp_cl_io_rb_recycle(rb
);
/* Copy this fragment from hardware into the rb at the current offset. */
872 buffer
= rb
->buffer
.data
+ rb
->buf_idx
;
873 dev
->ops
->ishtp_read(dev
, buffer
, ishtp_hdr
->length
);
875 rb
->buf_idx
+= ishtp_hdr
->length
;
876 if (ishtp_hdr
->msg_complete
) {
877 /* Last fragment in message - it's complete */
882 --cl
->out_flow_ctrl_creds
;
884 * the whole msg arrived, send a new FC, and add a new
885 * rb buffer for the next coming msg
887 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
/* Re-arm reception: queue a fresh rb (if available) and grant firmware
 * a new flow-control credit. */
889 if (!list_empty(&cl
->free_rb_list
.list
)) {
890 new_rb
= list_entry(cl
->free_rb_list
.list
.next
,
891 struct ishtp_cl_rb
, list
);
892 list_del_init(&new_rb
->list
);
893 spin_unlock_irqrestore(&cl
->free_list_spinlock
,
897 INIT_LIST_HEAD(&new_rb
->list
);
898 list_add_tail(&new_rb
->list
,
899 &dev
->read_list
.list
);
901 ishtp_hbm_cl_flow_control_req(dev
, cl
);
903 spin_unlock_irqrestore(&cl
->free_list_spinlock
,
907 /* One more fragment in message (even if this was last) */
908 ++cl
->recv_msg_num_frags
;
911 * We can safely break here (and in BH too),
912 * a single input message can go only to a single request!
917 spin_unlock_irqrestore(&dev
->read_list_spinlock
, dev_flags
);
918 /* If it's nobody's message, just read and discard it */
920 uint8_t rd_msg_buf
[ISHTP_RD_MSG_BUF_SIZE
];
922 dev_err(dev
->devc
, "Dropped Rx msg - no request\n");
923 dev
->ops
->ishtp_read(dev
, rd_msg_buf
, ishtp_hdr
->length
);
/* Timestamp the completed message and hand it to the read-complete path. */
928 getnstimeofday(&cl
->ts_rx
);
929 ++cl
->recv_msg_cnt_ipc
;
930 ishtp_cl_read_complete(complete_rb
);
/*
 * NOTE(review): garbled/truncated view — the 'cl'/'flags' declarations,
 * 'goto eoi' lines, the complete_rb assignment and the function's tail
 * are among the missing originals (the definition also appears to run
 * past the end of this chunk). Comments only; code text untouched.
 */
937 * recv_ishtp_cl_msg_dma() -Receive client message
938 * @dev: ISHTP device instance
939 * @msg: message pointer
942 * Receive and dispatch ISHTP client messages using DMA. This function executes
945 void recv_ishtp_cl_msg_dma(struct ishtp_device
*dev
, void *msg
,
946 struct dma_xfer_hbm
*hbm
)
949 struct ishtp_cl_rb
*rb
;
950 struct ishtp_cl_rb
*new_rb
;
951 unsigned char *buffer
= NULL
;
952 struct ishtp_cl_rb
*complete_rb
= NULL
;
953 unsigned long dev_flags
;
/* Find the pending read buffer whose client matches the DMA transfer's
 * host/fw client id pair. */
956 spin_lock_irqsave(&dev
->read_list_spinlock
, dev_flags
);
957 list_for_each_entry(rb
, &dev
->read_list
.list
, list
) {
959 if (!cl
|| !(cl
->host_client_id
== hbm
->host_client_id
&&
960 cl
->fw_client_id
== hbm
->fw_client_id
) ||
961 !(cl
->state
== ISHTP_CL_CONNECTED
))
965 * If no Rx buffer is allocated, disband the rb
967 if (rb
->buffer
.size
== 0 || rb
->buffer
.data
== NULL
) {
968 spin_unlock_irqrestore(&dev
->read_list_spinlock
,
970 dev_err(&cl
->device
->dev
,
971 "response buffer is not allocated.\n");
973 ishtp_io_rb_free(rb
);
974 cl
->status
= -ENOMEM
;
979 * If message buffer overflown (exceeds max. client msg
980 * size, drop message and return to free buffer.
981 * Do we need to disconnect such a client? (We don't send
982 * back FC, so communication will be stuck anyway)
984 if (rb
->buffer
.size
< hbm
->msg_length
) {
985 spin_unlock_irqrestore(&dev
->read_list_spinlock
,
987 dev_err(&cl
->device
->dev
,
988 "message overflow. size %d len %d idx %ld\n",
989 rb
->buffer
.size
, hbm
->msg_length
, rb
->buf_idx
);
991 ishtp_cl_io_rb_recycle(rb
);
/* DMA delivers the full message in one shot — copy it and set buf_idx
 * to the total length (no fragment accumulation as in the IPC path). */
996 buffer
= rb
->buffer
.data
;
997 memcpy(buffer
, msg
, hbm
->msg_length
);
998 rb
->buf_idx
= hbm
->msg_length
;
1000 /* Last fragment in message - it's complete */
1002 list_del(&rb
->list
);
1005 --cl
->out_flow_ctrl_creds
;
1007 * the whole msg arrived, send a new FC, and add a new
1008 * rb buffer for the next coming msg
1010 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
/* Re-arm reception with a fresh rb and a new flow-control credit,
 * mirroring the IPC receive path. */
1012 if (!list_empty(&cl
->free_rb_list
.list
)) {
1013 new_rb
= list_entry(cl
->free_rb_list
.list
.next
,
1014 struct ishtp_cl_rb
, list
);
1015 list_del_init(&new_rb
->list
);
1016 spin_unlock_irqrestore(&cl
->free_list_spinlock
,
1019 new_rb
->buf_idx
= 0;
1020 INIT_LIST_HEAD(&new_rb
->list
);
1021 list_add_tail(&new_rb
->list
,
1022 &dev
->read_list
.list
);
1024 ishtp_hbm_cl_flow_control_req(dev
, cl
);
1026 spin_unlock_irqrestore(&cl
->free_list_spinlock
,
1030 /* One more fragment in message (this is always last) */
1031 ++cl
->recv_msg_num_frags
;
1034 * We can safely break here (and in BH too),
1035 * a single input message can go only to a single request!
1040 spin_unlock_irqrestore(&dev
->read_list_spinlock
, dev_flags
);
1041 /* If it's nobody's message, just read and discard it */
1043 dev_err(dev
->devc
, "Dropped Rx (DMA) msg - no request\n");
/* Timestamp the completed message and hand it to the read-complete path. */
1048 getnstimeofday(&cl
->ts_rx
);
1049 ++cl
->recv_msg_cnt_dma
;
1050 ishtp_cl_read_complete(complete_rb
);