/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
17 #include <linux/slab.h>
21 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
22 * @cl: client device instance
24 * Allocate and initialize RX ring buffers
26 * Return: 0 on success else -ENOMEM
28 int ishtp_cl_alloc_rx_ring(struct ishtp_cl
*cl
)
30 size_t len
= cl
->device
->fw_client
->props
.max_msg_length
;
32 struct ishtp_cl_rb
*rb
;
36 for (j
= 0; j
< cl
->rx_ring_size
; ++j
) {
37 rb
= ishtp_io_rb_init(cl
);
42 ret
= ishtp_io_rb_alloc_buf(rb
, len
);
45 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
46 list_add_tail(&rb
->list
, &cl
->free_rb_list
.list
);
47 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
53 dev_err(&cl
->device
->dev
, "error in allocating Rx buffers\n");
54 ishtp_cl_free_rx_ring(cl
);
59 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
60 * @cl: client device instance
62 * Allocate and initialize TX ring buffers
64 * Return: 0 on success else -ENOMEM
66 int ishtp_cl_alloc_tx_ring(struct ishtp_cl
*cl
)
68 size_t len
= cl
->device
->fw_client
->props
.max_msg_length
;
72 cl
->tx_ring_free_size
= 0;
74 /* Allocate pool to free Tx bufs */
75 for (j
= 0; j
< cl
->tx_ring_size
; ++j
) {
76 struct ishtp_cl_tx_ring
*tx_buf
;
78 tx_buf
= kzalloc(sizeof(struct ishtp_cl_tx_ring
), GFP_KERNEL
);
82 tx_buf
->send_buf
.data
= kmalloc(len
, GFP_KERNEL
);
83 if (!tx_buf
->send_buf
.data
) {
88 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, flags
);
89 list_add_tail(&tx_buf
->list
, &cl
->tx_free_list
.list
);
90 ++cl
->tx_ring_free_size
;
91 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
, flags
);
95 dev_err(&cl
->device
->dev
, "error in allocating Tx pool\n");
96 ishtp_cl_free_rx_ring(cl
);
101 * ishtp_cl_free_rx_ring() - Free RX ring buffers
102 * @cl: client device instance
104 * Free RX ring buffers
106 void ishtp_cl_free_rx_ring(struct ishtp_cl
*cl
)
108 struct ishtp_cl_rb
*rb
;
111 /* release allocated memory - pass over free_rb_list */
112 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
113 while (!list_empty(&cl
->free_rb_list
.list
)) {
114 rb
= list_entry(cl
->free_rb_list
.list
.next
, struct ishtp_cl_rb
,
117 kfree(rb
->buffer
.data
);
120 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
121 /* release allocated memory - pass over in_process_list */
122 spin_lock_irqsave(&cl
->in_process_spinlock
, flags
);
123 while (!list_empty(&cl
->in_process_list
.list
)) {
124 rb
= list_entry(cl
->in_process_list
.list
.next
,
125 struct ishtp_cl_rb
, list
);
127 kfree(rb
->buffer
.data
);
130 spin_unlock_irqrestore(&cl
->in_process_spinlock
, flags
);
134 * ishtp_cl_free_tx_ring() - Free TX ring buffers
135 * @cl: client device instance
137 * Free TX ring buffers
139 void ishtp_cl_free_tx_ring(struct ishtp_cl
*cl
)
141 struct ishtp_cl_tx_ring
*tx_buf
;
144 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, flags
);
145 /* release allocated memory - pass over tx_free_list */
146 while (!list_empty(&cl
->tx_free_list
.list
)) {
147 tx_buf
= list_entry(cl
->tx_free_list
.list
.next
,
148 struct ishtp_cl_tx_ring
, list
);
149 list_del(&tx_buf
->list
);
150 --cl
->tx_ring_free_size
;
151 kfree(tx_buf
->send_buf
.data
);
154 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
, flags
);
156 spin_lock_irqsave(&cl
->tx_list_spinlock
, flags
);
157 /* release allocated memory - pass over tx_list */
158 while (!list_empty(&cl
->tx_list
.list
)) {
159 tx_buf
= list_entry(cl
->tx_list
.list
.next
,
160 struct ishtp_cl_tx_ring
, list
);
161 list_del(&tx_buf
->list
);
162 kfree(tx_buf
->send_buf
.data
);
165 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, flags
);
169 * ishtp_io_rb_free() - Free IO request block
170 * @rb: IO request block
172 * Free io request block memory
174 void ishtp_io_rb_free(struct ishtp_cl_rb
*rb
)
179 kfree(rb
->buffer
.data
);
184 * ishtp_io_rb_init() - Allocate and init IO request block
185 * @cl: client device instance
187 * Allocate and initialize request block
189 * Return: Allocted IO request block pointer
191 struct ishtp_cl_rb
*ishtp_io_rb_init(struct ishtp_cl
*cl
)
193 struct ishtp_cl_rb
*rb
;
195 rb
= kzalloc(sizeof(struct ishtp_cl_rb
), GFP_KERNEL
);
199 INIT_LIST_HEAD(&rb
->list
);
206 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
207 * @rb: IO request block
208 * @length: length of response buffer
210 * Allocate respose buffer
212 * Return: 0 on success else -ENOMEM
214 int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb
*rb
, size_t length
)
222 rb
->buffer
.data
= kmalloc(length
, GFP_KERNEL
);
223 if (!rb
->buffer
.data
)
226 rb
->buffer
.size
= length
;
231 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
232 * @rb: IO request block
234 * Re-append rb to its client's free list and send flow control if needed
236 * Return: 0 on success else -EFAULT
238 int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb
*rb
)
248 spin_lock_irqsave(&cl
->free_list_spinlock
, flags
);
249 list_add_tail(&rb
->list
, &cl
->free_rb_list
.list
);
250 spin_unlock_irqrestore(&cl
->free_list_spinlock
, flags
);
253 * If we returned the first buffer to empty 'free' list,
256 if (!cl
->out_flow_ctrl_creds
)
257 rets
= ishtp_cl_read_start(cl
);
261 EXPORT_SYMBOL(ishtp_cl_io_rb_recycle
);
264 * ishtp_cl_tx_empty() -test whether client device tx buffer is empty
265 * @cl: Pointer to client device instance
267 * Look client device tx buffer list, and check whether this list is empty
269 * Return: true if client tx buffer list is empty else false
271 bool ishtp_cl_tx_empty(struct ishtp_cl
*cl
)
274 unsigned long tx_flags
;
276 spin_lock_irqsave(&cl
->tx_list_spinlock
, tx_flags
);
277 tx_list_empty
= list_empty(&cl
->tx_list
.list
);
278 spin_unlock_irqrestore(&cl
->tx_list_spinlock
, tx_flags
);
280 return !!tx_list_empty
;
282 EXPORT_SYMBOL(ishtp_cl_tx_empty
);
285 * ishtp_cl_rx_get_rb() -Get a rb from client device rx buffer list
286 * @cl: Pointer to client device instance
288 * Check client device in-processing buffer list and get a rb from it.
290 * Return: rb pointer if buffer list isn't empty else NULL
292 struct ishtp_cl_rb
*ishtp_cl_rx_get_rb(struct ishtp_cl
*cl
)
294 unsigned long rx_flags
;
295 struct ishtp_cl_rb
*rb
;
297 spin_lock_irqsave(&cl
->in_process_spinlock
, rx_flags
);
298 rb
= list_first_entry_or_null(&cl
->in_process_list
.list
,
299 struct ishtp_cl_rb
, list
);
301 list_del_init(&rb
->list
);
302 spin_unlock_irqrestore(&cl
->in_process_spinlock
, rx_flags
);
306 EXPORT_SYMBOL(ishtp_cl_rx_get_rb
);