// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include "client.h"
/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers.
 *
 * Return: 0 on success, else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		/* Newly allocated buffers start out on the free list */
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}
/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers.
 *
 * Return: 0 on success, else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool to free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
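
/*
 * Usage sketch (illustrative only, not part of the driver): a client
 * connect path would typically allocate both rings together and unwind
 * the RX ring if the TX pool fails. The helper name below is
 * hypothetical; the alloc/free calls are this file's real API.
 */
static int __maybe_unused example_alloc_rings(struct ishtp_cl *cl)
{
	int rets;

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets)
		return rets;

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets)
		ishtp_cl_free_rx_ring(cl);	/* unwind partial setup */

	return rets;
}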
/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers.
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}
/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers.
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
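
/*
 * Usage sketch (illustrative only): teardown mirrors allocation; a
 * hypothetical disconnect path frees both rings once traffic has
 * stopped.
 */
static void __maybe_unused example_free_rings(struct ishtp_cl *cl)
{
	ishtp_cl_free_tx_ring(cl);
	ishtp_cl_free_rx_ring(cl);
}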
/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free IO request block memory.
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}
/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize a request block.
 *
 * Return: allocated IO request block pointer, or NULL on failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}
/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer.
 *
 * Return: 0 on success, else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
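
/*
 * Usage sketch (illustrative only): ishtp_io_rb_init() and
 * ishtp_io_rb_alloc_buf() pair together, with ishtp_io_rb_free() as
 * the unwind path; this is the same pattern ishtp_cl_alloc_rx_ring()
 * follows above. The helper name is hypothetical.
 */
static struct ishtp_cl_rb *__maybe_unused example_make_rb(struct ishtp_cl *cl,
							  size_t len)
{
	struct ishtp_cl_rb *rb = ishtp_io_rb_init(cl);

	if (!rb)
		return NULL;

	if (ishtp_io_rb_alloc_buf(rb, len)) {
		ishtp_io_rb_free(rb);
		return NULL;
	}

	return rb;
}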
/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed.
 *
 * Return: 0 on success, else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;

	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If we returned the first buffer to empty 'free' list,
	 * send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
/**
 * ishtp_cl_tx_empty() - test whether the client device TX buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device TX buffer list and check whether it is empty.
 *
 * Return: true if the client TX buffer list is empty, else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
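
/*
 * Usage sketch (illustrative only): ishtp_cl_tx_empty() lets a caller
 * confirm queued TX traffic has drained, e.g. before freeing the TX
 * ring. A real caller would retry or wait on an event rather than
 * give up after one check; the helper name is hypothetical.
 */
static void __maybe_unused example_free_tx_when_idle(struct ishtp_cl *cl)
{
	if (ishtp_cl_tx_empty(cl))
		ishtp_cl_free_tx_ring(cl);
}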
/**
 * ishtp_cl_rx_get_rb() - Get an rb from the client device RX buffer list
 * @cl: Pointer to client device instance
 *
 * Check the client device in-process buffer list and get an rb from it.
 *
 * Return: rb pointer if the buffer list isn't empty, else NULL
 */
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
	unsigned long rx_flags;
	struct ishtp_cl_rb *rb;

	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
	rb = list_first_entry_or_null(&cl->in_process_list.list,
				      struct ishtp_cl_rb, list);
	if (rb)
		list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

	return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
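
/*
 * Usage sketch (illustrative only): an RX event handler drains the
 * in-process list and recycles each buffer after consuming it.
 * consume() is a hypothetical callback; buffer.data and buf_idx
 * (bytes received) are real ishtp_cl_rb fields.
 */
static void __maybe_unused example_rx_event(struct ishtp_cl *cl,
					    void (*consume)(void *data,
							    size_t len))
{
	struct ishtp_cl_rb *rb;

	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
		consume(rb->buffer.data, rb->buf_idx);
		ishtp_cl_io_rb_recycle(rb);
	}
}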