4 * Copyright (c) 2003-2016, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #include <linux/slab.h>
/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	/* Every Rx buffer is sized to the firmware client's max message */
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	struct ishtp_cl_rb *rb;
	int	ret = 0;
	unsigned long	flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		/* Allocate and init the request block itself */
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		/* Attach the data buffer to the request block */
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		/*
		 * Publish the buffer on the client's free list; the
		 * spinlock guards against concurrent list users in IRQ
		 * context.
		 */
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return	0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	/* Undo any buffers already queued on the free list */
	ishtp_cl_free_rx_ring(cl);
	return	ret;
}
59 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
60 * @cl: client device instance
62 * Allocate and initialize TX ring buffers
64 * Return: 0 on success else -ENOMEM
66 int ishtp_cl_alloc_tx_ring(struct ishtp_cl
*cl
)
68 size_t len
= cl
->device
->fw_client
->props
.max_msg_length
;
72 /* Allocate pool to free Tx bufs */
73 for (j
= 0; j
< cl
->tx_ring_size
; ++j
) {
74 struct ishtp_cl_tx_ring
*tx_buf
;
76 tx_buf
= kzalloc(sizeof(struct ishtp_cl_tx_ring
), GFP_KERNEL
);
80 tx_buf
->send_buf
.data
= kmalloc(len
, GFP_KERNEL
);
81 if (!tx_buf
->send_buf
.data
) {
86 spin_lock_irqsave(&cl
->tx_free_list_spinlock
, flags
);
87 list_add_tail(&tx_buf
->list
, &cl
->tx_free_list
.list
);
88 spin_unlock_irqrestore(&cl
->tx_free_list_spinlock
, flags
);
92 dev_err(&cl
->device
->dev
, "error in allocating Tx pool\n");
93 ishtp_cl_free_rx_ring(cl
);
/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long	flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		/* Free the data buffer first, then the request block */
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}
/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring	*tx_buf;
	unsigned long	flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		/* Free the data buffer first, then the ring entry */
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	/* Free the attached data buffer, then the block itself */
	kfree(rb->buffer.data);
	kfree(rb);
}
/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocted IO request block pointer, or NULL on allocation failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	/* kzalloc leaves buffer.data NULL until ishtp_io_rb_alloc_buf() */
	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}
/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate respose buffer
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	/* A zero-length request is a no-op, not an error */
	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return	-ENOMEM;

	rb->buffer.size = length;
	return 0;
}
/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int	rets = 0;
	unsigned long	flags;

	/* A detached rb (no owning client) cannot be recycled */
	if (!rb || !rb->cl)
		return	-EFAULT;

	cl = rb->cl;
	/* Return the buffer to the client's free list under the lock */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If we returned the first buffer to empty 'free' list,
	 * send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return	rets;
}