/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
        size_t len = cl->device->fw_client->props.max_msg_length;
        int j;
        struct ishtp_cl_rb *rb;
        int ret = 0;
        unsigned long flags;

        for (j = 0; j < cl->rx_ring_size; ++j) {
                rb = ishtp_io_rb_init(cl);
                if (!rb) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = ishtp_io_rb_alloc_buf(rb, len);
                if (ret)
                        goto out;
                spin_lock_irqsave(&cl->free_list_spinlock, flags);
                list_add_tail(&rb->list, &cl->free_rb_list.list);
                spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
        }

        return 0;

out:
        dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
        ishtp_cl_free_rx_ring(cl);
        return ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
        size_t len = cl->device->fw_client->props.max_msg_length;
        int j;
        unsigned long flags;

        cl->tx_ring_free_size = 0;

        /* Allocate pool of free Tx bufs */
        for (j = 0; j < cl->tx_ring_size; ++j) {
                struct ishtp_cl_tx_ring *tx_buf;

                tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
                if (!tx_buf)
                        goto out;

                tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
                if (!tx_buf->send_buf.data) {
                        kfree(tx_buf);
                        goto out;
                }

                spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
                list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
                ++cl->tx_ring_free_size;
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
        }
        return 0;
out:
        dev_err(&cl->device->dev, "error in allocating Tx pool\n");
        ishtp_cl_free_tx_ring(cl);
        return -ENOMEM;
}
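
/*
 * Illustrative sketch (not part of the original file): a client driver
 * would typically allocate both rings while setting up its connection and
 * unwind on failure. The ring sizes and the example_setup_rings() wrapper
 * are hypothetical; only the ishtp_cl_* calls come from this file.
 */
#if 0
static int example_setup_rings(struct ishtp_cl *cl)
{
        int ret;

        cl->rx_ring_size = 32;  /* hypothetical sizes */
        cl->tx_ring_size = 16;

        ret = ishtp_cl_alloc_rx_ring(cl);
        if (ret)
                return ret;

        ret = ishtp_cl_alloc_tx_ring(cl);
        if (ret) {
                /* RX ring was allocated, so undo it on TX failure */
                ishtp_cl_free_rx_ring(cl);
                return ret;
        }
        return 0;
}
#endif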

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;
        unsigned long flags;

        /* release allocated memory - pass over free_rb_list */
        spin_lock_irqsave(&cl->free_list_spinlock, flags);
        while (!list_empty(&cl->free_rb_list.list)) {
                rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
                                list);
                list_del(&rb->list);
                kfree(rb->buffer.data);
                kfree(rb);
        }
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
        /* release allocated memory - pass over in_process_list */
        spin_lock_irqsave(&cl->in_process_spinlock, flags);
        while (!list_empty(&cl->in_process_list.list)) {
                rb = list_entry(cl->in_process_list.list.next,
                                struct ishtp_cl_rb, list);
                list_del(&rb->list);
                kfree(rb->buffer.data);
                kfree(rb);
        }
        spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
        struct ishtp_cl_tx_ring *tx_buf;
        unsigned long flags;

        spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
        /* release allocated memory - pass over tx_free_list */
        while (!list_empty(&cl->tx_free_list.list)) {
                tx_buf = list_entry(cl->tx_free_list.list.next,
                                    struct ishtp_cl_tx_ring, list);
                list_del(&tx_buf->list);
                --cl->tx_ring_free_size;
                kfree(tx_buf->send_buf.data);
                kfree(tx_buf);
        }
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

        spin_lock_irqsave(&cl->tx_list_spinlock, flags);
        /* release allocated memory - pass over tx_list */
        while (!list_empty(&cl->tx_list.list)) {
                tx_buf = list_entry(cl->tx_list.list.next,
                                    struct ishtp_cl_tx_ring, list);
                list_del(&tx_buf->list);
                kfree(tx_buf->send_buf.data);
                kfree(tx_buf);
        }
        spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
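
/*
 * Illustrative sketch (not part of the original file): teardown is the
 * mirror image of setup. Both free routines walk their free and in-flight
 * lists under the corresponding spinlocks, so a client would call them
 * once it has disconnected and no traffic is pending.
 * example_teardown_rings() is a hypothetical name.
 */
#if 0
static void example_teardown_rings(struct ishtp_cl *cl)
{
        ishtp_cl_free_rx_ring(cl);
        ishtp_cl_free_tx_ring(cl);
}
#endif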

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
        if (rb == NULL)
                return;

        kfree(rb->buffer.data);
        kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;

        rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
        if (!rb)
                return NULL;

        INIT_LIST_HEAD(&rb->list);
        rb->cl = cl;
        rb->buf_idx = 0;
        return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
        if (!rb)
                return -EINVAL;

        if (length == 0)
                return 0;

        rb->buffer.data = kmalloc(length, GFP_KERNEL);
        if (!rb->buffer.data)
                return -ENOMEM;

        rb->buffer.size = length;
        return 0;
}
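
/*
 * Illustrative sketch (not part of the original file): a one-off request
 * block outside the ring would be built from the two helpers above, with
 * ishtp_io_rb_free() as the error-path counterpart. example_make_rb() is
 * a hypothetical wrapper.
 */
#if 0
static struct ishtp_cl_rb *example_make_rb(struct ishtp_cl *cl, size_t len)
{
        struct ishtp_cl_rb *rb = ishtp_io_rb_init(cl);

        if (!rb)
                return NULL;

        if (ishtp_io_rb_alloc_buf(rb, len)) {
                /* rb itself was allocated, so release it on buffer failure */
                ishtp_io_rb_free(rb);
                return NULL;
        }
        return rb;
}
#endif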

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
        struct ishtp_cl *cl;
        int rets = 0;
        unsigned long flags;

        if (!rb || !rb->cl)
                return -EFAULT;

        cl = rb->cl;
        spin_lock_irqsave(&cl->free_list_spinlock, flags);
        list_add_tail(&rb->list, &cl->free_rb_list.list);
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

        /*
         * If we returned the first buffer to empty 'free' list,
         * send flow control
         */
        if (!cl->out_flow_ctrl_creds)
                rets = ishtp_cl_read_start(cl);

        return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);

/**
 * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device tx buffer list and check whether it is empty
 *
 * Return: true if client tx buffer list is empty else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
        int tx_list_empty;
        unsigned long tx_flags;

        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        tx_list_empty = list_empty(&cl->tx_list.list);
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
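
/*
 * Illustrative sketch (not part of the original file): before tearing a
 * connection down, a client can poll ishtp_cl_tx_empty() to let queued TX
 * messages drain. The bounded msleep() loop below (assumes <linux/delay.h>)
 * is a hypothetical pattern, not an ISHTP API.
 */
#if 0
static void example_wait_tx_drain(struct ishtp_cl *cl)
{
        int retries = 100;

        while (!ishtp_cl_tx_empty(cl) && retries--)
                msleep(10);
}
#endif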

/**
 * ishtp_cl_rx_get_rb() - Get an rb from client device rx buffer list
 * @cl: Pointer to client device instance
 *
 * Check the client device in-process buffer list and get an rb from it.
 *
 * Return: rb pointer if buffer list isn't empty else NULL
 */
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
        unsigned long rx_flags;
        struct ishtp_cl_rb *rb;

        spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
        rb = list_first_entry_or_null(&cl->in_process_list.list,
                                      struct ishtp_cl_rb, list);
        if (rb)
                list_del_init(&rb->list);
        spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

        return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
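
/*
 * Illustrative sketch (not part of the original file): the usual RX pattern
 * pairs ishtp_cl_rx_get_rb() with ishtp_cl_io_rb_recycle(), draining every
 * buffer that was queued onto the in-process list; rb->buf_idx holds the
 * number of valid bytes received. example_rx_event() and handle_msg() are
 * hypothetical names.
 */
#if 0
static void example_rx_event(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;

        while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                handle_msg(rb->buffer.data, rb->buf_idx);
                /* return the rb so flow control credit can be replenished */
                ishtp_cl_io_rb_recycle(rb);
        }
}
#endif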