treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / infiniband / sw / rxe / rxe_queue.h
blobacd0a925481c953c79e2fec14a0ec977841060b0
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #ifndef RXE_QUEUE_H
35 #define RXE_QUEUE_H
/* implements a simple circular buffer that can optionally be
 * shared between user space and the kernel and can be resized
 *
 * the requested element size is rounded up to a power of 2
 * and the number of elements in the buffer is also rounded
 * up to a power of 2. Since the queue is empty when the
 * producer and consumer indices match, the maximum capacity
 * of the queue is one less than the number of element slots
 */
/* this data structure is shared between user space and kernel
 * space for those cases where the queue is shared. It contains
 * the producer and consumer indices. It also contains a copy
 * of the queue size parameters for user space to use, but the
 * kernel must use the parameters in the rxe_queue struct.
 * This MUST MATCH the corresponding librxe struct.
 * For performance reasons, arrange to have the producer and consumer
 * indices in separate cache lines.
 * The kernel should always mask the indices to avoid accessing
 * memory outside of the data area.
 */
58 struct rxe_queue_buf {
59 __u32 log2_elem_size;
60 __u32 index_mask;
61 __u32 pad_1[30];
62 __u32 producer_index;
63 __u32 pad_2[31];
64 __u32 consumer_index;
65 __u32 pad_3[31];
66 __u8 data[0];
/* Kernel-side queue state. Kernel code must take the size parameters
 * (index_mask, log2_elem_size) from here, never from the copies in
 * *buf, which user space can modify when the queue is shared.
 */
struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;		/* shared indices + data area */
	struct rxe_mmap_info	*ip;		/* mmap bookkeeping; presumably NULL
						 * when not shared — verify in rxe_queue.c
						 */
	size_t			buf_size;	/* total size of *buf in bytes */
	size_t			elem_size;	/* rounded-up element size */
	unsigned int		log2_elem_size;
	unsigned int		index_mask;	/* num_slots - 1 (power of two) */
};
/* Prepare the mmap info that lets user space map the queue buffer;
 * copies the mminfo to *outbuf and returns the new mapping object
 * through *ip_p. Presumably returns 0 on success or a negative errno
 * (kernel convention) — see the definition in rxe_queue.c / rxe_mmap.c.
 */
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

/* Reset an existing queue's state (defined in rxe_queue.c). */
void rxe_queue_reset(struct rxe_queue *q);

/* Create a queue. num_elem is in/out: per the header comment above,
 * the element count is rounded up to a power of two.
 */
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size);

/* Resize a queue, taking both locks so producers and consumers cannot
 * run while the elements are migrated. num_elem_p is in/out like
 * rxe_queue_init()'s num_elem.
 */
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     /* Protect producers while resizing queue */
		     spinlock_t *producer_lock,
		     /* Protect consumers while resizing queue */
		     spinlock_t *consumer_lock);

/* Free the queue and any associated resources. */
void rxe_queue_cleanup(struct rxe_queue *queue);
99 static inline int next_index(struct rxe_queue *q, int index)
101 return (index + 1) & q->buf->index_mask;
104 static inline int queue_empty(struct rxe_queue *q)
106 return ((q->buf->producer_index - q->buf->consumer_index)
107 & q->index_mask) == 0;
110 static inline int queue_full(struct rxe_queue *q)
112 return ((q->buf->producer_index + 1 - q->buf->consumer_index)
113 & q->index_mask) == 0;
116 static inline void advance_producer(struct rxe_queue *q)
118 q->buf->producer_index = (q->buf->producer_index + 1)
119 & q->index_mask;
122 static inline void advance_consumer(struct rxe_queue *q)
124 q->buf->consumer_index = (q->buf->consumer_index + 1)
125 & q->index_mask;
128 static inline void *producer_addr(struct rxe_queue *q)
130 return q->buf->data + ((q->buf->producer_index & q->index_mask)
131 << q->log2_elem_size);
134 static inline void *consumer_addr(struct rxe_queue *q)
136 return q->buf->data + ((q->buf->consumer_index & q->index_mask)
137 << q->log2_elem_size);
140 static inline unsigned int producer_index(struct rxe_queue *q)
142 return q->buf->producer_index;
145 static inline unsigned int consumer_index(struct rxe_queue *q)
147 return q->buf->consumer_index;
150 static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
152 return q->buf->data + ((index & q->index_mask)
153 << q->buf->log2_elem_size);
156 static inline unsigned int index_from_addr(const struct rxe_queue *q,
157 const void *addr)
159 return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
160 & q->index_mask;
163 static inline unsigned int queue_count(const struct rxe_queue *q)
165 return (q->buf->producer_index - q->buf->consumer_index)
166 & q->index_mask;
169 static inline void *queue_head(struct rxe_queue *q)
171 return queue_empty(q) ? NULL : consumer_addr(q);
174 #endif /* RXE_QUEUE_H */