// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include <linux/circ_buf.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

#include "scif_rb.h"

#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)

/**
 * scif_rb_init - Initializes the ring buffer
 * @rb: ring buffer
 * @read_ptr: A pointer to the read offset
 * @write_ptr: A pointer to the write offset
 * @rb_base: A pointer to the base of the ring buffer
 * @size: The size of the ring buffer as a power of two (2^size bytes)
 */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
                  void *rb_base, u8 size)
{
        rb->rb_base = rb_base;
        rb->size = (1 << size);
        rb->read_ptr = read_ptr;
        rb->write_ptr = write_ptr;
        rb->current_read_offset = *read_ptr;
        rb->current_write_offset = *write_ptr;
}
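
/*
 * Illustrative sketch (not part of the driver): how a caller might set up a
 * ring over a buffer shared with the peer. The names demo_setup_ring,
 * local_read, peer_write and shared_buf are hypothetical; the real callers
 * live elsewhere in the SCIF driver. Note that @size is an exponent, so
 * passing 12 yields a 4096-byte ring.
 */
static void __maybe_unused demo_setup_ring(struct scif_rb *rb,
                                           u32 *local_read, u32 *peer_write,
                                           void *shared_buf)
{
        scif_rb_init(rb, local_read, peer_write, shared_buf, 12);
}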

/* Copies a message to the ring buffer -- handles the wrap around case */
static void memcpy_torb(struct scif_rb *rb, void *header,
                        void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Need two copies if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_toio((void __iomem __force *)header, msg, size1);
                memcpy_toio((void __iomem __force *)rb->rb_base,
                            msg + size1, size2);
        } else {
                memcpy_toio((void __iomem __force *)header, msg, size);
        }
}

/* Copies a message from the ring buffer -- handles the wrap around case */
static void memcpy_fromrb(struct scif_rb *rb, void *header,
                          void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Need two copies if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_fromio(msg, (void __iomem __force *)header, size1);
                memcpy_fromio(msg + size1,
                              (void __iomem __force *)rb->rb_base, size2);
        } else {
                memcpy_fromio(msg, (void __iomem __force *)header, size);
        }
}

/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Return: size available for writing to RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
        rb->current_read_offset = *rb->read_ptr;
        /*
         * Update from the HW read pointer only once the peer has exposed the
         * new empty slot. This barrier is paired with the memory barrier in
         * scif_rb_update_read_ptr().
         */
        mb();
        return scif_rb_ring_space(rb->current_write_offset,
                                  rb->current_read_offset, rb->size);
}

/**
 * scif_rb_write - Write a message to the RB
 * @rb: ring buffer
 * @msg: buffer holding the message to send. Must be at least size bytes long
 * @size: the size (in bytes) to be copied to the RB
 *
 * This API does not block if there isn't enough space in the RB.
 *
 * Return: 0 on success or -ENOMEM on failure
 */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
{
        void *header;

        if (scif_rb_space(rb) < size)
                return -ENOMEM;
        header = rb->rb_base + rb->current_write_offset;
        memcpy_torb(rb, header, msg, size);
        /*
         * Wait until scif_rb_commit(): update only the local ring
         * buffer state, not the shared data, until commit.
         */
        rb->current_write_offset =
                (rb->current_write_offset + size) & (rb->size - 1);
        return 0;
}

/**
 * scif_rb_commit - Submit the message so the peer can fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
        /*
         * We must ensure ordering of all the data committed previously
         * before we expose the new message to the peer by updating the
         * write_ptr. This write barrier is paired with the read barrier
         * in scif_rb_count().
         */
        wmb();
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
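
/*
 * Illustrative sketch (not part of the driver): the expected producer-side
 * sequence is a non-blocking scif_rb_write() followed by scif_rb_commit(),
 * which publishes the message to the peer. demo_send() is hypothetical.
 */
static int __maybe_unused demo_send(struct scif_rb *rb, void *msg, u32 size)
{
        int err;

        err = scif_rb_write(rb, msg, size); /* -ENOMEM if the ring is full */
        if (err)
                return err;
        scif_rb_commit(rb); /* wmb() + write_ptr update make it visible */
        return 0;
}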

/**
 * scif_rb_get - Get the next message from the ring buffer
 * @rb: ring buffer
 * @size: Number of bytes to be read
 *
 * Return: NULL if there are fewer than @size bytes to read, otherwise a
 * pointer to the next byte
 */
static void *scif_rb_get(struct scif_rb *rb, u32 size)
{
        void *header = NULL;

        if (scif_rb_count(rb, size) >= size)
                header = rb->rb_base + rb->current_read_offset;
        return header;
}

/**
 * scif_rb_get_next - Read a message from the ring buffer
 * @rb: ring buffer
 * @msg: buffer to hold the message. Must be at least size bytes long
 * @size: Number of bytes to be read
 *
 * Return: number of bytes read if at least @size bytes are available,
 * otherwise zero.
 */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
{
        void *header = NULL;
        u32 read_size = 0;

        header = scif_rb_get(rb, size);
        if (header) {
                u32 next_cmd_offset =
                        (rb->current_read_offset + size) & (rb->size - 1);

                read_size = size;
                rb->current_read_offset = next_cmd_offset;
                memcpy_fromrb(rb, header, msg, size);
        }
        return read_size;
}
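
/*
 * Illustrative sketch (not part of the driver): the consumer-side
 * counterpart. The message is copied out with scif_rb_get_next(); the slot
 * is returned to the producer only when scif_rb_update_read_ptr() publishes
 * the new read offset. demo_recv() is hypothetical.
 */
static int __maybe_unused demo_recv(struct scif_rb *rb, void *msg, u32 size)
{
        if (scif_rb_get_next(rb, msg, size) != size)
                return -ENODATA; /* fewer than size bytes available */
        scif_rb_update_read_ptr(rb); /* expose the empty slot to the peer */
        return 0;
}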

/**
 * scif_rb_update_read_ptr - Publish the current read offset to the peer
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
        u32 new_offset;

        new_offset = rb->current_read_offset;
        /*
         * We must ensure ordering of all the data committed or read
         * previously before we expose the empty slot to the peer by
         * updating the read_ptr. This barrier is paired with the memory
         * barrier in scif_rb_space().
         */
        mb();
        WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}

/**
 * scif_rb_count - Query the number of bytes available for reading
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
        if (scif_rb_ring_cnt(rb->current_write_offset,
                             rb->current_read_offset,
                             rb->size) < size) {
                rb->current_write_offset = *rb->write_ptr;
                /*
                 * Update from the HW write pointer if the ring looks empty,
                 * but only once the peer has exposed the new message. This
                 * read barrier is paired with the write barrier in
                 * scif_rb_commit().
                 */
                smp_rmb();
        }
        return scif_rb_ring_cnt(rb->current_write_offset,
                                rb->current_read_offset,
                                rb->size);
}
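
/*
 * Illustrative sketch (not part of the driver): draining fixed-size messages
 * in a poll loop. scif_rb_count() refreshes the cached write offset from the
 * shared pointer when the ring looks empty, so calling it repeatedly is the
 * intended way to poll. demo_drain(), msg_size and handler are hypothetical.
 */
static void __maybe_unused demo_drain(struct scif_rb *rb, void *msg,
                                      u32 msg_size,
                                      void (*handler)(void *msg))
{
        while (scif_rb_count(rb, msg_size) >= msg_size) {
                scif_rb_get_next(rb, msg, msg_size);
                handler(msg);
                scif_rb_update_read_ptr(rb);
        }
}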