Linux 4.18.10
[linux/fpc-iii.git] / drivers / misc / mic / scif / scif_rb.c
blob b665757ca89a853243d929e6051fa993471544d3
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 */
18 #include <linux/circ_buf.h>
19 #include <linux/types.h>
20 #include <linux/io.h>
21 #include <linux/errno.h>
23 #include "scif_rb.h"
/*
 * Thin wrappers over the generic circ_buf helpers for a power-of-two
 * sized ring: bytes available to read, and bytes of free space.
 */
#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
28 /**
29 * scif_rb_init - Initializes the ring buffer
30 * @rb: ring buffer
31 * @read_ptr: A pointer to the read offset
32 * @write_ptr: A pointer to the write offset
33 * @rb_base: A pointer to the base of the ring buffer
34 * @size: The size of the ring buffer in powers of two
36 void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
37 void *rb_base, u8 size)
39 rb->rb_base = rb_base;
40 rb->size = (1 << size);
41 rb->read_ptr = read_ptr;
42 rb->write_ptr = write_ptr;
43 rb->current_read_offset = *read_ptr;
44 rb->current_write_offset = *write_ptr;
47 /* Copies a message to the ring buffer -- handles the wrap around case */
48 static void memcpy_torb(struct scif_rb *rb, void *header,
49 void *msg, u32 size)
51 u32 size1, size2;
53 if (header + size >= rb->rb_base + rb->size) {
54 /* Need to call two copies if it wraps around */
55 size1 = (u32)(rb->rb_base + rb->size - header);
56 size2 = size - size1;
57 memcpy_toio((void __iomem __force *)header, msg, size1);
58 memcpy_toio((void __iomem __force *)rb->rb_base,
59 msg + size1, size2);
60 } else {
61 memcpy_toio((void __iomem __force *)header, msg, size);
65 /* Copies a message from the ring buffer -- handles the wrap around case */
66 static void memcpy_fromrb(struct scif_rb *rb, void *header,
67 void *msg, u32 size)
69 u32 size1, size2;
71 if (header + size >= rb->rb_base + rb->size) {
72 /* Need to call two copies if it wraps around */
73 size1 = (u32)(rb->rb_base + rb->size - header);
74 size2 = size - size1;
75 memcpy_fromio(msg, (void __iomem __force *)header, size1);
76 memcpy_fromio(msg + size1,
77 (void __iomem __force *)rb->rb_base, size2);
78 } else {
79 memcpy_fromio(msg, (void __iomem __force *)header, size);
83 /**
84 * scif_rb_space - Query space available for writing to the RB
85 * @rb: ring buffer
87 * Return: size available for writing to RB in bytes.
89 u32 scif_rb_space(struct scif_rb *rb)
91 rb->current_read_offset = *rb->read_ptr;
93 * Update from the HW read pointer only once the peer has exposed the
94 * new empty slot. This barrier is paired with the memory barrier
95 * scif_rb_update_read_ptr()
97 mb();
98 return scif_rb_ring_space(rb->current_write_offset,
99 rb->current_read_offset, rb->size);
103 * scif_rb_write - Write a message to the RB
104 * @rb: ring buffer
105 * @msg: buffer to send the message. Must be at least size bytes long
106 * @size: the size (in bytes) to be copied to the RB
108 * This API does not block if there isn't enough space in the RB.
109 * Returns: 0 on success or -ENOMEM on failure
111 int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
113 void *header;
115 if (scif_rb_space(rb) < size)
116 return -ENOMEM;
117 header = rb->rb_base + rb->current_write_offset;
118 memcpy_torb(rb, header, msg, size);
120 * Wait until scif_rb_commit(). Update the local ring
121 * buffer data, not the shared data until commit.
123 rb->current_write_offset =
124 (rb->current_write_offset + size) & (rb->size - 1);
125 return 0;
/**
 * scif_rb_commit - To submit the message to let the peer fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
	/*
	 * We must ensure ordering of all the data committed previously
	 * before we expose the new message to the peer by updating the
	 * write_ptr. This write barrier is paired with the read barrier
	 * in scif_rb_count(..)
	 */
	wmb();
	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
155 * scif_rb_get - To get next message from the ring buffer
156 * @rb: ring buffer
157 * @size: Number of bytes to be read
159 * Return: NULL if no bytes to be read from the ring buffer, otherwise the
160 * pointer to the next byte
162 static void *scif_rb_get(struct scif_rb *rb, u32 size)
164 void *header = NULL;
166 if (scif_rb_count(rb, size) >= size)
167 header = rb->rb_base + rb->current_read_offset;
168 return header;
172 * scif_rb_get_next - Read from ring buffer.
173 * @rb: ring buffer
174 * @msg: buffer to hold the message. Must be at least size bytes long
175 * @size: Number of bytes to be read
177 * Return: number of bytes read if available bytes are >= size, otherwise
178 * returns zero.
180 u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
182 void *header = NULL;
183 int read_size = 0;
185 header = scif_rb_get(rb, size);
186 if (header) {
187 u32 next_cmd_offset =
188 (rb->current_read_offset + size) & (rb->size - 1);
190 read_size = size;
191 rb->current_read_offset = next_cmd_offset;
192 memcpy_fromrb(rb, header, msg, size);
194 return read_size;
/**
 * scif_rb_update_read_ptr - publish the local read offset to the peer
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
	u32 new_offset;

	new_offset = rb->current_read_offset;
	/*
	 * We must ensure ordering of all the data committed or read
	 * previously before we expose the empty slot to the peer by updating
	 * the read_ptr. This barrier is paired with the memory barrier in
	 * scif_rb_space(..)
	 */
	mb();
	WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
227 * scif_rb_count
228 * @rb: ring buffer
229 * @size: Number of bytes expected to be read
231 * Return: number of bytes that can be read from the RB
233 u32 scif_rb_count(struct scif_rb *rb, u32 size)
235 if (scif_rb_ring_cnt(rb->current_write_offset,
236 rb->current_read_offset,
237 rb->size) < size) {
238 rb->current_write_offset = *rb->write_ptr;
240 * Update from the HW write pointer if empty only once the peer
241 * has exposed the new message. This read barrier is paired
242 * with the write barrier in scif_rb_commit(..)
244 smp_rmb();
246 return scif_rb_ring_cnt(rb->current_write_offset,
247 rb->current_read_offset,
248 rb->size);