// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/mei.h>

#include "mei_dev.h"
/**
 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
 *     for the dma descriptor
 * @dev: mei device
 * @dscr: dma descriptor
 *
 * Return:
 * * 0       - on success or zero allocation request
 * * -EINVAL - if size is not power of 2
 * * -ENOMEM - if allocation has failed
 */
static int mei_dmam_dscr_alloc(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	/* a zero-size descriptor needs no backing buffer */
	if (!dscr->size)
		return 0;

	if (WARN_ON(!is_power_of_2(dscr->size)))
		return -EINVAL;

	/* already allocated, nothing to do */
	if (dscr->vaddr)
		return 0;

	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
					  GFP_KERNEL);
	if (!dscr->vaddr)
		return -ENOMEM;

	return 0;
}
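/*
 * Note: the power-of-2 size check above is what lets the ring code
 * further down wrap indices with an "idx & (depth - 1)" mask instead
 * of a modulo operation.
 */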
/**
 * mei_dmam_dscr_free() - free a managed coherent buffer
 *     from the dma descriptor
 * @dev: mei device
 * @dscr: dma descriptor
 */
static void mei_dmam_dscr_free(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->vaddr)
		return;

	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
	dscr->vaddr = NULL;
}
/**
 * mei_dmam_ring_free() - free dma ring buffers
 * @dev: mei device
 */
void mei_dmam_ring_free(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}
/**
 * mei_dmam_ring_alloc() - allocate dma ring buffers
 * @dev: mei device
 *
 * Return: -ENOMEM on allocation failure, 0 otherwise
 */
int mei_dmam_ring_alloc(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
			goto err;

	return 0;

err:
	/* unwind: release any descriptors allocated so far */
	mei_dmam_ring_free(dev);
	return -ENOMEM;
}
/**
 * mei_dma_ring_is_allocated() - check if dma ring is allocated
 * @dev: mei device
 *
 * Return: true if dma ring is allocated
 */
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}
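/*
 * The ring is built from three descriptors: DMA_DSCR_HOST backs the
 * host-to-device buffer filled by mei_dma_ring_write(), DMA_DSCR_DEVICE
 * backs the device-to-host buffer drained by mei_dma_ring_read(), and
 * DMA_DSCR_CTRL holds the hbm_dma_ring_ctrl block with the read/write
 * indices shared with the device.
 */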
/**
 * mei_dma_ring_reset() - reset the dma control block
 * @dev: mei device
 */
void mei_dma_ring_reset(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);

	if (!ctrl)
		return;

	memset(ctrl, 0, sizeof(*ctrl));
}
/**
 * mei_dma_copy_from() - copy from dma ring into buffer
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
				u32 offset, u32 n)
{
	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;

	/* a slot is 4 bytes: convert slot offset and count to bytes */
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(buf, dbuf + b_offset, b_n);

	return b_n;
}
/**
 * mei_dma_copy_to() - copy from a buffer to the dma ring
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
			      u32 offset, u32 n)
{
	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;

	/* a slot is 4 bytes: convert slot offset and count to bytes */
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(hbuf + b_offset, buf, b_n);

	return b_n;
}
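/*
 * The "<< 2" conversions above turn slots into bytes; mei_data2slots(),
 * defined elsewhere in the driver (presumably mei_dev.h), performs the
 * inverse byte-length-to-slots rounding used by the read and write
 * paths below.
 */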
/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into; may be NULL in case of dropping the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	/* depth in slots; masking works because depth is a power of 2 */
	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer. */
	if (!buf)
		goto out;

	if (rd_idx + slots > dbuf_depth) {
		/* the message wraps: copy the tail of the ring first */
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	/* the index in the control block is free running, not masked */
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}
/**
 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
 * @dev: mei device
 *
 * Return: number of empty slots
 */
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 wr_idx, rd_idx, hbuf_depth, empty;

	if (!mei_dma_ring_is_allocated(dev))
		return 0;

	if (WARN_ON(!ctrl))
		return 0;

	/* easier to work in slots */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);

	if (rd_idx > wr_idx)
		empty = rd_idx - wr_idx;
	else
		empty = hbuf_depth - (wr_idx - rd_idx);

	return empty;
}
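/*
 * Note: hbuf_wr_idx is advanced by mei_dma_ring_write() below, while
 * hbuf_rd_idx is never written in this file and so is presumably
 * advanced by the device as it consumes data; when rd_idx == wr_idx
 * the ring is fully drained and all hbuf_depth slots report empty.
 */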
/**
 * mei_dma_ring_write() - write data to dma ring host buffer
 * @dev: mei device
 * @buf: data to be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

	if (wr_idx + slots > hbuf_depth) {
		/* the message wraps: fill the ring to the end first */
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);
	/* the index in the control block is free running, not masked */
	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
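/*
 * Typical call flow (a sketch based on this file alone, not a spec):
 * allocate the buffers with mei_dmam_ring_alloc(), clear the shared
 * indices with mei_dma_ring_reset(), then move payload with
 * mei_dma_ring_write()/mei_dma_ring_read() while gating writes on
 * mei_dma_ring_empty_slots(), and finally release everything with
 * mei_dmam_ring_free().
 */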