// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>

#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"
/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there serialized by vbva_buffer_begin_update()
 * and vbva_buffer_end_update().
 *
 * free_offset is the writing position. data_offset is the reading position.
 * free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset when data
 * are in the buffer.
 * The guest only changes free_offset, the host only changes data_offset.
 */
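/*
 * A sketch of the ring while data is pending (offsets are illustrative
 * only):
 *
 *	0        data_offset              free_offset         data_len
 *	|  free  |<------ pending data ------>|      free      |
 *
 * The host consumes from data_offset, the guest appends at free_offset,
 * and both wrap modulo data_len.
 */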
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}
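/*
 * Worked example for the arithmetic above (values are illustrative):
 * with data_len == 1000, data_offset == 100 and free_offset == 700,
 * diff is -600, so 1000 + (-600) == 400 bytes are free; with the
 * offsets swapped, diff is a positive 600 and is returned directly.
 */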
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}
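/*
 * Worked example of the split copy above (values are illustrative):
 * with data_len == 1000, offset == 900 and len == 300,
 * bytes_till_boundary is 100 and diff is 200, so the first memcpy()
 * fills bytes 900..999 and the second wraps the remaining 200 bytes to
 * the start of data[].
 */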
static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}

	return true;
}
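/*
 * Worked example of the threshold clamp above (values are
 * illustrative): with partial_write_tresh == 256 and 300 bytes
 * available after a flush, at most 300 - 256 == 44 bytes are written
 * in this iteration; the loop then flushes again, so free_offset never
 * closes the gap to data_offset.
 */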
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}
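/*
 * The pre-seeded VERR_NOT_SUPPORTED above acts as a failure sentinel:
 * a host that never processes the VBVA_ENABLE request leaves the
 * negative status in place, so the "result >= 0" check only succeeds
 * when the host actually acknowledged the enable.
 */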
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}
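/*
 * Sizing note with illustrative numbers (the buffer size is set by the
 * caller, not fixed here): for a 4 MiB VBVA allocation the usable ring
 * is 4 MiB minus sizeof(struct vbva_buffer), because the vbva_buffer
 * header and its records queue occupy the start of the same allocation
 * and data[] follows it.
 */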
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If even after flush there is no place then fail the request */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}
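/*
 * Worked example of the full-queue test above, taking VBVA_MAX_RECORDS
 * as 64 for illustration: with record_free_index == 63 and
 * record_first_index == 0, next wraps to 0 == record_first_index, so
 * the queue is treated as full; one slot is always left unused to
 * distinguish a full queue from an empty one.
 */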
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}
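/*
 * A minimal sketch of how these entry points compose, assuming a guest
 * pool "guest_pool", a mapped "vbva" buffer and a command payload "cmd"
 * (all placeholder names, not driver symbols; error handling elided):
 *
 *	if (vbva_enable(&vbva_ctx, guest_pool, vbva, screen)) {
 *		if (vbva_buffer_begin_update(&vbva_ctx, guest_pool)) {
 *			vbva_write(&vbva_ctx, guest_pool, &cmd, sizeof(cmd));
 *			vbva_buffer_end_update(&vbva_ctx);
 *		}
 *		vbva_disable(&vbva_ctx, guest_pool, screen);
 *	}
 */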
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}