/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/gfp.h>
#include <linux/slab.h>

#include "qxl_drv.h"
#include "qxl_object.h"
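
/*
 * Allocate one qxl_drm_chunk wrapper plus a reserved BO of @chunk_size bytes
 * and link it onto @image's chunk list.  Chunks allocated here are freed
 * again by qxl_image_free_objects().
 */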
static int
qxl_allocate_chunk(struct qxl_device *qdev,
                   struct qxl_release *release,
                   struct qxl_drm_image *image,
                   unsigned int chunk_size)
{
        struct qxl_drm_chunk *chunk;
        int ret;

        chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
        if (!chunk)
                return -ENOMEM;

        ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
        if (ret) {
                kfree(chunk);
                return ret;
        }

        list_add_tail(&chunk->head, &image->chunk_list);
        return 0;
}
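
/*
 * Allocate the driver-side image objects: the qxl_drm_image wrapper, a BO
 * sized for the qxl_image descriptor, and a single data chunk large enough
 * for @height lines of @stride bytes plus the qxl_data_chunk header.  On
 * success *image_ptr points at the new image; on failure everything that
 * was allocated here is released again.
 */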
int
qxl_image_alloc_objects(struct qxl_device *qdev,
                        struct qxl_release *release,
                        struct qxl_drm_image **image_ptr,
                        int height, int stride)
{
        struct qxl_drm_image *image;
        int ret;

        image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
        if (!image)
                return -ENOMEM;

        INIT_LIST_HEAD(&image->chunk_list);

        ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
        if (ret) {
                kfree(image);
                return ret;
        }

        ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
        if (ret) {
                qxl_bo_unref(&image->bo);
                kfree(image);
                return ret;
        }

        *image_ptr = image;
        return 0;
}
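
/*
 * Undo qxl_image_alloc_objects(): drop every chunk BO and wrapper on the
 * image's chunk list, then the image BO and the qxl_drm_image itself.
 */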
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
        struct qxl_drm_chunk *chunk, *tmp;

        list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
                qxl_bo_unref(&chunk->bo);
                kfree(chunk);
        }

        qxl_bo_unref(&dimage->bo);
        kfree(dimage);
}
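
/*
 * Fill in the objects allocated by qxl_image_alloc_objects(): write the
 * qxl_data_chunk header, copy the pixel data into the chunk BO one page at a
 * time via qxl_bo_kmap_atomic_page(), then fill the qxl_image descriptor so
 * that it points at the chunk's physical address.
 */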
static int
qxl_image_init_helper(struct qxl_device *qdev,
                      struct qxl_release *release,
                      struct qxl_drm_image *dimage,
                      const uint8_t *data,
                      int width, int height,
                      int depth, unsigned int hash,
                      int stride)
{
        struct qxl_drm_chunk *drv_chunk;
        struct qxl_image *image;
        struct qxl_data_chunk *chunk;
        int i;
        int chunk_stride;
        int linesize = width * depth / 8;
        struct qxl_bo *chunk_bo, *image_bo;
        void *ptr;

        /* FIXME: Check integer overflow */
        /* TODO: variable number of chunks */
        drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

        chunk_bo = drv_chunk->bo;
        chunk_stride = stride; /* TODO: should use linesize, but it renders
                                  wrong (check the bitmaps are sent correctly
                                  first) */

        ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
        chunk = ptr;
        chunk->data_size = height * chunk_stride;
        chunk->prev_chunk = 0;
        chunk->next_chunk = 0;
        qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
        {
                void *k_data, *i_data;
                int remain;
                int page;
                int size;

                if (stride == linesize && chunk_stride == stride) {
                        /* Source and destination are both contiguous: copy
                         * the whole bitmap in one pass, one mapped page of
                         * the chunk BO at a time. */
                        remain = linesize * height;
                        page = 0;
                        i_data = (void *)data;

                        while (remain > 0) {
                                ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

                                if (page == 0) {
                                        /* Page 0 still holds the chunk
                                         * header, so the payload starts at
                                         * chunk->data. */
                                        chunk = ptr;
                                        k_data = chunk->data;
                                        size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
                                } else {
                                        k_data = ptr;
                                        size = PAGE_SIZE;
                                }
                                size = min(size, remain);

                                memcpy(k_data, i_data, size);

                                qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

                                i_data += size;
                                remain -= size;
                                page++;
                        }
                } else {
                        /* Strides differ: copy line by line, splitting each
                         * line at the chunk BO's page boundaries. */
                        unsigned page_base, page_offset, out_offset;

                        for (i = 0; i < height; ++i) {
                                i_data = (void *)data + i * stride;
                                remain = linesize;
                                out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

                                while (remain > 0) {
                                        page_base = out_offset & PAGE_MASK;
                                        page_offset = offset_in_page(out_offset);
                                        size = min((int)(PAGE_SIZE - page_offset), remain);

                                        ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
                                        k_data = ptr + page_offset;
                                        memcpy(k_data, i_data, size);
                                        qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

                                        remain -= size;
                                        i_data += size;
                                        out_offset += size;
                                }
                        }
                }
        }
        qxl_bo_kunmap(chunk_bo);

        image_bo = dimage->bo;
        ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
        image = ptr;

        image->descriptor.id = 0;
        image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

        image->descriptor.flags = 0;
        image->descriptor.width = width;
        image->descriptor.height = height;

        switch (depth) {
        case 1:
                /* TODO: BE? check by arch? */
                image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
                break;
        case 24:
                image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
                break;
        case 32:
                image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
                break;
        default:
                DRM_ERROR("unsupported image bit depth\n");
                qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
                return -EINVAL;
        }
        image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
        image->u.bitmap.x = width;
        image->u.bitmap.y = height;
        image->u.bitmap.stride = chunk_stride;
        image->u.bitmap.palette = 0;
        image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

        qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

        return 0;
}
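
/*
 * Offset @data to the (@x, @y) origin of the source bitmap and defer to
 * qxl_image_init_helper() above.
 *
 * Rough usage sketch (illustrative only; reservation of the release and all
 * error handling are omitted):
 *
 *      struct qxl_drm_image *dimage;
 *
 *      qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
 *      qxl_image_init(qdev, release, dimage, data, 0, 0,
 *                     width, height, 32, stride);
 *      ...
 *      qxl_image_free_objects(qdev, dimage);
 */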
int qxl_image_init(struct qxl_device *qdev,
                   struct qxl_release *release,
                   struct qxl_drm_image *dimage,
                   const uint8_t *data,
                   int x, int y, int width, int height,
                   int depth, int stride)
{
        data += y * stride + x * (depth / 8);
        return qxl_image_init_helper(qdev, release, dimage, data,
                                     width, height, depth, 0, stride);
}