/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/gfp.h>
#include <linux/slab.h>

#include "qxl_drv.h"
#include "qxl_object.h"
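
/*
 * qxl_allocate_chunk() - allocate one pixel-data chunk for an image.
 *
 * The chunk's backing BO is created reserved via qxl_alloc_bo_reserved()
 * and the bookkeeping struct is queued on @image's chunk_list.
 */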
static int
qxl_allocate_chunk(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *image,
		   unsigned int chunk_size)
{
	struct qxl_drm_chunk *chunk;
	int ret;

	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
	if (ret) {
		kfree(chunk);
		return ret;
	}

	list_add_tail(&chunk->head, &image->chunk_list);
	return 0;
}
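
/*
 * qxl_image_alloc_objects() - allocate the BOs backing a qxl_drm_image:
 * one BO for the qxl_image descriptor and one data chunk big enough for
 * the chunk header plus stride * height bytes of pixel data.
 */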
int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}

	*image_ptr = image;
	return 0;
}
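
/*
 * qxl_image_free_objects() - drop the references on every chunk BO and on
 * the image descriptor BO, then free the bookkeeping structures.
 */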
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
	struct qxl_drm_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
		qxl_bo_unref(&chunk->bo);
		kfree(chunk);
	}

	qxl_bo_unref(&dimage->bo);
	kfree(dimage);
}
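
/*
 * qxl_image_init_helper() - copy the pixel data into the already allocated
 * chunk BO and fill the image BO with a SPICE bitmap descriptor that points
 * at it.
 */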
static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;

	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */
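
	/* Only a single, pre-allocated chunk is supported for now. */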
	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */

	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
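
	/*
	 * Copy the pixel data into the chunk BO.  The BO is mapped one page
	 * at a time with atomic mappings, so every copy has to be split at
	 * page boundaries.
	 */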
	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;

		if (stride == linesize && chunk_stride == stride) {
			/* source and destination are both contiguous */
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

				i_data += size;
				remain -= size;
				page++;
			}
		} else {
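			/*
			 * Source stride and line size differ (padded source
			 * lines): copy one scanline at a time, splitting each
			 * line at page boundaries of the chunk BO.
			 */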
			unsigned int page_base, page_offset, out_offset;

			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_kunmap(chunk_bo);
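
	/*
	 * Fill in the qxl_image descriptor; it tells the device where the
	 * bitmap data (the chunk BO) lives and how to interpret it.
	 */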
	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;
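
	/* Translate the source bit depth into the matching SPICE bitmap format. */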
	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		return -EINVAL;
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}
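
/*
 * qxl_image_init() - offset @data to the (x, y) origin of the region being
 * uploaded and hand off to qxl_image_init_helper() to do the actual work.
 */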
int qxl_image_init(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *dimage,
		   const uint8_t *data,
		   int x, int y, int width, int height,
		   int depth, int stride)
{
	data += y * stride + x * (depth / 8);
	return qxl_image_init_helper(qdev, release, dimage, data,
				     width, height, depth, 0, stride);
}