// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <linux/highmem.h>

/*
 * Template that implements find_first_diff() for a generic
 * unsigned integer type. @size and return value are in bytes.
 */
#define VMW_FIND_FIRST_DIFF(_type)					\
static size_t vmw_find_first_diff_ ## _type				\
	(const _type * dst, const _type * src, size_t size)		\
{									\
	size_t i;							\
									\
	for (i = 0; i < size; i += sizeof(_type)) {			\
		if (*dst++ != *src++)					\
			break;						\
	}								\
									\
	return i;							\
}
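
/*
 * Illustrative semantics of the template above: VMW_FIND_FIRST_DIFF(u32)
 * defines vmw_find_first_diff_u32(), which walks both buffers one u32 at
 * a time and returns the byte offset of the first differing u32, or a
 * value >= @size if the buffers are identical. For example, two 16-byte
 * buffers differing only in byte 9 yield 8, the offset of the u32 that
 * contains the difference.
 */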

/*
 * Template that implements find_last_diff() for a generic
 * unsigned integer type. Pointers point to the item following the
 * *end* of the area to be examined. @size and return value are in
 * bytes.
 */
#define VMW_FIND_LAST_DIFF(_type)					\
static ssize_t vmw_find_last_diff_ ## _type(				\
	const _type * dst, const _type * src, size_t size)		\
{									\
	while (size) {							\
		if (*--dst != *--src)					\
			break;						\
									\
		size -= sizeof(_type);					\
	}								\
	return size;							\
}
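
/*
 * Illustrative semantics: with dst and src pointing one byte past the
 * end of two 16-byte buffers that differ only in byte 9,
 * vmw_find_last_diff_u8(dst, src, 16) returns 10: the number of bytes
 * from the start of the area up to and including the differing item.
 * A return value of 0 means no difference was found.
 */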

/*
 * Instantiate find diff functions for relevant unsigned integer sizes,
 * assuming that wider integers are faster (including aligning) up to the
 * architecture native width, which is assumed to be 32 bit unless
 * CONFIG_64BIT is defined.
 */
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif

/* We use size aligned copies. This computes (addr - align(addr)) */
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
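
/*
 * Worked example: for a pointer value of 0x1003 and _type == u32, SPILL
 * evaluates to 0x1003 & 3 == 3, i.e. the pointer sits 3 bytes past the
 * previous 4-byte boundary. A result of 0 means the pointer is naturally
 * aligned for _type.
 */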

/*
 * Template to compute find_first_diff() for a certain integer type
 * including a head copy for alignment, and adjustment of parameters
 * for tail find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_FIRST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	size_t diff_offs;						\
									\
	if (spill && spill == SPILL(src, _type) &&			\
	    sizeof(_type) - spill <= size) {				\
		spill = sizeof(_type) - spill;				\
		diff_offs = vmw_find_first_diff_u8(dst, src, spill);	\
		if (diff_offs < spill)					\
			return round_down(offset + diff_offs, granularity); \
									\
		dst += spill;						\
		src += spill;						\
		size -= spill;						\
		offset += spill;					\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = size & ~(sizeof(_type) - 1);		\
									\
		diff_offs = vmw_find_first_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		if (diff_offs >= size || granularity == sizeof(_type))	\
			return (offset + diff_offs);			\
									\
		dst += diff_offs;					\
		src += diff_offs;					\
		size -= diff_offs;					\
		offset += diff_offs;					\
	}								\
} while (0)
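
/*
 * Walkthrough (illustrative): with dst == 0x1003, src == 0x2003,
 * _type == u32 and size == 21, both pointers spill 3 bytes, so the first
 * clause byte-compares the single head byte needed to reach 4-byte
 * alignment. If it matches, the second clause compares the following
 * 20 bytes as five u32s, leaving any finer-grained work to the narrower
 * finds that follow in vmw_find_first_diff() below.
 */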

/**
 * vmw_find_first_diff - find the first difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * return: The offset from find start where the first difference was
 * encountered in bytes. If no difference was found, the function returns
 * a value >= @size.
 */
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	size_t offset = 0;

	/*
	 * Try finding with large integers if alignment allows, or we can
	 * fix it. Fall through if we need better resolution or alignment
	 * was bad.
	 */
#ifdef CONFIG_64BIT
	VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
	VMW_TRY_FIND_FIRST_DIFF(u32);
	VMW_TRY_FIND_FIRST_DIFF(u16);

	return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
			  granularity);
}
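
/*
 * Example call: vmw_find_first_diff(dst, src, 64, 4) on 8-byte aligned
 * buffers scans u64s first. If the difference needs finer resolution
 * than 8 bytes, the u32 pass pinpoints it and, since the granularity
 * matches sizeof(u32), returns directly at 4-byte (e.g. 32bpp pixel)
 * granularity.
 */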

/*
 * Template to compute find_last_diff() for a certain integer type
 * including a tail copy for alignment, and adjustment of parameters
 * for head find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_LAST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	ssize_t location;						\
	ssize_t diff_offs;						\
									\
	if (spill && spill <= size && spill == SPILL(src, _type)) {	\
		diff_offs = vmw_find_last_diff_u8(dst, src, spill);	\
		if (diff_offs) {					\
			location = size - spill + diff_offs - 1;	\
			return round_down(location, granularity);	\
		}							\
									\
		dst -= spill;						\
		src -= spill;						\
		size -= spill;						\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = round_down(size, sizeof(_type));	\
									\
		diff_offs = vmw_find_last_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		location = size - to_copy + diff_offs - sizeof(_type);	\
		if (location < 0 || granularity == sizeof(_type))	\
			return location;				\
									\
		dst -= to_copy - diff_offs;				\
		src -= to_copy - diff_offs;				\
		size -= to_copy - diff_offs;				\
	}								\
} while (0)
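
/*
 * This mirrors VMW_TRY_FIND_FIRST_DIFF above but walks backwards: the
 * first clause byte-compares the unaligned tail spill down to an aligned
 * boundary, and the second clause then scans the aligned remainder with
 * the wide type, again returning directly once the resolution is
 * sufficient.
 */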

/**
 * vmw_find_last_diff - find the last difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * return: The offset from find start where the last difference was
 * encountered in bytes, or a negative value if no difference was found.
 */
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	dst += size;
	src += size;

#ifdef CONFIG_64BIT
	VMW_TRY_FIND_LAST_DIFF(u64);
#endif
	VMW_TRY_FIND_LAST_DIFF(u32);
	VMW_TRY_FIND_LAST_DIFF(u16);

	return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
			  granularity);
}

/**
 * vmw_memcpy - A wrapper around kernel memcpy that can be plugged into a
 * struct vmw_diff_cpy.
 *
 * @diff: The struct vmw_diff_cpy closure argument (unused).
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
	memcpy(dest, src, n);
}

/**
 * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @diff_offs: The offset from @diff->line_offset where the difference was
 * found.
 */
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
	size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
	struct drm_rect *rect = &diff->rect;

	rect->x1 = min_t(int, rect->x1, offs);
	rect->x2 = max_t(int, rect->x2, offs + 1);
	rect->y1 = min_t(int, rect->y1, diff->line);
	rect->y2 = max_t(int, rect->y2, diff->line + 1);
}
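
/*
 * Illustrative update: with diff->cpp == 4, diff->line_offset == 16 and
 * a difference found at byte offset diff_offs == 8, offs == (8 + 16) / 4
 * == 6, so the bounding box is widened to include pixel column 6 and the
 * current line.
 */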

/**
 * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * In order to correctly track the modified content, the field @diff->line
 * must be pre-loaded with the current line number, the field
 * @diff->line_offset must be pre-loaded with the line offset in bytes where
 * the copy starts, and finally the field @diff->cpp needs to be pre-loaded
 * with the number of bytes per unit in the horizontal direction of the area
 * we're examining. Typically bytes per pixel.
 * This is needed to know the needed granularity of the difference computing
 * operations. A higher cpp generally leads to faster execution at the cost of
 * bounding box width precision.
 */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n)
{
	ssize_t csize, byte_len;

	if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
		return;

	/* TODO: Possibly use a single vmw_find_first_diff per line? */
	csize = vmw_find_first_diff(dest, src, n, diff->cpp);
	if (csize < n) {
		vmw_adjust_rect(diff, csize);
		byte_len = diff->cpp;

		/*
		 * Starting from where first difference was found, find
		 * location of last difference, and then copy.
		 */
		diff->line_offset += csize;
		dest += csize;
		src += csize;
		n -= csize;
		csize = vmw_find_last_diff(dest, src, n, diff->cpp);
		if (csize >= 0) {
			byte_len += csize;
			vmw_adjust_rect(diff, csize);
		}
		memcpy(dest, src, byte_len);
	}
	diff->line_offset += n;
}
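
/*
 * Illustrative trace: for a 64-byte span with diff->cpp == 4 and
 * diff->line_offset == 0, where only bytes 20..27 differ, the first find
 * returns 20, the last find over the remaining 44 bytes locates the end
 * of the modified run, and only bytes 20..27 are copied while the
 * bounding box grows to cover pixel columns 5..6 on this line.
 */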

/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
 *
 * @mapped_dst: Already mapped destination page index in @dst_pages.
 * @dst_addr: Kernel virtual address of mapped destination page.
 * @dst_pages: Array of destination bo pages.
 * @dst_num_pages: Number of destination bo pages.
 * @dst_prot: Destination bo page protection.
 * @mapped_src: Already mapped source page index in @src_pages.
 * @src_addr: Kernel virtual address of mapped source page.
 * @src_pages: Array of source bo pages.
 * @src_num_pages: Number of source bo pages.
 * @src_prot: Source bo page protection.
 * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
 */
struct vmw_bo_blit_line_data {
	u32 mapped_dst;
	u8 *dst_addr;
	struct page **dst_pages;
	u32 dst_num_pages;
	pgprot_t dst_prot;
	u32 mapped_src;
	u8 *src_addr;
	struct page **src_pages;
	u32 src_num_pages;
	pgprot_t src_prot;
	struct vmw_diff_cpy *diff;
};

/**
 * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
 *
 * @d: Blit data as described above.
 * @dst_offset: Destination copy start offset from start of bo.
 * @src_offset: Source copy start offset from start of bo.
 * @bytes_to_copy: Number of bytes to copy in this line.
 */
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
				u32 dst_offset,
				u32 src_offset,
				u32 bytes_to_copy)
{
	struct vmw_diff_cpy *diff = d->diff;

	while (bytes_to_copy) {
		u32 copy_size = bytes_to_copy;
		u32 dst_page = dst_offset >> PAGE_SHIFT;
		u32 src_page = src_offset >> PAGE_SHIFT;
		u32 dst_page_offset = dst_offset & ~PAGE_MASK;
		u32 src_page_offset = src_offset & ~PAGE_MASK;
		bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
		bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
						 unmap_dst);

		copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

		if (unmap_src) {
			kunmap_atomic(d->src_addr);
			d->src_addr = NULL;
		}

		if (unmap_dst) {
			kunmap_atomic(d->dst_addr);
			d->dst_addr = NULL;
		}

		if (!d->dst_addr) {
			if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
				return -EINVAL;

			d->dst_addr =
				kmap_atomic_prot(d->dst_pages[dst_page],
						 d->dst_prot);
			if (!d->dst_addr)
				return -ENOMEM;

			d->mapped_dst = dst_page;
		}

		if (!d->src_addr) {
			if (WARN_ON_ONCE(src_page >= d->src_num_pages))
				return -EINVAL;

			d->src_addr =
				kmap_atomic_prot(d->src_pages[src_page],
						 d->src_prot);
			if (!d->src_addr)
				return -ENOMEM;

			d->mapped_src = src_page;
		}
		diff->do_cpy(diff, d->dst_addr + dst_page_offset,
			     d->src_addr + src_page_offset, copy_size);

		bytes_to_copy -= copy_size;
		dst_offset += copy_size;
		src_offset += copy_size;
	}

	return 0;
}
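
/*
 * Chunking example (illustrative): with PAGE_SIZE == 4096, a 200-byte
 * copy whose destination starts 100 bytes before a page boundary is done
 * as two 100-byte chunks with the destination page remapped in between.
 * The source mapping is dropped whenever the destination one is, since
 * nested kmap_atomic() mappings must be released in reverse order.
 */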

/**
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit.
 * @h: Height of blit.
 * return: Zero on success. Negative error value on failure. Will print out
 * kernel warnings on caller bugs.
 *
 * Performs a CPU blit from one buffer object to another avoiding a full
 * bo vmap which may exhaust or fragment vmalloc space.
 * On supported architectures (x86), we're using kmap_atomic which avoids
 * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
 * reference already set-up mappings.
 *
 * Neither of the buffer objects may be placed in PCI memory
 * (Fixed memory in TTM terminology) when using this function.
 */
int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	u32 j, initial_line = dst_offset / dst_stride;
	struct vmw_bo_blit_line_data d;
	int ret = 0;

	/* Buffer objects need to be either pinned or reserved: */
	if (!(dst->pin_count))
		dma_resv_assert_held(dst->base.resv);
	if (!(src->pin_count))
		dma_resv_assert_held(src->base.resv);

	if (!ttm_tt_is_populated(dst->ttm)) {
		ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
		if (ret)
			return ret;
	}

	if (!ttm_tt_is_populated(src->ttm)) {
		ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);
		if (ret)
			return ret;
	}

	d.mapped_dst = 0;
	d.mapped_src = 0;
	d.dst_addr = NULL;
	d.src_addr = NULL;
	d.dst_pages = dst->ttm->pages;
	d.src_pages = src->ttm->pages;
	d.dst_num_pages = dst->num_pages;
	d.src_num_pages = src->num_pages;
	d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
	d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
	d.diff = diff;

	for (j = 0; j < h; ++j) {
		diff->line = j + initial_line;
		diff->line_offset = dst_offset % dst_stride;
		ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
		if (ret)
			goto out;

		dst_offset += dst_stride;
		src_offset += src_stride;
	}
out:
	if (d.src_addr)
		kunmap_atomic(d.src_addr);
	if (d.dst_addr)
		kunmap_atomic(d.dst_addr);

	return ret;
}