// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>
/* Internal helper to start a new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
	cursor->array = dma_fence_chain_contained(cursor->chain);
	cursor->index = 0;
	return dma_fence_array_first(cursor->array);
}
/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor)
{
	cursor->chain = dma_fence_get(head);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);
/**
 * dma_fence_unwrap_next - return the next fence from the fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
	struct dma_fence *tmp;

	++cursor->index;
	tmp = dma_fence_array_next(cursor->array, cursor->index);
	if (tmp)
		return tmp;

	cursor->chain = dma_fence_chain_walk(cursor->chain);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
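
/*
 * Usage sketch, for illustration only: callers normally reach
 * dma_fence_unwrap_first()/dma_fence_unwrap_next() through the
 * dma_fence_unwrap_for_each() macro from <linux/dma-fence-unwrap.h>, which
 * visits every leaf fence behind a chain/array container. A hypothetical
 * helper counting the still unsignaled leaf fences behind "fence" could look
 * like this:
 *
 *	static unsigned int example_count_unsignaled(struct dma_fence *fence)
 *	{
 *		struct dma_fence_unwrap cursor;
 *		struct dma_fence *f;
 *		unsigned int count = 0;
 *
 *		dma_fence_unwrap_for_each(f, &cursor, fence)
 *			if (!dma_fence_is_signaled(f))
 *				++count;
 *
 *		return count;
 *	}
 *
 * The cursor takes its own references while walking and drops them again as
 * the walk runs to completion, so the caller only needs to keep its own
 * reference to "fence" alive for the duration of the loop.
 */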
static int fence_cmp(const void *_a, const void *_b)
{
	struct dma_fence *a = *(struct dma_fence **)_a;
	struct dma_fence *b = *(struct dma_fence **)_b;

	if (a->context < b->context)
		return -1;
	else if (a->context > b->context)
		return 1;

	if (dma_fence_is_later(b, a))
		return 1;
	else if (dma_fence_is_later(a, b))
		return -1;

	return 0;
}
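
/*
 * Illustrative note: fence_cmp() sorts by context first and, within one
 * context, most recent fence first (when dma_fence_is_later(b, a) says b is
 * the newer fence, a is placed after it). Assuming plain increasing seqnos,
 * the (context, seqno) pairs
 *
 *	(1, 5) (2, 3) (1, 7) (2, 9)
 *
 * would sort to
 *
 *	(1, 7) (1, 5) (2, 9) (2, 3)
 *
 * so the dedup pass in __dma_fence_unwrap_merge() below only has to keep the
 * first entry it sees for each context.
 */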
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *iter)
{
	struct dma_fence *tmp, *unsignaled = NULL, **array;
	struct dma_fence_array *result;
	ktime_t timestamp;
	int i, j, count;

	/* Count the unsignaled fences and track the latest signaled timestamp */
	count = 0;
	timestamp = ns_to_ktime(0);
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				dma_fence_put(unsignaled);
				unsignaled = dma_fence_get(tmp);
				++count;
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	/*
	 * If we couldn't find a pending fence just return a private signaled
	 * fence with the timestamp of the last signaled one.
	 *
	 * Or if there was a single unsignaled fence left we can return it
	 * directly and early since that is a major path on many workloads.
	 */
	if (count == 0)
		return dma_fence_allocate_private_stub(timestamp);
	else if (count == 1)
		return unsignaled;

	dma_fence_put(unsignaled);

	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	/* Collect the remaining unsignaled fences into the array */
	count = 0;
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				array[count++] = dma_fence_get(tmp);
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	if (count == 0 || count == 1)
		goto return_fastpath;

	sort(array, count, sizeof(*array), fence_cmp, NULL);

	/*
	 * Only keep the most recent fence for each context.
	 */
	j = 0;
	for (i = 1; i < count; i++) {
		if (array[i]->context == array[j]->context)
			dma_fence_put(array[i]);
		else
			array[++j] = array[i];
	}
	count = ++j;

	if (count > 1) {
		result = dma_fence_array_create(count, array,
						dma_fence_context_alloc(1),
						1, false);
		if (!result) {
			for (i = 0; i < count; i++)
				dma_fence_put(array[i]);
			tmp = NULL;
			goto return_tmp;
		}
		return &result->base;
	}

return_fastpath:
	if (count == 0)
		tmp = dma_fence_allocate_private_stub(timestamp);
	else
		tmp = array[0];

return_tmp:
	kfree(array);
	return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
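
/*
 * Usage sketch, for illustration only: __dma_fence_unwrap_merge() is meant to
 * be reached through the dma_fence_unwrap_merge() macro from
 * <linux/dma-fence-unwrap.h>, which declares the needed unwrap cursors on the
 * stack. A caller merging two fences "a" and "b" (hypothetical fences it
 * already holds references to) might do:
 *
 *	struct dma_fence *merged;
 *
 *	merged = dma_fence_unwrap_merge(a, b);
 *	if (!merged)
 *		return -ENOMEM;
 *
 * The returned fence is a new reference that the caller eventually has to
 * drop with dma_fence_put(); NULL indicates a memory allocation failure.
 */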