1 /* SPDX-License-Identifier: MIT */
4 * Copyright © 2019 Intel Corporation
7 #ifndef I915_SW_FENCE_WORK_H
8 #define I915_SW_FENCE_WORK_H
10 #include <linux/dma-fence.h>
11 #include <linux/spinlock.h>
12 #include <linux/workqueue.h>
14 #include "i915_sw_fence.h"
/* Forward declaration; full definition below. */
struct dma_fence_work;
/*
 * Operations bound to a dma_fence_work at init time.
 */
struct dma_fence_work_ops {
	/*
	 * Deferred callback executed on behalf of the fence.
	 * NOTE(review): the meaning of the int return value is not visible
	 * in this header — confirm against i915_sw_fence_work.c.
	 */
	int (*work)(struct dma_fence_work *f);

	/* Optional teardown hook invoked when the work is released. */
	void (*release)(struct dma_fence_work *f);
};
24 struct dma_fence_work
{
28 struct i915_sw_fence chain
;
29 struct i915_sw_dma_fence_cb cb
;
31 struct work_struct work
;
32 const struct dma_fence_work_ops
*ops
;
36 DMA_FENCE_WORK_IMM
= DMA_FENCE_FLAG_USER_BITS
,
/* Initialise @f and bind its operations table @ops. */
void dma_fence_work_init(struct dma_fence_work *f,
			 const struct dma_fence_work_ops *ops);

/*
 * Add @signal as a dependency that must fire before @f executes.
 * NOTE(review): error semantics of the int return are defined in the
 * .c file — presumably 0 on success / negative errno; confirm there.
 */
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
43 static inline void dma_fence_work_commit(struct dma_fence_work
*f
)
45 i915_sw_fence_commit(&f
->chain
);
49 * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
50 * @f: the fenced worker
52 * Instead of always scheduling a worker to execute the callback (see
53 * dma_fence_work_commit()), we try to execute the callback immediately in
54 * the local context. It is required that the fence be committed before it
55 * is published, and that no other threads try to tamper with the number
56 * of asynchronous waits on the fence (or else the callback will be
57 * executed in the wrong context, i.e. not the callers).
59 static inline void dma_fence_work_commit_imm(struct dma_fence_work
*f
)
61 if (atomic_read(&f
->chain
.pending
) <= 1)
62 __set_bit(DMA_FENCE_WORK_IMM
, &f
->dma
.flags
);
64 dma_fence_work_commit(f
);
67 #endif /* I915_SW_FENCE_WORK_H */