/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_RING_H
#define INTEL_RING_H
10 #include "i915_gem.h" /* GEM_BUG_ON */
11 #include "i915_request.h"
12 #include "intel_ring_types.h"
struct intel_engine_cs;

/*
 * Allocate a new ring of @size bytes for @engine.
 * NOTE(review): the return-type line was lost in this copy of the file;
 * reconstructed as struct intel_ring * from the helpers below — confirm
 * against upstream.
 */
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);

/*
 * Reserve @num_dwords dwords of space in the ring for @rq and return a
 * pointer to write the commands into (see intel_ring_advance(), which
 * checks that exactly this many dwords were emitted).
 */
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
int intel_ring_cacheline_align(struct i915_request *rq);

unsigned int intel_ring_update_space(struct intel_ring *ring);

void __intel_ring_pin(struct intel_ring *ring);
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);

/* Release callback for ring->ref; invoked via kref_put(), see intel_ring_put(). */
void intel_ring_free(struct kref *ref);
/*
 * Acquire an extra reference on @ring; drop it with intel_ring_put().
 * NOTE(review): the function body was lost in this copy; reconstructed
 * from the symmetric intel_ring_put() below — confirm against upstream.
 */
static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}
/* Drop a reference on @ring; the last put frees it via intel_ring_free(). */
static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/*
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
55 static inline u32
intel_ring_wrap(const struct intel_ring
*ring
, u32 pos
)
57 return pos
& (ring
->size
- 1);
/*
 * Sign of the distance from @prev to @next in ring order: positive when
 * @next is ahead of @prev, negative when behind.
 * NOTE(review): the parameter-list line was garbled in this copy; the
 * names next/prev are taken from the typecheck() lines below. Relies on
 * ring->wrap shifting the delta so that its sign survives the ring
 * wrap-around — confirm against intel_ring_types.h.
 */
static inline int intel_ring_direction(const struct intel_ring *ring,
				       u32 next, u32 prev)
{
	/* Ensure callers pass the same type as ring->size (u32). */
	typecheck(typeof(ring->size), next);
	typecheck(typeof(ring->size), prev);
	return (next - prev) << ring->wrap;
}
/*
 * Check that @pos is a legal offset into @ring.
 * NOTE(review): the return-type line and return statements were lost in
 * this copy; reconstructed from the two visible guard conditions —
 * confirm against upstream.
 */
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}
/* Byte offset of @addr within rq's ring, wrapped to [0, ring->size). */
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}
/*
 * Debug check that programming RING_TAIL to @tail is safe: the offset
 * must be valid, and the tail must not land in the same cacheline as
 * the last known head while trailing it (a hardware restriction, see
 * the BSpec citations below).
 */
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	unsigned int head = READ_ONCE(ring->head);

	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}
/*
 * Record the new software @tail for @ring and return it.
 * NOTE(review): the final statements of the body were lost in this copy;
 * reconstructed (store then return @tail, matching the unsigned int
 * return type) — confirm against upstream.
 */
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}
129 static inline unsigned int
130 __intel_ring_space(unsigned int head
, unsigned int tail
, unsigned int size
)
133 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
134 * same cacheline, the Head Pointer must not be greater than the Tail
137 GEM_BUG_ON(!is_power_of_2(size
));
138 return (head
- tail
- CACHELINE_BYTES
) & (size
- 1);
141 #endif /* INTEL_RING_H */