iris: add support for INTEL_blackhole_render
[mesa.git] / src/gallium/drivers/iris/iris_batch.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BATCH_DOT_H
#define IRIS_BATCH_DOT_H

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include "util/u_dynarray.h"

#include "drm-uapi/i915_drm.h"
#include "common/gen_decoder.h"

#include "iris_fence.h"

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* Our target batch size - flush approximately at this point. */
#define BATCH_SZ (64 * 1024)

enum iris_batch_name {
   IRIS_BATCH_RENDER,
   IRIS_BATCH_COMPUTE,
};

#define IRIS_BATCH_COUNT 2

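/**
 * An address in GPU memory: a buffer object plus an offset into it.
 * 'write' indicates whether the address will be written through.
 */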
struct iris_address {
   struct iris_bo *bo;
   uint64_t offset;
   bool write;
};

struct iris_batch {
   struct iris_screen *screen;
   struct iris_vtable *vtbl;
   struct pipe_debug_callback *dbg;
   struct pipe_device_reset_callback *reset;

   /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
   enum iris_batch_name name;

   /** Current batchbuffer being queued up. */
   struct iris_bo *bo;
   void *map;
   void *map_next;

   /** Size of the primary batch being submitted to execbuf (in bytes). */
   unsigned primary_batch_size;

   /** Total size of all chained batches (in bytes). */
   unsigned total_chained_batch_size;

   /** Last Surface State Base Address set in this hardware context. */
   uint64_t last_surface_base_address;

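   /** GEM hardware context ID that this batch executes in. */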
   uint32_t hw_ctx_id;

   /** The validation list */
   struct drm_i915_gem_exec_object2 *validation_list;
   struct iris_bo **exec_bos;
   int exec_count;
   int exec_array_size;

   /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (i.e. the first
    * instruction is a MI_BATCH_BUFFER_END).
    */
   bool noop_enabled;

   /**
    * A list of iris_syncpts associated with this batch.
    *
    * The first list entry will always be a signalling sync-point, indicating
    * that this batch has completed. The others are likely to be sync-points
    * to wait on before executing the batch.
    */
   struct util_dynarray syncpts;

   /** A list of drm_i915_exec_fences to have execbuf signal or wait on */
   struct util_dynarray exec_fences;

   /** The amount of aperture space (in bytes) used by all exec_bos */
   int aperture_space;

   /** A sync-point for the last batch that was submitted. */
   struct iris_syncpt *last_syncpt;

   /** List of other batches which we might need to flush to use a BO */
   struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];

   struct {
      /**
       * Set of struct iris_bo * that have been rendered to within this
       * batchbuffer and would need flushing before being used from another
       * cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct hash_table *render;

      /**
       * Set of struct iris_bo * that have been used as a depth buffer within
       * this batchbuffer and would need flushing before being used from
       * another cache domain that isn't coherent with it (i.e. the sampler).
       */
      struct set *depth;
   } cache;

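   /** Batchbuffer decoder state, used for INTEL_DEBUG=bat dumps. */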
   struct gen_batch_decode_ctx decoder;
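   /** Table of state sizes recorded via iris_record_state_size(). */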
   struct hash_table_u64 *state_sizes;

   /** Have we emitted any draw calls to this batch? */
   bool contains_draw;

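   /** Most recent AUX map state number seen by this batch (see gen_aux_map). */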
   uint32_t last_aux_map_state;
};

void iris_init_batch(struct iris_batch *batch,
                     struct iris_screen *screen,
                     struct iris_vtable *vtbl,
                     struct pipe_debug_callback *dbg,
                     struct pipe_device_reset_callback *reset,
                     struct hash_table_u64 *state_sizes,
                     struct iris_batch *all_batches,
                     enum iris_batch_name name,
                     int priority);
void iris_chain_to_new_batch(struct iris_batch *batch);
void iris_batch_free(struct iris_batch *batch);
void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);

void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
#define iris_batch_flush(batch) _iris_batch_flush((batch), __FILE__, __LINE__)

bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);

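/*
 * Switch the batch in or out of no-op (INTEL_BLACKHOLE_RENDER) mode.  The
 * return value indicates dirty state the caller should re-emit after the
 * transition.
 */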
uint64_t iris_batch_prepare_noop(struct iris_batch *batch,
                                 bool noop_enable,
                                 uint64_t dirty_flags);

#define RELOC_WRITE EXEC_OBJECT_WRITE

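/*
 * Add a BO to the batch's validation list so it is resident when the batch
 * executes, marking it for write access if 'writable' is set.
 */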
void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
                        bool writable);

enum pipe_reset_status iris_batch_check_for_reset(struct iris_batch *batch);

static inline unsigned
iris_batch_bytes_used(struct iris_batch *batch)
{
   return batch->map_next - batch->map;
}

/**
 * Ensure the current command buffer has \param size bytes of space
 * remaining. If not, this creates a secondary batch buffer and emits
 * a jump from the primary batch to the start of the secondary.
 *
 * Most callers want iris_get_command_space() instead.
 */
static inline void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   const unsigned required_bytes = iris_batch_bytes_used(batch) + size;

   if (required_bytes >= BATCH_SZ) {
      iris_chain_to_new_batch(batch);
   }
}

/**
 * Allocate space in the current command buffer, and return a pointer
 * to the mapped area so the caller can write commands there.
 *
 * This should be called whenever emitting commands.
 */
static inline void *
iris_get_command_space(struct iris_batch *batch, unsigned bytes)
{
   iris_require_command_space(batch, bytes);
   void *map = batch->map_next;
   batch->map_next += bytes;
   return map;
}

/**
 * Helper to emit GPU commands - allocates space, copies them there.
 */
static inline void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   void *map = iris_get_command_space(batch, size);
   memcpy(map, data, size);
}
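
/* Example usage (illustrative only): emit a single MI_NOOP dword by value.
 * The MI_NOOP opcode is all zeroes, so a zero dword suffices.
 *
 *    uint32_t noop = 0;
 *    iris_batch_emit(batch, &noop, sizeof(noop));
 *
 * In practice, callers usually go through the genxml pack macros rather than
 * calling this helper directly.
 */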

/**
 * Get a pointer to the batch's signalling syncpt. Does not refcount.
 */
static inline struct iris_syncpt *
iris_batch_get_signal_syncpt(struct iris_batch *batch)
{
   /* The signalling syncpt is the first one in the list. */
   struct iris_syncpt *syncpt =
      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
   return syncpt;
}


/**
 * Take a reference to the batch's signalling syncpt.
 *
 * Callers can use this to wait for the current batch under construction
 * to complete (after flushing it).
 */
static inline void
iris_batch_reference_signal_syncpt(struct iris_batch *batch,
                                   struct iris_syncpt **out_syncpt)
{
   struct iris_syncpt *syncpt = iris_batch_get_signal_syncpt(batch);
   iris_syncpt_reference(batch->screen, out_syncpt, syncpt);
}
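
/* Example usage (illustrative only): hold a reference to the signalling
 * syncpt and flush, so the caller can later wait for this batch to finish.
 *
 *    struct iris_syncpt *syncpt = NULL;
 *    iris_batch_reference_signal_syncpt(batch, &syncpt);
 *    iris_batch_flush(batch);
 *    // ... wait on syncpt using the helpers in iris_fence.h, then release
 *    // the reference with iris_syncpt_reference(batch->screen, &syncpt, NULL).
 */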

/**
 * Record the size of a piece of state for use in INTEL_DEBUG=bat printing.
 */
static inline void
iris_record_state_size(struct hash_table_u64 *ht,
                       uint32_t offset_from_base,
                       uint32_t size)
{
   if (ht) {
      _mesa_hash_table_u64_insert(ht, offset_from_base,
                                  (void *)(uintptr_t) size);
   }
}

#endif
257 #endif