intel/blorp: Add initial support for indirect clear colors
[mesa.git] / src / mesa / drivers / dri / i965 / genX_blorp_exec.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#if GEN_GEN <= 5
#include "gen4_blorp_exec.h"
#endif

#include "brw_blorp.h"

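/* The functions below are the driver hooks that blorp_genX_exec.h expects
 * the including driver to provide.  They let the per-gen blorp code allocate
 * batch and state space and emit relocations through the i965 batchbuffer
 * machinery.
 */

/* Reserve space for n dwords in the render batch and hand back a pointer so
 * blorp can write its commands there directly.
 */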
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}

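/* Record a relocation for a pointer blorp wrote at location and return the
 * presumed address to store there.  On Gen4-5 the pointer may live in the
 * state buffer rather than the command buffer, so pick the matching
 * relocation list.
 */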
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   uint32_t offset;

   if (GEN_GEN < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {
      offset = (char *)location - (char *)brw->batch.state_map;
      return brw_state_reloc(&brw->batch, offset,
                             address.buffer, address.offset + delta,
                             address.reloc_flags);
   }

   assert(!brw_ptr_in_state_buffer(&brw->batch, location));

   offset = (char *)location - (char *)brw->batch.map;
   return brw_batch_reloc(&brw->batch, offset,
                          address.buffer, address.offset + delta,
                          address.reloc_flags);
}

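/* Add a relocation for a buffer address embedded in a surface state at
 * ss_offset in the state buffer and patch the presumed address into it
 * (64 bits on Gen8+, 32 bits on earlier gens).
 */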
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct brw_bo *bo = address.buffer;

   uint64_t reloc_val =
      brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                      address.reloc_flags);

   void *reloc_ptr = (void *)brw->batch.state_map + ss_offset;
#if GEN_GEN >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}

#if GEN_GEN >= 7
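/* Surface states and binding tables are emitted into the batch's state
 * buffer, so that buffer is the base address blorp's surface state offsets
 * are relative to.
 */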
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = brw->batch.state_bo,
      .offset = 0,
   };
}
#endif

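/* Allocate dynamic (indirect) state from the batch's state buffer and
 * return both a CPU map and its offset from the state base address.
 */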
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, size, alignment, offset);
}

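/* Allocate a binding table plus num_entries surface states from the state
 * buffer, point each binding table entry at its surface state, and hand the
 * surface state maps back to blorp to fill in.
 */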
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw,
                                        state_size, state_alignment,
                                        &(surface_offsets)[i]);
      bt_map[i] = surface_offsets[i];
   }
}

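/* Allocate vertex data in the state buffer and return its address for use
 * in 3DSTATE_VERTEX_BUFFERS.  See the workaround comment below for why the
 * allocation is 64-byte aligned.
 */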
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
    *
    *    "The VF cache needs to be invalidated before binding and then using
    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
    *    (at a 64B granularity) since the last invalidation. A VF cache
    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
    *    bit in PIPE_CONTROL."
    *
    * This restriction first appears in the Skylake PRM but the internal docs
    * also list it as being an issue on Broadwell. In order to avoid this
    * problem, we align all vertex buffer allocations to 64 bytes.
    */
   uint32_t offset;
   void *data = brw_state_batch(brw, size, 64, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.state_bo,
      .offset = offset,

#if GEN_GEN == 10
      .mocs = CNL_MOCS_WB,
#elif GEN_GEN == 9
      .mocs = SKL_MOCS_WB,
#elif GEN_GEN == 8
      .mocs = BDW_MOCS_WB,
#elif GEN_GEN == 7
      .mocs = GEN7_MOCS_L3,
#endif
   };

   return data;
}

#if GEN_GEN >= 8
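/* Return the driver's workaround BO, which blorp uses as a harmless dummy
 * write target when a hardware workaround calls for one on Gen8+.
 */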
static struct blorp_address
blorp_get_workaround_page(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = brw->workaround_bo,
   };
}
#endif

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it. There's nothing for us to do here.
    */
}

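/* Make sure the URB is configured with entries large enough for blorp's VS
 * (and, on Gen4-5, SF) data.  On Gen7+ this is skipped when the current VS
 * entry size already suffices.
 */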
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size, unsigned sf_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GEN_GEN >= 7
   if (brw->urb.vsize >= vs_entry_size)
      return;

   gen7_upload_urb(brw, vs_entry_size, false, false);
#elif GEN_GEN == 6
   gen6_upload_urb(brw, vs_entry_size, false, 0);
#else
   /* We calculate it now and emit later. */
   brw_calculate_urb_fence(brw, 0, vs_entry_size, sf_entry_size);
#endif
}

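/* Top-level driver entry point for a blorp operation: flush the caches the
 * op depends on, reserve batch and state space, emit the workarounds and
 * non-pipelined state blorp needs, execute the op, and then flag all 3D
 * state as dirty so normal GL rendering re-emits it afterwards.
 */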
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches. We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source. Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_cache_flush_for_read(brw, params->src.addr.buffer);
   if (params->dst.enabled)
      brw_cache_flush_for_render(brw, params->dst.addr.buffer);
   if (params->depth.enabled)
      brw_cache_flush_for_depth(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_cache_flush_for_depth(brw, params->stencil.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);

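   /* Reserve enough batch and state buffer space up front so the whole blorp
    * operation fits without wrapping, and save the batch state so we can
    * roll back and retry if we run out of aperture space.
    */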
retry:
   intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
   brw_require_statebuffer_space(brw, 600);
   intel_batchbuffer_save_state(brw);
   brw->batch.no_wrap = true;

#if GEN_GEN == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

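   /* Blorp's surface, dynamic, and vertex state all land in the batch's
    * state buffer, so make sure STATE_BASE_ADDRESS points at it before any
    * of that state is referenced.
    */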
   brw_upload_state_base_address(brw);

#if GEN_GEN >= 8
   gen7_l3_state.emit(brw);
#endif

#if GEN_GEN >= 6
   brw_emit_depth_stall_flushes(brw);
#endif

#if GEN_GEN == 8
   gen8_write_pma_stall_bits(brw, 0);
#endif

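   /* 3DSTATE_DRAWING_RECTANGLE clips all rendering, so make it cover the
    * whole area this blorp operation touches.
    */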
   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);

   brw->batch.no_wrap = false;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later. If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = !params->depth.enabled &&
                              !params->stencil.enabled;
   brw->ib.index_size = -1;

   if (params->dst.enabled)
      brw_render_cache_add_bo(brw, params->dst.addr.buffer);
   if (params->depth.enabled)
      brw_depth_cache_add_bo(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_depth_cache_add_bo(brw, params->stencil.addr.buffer);
}