/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#if GEN_GEN <= 5
#include "gen4_blorp_exec.h"
#endif

#include "brw_blorp.h"

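/* Reserve space for n dwords in the render batch and hand back a pointer so
 * blorp can write its commands directly into the batchbuffer.
 */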
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}

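/* Record a relocation for the batch location where blorp will write an
 * address, and return the presumed value (target BO offset plus delta) that
 * blorp should write there.
 */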
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset = (char *)location - (char *)brw->batch.map;
   return brw_emit_reloc(&brw->batch, offset,
                         address.buffer, address.offset + delta,
                         address.read_domains,
                         address.write_domain);
}

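/* Record a relocation for an address field inside a surface state entry
 * (allocated from the batch) and write the presumed address in place:
 * 64 bits on Gen8+, 32 bits on earlier generations.
 */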
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct brw_bo *bo = address.buffer;

   brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                  address.read_domains, address.write_domain);

   uint64_t reloc_val = bo->offset64 + address.offset + delta;
   void *reloc_ptr = (void *)brw->batch.map + ss_offset;
#if GEN_GEN >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}

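/* Allocate dynamic state from the batchbuffer's state area; *offset receives
 * the offset of the allocation and the returned pointer maps it.
 */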
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, size, alignment, offset);
}

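/* Allocate a binding table and the requested surface state entries from the
 * batch state area, then point each binding table slot at its surface state.
 */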
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw,
                                        state_size, state_alignment,
                                        &(surface_offsets)[i]);
      bt_map[i] = surface_offsets[i];
   }
}

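/* Allocate vertex data storage from the batch state area and report its
 * address so blorp can point 3DSTATE_VERTEX_BUFFERS at it.
 */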
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
    *
    *    "The VF cache needs to be invalidated before binding and then using
    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
    *    (at a 64B granularity) since the last invalidation. A VF cache
    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
    *    bit in PIPE_CONTROL."
    *
    * This restriction first appears in the Skylake PRM but the internal docs
    * also list it as being an issue on Broadwell. In order to avoid this
    * problem, we align all vertex buffer allocations to 64 bytes.
    */
   uint32_t offset;
   void *data = brw_state_batch(brw, size, 64, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.bo,
      .read_domains = I915_GEM_DOMAIN_VERTEX,
      .write_domain = 0,
      .offset = offset,
   };

   return data;
}

#if GEN_GEN >= 8
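/* Hand blorp an address in the driver's workaround BO that it can use as a
 * dummy write target when a hardware workaround requires one.
 */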
static struct blorp_address
blorp_get_workaround_page(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = brw->workaround_bo,
   };
}
#endif

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it. There's nothing for us to do here.
    */
}

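/* Make sure the URB can hold blorp's VS (and, on Gen4-5, SF) entries.  On
 * Gen7+, skip re-emitting the URB configuration when the current one is
 * already large enough.
 */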
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size, unsigned sf_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GEN_GEN >= 7
   if (!(brw->ctx.NewDriverState & (BRW_NEW_CONTEXT | BRW_NEW_URB_SIZE)) &&
       brw->urb.vsize >= vs_entry_size)
      return;

   gen7_upload_urb(brw, vs_entry_size, false, false);
#elif GEN_GEN == 6
   gen6_upload_urb(brw, vs_entry_size, false, 0);
#else
   /* We calculate it now and emit later. */
   brw_calculate_urb_fence(brw, 0, vs_entry_size, sf_entry_size);
#endif
}

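/* Execute a blorp operation inside the i965 batchbuffer: flush any caches
 * that might alias the surfaces involved, emit the required state and the
 * blorp commands, then mark the driver state that blorp clobbered so normal
 * 3D rendering re-emits it.
 */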
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = GEN_GEN >= 8 ? 1920 : 1700;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches. We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source. Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_render_cache_set_check_flush(brw, params->src.addr.buffer);
   brw_render_cache_set_check_flush(brw, params->dst.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);

retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   struct brw_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

#if GEN_GEN == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

   brw_upload_state_base_address(brw);

#if GEN_GEN >= 8
   gen7_l3_state.emit(brw);
#endif

#if GEN_GEN >= 6
   brw_emit_depth_stall_flushes(brw);
#endif

#if GEN_GEN == 8
   gen8_write_pma_stall_bits(brw, 0);
#endif

   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);

   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later. If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = !params->depth.enabled &&
                              !params->stencil.enabled;
   brw->ib.index_size = -1;

   if (params->dst.enabled)
      brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);
   if (params->depth.enabled)
      brw_render_cache_set_add_bo(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_render_cache_set_add_bo(brw, params->stencil.addr.buffer);
}