iris/gen9: Optimize slice and subslice load balancing behavior.
[mesa.git] src/gallium/drivers/iris/iris_blorp.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_blorp.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include <assert.h>

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/gen_l3_config.h"

#define BLORP_USE_SOFTPIN
#include "blorp/blorp_genX_exec.h"

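/* MOCS (Memory Object Control State) value for write-back cacheable
 * memory.  Gen8 encodes the cacheability fields directly in the MOCS
 * value; Gen9+ instead stores an index into a MOCS table (entry 2 is
 * write-back), shifted into the index bit-field.
 */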
#if GEN_GEN == 8
#define MOCS_WB 0x78
#else
#define MOCS_WB (2 << 1)
#endif

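/**
 * Allocate space from a streaming uploader, pin the backing BO into the
 * batch's validation list, and return a CPU mapping of the new space.
 */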
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->gtt_offset (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

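/* Reserve space for "n" dwords of commands in the batch's command buffer. */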
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

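/**
 * Pin a BO into the batch's validation list and return its full graphics
 * virtual address, adjusted by addr.offset.
 */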
static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE);

   /* Assume this is a general address, not relative to a base. */
   return bo->gtt_offset + addr.offset;
}

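/* iris uses softpin, so there are no actual relocations: an "emitted
 * relocation" is simply the BO's fixed virtual address plus the deltas.
 */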
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

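/* Binding tables and surface states are addressed relative to the binder
 * memory zone, so report the start of that zone as the base address.
 */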
UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

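/* Allocate BLORP dynamic state (viewports, blend state, and so on) from
 * the dynamic state uploader, returning an offset from Dynamic State
 * Base Address.
 */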
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

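/* Reserve a block of binding table entries in the binder, stream out a
 * surface state for each entry, and point the entries at those surface
 * states (as offsets from the binder's address).
 */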
static void
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   *bt_offset = iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + *bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
   }

   iris_use_pinned_bo(batch, binder->bo, false);

   ice->vtbl.update_surface_base_address(batch, binder);
}

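/* Stream out space for BLORP's vertex data, returning both a CPU mapping
 * to fill and the GPU address for vertex buffer setup.
 */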
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.stream_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = MOCS_WB,
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           unsigned num_vbs)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->gtt_offset >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
}

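/* Return the screen's workaround BO, a scratch page used as the target
 * of dummy writes in various hardware workarounds.
 */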
static struct blorp_address
blorp_get_workaround_page(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) { .buffer = batch->screen->workaround_bo };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

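/* Configure the URB so the VS stage gets entries of at least
 * vs_entry_size.  BLORP never uses the TCS/TES/GS stages, so they get
 * a minimal allocation.
 */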
static void
blorp_emit_urb_config(struct blorp_batch *blorp_batch,
                      unsigned vs_entry_size,
                      UNUSED unsigned sf_entry_size)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   unsigned size[4] = { vs_entry_size, 1, 1, 1 };

   /* If the last VS URB entry size is already large enough for what this
    * BLORP operation needs, we can skip reconfiguring the URB.
    */
   if (ice->shaders.last_vs_entry_size >= vs_entry_size)
      return;

   genX(emit_urb_setup)(ice, batch, size, false, false);
   ice->state.dirty |= IRIS_DIRTY_URB;
}

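/**
 * The main BLORP execution hook for iris: flush caches so BLORP reads
 * and writes coherent data, emit the actual commands, then flag nearly
 * all driver state dirty, since BLORP clobbers the 3D pipeline state
 * that iris normally tracks.
 */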
static void
iris_blorp_exec(struct blorp_batch *blorp_batch,
                const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

#if GEN_GEN >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target
    *     Message points to a different RENDER_SURFACE_STATE, SW must issue
    *     a Render Target Cache Flush by enabling this bit.  When render
    *     target flush is set due to new association of BTI, PS Scoreboard
    *     Stall bit must be set in this packet."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: RT BTI change [blorp]",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache
    * for the glBlitFramebuffer() source.  Also, we are sometimes warned in
    * the docs to flush the cache between reinterpretations of the same
    * surface data with different formats, which blorp does for stencil and
    * depth data.
    */
   if (params->src.enabled)
      iris_cache_flush_for_read(batch, params->src.addr.buffer);
   if (params->dst.enabled) {
      iris_cache_flush_for_render(batch, params->dst.addr.buffer,
                                  params->dst.view.format,
                                  params->dst.aux_usage);
   }
   if (params->depth.enabled)
      iris_cache_flush_for_depth(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      iris_cache_flush_for_depth(batch, params->stencil.addr.buffer);

   iris_require_command_space(batch, 1400);

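   /* A single PS invocation can write a huge number of pixels during a
    * fast clear, so request the finest slice/subslice load balancing
    * (scale of UINT_MAX) for those; everything else uses the default.
    */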
   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (ice->state.current_hash_scale != scale) {
      genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
                              params->y1 - params->y0, scale);
   }

   blorp_exec(blorp_batch, params);

   /* BLORP smashed nearly all of the state that the normal 3D pipeline
    * tracks for GL, so flag everything dirty except the bits we know it
    * left alone.
    */
   uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE |
                         IRIS_DIRTY_SO_BUFFERS |
                         IRIS_DIRTY_SO_DECL_LIST |
                         IRIS_DIRTY_LINE_STIPPLE |
                         IRIS_ALL_DIRTY_FOR_COMPUTE |
                         IRIS_DIRTY_SCISSOR_RECT |
                         IRIS_DIRTY_UNCOMPILED_VS |
                         IRIS_DIRTY_UNCOMPILED_TCS |
                         IRIS_DIRTY_UNCOMPILED_TES |
                         IRIS_DIRTY_UNCOMPILED_GS |
                         IRIS_DIRTY_UNCOMPILED_FS |
                         IRIS_DIRTY_VF |
                         IRIS_DIRTY_URB |
                         IRIS_DIRTY_SF_CL_VIEWPORT |
                         IRIS_DIRTY_SAMPLER_STATES_VS |
                         IRIS_DIRTY_SAMPLER_STATES_TCS |
                         IRIS_DIRTY_SAMPLER_STATES_TES |
                         IRIS_DIRTY_SAMPLER_STATES_GS);

   /* We can skip flagging IRIS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= IRIS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;

   ice->state.dirty |= ~skip_bits;

   if (params->dst.enabled) {
      iris_render_cache_add_bo(batch, params->dst.addr.buffer,
                               params->dst.view.format,
                               params->dst.aux_usage);
   }
   if (params->depth.enabled)
      iris_depth_cache_add_bo(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      iris_depth_cache_add_bo(batch, params->stencil.addr.buffer);
}

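/* Hook up the iris driver callbacks and shader infrastructure to BLORP. */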
void
genX(init_blorp)(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

   blorp_init(&ice->blorp, ice, &screen->isl_dev);
   ice->blorp.compiler = screen->compiler;
   ice->blorp.lookup_shader = iris_blorp_lookup_shader;
   ice->blorp.upload_shader = iris_blorp_upload_shader;
   ice->blorp.exec = iris_blorp_exec;
}