5793dc15064ca24b7429508a2ca371643cf44aca
[mesa.git] / src / gallium / drivers / iris / iris_draw.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_draw.c
25 *
26 * The main driver hooks for drawing and launching compute shaders.
27 */
28
29 #include <stdio.h>
30 #include <errno.h>
31 #include "pipe/p_defines.h"
32 #include "pipe/p_state.h"
33 #include "pipe/p_context.h"
34 #include "pipe/p_screen.h"
35 #include "util/u_inlines.h"
36 #include "util/u_transfer.h"
37 #include "util/u_upload_mgr.h"
38 #include "intel/compiler/brw_compiler.h"
39 #include "intel/compiler/brw_eu_defines.h"
40 #include "iris_context.h"
41 #include "iris_defines.h"
42
43 static bool
44 prim_is_points_or_lines(const struct pipe_draw_info *draw)
45 {
46 /* We don't need to worry about adjacency - it can only be used with
47 * geometry shaders, and we don't care about this info when GS is on.
48 */
49 return draw->mode == PIPE_PRIM_POINTS ||
50 draw->mode == PIPE_PRIM_LINES ||
51 draw->mode == PIPE_PRIM_LINE_LOOP ||
52 draw->mode == PIPE_PRIM_LINE_STRIP;
53 }
54
/**
 * Record the current primitive mode and restart information, flagging
 * related packets as dirty if necessary.
 *
 * This must be called before updating compiled shaders, because the patch
 * information informs the TCS key.
 */
static void
iris_update_draw_info(struct iris_context *ice,
                      const struct pipe_draw_info *info)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;

   /* A new primitive type requires a fresh 3DSTATE_VF_TOPOLOGY packet. */
   if (ice->state.prim_mode != info->mode) {
      ice->state.prim_mode = info->mode;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;


      /* For XY Clip enables */
      bool points_or_lines = prim_is_points_or_lines(info);
      if (points_or_lines != ice->state.prim_is_points_or_lines) {
         ice->state.prim_is_points_or_lines = points_or_lines;
         ice->state.dirty |= IRIS_DIRTY_CLIP;
      }
   }

   /* For tessellation, track the patch size; several pieces of state
    * depend on it.
    */
   if (info->mode == PIPE_PRIM_PATCHES &&
       ice->state.vertices_per_patch != info->vertices_per_patch) {
      ice->state.vertices_per_patch = info->vertices_per_patch;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;

      /* 8_PATCH TCS needs this for key->input_vertices */
      if (compiler->use_tcs_8_patch)
         ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_TCS;

      /* Flag constants dirty for gl_PatchVerticesIn if needed. */
      const struct shader_info *tcs_info =
         iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
      if (tcs_info &&
          tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
         ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
         ice->state.shaders[MESA_SHADER_TESS_CTRL].sysvals_need_upload = true;
      }
   }

   /* Changes to primitive restart enable or the cut index require
    * re-emitting 3DSTATE_VF.
    */
   if (ice->state.primitive_restart != info->primitive_restart ||
       ice->state.cut_index != info->restart_index) {
      ice->state.dirty |= IRIS_DIRTY_VF;
      ice->state.primitive_restart = info->primitive_restart;
      ice->state.cut_index = info->restart_index;
   }
}
108
/**
 * Update shader draw parameters, flagging VF packets as dirty if necessary.
 *
 * For indirect draws, gl_BaseVertex/gl_BaseInstance come straight from the
 * indirect buffer on the GPU; for direct draws they are CPU-known values.
 * Either way, a change means the vertex buffer/element setup that feeds
 * those system values must be re-emitted.
 */
static void
iris_update_draw_parameters(struct iris_context *ice,
                            const struct pipe_draw_info *info)
{
   if (info->indirect) {
      /* Point draw_params at the indirect buffer itself; the firstvertex/
       * baseinstance fields live at offset 12 (indexed) or 8 (non-indexed)
       * within each draw record.
       */
      pipe_resource_reference(&ice->draw.draw_params_res,
                              info->indirect->buffer);
      ice->draw.draw_params_offset = info->indirect->offset +
                                     (info->index_size ? 12 : 8);
      ice->draw.params.firstvertex = 0;
      ice->draw.params.baseinstance = 0;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   } else if (ice->draw.is_indirect ||
              ice->draw.params.firstvertex !=
              (info->index_size ? info->index_bias : info->start) ||
              (ice->draw.params.baseinstance != info->start_instance)) {
      /* Direct draw whose parameters differ from the last draw (or the
       * last draw was indirect): drop the indirect buffer reference and
       * store the CPU-side values.
       */
      pipe_resource_reference(&ice->draw.draw_params_res, NULL);
      ice->draw.draw_params_offset = 0;
      ice->draw.params.firstvertex =
         info->index_size ? info->index_bias : info->start;
      ice->draw.params.baseinstance = info->start_instance;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   }
   ice->draw.is_indirect = info->indirect;

   /* gl_DrawID and is-indexed-draw are uploaded separately as derived
    * parameters; flag them when either changes.
    */
   if (ice->draw.derived_params.drawid != info->drawid ||
       ice->draw.derived_params.is_indexed_draw != (info->index_size ? ~0 : 0)) {
      ice->draw.derived_params.drawid = info->drawid;
      ice->draw.derived_params.is_indexed_draw = info->index_size ? ~0 : 0;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   }
}
150
151 static void
152 iris_indirect_draw_vbo(struct iris_context *ice,
153 const struct pipe_draw_info *dinfo)
154 {
155 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
156 struct pipe_draw_info info = *dinfo;
157
158 if (info.indirect->indirect_draw_count &&
159 ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
160 /* Upload MI_PREDICATE_RESULT to GPR2.*/
161 ice->vtbl.load_register_reg64(batch, CS_GPR(2), MI_PREDICATE_RESULT);
162 }
163
164 uint64_t orig_dirty = ice->state.dirty;
165
166 for (int i = 0; i < info.indirect->draw_count; i++) {
167 info.drawid = i;
168
169 iris_batch_maybe_flush(batch, 1500);
170
171 iris_update_draw_parameters(ice, &info);
172
173 ice->vtbl.upload_render_state(ice, batch, &info);
174
175 ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
176
177 info.indirect->offset += info.indirect->stride;
178 }
179
180 if (info.indirect->indirect_draw_count &&
181 ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
182 /* Restore MI_PREDICATE_RESULT. */
183 ice->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(2));
184 }
185
186 /* Put this back for post-draw resolves, we'll clear it again after. */
187 ice->state.dirty = orig_dirty;
188 }
189
/**
 * Emit a single (non-indirect) draw: flush the batch if it's getting full,
 * refresh the shader draw parameters, then emit the render state and the
 * 3DPRIMITIVE via the per-generation vtable.
 */
static void
iris_simple_draw_vbo(struct iris_context *ice,
                     const struct pipe_draw_info *draw)
{
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   /* Ensure enough batch space before emitting state. */
   iris_batch_maybe_flush(batch, 1500);

   iris_update_draw_parameters(ice, draw);

   ice->vtbl.upload_render_state(ice, batch, draw);
}
202
/**
 * The pipe->draw_vbo() driver hook.  Performs a draw on the GPU.
 *
 * Ordering matters here: draw info (patch size etc.) must be recorded
 * before compiling shaders, resolves must happen before reserving binder
 * space, and the render dirty bits are only cleared after post-draw
 * resolve tracking has seen them.
 */
void
iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen*)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   /* Conditional rendering already determined this draw is skipped. */
   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
    * write offsets, changing the behavior.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER & ~IRIS_DIRTY_SO_BUFFERS;

   /* Must precede iris_update_compiled_shaders(): patch info feeds the
    * TCS key.
    */
   iris_update_draw_info(ice, info);

   if (devinfo->gen == 9)
      gen9_toggle_preemption(ice, batch, info);

   iris_update_compiled_shaders(ice);

   /* Resolve any aux (HiZ/CCS/MCS) state for textures read by the bound
    * shaders and for the framebuffer, before drawing.
    */
   if (ice->state.dirty & IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES) {
      bool draw_aux_buffer_disabled[BRW_MAX_DRAW_BUFFERS] = { };
      for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
         if (ice->shaders.prog[stage])
            iris_predraw_resolve_inputs(ice, batch, draw_aux_buffer_disabled,
                                        stage, true);
      }
      iris_predraw_resolve_framebuffer(ice, batch, draw_aux_buffer_disabled);
   }

   iris_binder_reserve_3d(ice);

   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);

   if (info->indirect)
      iris_indirect_draw_vbo(ice, info);
   else
      iris_simple_draw_vbo(ice, info);

   iris_postdraw_update_resolve_tracking(ice, batch);

   /* State has been emitted; clear the render dirty bits until the next
    * state change.
    */
   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
}
253
/**
 * Keep the GPU-visible copy of the compute grid size up to date.
 *
 * Maintains two pieces of state: a buffer holding the grid dimensions
 * (either the app's indirect buffer, or an upload of grid->grid), and a
 * RAW surface state pointing at it for shaders that read
 * gl_NumWorkGroups (the CS_WORK_GROUPS binding table surface).
 */
static void
iris_update_grid_size_resource(struct iris_context *ice,
                               const struct pipe_grid_info *grid)
{
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct isl_device *isl_dev = &screen->isl_dev;
   struct iris_state_ref *grid_ref = &ice->state.grid_size;
   struct iris_state_ref *state_ref = &ice->state.grid_surf_state;

   const struct iris_compiled_shader *shader = ice->shaders.prog[MESA_SHADER_COMPUTE];
   /* Only shaders that read gl_NumWorkGroups need the surface. */
   bool grid_needs_surface = shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS];
   bool grid_updated = false;

   if (grid->indirect) {
      /* Indirect launch: reference the app's buffer directly. */
      pipe_resource_reference(&grid_ref->res, grid->indirect);
      grid_ref->offset = grid->indirect_offset;

      /* Zero out the grid size so that the next non-indirect grid launch will
       * re-upload it properly.
       */
      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
      grid_updated = true;
   } else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) {
      /* Direct launch with new dimensions: upload them to a GPU buffer. */
      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
      grid_updated = true;
   }

   /* If we changed the grid, the old surface state is invalid. */
   if (grid_updated)
      pipe_resource_reference(&state_ref->res, NULL);

   /* Skip surface upload if we don't need it or we already have one */
   if (!grid_needs_surface || state_ref->res)
      return;

   struct iris_bo *grid_bo = iris_resource_bo(grid_ref->res);

   void *surf_map = NULL;
   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
                  isl_dev->ss.align, &state_ref->offset, &state_ref->res,
                  &surf_map);
   /* Surface state offsets are relative to surface base address. */
   state_ref->offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
   isl_buffer_fill_state(&screen->isl_dev, surf_map,
                         .address = grid_ref->offset + grid_bo->gtt_offset,
                         .size_B = sizeof(grid->grid),
                         .format = ISL_FORMAT_RAW,
                         .stride_B = 1,
                         .mocs = ice->vtbl.mocs(grid_bo));

   ice->state.dirty |= IRIS_DIRTY_BINDINGS_CS;
}
308
/**
 * The pipe->launch_grid() driver hook.  Dispatches a compute grid on the
 * compute batch.
 */
void
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];

   /* Conditional rendering already determined this dispatch is skipped. */
   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* We can't do resolves on the compute engine, so awkwardly, we have to
    * do them on the render batch...
    */
   if (ice->state.dirty & IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES) {
      iris_predraw_resolve_inputs(ice, &ice->batches[IRIS_BATCH_RENDER], NULL,
                                  MESA_SHADER_COMPUTE, false);
   }

   /* Ensure enough batch space before emitting state. */
   iris_batch_maybe_flush(batch, 1500);

   if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
      iris_update_compiled_compute_shader(ice);

   iris_update_grid_size_resource(ice, grid);

   iris_binder_reserve_compute(ice);
   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);

   /* A deferred predicate result (from the render batch) must be loaded
    * into MI_PREDICATE_RESULT on this batch before dispatching.
    */
   if (ice->state.compute_predicate) {
      ice->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
                                    ice->state.compute_predicate, 0);
      ice->state.compute_predicate = NULL;
   }

   ice->vtbl.upload_compute_state(ice, batch, grid);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* Note: since compute shaders can't access the framebuffer, there's
    * no need to call iris_postdraw_update_resolve_tracking.
    */
}