/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_draw.c
 *
 * The main driver hooks for drawing and launching compute shaders.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "intel/compiler/brw_compiler.h"
#include "iris_context.h"
#include "iris_defines.h"

/**
 * Record the current primitive mode and restart information, flagging
 * related packets as dirty if necessary.
 */
static void
iris_update_draw_info(struct iris_context *ice,
                      const struct pipe_draw_info *info)
{
   if (ice->state.prim_mode != info->mode) {
      ice->state.prim_mode = info->mode;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;
   }

   if (info->mode == PIPE_PRIM_PATCHES &&
       ice->state.vertices_per_patch != info->vertices_per_patch) {
      ice->state.vertices_per_patch = info->vertices_per_patch;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;

      /* Flag constants dirty for gl_PatchVerticesIn if needed. */
      const struct shader_info *tcs_info =
         iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
      if (tcs_info &&
          tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
         ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
         ice->state.shaders[MESA_SHADER_TESS_CTRL].cbuf0_needs_upload = true;
      }
   }

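   /* Primitive restart enable and the cut index are both programmed in the
    * 3DSTATE_VF packet, so a change to either one means re-emitting it.
    */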
   if (ice->state.primitive_restart != info->primitive_restart ||
       ice->state.cut_index != info->restart_index) {
      ice->state.dirty |= IRIS_DIRTY_VF;
      ice->state.primitive_restart = info->primitive_restart;
      ice->state.cut_index = info->restart_index;
   }

   if (info->indirect) {
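      /* Point the draw parameters at the indirect buffer itself.  In the
       * ARB_draw_indirect command layouts, baseVertex sits at byte 12 of
       * DrawElementsIndirectCommand and `first` at byte 8 of
       * DrawArraysIndirectCommand, hence the index_size ? 12 : 8 below.
       */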
      pipe_resource_reference(&ice->draw.draw_params_res,
                              info->indirect->buffer);
      ice->draw.draw_params_offset = info->indirect->offset +
                                     (info->index_size ? 12 : 8);
      ice->draw.params.firstvertex = 0;
      ice->draw.params.baseinstance = 0;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   } else if (ice->draw.is_indirect ||
              ice->draw.params.firstvertex !=
              (info->index_size ? info->index_bias : info->start) ||
              ice->draw.params.baseinstance != info->start_instance) {
      pipe_resource_reference(&ice->draw.draw_params_res, NULL);
      ice->draw.draw_params_offset = 0;
      ice->draw.params.firstvertex =
         info->index_size ? info->index_bias : info->start;
      ice->draw.params.baseinstance = info->start_instance;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   }
   ice->draw.is_indirect = info->indirect != NULL;

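   /* gl_DrawID and an is-indexed-draw flag are likewise sourced from a
    * vertex buffer; ~0 is used for the indexed case so the flag arrives
    * in the shader as an all-ones mask.
    */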
   if (ice->draw.derived_params.drawid != info->drawid ||
       ice->draw.derived_params.is_indexed_draw != (info->index_size ? ~0 : 0)) {
      ice->draw.derived_params.drawid = info->drawid;
      ice->draw.derived_params.is_indexed_draw = info->index_size ? ~0 : 0;
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   }
}

/**
 * The pipe->draw_vbo() driver hook.  Performs a draw on the GPU.
 */
void
iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
    * write offsets, changing the behavior.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER & ~IRIS_DIRTY_SO_BUFFERS;

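   /* 1500 is a rough upper bound on the batch bytes a single draw can emit;
    * flushing up front keeps a draw's state from straddling two batches.
    */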
   iris_batch_maybe_flush(batch, 1500);

   iris_update_draw_info(ice, info);

   iris_update_compiled_shaders(ice);

   if (ice->state.dirty & IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES) {
      bool draw_aux_buffer_disabled[BRW_MAX_DRAW_BUFFERS] = { false };
      for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
         if (ice->shaders.prog[stage])
            iris_predraw_resolve_inputs(ice, batch, draw_aux_buffer_disabled,
                                        stage, true);
      }
      iris_predraw_resolve_framebuffer(ice, batch, draw_aux_buffer_disabled);
   }

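   /* Reserve binding table space for this draw.  If that lands in a new
    * binder BO, Surface State Base Address has to be updated to match,
    * which is what the vtbl call below takes care of.
    */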
   iris_binder_reserve_3d(ice);

   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
   ice->vtbl.upload_render_state(ice, batch, info);

   iris_postdraw_update_resolve_tracking(ice, batch);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
}

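/**
 * Upload the grid dimensions (or a reference to the indirect dispatch
 * buffer) and build a raw buffer surface for them, so compute shaders
 * can read gl_NumWorkGroups.
 */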
static void
iris_update_grid_size_resource(struct iris_context *ice,
                               const struct pipe_grid_info *grid)
{
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct isl_device *isl_dev = &screen->isl_dev;
   struct iris_state_ref *grid_ref = &ice->state.grid_size;
   struct iris_state_ref *state_ref = &ice->state.grid_surf_state;

   // XXX: if the shader doesn't actually care about the grid info,
   // don't bother uploading the surface?

   if (grid->indirect) {
      pipe_resource_reference(&grid_ref->res, grid->indirect);
      grid_ref->offset = grid->indirect_offset;

      /* Zero out the grid size so that the next non-indirect grid launch
       * will re-upload it properly.
       */
      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   } else {
      /* If the size is the same, we don't need to upload anything. */
      if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) == 0)
         return;

      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));

      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
   }

   void *surf_map = NULL;
   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
                  isl_dev->ss.align, &state_ref->offset, &state_ref->res,
                  &surf_map);
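   /* Binding table entries are offsets from Surface State Base Address,
    * so fold the upload BO's delta from that base into the offset.
    */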
   state_ref->offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
   isl_buffer_fill_state(&screen->isl_dev, surf_map,
                         .address = grid_ref->offset +
                            iris_resource_bo(grid_ref->res)->gtt_offset,
                         .size_B = sizeof(grid->grid),
                         .format = ISL_FORMAT_RAW,
                         .stride_B = 1,
                         .mocs = 4); // XXX: MOCS

   ice->state.dirty |= IRIS_DIRTY_BINDINGS_CS;
}

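/**
 * The pipe->launch_grid() driver hook: dispatches a compute grid.
 */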
void
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];
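   /* Compute dispatches are recorded in their own batch, with a separate
    * dirty-bit mask (IRIS_ALL_DIRTY_FOR_COMPUTE) from the render batch.
    */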

   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* We can't do resolves on the compute engine, so awkwardly, we have to
    * do them on the render batch...
    */
   if (ice->state.dirty & IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES) {
      iris_predraw_resolve_inputs(ice, &ice->batches[IRIS_BATCH_RENDER], NULL,
                                  MESA_SHADER_COMPUTE, false);
   }

   iris_batch_maybe_flush(batch, 1500);

   /* TODO: only recompile when IRIS_DIRTY_UNCOMPILED_CS is flagged. */
   iris_update_compiled_compute_shader(ice);

   iris_update_grid_size_resource(ice, grid);

   iris_binder_reserve_compute(ice);
   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);

   if (ice->state.compute_predicate) {
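      /* Conditional dispatch: load the saved predicate value into
       * MI_PREDICATE_RESULT so predicated packets in the compute batch can
       * be skipped if it failed.
       */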
      ice->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
                                    ice->state.compute_predicate, 0);
      ice->state.compute_predicate = NULL;
   }

   ice->vtbl.upload_compute_state(ice, batch, grid);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* Note: since compute shaders can't access the framebuffer, there's
    * no need to call iris_postdraw_update_resolve_tracking.
    */
}