iris: Resolves for compute
[mesa.git] / src / gallium / drivers / iris / iris_draw.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_draw.c
25 *
26 * The main driver hooks for drawing and launching compute shaders.
27 */
28
29 #include <stdio.h>
30 #include <errno.h>
31 #include "pipe/p_defines.h"
32 #include "pipe/p_state.h"
33 #include "pipe/p_context.h"
34 #include "pipe/p_screen.h"
35 #include "util/u_inlines.h"
36 #include "util/u_transfer.h"
37 #include "util/u_upload_mgr.h"
38 #include "intel/compiler/brw_compiler.h"
39 #include "iris_context.h"
40 #include "iris_defines.h"
41
42 /**
43 * Record the current primitive mode and restart information, flagging
44 * related packets as dirty if necessary.
45 */
46 static void
47 iris_update_draw_info(struct iris_context *ice,
48 const struct pipe_draw_info *info)
49 {
50 if (ice->state.prim_mode != info->mode) {
51 ice->state.prim_mode = info->mode;
52 ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;
53 }
54
55 if (info->mode == PIPE_PRIM_PATCHES &&
56 ice->state.vertices_per_patch != info->vertices_per_patch) {
57 ice->state.vertices_per_patch = info->vertices_per_patch;
58 ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;
59
60 /* Flag constants dirty for gl_PatchVerticesIn if needed. */
61 const struct shader_info *tcs_info =
62 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
63 if (tcs_info &&
64 tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
65 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
66 ice->state.shaders[MESA_SHADER_TESS_CTRL].cbuf0_needs_upload = true;
67 }
68 }
69
70 if (ice->state.primitive_restart != info->primitive_restart ||
71 ice->state.cut_index != info->restart_index) {
72 ice->state.dirty |= IRIS_DIRTY_VF;
73 ice->state.primitive_restart = info->primitive_restart;
74 ice->state.cut_index = info->restart_index;
75 }
76 }
77
/**
 * The pipe->draw_vbo() driver hook.  Performs a draw on the GPU.
 *
 * Updates tracked draw state, compiles shader variants as needed, performs
 * pre-draw resolves, then emits the render state and draw via the vtbl.
 */
void
iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   /* Conditional rendering already determined nothing should be drawn. */
   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
    * write offsets, changing the behavior.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER & ~IRIS_DIRTY_SO_BUFFERS;

   /* Flush now if the batch is too full to hold the draw's commands;
    * 1500 is the (conservative) estimated space required.
    */
   iris_batch_maybe_flush(batch, 1500);

   iris_update_draw_info(ice, info);

   iris_update_compiled_shaders(ice);

   /* Resolve inputs for every render stage (all stages before COMPUTE),
    * recording any color buffers whose aux usage had to be disabled so the
    * framebuffer resolve below can take that into account.
    */
   bool draw_aux_buffer_disabled[BRW_MAX_DRAW_BUFFERS] = { };
   for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
      if (ice->shaders.prog[stage])
         iris_predraw_resolve_inputs(ice,batch, &ice->state.shaders[stage],
                                     draw_aux_buffer_disabled, true);
   }
   iris_predraw_resolve_framebuffer(ice, batch, draw_aux_buffer_disabled);

   iris_binder_reserve_3d(ice);

   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
   ice->vtbl.upload_render_state(ice, batch, info);

   /* Everything render-related has been emitted; clear those dirty bits. */
   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;

   iris_postdraw_update_resolve_tracking(ice, batch);
}
119
/**
 * Upload the compute grid dimensions (or reference the indirect buffer
 * containing them) and build a RAW buffer surface state pointing at them,
 * for shaders that read the grid size.
 *
 * Results land in ice->state.grid_size / ice->state.grid_surf_state, and
 * IRIS_DIRTY_BINDINGS_CS is flagged so the new surface gets bound.
 */
static void
iris_update_grid_size_resource(struct iris_context *ice,
                               const struct pipe_grid_info *grid)
{
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct isl_device *isl_dev = &screen->isl_dev;
   struct iris_state_ref *grid_ref = &ice->state.grid_size;
   struct iris_state_ref *state_ref = &ice->state.grid_surf_state;

   // XXX: if the shader doesn't actually care about the grid info,
   // don't bother uploading the surface?

   if (grid->indirect) {
      /* Indirect launch: point directly at the caller's buffer. */
      pipe_resource_reference(&grid_ref->res, grid->indirect);
      grid_ref->offset = grid->indirect_offset;

      /* Zero out the grid size so that the next non-indirect grid launch will
       * re-upload it properly.
       */
      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   } else {
      /* If the size is the same, we don't need to upload anything. */
      if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) == 0)
         return;

      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));

      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
   }

   /* Build a surface state describing the grid data as a raw buffer. */
   void *surf_map = NULL;
   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
                  isl_dev->ss.align, &state_ref->offset, &state_ref->res,
                  &surf_map);
   /* Surface state offsets are relative to the surface base address, not
    * the start of the BO, so adjust accordingly.
    */
   state_ref->offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
   isl_buffer_fill_state(&screen->isl_dev, surf_map,
                         .address = grid_ref->offset +
                            iris_resource_bo(grid_ref->res)->gtt_offset,
                         .size_B = sizeof(grid->grid),
                         .format = ISL_FORMAT_RAW,
                         .stride_B = 1,
                         .mocs = 4); // XXX: MOCS

   ice->state.dirty |= IRIS_DIRTY_BINDINGS_CS;
}
167
/**
 * The pipe->launch_grid() driver hook: dispatches a compute grid.
 *
 * Resolves are performed on the render batch, then compute state is
 * compiled/uploaded and the dispatch emitted on the compute batch.
 */
void
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];

   /* Conditional rendering already determined nothing should run. */
   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* We can't do resolves on the compute engine, so awkwardly, we have to
    * do them on the render batch...
    */
   for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      iris_predraw_resolve_inputs(ice, &ice->batches[IRIS_BATCH_RENDER],
                                  &ice->state.shaders[stage], NULL, false);
   }

   /* Flush if the batch can't hold the estimated 1500 bytes of commands. */
   iris_batch_maybe_flush(batch, 1500);

   //if (dirty & IRIS_DIRTY_UNCOMPILED_CS)
   iris_update_compiled_compute_shader(ice);

   iris_update_grid_size_resource(ice, grid);

   iris_binder_reserve_compute(ice);
   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);

   /* Load the pending predicate result into MI_PREDICATE_DATA so the
    * dispatch can be conditional; consume it once loaded.
    */
   if (ice->state.compute_predicate) {
      ice->vtbl.load_register_mem64(batch, MI_PREDICATE_DATA,
                                    ice->state.compute_predicate, 0);
      ice->state.compute_predicate = NULL;
   }

   ice->vtbl.upload_compute_state(ice, batch, grid);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;

   /* Note: since compute shaders can't access the framebuffer, there's
    * no need to call iris_postdraw_update_resolve_tracking.
    */
}