i965: Plumb compiler debug logging through a function pointer in brw_compiler
[mesa.git] src/mesa/drivers/dri/i965/brw_cs.cpp
/*
 * Copyright (c) 2014 - 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include "util/ralloc.h"
#include "brw_context.h"
#include "brw_cs.h"
#include "brw_fs.h"
#include "brw_eu.h"
#include "brw_wm.h"
#include "intel_mipmap_tree.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"

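/* Compare two brw_cs_prog_data structures for equality: the base struct
 * (which contains pointers and so can't simply be memcmp'd) goes through
 * brw_stage_prog_data_compare(), and the remaining CS-specific fields are
 * compared as raw bytes.
 */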
extern "C"
bool
brw_cs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_cs_prog_data *a =
      (const struct brw_cs_prog_data *)in_a;
   const struct brw_cs_prog_data *b =
      (const struct brw_cs_prog_data *)in_b;

   /* Compare the base structure. */
   if (!brw_stage_prog_data_compare(&a->base, &b->base))
      return false;

   /* Compare the rest of the structure. */
   const unsigned offset = sizeof(struct brw_stage_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_cs_prog_data) - offset))
      return false;

   return true;
}


static const unsigned *
brw_cs_emit(struct brw_context *brw,
            void *mem_ctx,
            const struct brw_cs_prog_key *key,
            struct brw_cs_prog_data *prog_data,
            struct gl_compute_program *cp,
            struct gl_shader_program *prog,
            unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader =
      (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_COMPUTE];

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      brw_dump_ir("compute", prog, &shader->base, &cp->Base);

   prog_data->local_size[0] = cp->LocalSize[0];
   prog_data->local_size[1] = cp->LocalSize[1];
   prog_data->local_size[2] = cp->LocalSize[2];
   int local_workgroup_size =
      cp->LocalSize[0] * cp->LocalSize[1] * cp->LocalSize[2];

   cfg_t *cfg = NULL;
   const char *fail_msg = NULL;

   /* Now the main event: Visit the shader IR and generate our CS IR for it.
    */
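   /* A workgroup of local_workgroup_size invocations is executed as
    * local_workgroup_size / simd_size hardware threads, so a given SIMD
    * width is only usable when that thread count fits within
    * brw->max_cs_threads; the checks below enforce this for the SIMD8 and
    * SIMD16 variants.
    */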
   fs_visitor v8(brw, mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
                 &cp->Base, 8);
   if (!v8.run_cs()) {
      fail_msg = v8.fail_msg;
   } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
      cfg = v8.cfg;
      prog_data->simd_size = 8;
   }

   fs_visitor v16(brw, mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
                  &cp->Base, 16);
   if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
       !fail_msg && !v8.simd16_unsupported &&
       local_workgroup_size <= 16 * brw->max_cs_threads) {
      /* Try a SIMD16 compile */
      v16.import_uniforms(&v8);
      if (!v16.run_cs()) {
         perf_debug("SIMD16 shader failed to compile: %s", v16.fail_msg);
         if (!cfg) {
            fail_msg =
               "Couldn't generate SIMD16 program and not "
               "enough threads for SIMD8";
         }
      } else {
         cfg = v16.cfg;
         prog_data->simd_size = 16;
      }
   }

   if (unlikely(cfg == NULL)) {
      assert(fail_msg);
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, fail_msg);
      _mesa_problem(NULL, "Failed to compile compute shader: %s\n",
                    fail_msg);
      return NULL;
   }

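   /* Code generation goes through the brw_compiler object stored on the
    * intel_screen rather than the GL context.
    */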
   fs_generator g(brw->intelScreen->compiler,
                  mem_ctx, (void*) key, &prog_data->base, &cp->Base,
                  v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
   if (INTEL_DEBUG & DEBUG_CS) {
      char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
                                   prog->Label ? prog->Label : "unnamed",
                                   prog->Name);
      g.enable_debug(name);
   }

   g.generate_code(cfg, prog_data->simd_size);

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once) {
         _mesa_problem(&brw->ctx, "CS programs shouldn't need recompiles");
      }
      shader->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("CS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   return g.get_assembly(final_assembly_size);
}

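/* Compile the compute program for the given key and upload the result to
 * the program cache, setting brw->cs.base.prog_offset and
 * brw->cs.prog_data on success.
 */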
static bool
brw_codegen_cs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_compute_program *cp,
                    struct brw_cs_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   const GLuint *program;
   void *mem_ctx = ralloc_context(NULL);
   GLuint program_size;
   struct brw_cs_prog_data prog_data;

   struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
   assert(cs);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = cs->num_uniform_components;

   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.nr_params = param_count;

   program = brw_cs_emit(brw, mem_ctx, key, &prog_data,
                         &cp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

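   /* total_scratch is per-thread scratch space in bytes, so size the
    * scratch BO to cover the maximum number of compute threads.
    */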
   if (prog_data.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->cs.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_cs_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_CACHE_CS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->cs.base.prog_offset, &brw->cs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}


static void
brw_cs_populate_key(struct brw_context *brw, struct brw_cs_prog_key *key)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   const struct brw_compute_program *cp =
      (struct brw_compute_program *) brw->compute_program;

   memset(key, 0, sizeof(*key));

   /* The unique compute program ID */
   key->program_string_id = cp->id;
}


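/* Upload the compute shader program: when the bound compute program
 * changes, build its key, look it up in the program cache, and compile a
 * new variant on a miss.
 */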
extern "C"
void
brw_upload_cs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_cs_prog_key key;
   struct brw_compute_program *cp = (struct brw_compute_program *)
      brw->compute_program;

   if (!cp)
      return;

   if (!brw_state_dirty(brw, 0, BRW_NEW_COMPUTE_PROGRAM))
      return;

   brw_cs_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_CACHE_CS_PROG,
                         &key, sizeof(key),
                         &brw->cs.base.prog_offset, &brw->cs.prog_data)) {
      bool success =
         brw_codegen_cs_prog(brw,
                             ctx->Shader.CurrentProgram[MESA_SHADER_COMPUTE],
                             cp, &key);
      (void) success;
      assert(success);
   }
   brw->cs.base.prog_data = &brw->cs.prog_data->base;
}


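/* Link-time precompile: compile with a default key, then restore the
 * previously active program offset and prog_data so the precompile
 * doesn't disturb the current state.
 */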
extern "C" bool
brw_cs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_cs_prog_key key;

   struct gl_compute_program *cp = (struct gl_compute_program *) prog;
   struct brw_compute_program *bcp = brw_compute_program(cp);

   memset(&key, 0, sizeof(key));
   key.program_string_id = bcp->id;

   brw_setup_tex_for_precompile(brw, &key.tex, prog);

   uint32_t old_prog_offset = brw->cs.base.prog_offset;
   struct brw_cs_prog_data *old_prog_data = brw->cs.prog_data;

   bool success = brw_codegen_cs_prog(brw, shader_prog, bcp, &key);

   brw->cs.base.prog_offset = old_prog_offset;
   brw->cs.prog_data = old_prog_data;

   return success;
}


static void
brw_upload_cs_state(struct brw_context *brw)
{
   if (!brw->cs.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                                8 * 4, 64, &offset);
   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw->vtbl.emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, BRW_SURFACEFORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   uint32_t *bind = (uint32_t*) brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                                                prog_data->binding_table.size_bytes,
                                                32, &stage_state->bind_bo_offset);

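   /* MEDIA_VFE_STATE is 8 dwords on gen7 and 9 on gen8+, where the scratch
    * space base pointer grows to 64 bits.
    */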
   uint32_t dwords = brw->gen < 8 ? 8 : 9;
   BEGIN_BATCH(dwords);
   OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));

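   /* Scratch space base pointer (a 64-bit relocation on gen8+).  The low
    * bits of the relocation hold the per-thread scratch size:
    * ffs(total_scratch) - 11 encodes a power-of-two size, with 1 KB
    * mapping to 0, 2 KB to 1, and so on.
    */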
   if (prog_data->total_scratch) {
      if (brw->gen >= 8)
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(prog_data->total_scratch) - 11);
      else
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(prog_data->total_scratch) - 11);
   } else {
      OUT_BATCH(0);
      if (brw->gen >= 8)
         OUT_BATCH(0);
   }

   const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
   const uint32_t vfe_gpgpu_mode =
      brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
   OUT_BATCH(SET_FIELD(brw->max_cs_threads - 1, MEDIA_VFE_STATE_MAX_THREADS) |
             SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
             SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
             SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
             vfe_gpgpu_mode);

   OUT_BATCH(0);
   const uint32_t vfe_urb_allocation = brw->gen >= 8 ? 2 : 0;
   OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);

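   /* Build the 8-dword interface descriptor: kernel start pointer first
    * (with its high half on gen8+), then the binding table pointer; the
    * remaining fields are left zeroed.
    */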
   memset(desc, 0, 8 * 4);

   int dw = 0;
   desc[dw++] = brw->cs.base.prog_offset;
   if (brw->gen >= 8)
      desc[dw++] = 0; /* Kernel Start Pointer High */
   desc[dw++] = 0;
   desc[dw++] = 0;
   desc[dw++] = stage_state->bind_bo_offset;

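   /* MEDIA_INTERFACE_DESCRIPTOR_LOAD points the hardware at the descriptor
    * just built: its length in bytes (8 dwords) and its offset in the
    * batch's state space.
    */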
   BEGIN_BATCH(4);
   OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(8 * 4);
   OUT_BATCH(offset);
   ADVANCE_BATCH();
}


extern "C"
const struct brw_tracked_state brw_cs_state = {
   /* explicit initialisers aren't valid C++, comment
    * them for documentation purposes */
   /* .dirty = */{
      /* .mesa = */ 0,
      /* .brw = */ BRW_NEW_CS_PROG_DATA,
   },
   /* .emit = */ brw_upload_cs_state
};