src/mesa/drivers/dri/i965/brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "util/imports.h"
#include "main/glspirv.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/program.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

#include "brw_cs.h"
#include "brw_gs.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_state.h"

#include "main/shaderapi.h"
#include "main/shaderobj.h"

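/* Note: type_size_scalar_bytes packs uniform components tightly (4 bytes
 * each), while type_size_vec4_bytes rounds every uniform up to 16-byte vec4
 * slots to match the vec4 backend's register layout.
 */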
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm);

nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL/Mesa IR or SPIR-V to NIR */
   if (shader_prog) {
      if (shader_prog->data->spirv) {
         nir = _mesa_spirv_to_nir(ctx, shader_prog, stage, options);
      } else {
         nir = glsl_to_nir(ctx, shader_prog, stage, options);

         /* Remap the locations to slots so that attributes requiring two
          * slots occupy two locations.  For instance, if the IR contains a
          * dvec3 attr0 in location 0 and a vec4 attr1 in location 1, then
          * in NIR attr0 will use locations/slots 0 and 1, and attr1 will
          * use location/slot 2.
          */
         if (nir->info.stage == MESA_SHADER_VERTEX)
            nir_remap_dual_slot_attributes(nir, &prog->DualSlotInputs);
      }
      assert(nir);

      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_validate_shader(nir, "after glsl_to_nir or spirv_to_nir");
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir, "before brw_preprocess_nir");

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (!ctx->SoftFP64 && nir->info.uses_64bit &&
       (options->lower_doubles_options & nir_lower_fp64_full_software)) {
      ctx->SoftFP64 = glsl_float64_funcs_to_nir(ctx, options);
   }

   brw_preprocess_nir(brw->screen->compiler, nir, ctx->SoftFP64);

   if (stage == MESA_SHADER_TESS_CTRL) {
      /* Lower gl_PatchVerticesIn from a system value to a uniform on Gen8+. */
      static const gl_state_index16 tokens[STATE_LENGTH] =
         { STATE_INTERNAL, STATE_TCS_PATCH_VERTICES_IN };
      nir_lower_patch_vertices(nir, 0, devinfo->gen >= 8 ? tokens : NULL);
   }

   if (stage == MESA_SHADER_TESS_EVAL) {
      /* Lower gl_PatchVerticesIn to a constant if we have a TCS, or
       * a uniform if we don't.
       */
      struct gl_linked_shader *tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t static_patch_vertices =
         tcs ? tcs->Program->nir->info.tess.tcs_vertices_out : 0;
      static const gl_state_index16 tokens[STATE_LENGTH] =
         { STATE_INTERNAL, STATE_TES_PATCH_VERTICES_IN };
      nir_lower_patch_vertices(nir, static_patch_vertices, tokens);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

static void
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size * (length == 3 ? 4 : length);
}
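
/* Worked example: for a vec3 of 32-bit floats, comp_size = 4 and length = 3,
 * so *size = 12 but *align = 16: three-component vectors are padded out to a
 * full four-component slot, matching std430 alignment rules.
 */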

void
brw_nir_lower_resources(nir_shader *nir, struct gl_shader_program *shader_prog,
                        struct gl_program *prog,
                        const struct gen_device_info *devinfo)
{
   NIR_PASS_V(prog->nir, gl_nir_lower_samplers, shader_prog);
   prog->info.textures_used = prog->nir->info.textures_used;
   prog->info.textures_used_by_txf = prog->nir->info.textures_used_by_txf;

   NIR_PASS_V(prog->nir, brw_nir_lower_image_load_store, devinfo, NULL);

   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
       shader_prog->data->spirv) {
      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_type_info);
      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   NIR_PASS_V(prog->nir, gl_nir_lower_buffers, shader_prog);
   /* Do a round of constant folding to clean up address calculations */
   NIR_PASS_V(prog->nir, nir_opt_constant_folding);
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
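   /* (Note that the sentinel is never dereferenced; it only needs to supply
    * an address that no live gl_program can ever have.)
    */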
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program( ctx, prog );
}


static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      _mesa_program_fragment_position_to_sysval(&newFP->program);
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_nir_lower_resources(prog->nir, NULL, prog, &brw->screen->devinfo);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_nir_lower_resources(prog->nir, NULL, prog, &brw->screen->devinfo);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

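/* Implements glMemoryBarrier().  The GL barrier bitfield is translated into
 * PIPE_CONTROL flush/invalidate bits; the unconditional data-cache flush in
 * the initial value of `bits` also covers barriers with no explicit case of
 * their own (e.g. GL_SHADER_STORAGE_BARRIER_BIT), since those writes go
 * through the data cache on these generations.
 */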
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

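/* Barrier for non-coherent framebuffer fetch: when the coherent
 * EXT_shader_framebuffer_fetch path isn't available, flush the render cache
 * and invalidate the texture cache so prior rendering becomes visible to
 * subsequent fetches of the render target.
 */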
static void
brw_framebuffer_fetch_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.EXT_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

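/* Return (via *scratch_bo) a scratch BO of at least `size` bytes, allocating
 * a fresh one only when the current BO is missing or too small.  The old
 * contents are discarded on reallocation, so callers must not rely on
 * scratch data surviving a resize.
 */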
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo =
         brw_bo_alloc(brw->bufmgr, "scratch bo", size, BRW_MEMZONE_SCRATCH);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes per thread, scaled by the maximum number of threads the stage can
 * have in flight (computed below from the device info).
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *    "Scratch Space per slice is computed based on 4 sub-slices. SW
       *     must allocate scratch space enough so that each slice has 4
       *     subslices allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices, but we
       * want to treat each slice as having 4 subslices rather than its
       * actual subslice count.
       *
       * For ICL, scratch space allocation is based on the number of threads
       * in the base configuration.
       */
      if (devinfo->gen == 11)
         subslices = 8;
      else if (devinfo->gen >= 9 && devinfo->gen < 11)
         subslices = 4 * brw->screen->devinfo.num_slices;

      unsigned scratch_ids_per_subslice;
      if (devinfo->gen >= 11) {
         /* The MEDIA_VFE_STATE docs say:
          *
          *    "Starting with this configuration, the Maximum Number of
          *     Threads must be set to (#EU * 8) for GPGPU dispatches.
          *
          *     Although there are only 7 threads per EU in the
          *     configuration, the FFTID is calculated as if there are 8
          *     threads per EU, which in turn requires a larger amount of
          *     Scratch Space to be allocated by the driver."
          */
         scratch_ids_per_subslice = 8 * 8;
      } else if (devinfo->is_haswell) {
         /* WaCSScratchSize:hsw
          *
          * Haswell's scratch space address calculation appears to be sparse
          * rather than tightly packed.  The Thread ID has bits indicating
          * which subslice, EU within a subslice, and thread within an EU it
          * is.  There's a maximum of two slices and two subslices, so these
          * can be stored with a single bit.  Even though there are only 10
          * EUs per subslice, this is stored in 4 bits, so there's an
          * effective maximum value of 16 EUs.  Similarly, although there are
          * only 7 threads per EU, this is stored in a 3 bit number, giving
          * an effective maximum value of 8 threads per EU.
          *
          * This means that we need to use 16 * 8 instead of 10 * 7 for the
          * number of threads per subslice.
          */
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->is_cherryview) {
         /* Cherryview devices have either 6 or 8 EUs per subslice, and each
          * EU has 7 threads.  The 6 EU devices appear to calculate thread
          * IDs as if they had 8 EUs.
          */
         scratch_ids_per_subslice = 8 * 7;
      } else {
         scratch_ids_per_subslice = devinfo->max_cs_threads;
      }

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, BRW_MEMZONE_SCRATCH);
}
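
/* Sizing example (hypothetical Haswell GT2 config, 1 slice with 2
 * subslices): for a compute stage with per_thread_size = 2048,
 * scratch_ids_per_subslice is 16 * 8 = 128, so thread_count = 256 and the
 * BO ends up 256 * 2048 bytes, i.e. 512 KiB.
 */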

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->FramebufferFetchBarrier = brw_framebuffer_fetch_barrier;
}

struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3,
                   BRW_MEMZONE_OTHER);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}
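
/* The shader_time BO holds max_entries records of three consecutive
 * BRW_SHADER_TIME_STRIDE-byte slots each: accumulated time, the count of
 * writes, and the count of resets.  brw_collect_shader_time() below reads
 * the slots back in that order.
 */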

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract: the uint64_t difference would be truncated when
    * converted to an int.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_FS32:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

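      /* Scale the accumulated time to compensate for resets: time / written
       * is the average per recorded interval, and multiplying by
       * (written + reset) extrapolates over the intervals whose values were
       * lost to a reset.
       */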
      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_FS32:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type   ID                  cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to the index of the time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_FS32:
         stage = "fs32";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "fs32", 0, total_by_type[ST_FS32], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO to clear it out for our next collection. */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

void
brw_setup_tex_for_precompile(const struct gen_device_info *devinfo,
                             struct brw_sampler_prog_key_data *tex,
                             const struct gl_program *prog)
{
   const bool has_shader_channel_select =
      devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, and also so that adding small offsets to them will trip our asserts
 * that surface indices are < BRW_MAX_SURFACES.
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* Set the binding table size.  Some callers may append new entries
    * and increase this accordingly.
    */
   stage_prog_data->binding_table.size_bytes = next_binding_table_offset * 4;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}
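
/* Layout example (hypothetical stage with 2 textures, 1 UBO, no SSBOs,
 * images, or gather, shader_time disabled, starting from offset 0):
 * texture_start = 0, ubo_start = 2, pull_constants_start = 3,
 * plane_start[0] = 0, plane_start[1] = 4, plane_start[2] = 6, and
 * size_bytes = 8 * 4 = 32; every unused group reads 0xd0d0d0d0.
 */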

void
brw_populate_default_key(const struct brw_compiler *compiler,
                         union brw_any_prog_key *prog_key,
                         struct gl_shader_program *sh_prog,
                         struct gl_program *prog)
{
   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      brw_vs_populate_default_key(compiler, &prog_key->vs, prog);
      break;
   case MESA_SHADER_TESS_CTRL:
      brw_tcs_populate_default_key(compiler, &prog_key->tcs, sh_prog, prog);
      break;
   case MESA_SHADER_TESS_EVAL:
      brw_tes_populate_default_key(compiler, &prog_key->tes, sh_prog, prog);
      break;
   case MESA_SHADER_GEOMETRY:
      brw_gs_populate_default_key(compiler, &prog_key->gs, prog);
      break;
   case MESA_SHADER_FRAGMENT:
      brw_wm_populate_default_key(compiler, &prog_key->wm, prog);
      break;
   case MESA_SHADER_COMPUTE:
      brw_cs_populate_default_key(compiler, &prog_key->cs, prog);
      break;
   default:
      unreachable("Unsupported stage!");
   }
}

void
brw_debug_recompile(struct brw_context *brw,
                    gl_shader_stage stage,
                    unsigned api_id,
                    struct brw_base_prog_key *key)
{
   const struct brw_compiler *compiler = brw->screen->compiler;
   enum brw_cache_id cache_id = brw_stage_cache_id(stage);

   compiler->shader_perf_log(brw, "Recompiling %s shader for program %d\n",
                             _mesa_shader_stage_to_string(stage), api_id);

   const void *old_key =
      brw_find_previous_compile(&brw->cache, cache_id, key->program_string_id);

   brw_debug_key_recompile(compiler, brw, stage, old_key, key);
}