i965: Replace brw_setup_tex_for_precompile brw with devinfo
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "main/glspirv.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

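/* Assign uniform variable locations and lower uniform loads to offset-based
 * access.  Scalar backends pack uniforms at 4-byte granularity; vec4
 * backends use full 16-byte vec4 slots.
 */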
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL/Mesa IR or SPIR-V to NIR */
   if (shader_prog) {
      if (shader_prog->data->spirv) {
         nir = _mesa_spirv_to_nir(ctx, shader_prog, stage, options);
      } else {
         nir = glsl_to_nir(shader_prog, stage, options);
      }
      assert(nir);

      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* Lower PatchVerticesIn from a system value to a uniform.  This needs to
    * happen before brw_preprocess_nir, since that will lower system values
    * to intrinsics.
    *
    * We only do this for TES if no TCS is present, since otherwise we know
    * the number of vertices in the patch at link time and can lower it
    * directly to a constant.  That case is handled below by
    * nir_lower_tes_patch_vertices, which has to run after brw_preprocess_nir
    * has turned the system values into intrinsics.
    */
   const bool lower_patch_vertices_in_to_uniform =
      (stage == MESA_SHADER_TESS_CTRL && brw->screen->devinfo.gen >= 8) ||
      (stage == MESA_SHADER_TESS_EVAL &&
       !shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);

   if (lower_patch_vertices_in_to_uniform)
      brw_nir_lower_patch_vertices_in_to_uniform(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_TESS_EVAL && !lower_patch_vertices_in_to_uniform) {
      assert(shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);
      struct gl_linked_shader *linked_tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t patch_vertices = linked_tcs->Program->info.tess.tcs_vertices_out;
      nir_lower_tes_patch_vertices(nir, patch_vertices);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program( ctx, prog );
}


static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

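/* Translate GL memory barrier bits into the PIPE_CONTROL flushes and cache
 * invalidations needed to make prior shader writes visible to each consumer.
 */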
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

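/* When coherent framebuffer fetch is not exposed, the non-coherent path reads
 * the render target through the sampler, so flush the render cache and
 * invalidate the texture cache to keep the two views consistent.
 */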
static void
brw_framebuffer_fetch_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.EXT_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

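/* Hand back (via \p scratch_bo) a scratch BO of at least \p size bytes,
 * reusing the current one if it is already large enough.
 */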
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo =
         brw_bo_alloc(brw->bufmgr, "scratch bo", size, BRW_MEMZONE_SCRATCH);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the number of threads the hardware can run concurrently for
 * that stage.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *    "Scratch Space per slice is computed based on 4 sub-slices. SW must
       *     allocate scratch space enough so that each slice has 4 slices
       *     allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices, but per
       * the quote above we must size scratch as if every slice had 4
       * subslices, regardless of how many it actually has.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * devinfo->num_slices;

      unsigned scratch_ids_per_subslice;
      if (devinfo->is_haswell) {
         /* WaCSScratchSize:hsw
          *
          * Haswell's scratch space address calculation appears to be sparse
          * rather than tightly packed.  The Thread ID has bits indicating
          * which subslice, EU within a subslice, and thread within an EU it
          * is.  There's a maximum of two slices and two subslices, so these
          * can be stored with a single bit.  Even though there are only 10 EUs
          * per subslice, this is stored in 4 bits, so there's an effective
          * maximum value of 16 EUs.  Similarly, although there are only 7
          * threads per EU, this is stored in a 3 bit number, giving an
          * effective maximum value of 8 threads per EU.
          *
          * This means that we need to use 16 * 8 instead of 10 * 7 for the
          * number of threads per subslice.
          */
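         /* That is 16 * 8 = 128 scratch IDs per subslice, compared to the
          * 10 * 7 = 70 threads that can actually be resident at once.
          */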
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->is_cherryview) {
         /* Cherryview devices have either 6 or 8 EUs per subslice, and each
          * EU has 7 threads.  The 6 EU devices appear to calculate thread IDs
          * as if it had 8 EUs.
          */
         scratch_ids_per_subslice = 8 * 7;
      } else {
         scratch_ids_per_subslice = devinfo->max_cs_threads;
      }

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, BRW_MEMZONE_SCRATCH);
}

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->FramebufferFetchBarrier = brw_framebuffer_fetch_barrier;
}

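/* Per-entry accumulators read back from the shader_time BO; see
 * brw_collect_shader_time().
 */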
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
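   /* Each entry occupies three BRW_SHADER_TIME_STRIDE-sized slots: the
    * accumulated time, the number of times it was written, and the number of
    * times it was reset.  brw_collect_shader_time() reads them in that order.
    */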
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3,
                   BRW_MEMZONE_OTHER);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We can't just subtract: truncating the 64-bit difference to an int
    * could flip its sign.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_FS32:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the per-stage shader
          * times.  Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

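      /* An entry's accumulator is periodically reset to keep it from
       * overflowing (counted in "reset"), so extrapolate the lost samples by
       * assuming they averaged the same as the writes that survived.
       */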
      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_FS32:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID                  cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to the index of the time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_FS32:
         stage = "fs32";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "fs32", 0, total_by_type[ST_FS32], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;
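
      /* The three counters are BRW_SHADER_TIME_STRIDE bytes apart; dividing
       * each byte offset by 4 turns it into a uint32_t index.
       */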
      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO to clear it out for our next collection. */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

void
brw_setup_tex_for_precompile(const struct gen_device_info *devinfo,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, but also to make sure that adding small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
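/* For illustration only (hypothetical counts): a stage with 2 textures and
 * 1 UBO, no SSBOs/atomics/images, no gather, and shader time disabled,
 * starting at offset 0, gets texture_start = 0, ubo_start = 2,
 * pull_constants_start = 3, plane_start = {0, 4, 6}, and the function
 * returns 8; every unused group reads 0xd0d0d0d0.
 */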
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}

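/* Every brw_*_prog_key type stores program_string_id at a different offset;
 * the offsetof table below lets us set it without knowing which key type we
 * were handed.
 */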
void
brw_prog_key_set_id(union brw_any_prog_key *key, gl_shader_stage stage,
                    unsigned id)
{
   static const unsigned stage_offsets[] = {
      offsetof(struct brw_vs_prog_key, program_string_id),
      offsetof(struct brw_tcs_prog_key, program_string_id),
      offsetof(struct brw_tes_prog_key, program_string_id),
      offsetof(struct brw_gs_prog_key, program_string_id),
      offsetof(struct brw_wm_prog_key, program_string_id),
      offsetof(struct brw_cs_prog_key, program_string_id),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_offsets));
   *(unsigned*)((uint8_t*)key + stage_offsets[stage]) = id;
}