i965: Make brw_vs_outputs_written static.
[mesa.git] src/mesa/drivers/dri/i965/brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "main/glspirv.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir/nir_serialize.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

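/* Assign uniform variable locations and lower uniform access to offset-based
 * intrinsics, using the scalar (packed bytes) layout for scalar back-ends and
 * the vec4 (16-byte slot) layout otherwise.
 */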
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

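/* Translate a program to NIR: linked GLSL or SPIR-V shaders go through
 * glsl_to_nir()/_mesa_spirv_to_nir(), while ARB programs and fixed-function
 * code go through prog_to_nir().  The result is then run through the common
 * i965 lowering passes.
 */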
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL/Mesa IR or SPIR-V to NIR */
   if (shader_prog) {
      if (shader_prog->_LinkedShaders[stage]->spirv_data)
         nir = _mesa_spirv_to_nir(ctx, shader_prog, stage, options);
      else
         nir = glsl_to_nir(shader_prog, stage, options);
      assert(nir);

      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* Lower PatchVerticesIn from system value to uniform.  This needs to
    * happen before brw_preprocess_nir, since that will lower system values
    * to intrinsics.
    *
    * We only do this for TES if no TCS is present, since otherwise we know
    * the number of vertices in the patch at link time and can lower it
    * directly to a constant.  That case is handled below by
    * nir_lower_tes_patch_vertices, which needs to run after
    * brw_preprocess_nir has turned the system values into intrinsics.
    */
   const bool lower_patch_vertices_in_to_uniform =
      (stage == MESA_SHADER_TESS_CTRL && brw->screen->devinfo.gen >= 8) ||
      (stage == MESA_SHADER_TESS_EVAL &&
       !shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);

   if (lower_patch_vertices_in_to_uniform)
      brw_nir_lower_patch_vertices_in_to_uniform(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_TESS_EVAL && !lower_patch_vertices_in_to_uniform) {
      assert(shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);
      struct gl_linked_shader *linked_tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t patch_vertices = linked_tcs->Program->info.tess.tcs_vertices_out;
      nir_lower_tes_patch_vertices(nir, patch_vertices);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram(struct gl_context *ctx,
                             struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program(ctx, prog);
}

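/* Called by core Mesa when an ARB program's string changes: (re)compile the
 * program to NIR and kick off a precompile so the first draw with it doesn't
 * have to pay the full compile cost.
 */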
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

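/* Implement glMemoryBarrier(): translate the GL barrier bits into the
 * PIPE_CONTROL flush/invalidate bits needed to make prior shader writes
 * visible to the units named by each barrier.
 */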
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

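/* Make prior rendering visible to non-coherent framebuffer fetch: flush the
 * render cache and, on Gen6+, invalidate the texture cache so the sampler
 * sees the just-rendered pixels.
 */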
static void
brw_framebuffer_fetch_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.EXT_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

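/* Hand back (through *scratch_bo) a scratch BO of at least the given size,
 * reallocating only if the existing one is too small.
 */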
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes per thread, for the maximum number of threads the stage can have in
 * flight on this hardware.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *    "Scratch Space per slice is computed based on 4 sub-slices. SW
       *     must allocate scratch space enough so that each slice has 4
       *     slices allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices, and
       * we wish to view that there are 4 subslices per slice instead of the
       * actual number of subslices per slice.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * brw->screen->devinfo.num_slices;

      unsigned scratch_ids_per_subslice;
      if (devinfo->is_haswell) {
         /* WaCSScratchSize:hsw
          *
          * Haswell's scratch space address calculation appears to be sparse
          * rather than tightly packed.  The Thread ID has bits indicating
          * which subslice, EU within a subslice, and thread within an EU it
          * is.  There's a maximum of two slices and two subslices, so these
          * can be stored with a single bit.  Even though there are only 10
          * EUs per subslice, this is stored in 4 bits, so there's an
          * effective maximum value of 16 EUs.  Similarly, although there are
          * only 7 threads per EU, this is stored in a 3 bit number, giving
          * an effective maximum value of 8 threads per EU.
          *
          * This means that we need to use 16 * 8 instead of 10 * 7 for the
          * number of threads per subslice.
          */
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->is_cherryview) {
         /* Cherryview devices have either 6 or 8 EUs per subslice, and each
          * EU has 7 threads.  The 6 EU devices appear to calculate thread
          * IDs as if it had 8 EUs.
          */
         scratch_ids_per_subslice = 8 * 7;
      } else {
         scratch_ids_per_subslice = devinfo->max_cs_threads;
      }

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count);
}

void brwInitFragProgFuncs(struct dd_function_table *functions)
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->FramebufferFetchBarrier = brw_framebuffer_fetch_barrier;
}

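/* CPU-side accumulation of one shader_time entry; matches the three
 * BRW_SHADER_TIME_STRIDE-spaced counters read back in
 * brw_collect_shader_time().
 */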
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

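/* Allocate the BO and bookkeeping arrays for INTEL_DEBUG=shader_time.  Each
 * entry occupies three BRW_SHADER_TIME_STRIDE-sized slots in the BO (time,
 * written, reset), hence the "* 3" below.
 */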
void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract because we're turning the value to an int. */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

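/* Print a sorted summary of the accumulated shader times.  Entries whose
 * timing writes raced with a reset are scaled by (written + reset) / written
 * to estimate the time the lost reports would have contributed.
 */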
static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID      cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to the index of the time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

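/* Read the shader_time BO back, fold each entry's (time, written, reset)
 * counters into brw->shader_time.cumulative, and zero the BO for the next
 * collection interval.
 */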
static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection. */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

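/* Guess sampler swizzles for a precompile: without shader channel select
 * (Ivybridge and older), shadow samplers are assumed to use the default
 * DEPTH_TEXTURE_MODE swizzle; everything else is assumed unswizzled.
 */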
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, and to ensure that adding even a small offset to them will trigger
 * our asserts that surface indices are < BRW_MAX_SURFACES.
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}

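/* Serialize the program's NIR into prog->driver_cache_blob (e.g. for the
 * disk shader cache); brw_program_deserialize_nir() below restores it.
 */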
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
{
   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, prog->nir);
   prog->driver_cache_blob = ralloc_size(NULL, writer.size);
   memcpy(prog->driver_cache_blob, writer.data, writer.size);
   prog->driver_cache_blob_size = writer.size;
   blob_finish(&writer);
}

845
846 void
847 brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
848 gl_shader_stage stage)
849 {
850 if (!prog->nir) {
851 assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
852 const struct nir_shader_compiler_options *options =
853 ctx->Const.ShaderCompilerOptions[stage].NirOptions;
854 struct blob_reader reader;
855 blob_reader_init(&reader, prog->driver_cache_blob,
856 prog->driver_cache_blob_size);
857 prog->nir = nir_deserialize(NULL, options, &reader);
858 }
859
860 if (prog->driver_cache_blob) {
861 ralloc_free(prog->driver_cache_blob);
862 prog->driver_cache_blob = NULL;
863 prog->driver_cache_blob_size = 0;
864 }
865 }