i965: Introduce a "memory zone" concept on BO allocation.
src/mesa/drivers/dri/i965/brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "main/glspirv.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir/nir_serialize.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

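/* Assign locations to this shader's uniforms and lower uniform access to
 * offset-based load_uniform intrinsics.  Scalar stages pack uniforms with
 * byte granularity; vec4 stages use vec4-aligned slots.
 */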
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

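/* Build the NIR for a shader stage: translate the linked GLSL/SPIR-V shader
 * (or, for ARB programs and fixed function, the Mesa IR) into NIR, then run
 * the device-independent cleanup and lowering passes below.
 */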
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL/Mesa IR or SPIR-V to NIR */
   if (shader_prog) {
      if (shader_prog->_LinkedShaders[stage]->spirv_data)
         nir = _mesa_spirv_to_nir(ctx, shader_prog, stage, options);
      else
         nir = glsl_to_nir(shader_prog, stage, options);
      assert(nir);

      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* Lower PatchVerticesIn from a system value to a uniform.  This needs to
    * happen before brw_preprocess_nir, since that will lower system values
    * to intrinsics.
    *
    * We only do this for TES if no TCS is present, since otherwise we know
    * the number of vertices in the patch at link time and can lower it
    * directly to a constant.  That lowering, nir_lower_tes_patch_vertices,
    * needs to run after brw_preprocess_nir has turned the system values
    * into intrinsics.
    */
   const bool lower_patch_vertices_in_to_uniform =
      (stage == MESA_SHADER_TESS_CTRL && brw->screen->devinfo.gen >= 8) ||
      (stage == MESA_SHADER_TESS_EVAL &&
       !shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);

   if (lower_patch_vertices_in_to_uniform)
      brw_nir_lower_patch_vertices_in_to_uniform(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_TESS_EVAL && !lower_patch_vertices_in_to_uniform) {
      assert(shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);
      struct gl_linked_shader *linked_tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t patch_vertices = linked_tcs->Program->info.tess.tcs_vertices_out;
      nir_lower_tes_patch_vertices(nir, patch_vertices);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

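/* Program IDs are never reused, and brwProgramStringNotify assigns a fresh
 * one whenever a program string changes, so an ID mismatch also flags that
 * previously compiled variants are stale.
 */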
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram(struct gl_context *ctx,
                             struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program(ctx, prog);
}


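/* Called by core Mesa whenever an ARB program string changes (including the
 * fixed-function programs it generates): rebuild the NIR, flag the new
 * program state, and precompile with a default key.
 */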
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

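/* Implement glMemoryBarrier(): translate the GL barrier bits into the
 * PIPE_CONTROL flush/invalidate bits needed to make prior shader writes
 * visible to each consumer.
 */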
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

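/* When coherent framebuffer fetch is advertised, the hardware keeps
 * render-target reads coherent and no flush is required.  Otherwise, flush
 * the render cache (and, on Gen6+, also invalidate the texture cache) so
 * previous rendering is visible to the non-coherent fetch path.
 */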
static void
brw_framebuffer_fetch_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.EXT_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

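/* Hand back a scratch BO of at least \p size bytes, reallocating the shared
 * one if it's too small.  BRW_MEMZONE_SCRATCH is part of the memory-zone
 * scheme this change introduces, which groups BOs with similar placement
 * requirements in the virtual address space.
 */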
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo =
         brw_bo_alloc(brw->bufmgr, "scratch bo", size, BRW_MEMZONE_SCRATCH);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes per thread, for the maximum number of threads the stage can have in
 * flight (computed from the device info below).
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *  "Scratch Space per slice is computed based on 4 sub-slices.  SW must
       *   allocate scratch space enough so that each slice has 4 slices
       *   allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices in the
       * device, but for sizing purposes we must behave as if every slice
       * had 4 subslices, regardless of how many are physically present.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * brw->screen->devinfo.num_slices;

      unsigned scratch_ids_per_subslice;
      if (devinfo->is_haswell) {
         /* WaCSScratchSize:hsw
          *
          * Haswell's scratch space address calculation appears to be sparse
          * rather than tightly packed.  The Thread ID has bits indicating
          * which subslice, EU within a subslice, and thread within an EU it
          * is.  There's a maximum of two slices and two subslices, so these
          * can be stored with a single bit.  Even though there are only 10
          * EUs per subslice, this is stored in 4 bits, so there's an
          * effective maximum value of 16 EUs.  Similarly, although there are
          * only 7 threads per EU, this is stored in a 3 bit number, giving
          * an effective maximum value of 8 threads per EU.
          *
          * This means that we need to use 16 * 8 instead of 10 * 7 for the
          * number of threads per subslice.
          */
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->is_cherryview) {
         /* Cherryview devices have either 6 or 8 EUs per subslice, and each
          * EU has 7 threads.  The 6 EU devices appear to calculate thread
          * IDs as if it had 8 EUs.
          */
         scratch_ids_per_subslice = 8 * 7;
      } else {
         scratch_ids_per_subslice = devinfo->max_cs_threads;
      }

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, BRW_MEMZONE_SCRATCH);
}

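/* Plug the i965 program hooks into the driver function table.  TNL must have
 * installed _tnl_program_string first, since brwProgramStringNotify chains to
 * it for vertex programs.
 */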
void brwInitFragProgFuncs(struct dd_function_table *functions)
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->FramebufferFetchBarrier = brw_framebuffer_fetch_barrier;
}

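/* Accumulated counters for one shader_time entry, mirroring the three
 * values each shader writes into its slot of the shader_time BO.
 */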
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3,
                   BRW_MEMZONE_OTHER);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

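/* qsort comparator: sorts an array of pointers into scaled[] by the
 * pointed-to cycle count, ascending.
 */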
static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We can't simply subtract: truncating the 64-bit difference to int
    * could overflow and yield the wrong sign.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
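      /* Measurements that had to be thrown away (e.g. because the timestamp
       * appeared to wrap) are counted in "reset" rather than "written", so
       * extrapolate the measured time across those intervals too.
       */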
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID                  cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Map the sorted pointer back to its index in scaled[] so we can look
       * up the matching name, ID, and type.
       */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

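   /* Each entry holds three uint32 counters (time, written, reset) spaced
    * BRW_SHADER_TIME_STRIDE bytes apart; dividing the byte offsets by 4
    * converts them to uint32_t indices.
    */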
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

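/* Fold the latest GPU-written counters into the CPU-side totals and print a
 * report at most once per second.
 */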
void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

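/* The param and pull_param arrays are ralloc'd separately from prog_data
 * itself, so they need an explicit free when the owning cache entry dies.
 */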
void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

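/* Fill in the sampler-swizzle part of a precompile key with the most likely
 * values: shadow samplers get the XXX1 DEPTH_TEXTURE_MODE swizzle on hardware
 * without shader channel select, everything else gets identity XYZW.
 */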
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, and also so that adding small offsets to them will trip our asserts
 * that surface indices are < BRW_MAX_SURFACES.
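 *
 * For example (numbers derived from the code below): a stage using two
 * textures and one UBO, with no SSBOs, images, gather, or shader_time, gets
 * texture_start = 0, ubo_start = 2, pull_constants_start = 3, and
 * plane_start[] = { 0, 4, 6 }, returning 8 as the next free offset.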
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}

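/* Snapshot prog->nir into prog->driver_cache_blob so the disk shader cache
 * can store it; brw_program_deserialize_nir() is the inverse.
 */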
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
{
   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, prog->nir);
   prog->driver_cache_blob = ralloc_size(NULL, writer.size);
   memcpy(prog->driver_cache_blob, writer.data, writer.size);
   prog->driver_cache_blob_size = writer.size;
   blob_finish(&writer);
}

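/* Rebuild prog->nir from the cached blob if we don't already have NIR, then
 * free the blob, which is no longer needed.
 */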
847
848 void
849 brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
850 gl_shader_stage stage)
851 {
852 if (!prog->nir) {
853 assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
854 const struct nir_shader_compiler_options *options =
855 ctx->Const.ShaderCompilerOptions[stage].NirOptions;
856 struct blob_reader reader;
857 blob_reader_init(&reader, prog->driver_cache_blob,
858 prog->driver_cache_blob_size);
859 prog->nir = nir_deserialize(NULL, options, &reader);
860 }
861
862 if (prog->driver_cache_blob) {
863 ralloc_free(prog->driver_cache_blob);
864 prog->driver_cache_blob = NULL;
865 prog->driver_cache_blob_size = 0;
866 }
867 }