i965/icl: Update the assert in brw_memory_barrier()
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir/nir_serialize.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

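/* Assign uniform locations and lower uniform access to explicit I/O, using
 * byte offsets sized for either the scalar or the vec4 backend.
 */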
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

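/* Translate a GLSL IR or ARB-program Mesa IR shader into NIR and run the
 * device-independent lowering passes every i965 shader needs before it
 * reaches the backend compiler.
 */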
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* Lower PatchVerticesIn from a system value to a uniform.  This needs to
    * happen before brw_preprocess_nir, since that will lower system values
    * to intrinsics.
    *
    * We only do this for TES if no TCS is present, since otherwise we know
    * the number of vertices in the patch at link time and we can lower it
    * directly to a constant.  We do that in nir_lower_tes_patch_vertices,
    * which needs to run after brw_preprocess_nir has turned the system
    * values into intrinsics.
    */
   const bool lower_patch_vertices_in_to_uniform =
      (stage == MESA_SHADER_TESS_CTRL && brw->screen->devinfo.gen >= 8) ||
      (stage == MESA_SHADER_TESS_EVAL &&
       !shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);

   if (lower_patch_vertices_in_to_uniform)
      brw_nir_lower_patch_vertices_in_to_uniform(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_TESS_EVAL && !lower_patch_vertices_in_to_uniform) {
      assert(shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);
      struct gl_linked_shader *linked_tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t patch_vertices = linked_tcs->Program->info.tess.tcs_vertices_out;
      nir_lower_tes_patch_vertices(nir, patch_vertices);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

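/* Re-gather shader info from the NIR and copy it into the gl_program,
 * preserving the program's original name and label strings.
 */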
void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

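/* Hand out a screen-unique program ID.  This is atomic since multiple
 * contexts may share a single intel_screen.
 */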
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram(struct gl_context *ctx,
                             struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program(ctx, prog);
}


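/* Called by Mesa core when the source string of an ARB vertex or fragment
 * program changes: assigns a fresh program ID, rebuilds the NIR for the
 * program, and kicks off a precompile.
 */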
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

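/* Implements glMemoryBarrier(): translates the GL barrier bits into the
 * PIPE_CONTROL cache flushes and invalidations the hardware needs.
 */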
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

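/* Blend barrier for advanced blending.  When coherent framebuffer fetch is
 * not supported, framebuffer reads go through the sampler, so the render
 * cache has to be flushed and the texture cache invalidated first.
 */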
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

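/* Return a scratch BO of at least \p size bytes in *scratch_bo, reallocating
 * (and dropping the old reference) only if the current one is too small.
 */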
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the maximum number of threads the hardware can run for that
 * stage.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *    "Scratch Space per slice is computed based on 4 sub-slices.  SW
       *     must allocate scratch space enough so that each slice has 4
       *     slices allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices, but
       * here we want to pretend there are 4 subslices per slice regardless
       * of the actual number of subslices per slice.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * brw->screen->devinfo.num_slices;

      /* WaCSScratchSize:hsw
       *
       * Haswell's scratch space address calculation appears to be sparse
       * rather than tightly packed.  The Thread ID has bits indicating
       * which subslice, EU within a subslice, and thread within an EU
       * it is.  There's a maximum of two slices and two subslices, so these
       * can be stored with a single bit.  Even though there are only 10 EUs
       * per subslice, this is stored in 4 bits, so there's an effective
       * maximum value of 16 EUs.  Similarly, although there are only 7
       * threads per EU, this is stored in a 3 bit number, giving an effective
       * maximum value of 8 threads per EU.
       *
       * This means that we need to use 16 * 8 instead of 10 * 7 for the
       * number of threads per subslice.
       */
      const unsigned scratch_ids_per_subslice =
         devinfo->is_haswell ? 16 * 8 : devinfo->max_cs_threads;

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, 4096);
}

void brwInitFragProgFuncs(struct dd_function_table *functions)
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->BlendBarrier = brw_blend_barrier;
}

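/* Accumulated values for one shader_time entry: total cycles, and how many
 * times the entry was written and reset by the shader.
 */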
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

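/* Allocate the BO and CPU-side arrays used for shader time tracking.  Each
 * entry gets three BRW_SHADER_TIME_STRIDE slots in the BO: time, written,
 * and reset.
 */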
void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

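/* qsort comparator for pointers to 64-bit times; sorts ascending. */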
static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract the values, because the difference of two
    * uint64_ts doesn't fit in the int we have to return.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

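/* Print a per-shader breakdown of the collected cycle counts, sorted by
 * time, followed by per-stage totals.  Counts are scaled up to account for
 * entries that were reset mid-collection.
 */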
static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID      cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Map the sorted pointer back to an index into the scaled times. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

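/* Read back and accumulate the per-entry counters from the shader_time BO,
 * then zero the BO for the next collection period.
 */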
static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}

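/* Serialize the program's NIR into prog->driver_cache_blob so it can be
 * stored in the on-disk shader cache.
 */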
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
{
   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, prog->nir);
   prog->driver_cache_blob = ralloc_size(NULL, writer.size);
   memcpy(prog->driver_cache_blob, writer.data, writer.size);
   prog->driver_cache_blob_size = writer.size;
   blob_finish(&writer);
}

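/* Recreate prog->nir from the serialized blob if it isn't already present,
 * then free the blob.
 */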
831
832 void
833 brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
834 gl_shader_stage stage)
835 {
836 if (!prog->nir) {
837 assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
838 const struct nir_shader_compiler_options *options =
839 ctx->Const.ShaderCompilerOptions[stage].NirOptions;
840 struct blob_reader reader;
841 blob_reader_init(&reader, prog->driver_cache_blob,
842 prog->driver_cache_blob_size);
843 prog->nir = nir_deserialize(NULL, options, &reader);
844 }
845
846 if (prog->driver_cache_blob) {
847 ralloc_free(prog->driver_cache_blob);
848 prog->driver_cache_blob = NULL;
849 prog->driver_cache_blob_size = 0;
850 }
851 }