i965: compute scratch space size correctly for Gen9+
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir/nir_serialize.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

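/* Assign uniform variable locations and lower uniform access to explicit
 * offsets.  The scalar back-end packs uniforms tightly (sizes counted in
 * scalar bytes), while the vec4 back-end pads every element out to a
 * vec4 slot; the two type_size callbacks below encode that difference.
 */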
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

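/* Translate a program into NIR and run the common i965 lowering on it:
 * GLSL IR (for linked shaders) or Mesa IR (for ARB programs and fixed
 * function) is converted to NIR, brw_preprocess_nir runs the shared
 * optimization loop, fragment shaders get the window-position Y-flip
 * lowering, and finally uniforms are lowered for the target back-end.
 */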
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   (gl_state_index *) wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

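/* Hand out program IDs that are unique for the lifetime of the screen;
 * these identify programs in debug output and in program cache keys, so a
 * plain atomic increment is all that's needed.
 */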
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program( ctx, prog );
}


static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

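/* Implement glMemoryBarrier() by mapping the GL barrier bits onto
 * PIPE_CONTROL cache controls.  The baseline below (data cache flush plus
 * CS stall) makes sure prior shader writes through the data port have
 * landed before the per-barrier invalidations expose them to consumers.
 */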
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(devinfo->gen >= 7 && devinfo->gen <= 10);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

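/* Barrier between framebuffer writes and subsequent framebuffer reads
 * (e.g. non-coherent framebuffer fetch): flush the render cache so earlier
 * color writes reach memory, then invalidate the texture cache so the
 * reads observe them.  With coherent MESA_shader_framebuffer_fetch the
 * hardware keeps the caches coherent itself, so nothing is emitted.
 */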
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

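/* Make sure *scratch_bo points at a buffer of at least `size` bytes,
 * dropping and reallocating the old BO if it is too small.  Contents are
 * not preserved across reallocation, which is fine for scratch data.
 */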
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the maximum number of threads the hardware can run for that
 * stage.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       * "Scratch Space per slice is computed based on 4 sub-slices. SW must
       *  allocate scratch space enough so that each slice has 4 slices
       *  allowed."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices across
       * all slices; per the note above we instead pretend each slice has 4
       * subslices, regardless of how many it actually has.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * brw->screen->devinfo.num_slices;

      /* WaCSScratchSize:hsw
       *
       * Haswell's scratch space address calculation appears to be sparse
       * rather than tightly packed.  The Thread ID has bits indicating
       * which subslice, EU within a subslice, and thread within an EU
       * it is.  There's a maximum of two slices and two subslices, so each
       * of those takes a single bit.  Even though there are only 10 EUs
       * per subslice, the EU index is stored in 4 bits, so the effective
       * maximum is 16 EUs.  Similarly, although there are only 7 threads
       * per EU, the thread index is stored in a 3-bit field, for an
       * effective maximum of 8 threads per EU.
       *
       * This means that we need to use 16 * 8 instead of 10 * 7 for the
       * number of threads per subslice.
       */
      const unsigned scratch_ids_per_subslice =
         devinfo->is_haswell ? 16 * 8 : devinfo->max_cs_threads;

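      /* Worked example (hypothetical Gen9 part): with 2 slices of 3
       * subslices each, subslice_total is 6, but the workaround above makes
       * us size for 4 * 2 = 8 subslices.  If max_cs_threads were 56, that
       * would give 8 * 56 = 448 scratch slots, and the BO allocated below
       * would be per_thread_size * 448 bytes.
       */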
      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, 4096);
}

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->BlendBarrier = brw_blend_barrier;
}

struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

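/* The shader_time BO holds three counters per entry (time, written, and
 * reset), hence the "* 3" in the allocation below.  Each counter gets its
 * own BRW_SHADER_TIME_STRIDE-byte slot, presumably so the shaders'
 * atomic updates to different counters land in separate cache lines.
 */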
void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract: truncating the 64-bit difference to the int
    * return type could give the wrong sign.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
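      /* `written` counts the samples that actually accumulated, and `reset`
       * the ones lost when the counter was cleared, so scale the measured
       * time by (written + reset) / written to estimate the true total.
       */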
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID          cycles spent        %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to its index in scaled[], which
       * is the time to print.
       */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

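      /* Each entry is three BRW_SHADER_TIME_STRIDE-byte slots holding time,
       * written, and reset, in that order; dividing the byte offsets by 4
       * turns them into uint32_t indices.
       */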
      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

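/* Fill in the sampler swizzle portion of a precompile program key with
 * the most likely values: identity swizzles on hardware with shader
 * channel select (Haswell and Gen8+), and the X, X, X, 1 depth-texture
 * swizzle for shadow samplers on older parts.  If the real state turns
 * out to differ, the precompiled variant is simply wasted and a new
 * variant gets compiled at draw time.
 */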
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 both to make it obvious that
 * they're unused and so that adding small offsets to them still trips our
 * asserts that surface indices are < BRW_MAX_SURFACES.
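 *
 * For example (hypothetical program, offsets starting at 0): a shader with
 * two textures, one UBO, no SSBOs/atomics/images, no gather, and
 * shader_time disabled would get
 *
 *    texture_start        = 0   (2 entries; also plane_start[0])
 *    ubo_start            = 2   (1 entry)
 *    pull_constants_start = 3   (1 entry)
 *    plane_start[1]       = 4   (2 entries)
 *    plane_start[2]       = 6   (2 entries)
 *
 * returning 8 as the next free offset; every other group reads 0xd0d0d0d0.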
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}

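/* Serialize prog->nir into prog->driver_cache_blob so it can be stored in
 * the disk shader cache; brw_program_deserialize_nir below reconstructs
 * prog->nir from that blob (on a cache hit) and then frees the blob.
 */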
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
{
   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, prog->nir);
   prog->driver_cache_blob = ralloc_size(NULL, writer.size);
   memcpy(prog->driver_cache_blob, writer.data, writer.size);
   prog->driver_cache_blob_size = writer.size;
   blob_finish(&writer);
}

void
brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
                            gl_shader_stage stage)
{
   if (!prog->nir) {
      assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
      const struct nir_shader_compiler_options *options =
         ctx->Const.ShaderCompilerOptions[stage].NirOptions;
      struct blob_reader reader;
      blob_reader_init(&reader, prog->driver_cache_blob,
                       prog->driver_cache_blob_size);
      prog->nir = nir_deserialize(NULL, options, &reader);
   }

   if (prog->driver_cache_blob) {
      ralloc_free(prog->driver_cache_blob);
      prog->driver_cache_blob = NULL;
      prog->driver_cache_blob_size = 0;
   }
}