i965: Calculate thread_count in brw_alloc_stage_scratch
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

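/* Assign uniform variable locations and lower uniform access to offsets.
 * Scalar stages lay uniforms out in 4-byte (scalar) slots, while vec4
 * stages use 16-byte (vec4) slots, so the type-size callback passed to the
 * NIR helpers differs between the two.
 */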
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

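/**
 * Translate a program into NIR and run the common i965 lowering passes.
 *
 * GLSL programs come in through glsl_to_nir(); ARB assembly and
 * fixed-function programs come in through prog_to_nir(). Either way, the
 * result is preprocessed by the backend compiler and has its uniforms
 * lowered before being handed back.
 */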
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   (gl_state_index *) wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}

void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

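/* Hand out a screen-wide unique program ID; the atomic increment keeps IDs
 * unique even when several contexts share one screen.
 */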
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware! prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program. malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either. If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL. That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program( ctx, prog );
}


static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end). Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

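/**
 * Implement glMemoryBarrier() by translating the GL barrier bits into the
 * corresponding PIPE_CONTROL cache flushes and invalidations. A data cache
 * flush plus CS stall is always emitted, so that prior shader writes land
 * before any of the requested invalidations take effect.
 */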
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(devinfo->gen >= 7 && devinfo->gen <= 10);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

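/**
 * Implement the blending barrier: flush the render target and invalidate
 * the texture cache so that earlier framebuffer writes are visible to
 * later framebuffer fetches. When coherent framebuffer fetch is exposed
 * (MESA_shader_framebuffer_fetch), no flushing is required.
 */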
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

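/**
 * Grow-only helper: if the current scratch BO is missing or smaller than
 * \p size, replace it with a freshly allocated BO (the old contents are
 * not copied over).
 */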
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the maximum number of threads the stage can have in flight,
 * which is computed below from the device info.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch (stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      const unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* WaCSScratchSize:hsw
       *
       * Haswell's scratch space address calculation appears to be sparse
       * rather than tightly packed. The Thread ID has bits indicating
       * which subslice, EU within a subslice, and thread within an EU
       * it is. There's a maximum of two slices and two subslices, so these
       * can be stored with a single bit. Even though there are only 10 EUs
       * per subslice, this is stored in 4 bits, so there's an effective
       * maximum value of 16 EUs. Similarly, although there are only 7
       * threads per EU, this is stored in a 3 bit number, giving an
       * effective maximum value of 8 threads per EU.
       *
       * This means that we need to use 16 * 8 instead of 10 * 7 for the
       * number of threads per subslice.
       */
      const unsigned scratch_ids_per_subslice =
         devinfo->is_haswell ? 16 * 8 : devinfo->max_cs_threads;

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

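   /* Illustrative sizing (numbers hypothetical): with thread_count == 280
    * and per_thread_size == 2048, the allocation below is
    * 280 * 2048 = 573440 bytes (560 kB), with 4 kB alignment.
    */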
   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, 4096);
}

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->BlendBarrier = brw_blend_barrier;
}

struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

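/* The shader_time BO holds max_entries records of three counters each
 * (time, written, reset), with every counter padded out to
 * BRW_SHADER_TIME_STRIDE bytes -- hence the "* 3" in the allocation below.
 * The parallel arrays hold per-entry metadata used when reporting.
 */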
void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract, because truncating the difference to an int
    * could give the wrong sign.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles) %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

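      /* "written" counts samples that actually accumulated into the
       * counter, while "reset" counts samples that were thrown away.
       * Extrapolate the total on the assumption that the discarded samples
       * cost about as much, on average, as the recorded ones.
       */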
      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type ID cycles spent %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to the index of the time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering. We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
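      /* Each record is three counters spaced BRW_SHADER_TIME_STRIDE bytes
       * apart; dividing the byte offset by 4 converts it into an index
       * into the uint32_t mapping.
       */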
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO to clear it out for our next collection. */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}

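/* Accumulate the latest GPU-written counters, then print a report at most
 * about once per second.
 */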
void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

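/**
 * Seed the sampler swizzle portion of a precompile program key. Without
 * shader channel select (pre-Haswell), shadow samplers need the
 * (X, X, X, 1) swizzle implied by the default DEPTH_TEXTURE_MODE; color
 * samplers are assumed to need no swizzling.
 */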
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, but also so that adding small offsets to them will still trigger
 * our asserts that surface indices are < BRW_MAX_SURFACES.
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos) {
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_abos) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}