/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "brw_shader.h"
#include "brw_nir.h"
#include "intel_batchbuffer.h"

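/**
 * Assign locations to uniform variables and lower uniform access to byte
 * offsets, using scalar or vec4 slot sizes to match the backend that will
 * consume this shader.
 */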
static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}

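/**
 * Translate a shader into NIR for the i965 backend: convert either GLSL IR
 * (linked shader programs) or Mesa IR (ARB programs and fixed function) to
 * NIR, run the common brw_preprocess_nir() optimizations, and then apply the
 * driver-specific lowering passes the stage needs.
 */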
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   (void)progress;

   nir = brw_preprocess_nir(brw->intelScreen->compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };
      _mesa_add_state_reference(prog->Parameters,
                                (gl_state_index *) wpos_options.state_tokens);

      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
   }

   NIR_PASS(progress, nir, nir_lower_system_values);
   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   if (shader_prog) {
      NIR_PASS_V(nir, nir_lower_samplers, shader_prog);
      NIR_PASS_V(nir, nir_lower_atomics, shader_prog);
   }

   return nir;
}

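/**
 * Hand out a screen-wide unique program ID, used to identify compiled
 * shaders in debug output.  The counter lives in the shared intel_screen,
 * so take a mutex in case multiple contexts compile programs concurrently.
 */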
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_lock(&m);
   unsigned id = screen->program_id++;
   pthread_mutex_unlock(&m);
   return id;
}

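/**
 * Allocate the driver's subclass of gl_program for the given target, so
 * i965-specific fields (such as the debug program id) ride along with the
 * core Mesa program structure.
 */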
static struct gl_program *brwNewProgram( struct gl_context *ctx,
                                         GLenum target,
                                         GLuint id )
{
   struct brw_context *brw = brw_context(ctx);

   switch (target) {
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_vertex_program *prog = CALLOC_STRUCT(brw_vertex_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      }
      else
         return NULL;
   }

   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_fragment_program *prog = CALLOC_STRUCT(brw_fragment_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      }
      else
         return NULL;
   }

   case GL_GEOMETRY_PROGRAM_NV: {
      struct brw_geometry_program *prog = CALLOC_STRUCT(brw_geometry_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_TESS_CONTROL_PROGRAM_NV: {
      struct brw_tess_ctrl_program *prog = CALLOC_STRUCT(brw_tess_ctrl_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_TESS_EVALUATION_PROGRAM_NV: {
      struct brw_tess_eval_program *prog = CALLOC_STRUCT(brw_tess_eval_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_COMPUTE_PROGRAM_NV: {
      struct brw_compute_program *prog = CALLOC_STRUCT(brw_compute_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   default:
      unreachable("Unsupported target in brwNewProgram()");
   }
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   _mesa_delete_program( ctx, prog );
}


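/**
 * Called by core Mesa when an ARB vertex or fragment program string is
 * (re)loaded: flag the state change if the program is currently bound,
 * rebuild its NIR, and run the stage's precompile hook.
 */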
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *newFP = brw_fragment_program(fprog);
      const struct brw_fragment_program *curFP =
         brw_fragment_program_const(brw->fragment_program);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->intelScreen);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_fs_precompile(ctx, NULL, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *newVP = brw_vertex_program(vprog);
      const struct brw_vertex_program *curVP =
         brw_vertex_program_const(brw->vertex_program);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->intelScreen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_vs_precompile(ctx, NULL, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

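/**
 * Implement glMemoryBarrier() by translating the requested GL barrier bits
 * into the PIPE_CONTROL flushes and cache invalidations that make prior
 * shader writes visible to the corresponding consumers.
 */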
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(brw->gen >= 7 && brw->gen <= 9);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & GL_TEXTURE_UPDATE_BARRIER_BIT)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_DEPTH_CACHE_FLUSH |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (brw->gen == 7 && !brw->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

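/**
 * Blend/framebuffer-fetch barrier: flush the render cache and invalidate the
 * texture cache so previously rendered pixels become visible to subsequent
 * reads of the framebuffer.  Nothing is needed when the coherent
 * MESA_shader_framebuffer_fetch path is exposed.
 */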
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (brw->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

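/**
 * For every texture unit the program accesses with a RECT sampler, add a
 * STATE_TEXRECT_SCALE parameter that later texture lowering can use to
 * rescale the unnormalized coordinates.
 */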
void
brw_add_texrect_params(struct gl_program *prog)
{
   for (int texunit = 0; texunit < BRW_MAX_TEX_UNIT; texunit++) {
      if (!(prog->TexturesUsed[texunit] & (1 << TEXTURE_RECT_INDEX)))
         continue;

      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         texunit,
         0,
         0
      };

      _mesa_add_state_reference(prog->Parameters, (gl_state_index *)tokens);
   }
}

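/**
 * Return (through *scratch_bo) a scratch buffer of at least the requested
 * size, freeing and replacing any existing buffer that is too small.
 */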
void
brw_get_scratch_bo(struct brw_context *brw,
                   drm_intel_bo **scratch_bo, int size)
{
   drm_intel_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      drm_intel_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the given \p thread_count.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size,
                        unsigned thread_count)
{
   if (stage_state->per_thread_scratch < per_thread_size) {
      stage_state->per_thread_scratch = per_thread_size;

      if (stage_state->scratch_bo)
         drm_intel_bo_unreference(stage_state->scratch_bo);

      stage_state->scratch_bo =
         drm_intel_bo_alloc(brw->bufmgr, "shader scratch space",
                            per_thread_size * thread_count, 4096);
   }
}

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->NewShader = brw_new_shader;
   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->BlendBarrier = brw_blend_barrier;
}

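/*
 * The remainder of this file implements the shader-time debugging support
 * (INTEL_DEBUG=shader_time): instrumented shaders accumulate three counters
 * per entry into a single BO, which the driver periodically reads back,
 * sums, and prints.  Layout of the BO, per entry:
 *
 *    [0 * SHADER_TIME_STRIDE]  cycles accumulated
 *    [1 * SHADER_TIME_STRIDE]  number of reports written
 *    [2 * SHADER_TIME_STRIDE]  number of accumulator resets
 */

/** CPU-side running totals accumulated from the shader_time BO. */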
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      drm_intel_bo_alloc(brw->bufmgr, "shader time",
                         max_entries * SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract because we're turning the value to an int. */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles) %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

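/**
 * Print a per-shader and per-stage breakdown of the cycles accumulated so
 * far.  Entries whose accumulator was reset mid-run are scaled by
 * (written + reset) / written to estimate the cycles the lost reports would
 * have contributed.
 */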
static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type ID cycles spent %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointers times to a time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

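/**
 * Read the counters back from the shader_time BO, add them to the CPU-side
 * cumulative totals, and clear the BO so the next collection starts from
 * zero.
 */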
static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   drm_intel_bo_map(brw->shader_time.bo, true);
   void *bo_map = brw->shader_time.bo->virtual;

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   drm_intel_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw,
                          struct gl_shader_program *shader_prog,
                          struct gl_program *prog,
                          enum shader_time_shader_type type)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   int id = shader_prog ? shader_prog->Name : prog->Id;
   const char *name;
   if (id == 0) {
      name = "ff";
   } else if (!shader_prog) {
      name = "prog";
   } else if (shader_prog->Label) {
      name = ralloc_strdup(brw->shader_time.names, shader_prog->Label);
   } else {
      name = "glsl";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   drm_intel_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
   ralloc_free(prog_data->image_param);
}

void
brw_dump_ir(const char *stage, struct gl_shader_program *shader_prog,
            struct gl_linked_shader *shader, struct gl_program *prog)
{
   if (shader_prog) {
      if (shader->ir) {
         fprintf(stderr,
                 "GLSL IR for native %s shader %d:\n",
                 stage, shader_prog->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
         fprintf(stderr, "\n\n");
      }
   } else {
      fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
              stage, prog->Id, stage);
      _mesa_print_program(prog);
   }
}

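/**
 * Fill in the sampler-related portion of a precompile program key with the
 * most likely settings: identity swizzles everywhere, except that shadow
 * samplers on hardware without shader channel select (pre-Haswell) are
 * assumed to use the DEPTH_TEXTURE_MODE default of (X, X, X, 1).
 */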
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}