src/mesa/drivers/dri/i965/brw_program.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "brw_shader.h"
#include "brw_nir.h"
#include "intel_batchbuffer.h"

static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}
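
/* A minimal sketch (illustrative only, not used by the driver) of what the
 * two sizing callbacks above imply for a single vector uniform with 4-byte
 * components: scalar layouts pack by component, while the vec4 layout rounds
 * up to full 16-byte vec4 slots.
 */
static inline unsigned
example_scalar_uniform_bytes(unsigned components)
{
   return components * 4;               /* e.g. a vec3 takes 12 bytes */
}

static inline unsigned
example_vec4_uniform_bytes(unsigned components)
{
   return ((components + 3) / 4) * 16;  /* e.g. a vec3 still takes 16 bytes */
}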

nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* `progress` only feeds the NIR_PASS() calls below and is never read;
    * this keeps the compiler from warning about an otherwise unused variable.
    */
   (void)progress;

   nir = brw_preprocess_nir(brw->intelScreen->compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };
      _mesa_add_state_reference(prog->Parameters,
                                (gl_state_index *) wpos_options.state_tokens);

      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
   }

   NIR_PASS(progress, nir, nir_lower_system_values);
   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   if (shader_prog) {
      NIR_PASS_V(nir, nir_lower_samplers, shader_prog);
      NIR_PASS_V(nir, nir_lower_atomics, shader_prog);
   }

   return nir;
}

static unsigned
get_new_program_id(struct intel_screen *screen)
{
   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_lock(&m);
   unsigned id = screen->program_id++;
   pthread_mutex_unlock(&m);
   return id;
}

static struct gl_program *brwNewProgram( struct gl_context *ctx,
                                         GLenum target,
                                         GLuint id )
{
   struct brw_context *brw = brw_context(ctx);

   switch (target) {
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_vertex_program *prog = CALLOC_STRUCT(brw_vertex_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      }
      else
         return NULL;
   }

   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_fragment_program *prog = CALLOC_STRUCT(brw_fragment_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      }
      else
         return NULL;
   }

   case GL_GEOMETRY_PROGRAM_NV: {
      struct brw_geometry_program *prog = CALLOC_STRUCT(brw_geometry_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_TESS_CONTROL_PROGRAM_NV: {
      struct brw_tess_ctrl_program *prog = CALLOC_STRUCT(brw_tess_ctrl_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_TESS_EVALUATION_PROGRAM_NV: {
      struct brw_tess_eval_program *prog = CALLOC_STRUCT(brw_tess_eval_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   case GL_COMPUTE_PROGRAM_NV: {
      struct brw_compute_program *prog = CALLOC_STRUCT(brw_compute_program);
      if (prog) {
         prog->id = get_new_program_id(brw->intelScreen);

         return _mesa_init_gl_program(&prog->program.Base, target, id);
      } else {
         return NULL;
      }
   }

   default:
      unreachable("Unsupported target in brwNewProgram()");
   }
}

static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   _mesa_delete_program( ctx, prog );
}


static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *newFP = brw_fragment_program(fprog);
      const struct brw_fragment_program *curFP =
         brw_fragment_program_const(brw->fragment_program);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->intelScreen);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_fs_precompile(ctx, NULL, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *newVP = brw_vertex_program(vprog);
      const struct brw_vertex_program *curVP =
         brw_vertex_program_const(brw->vertex_program);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->intelScreen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_vs_precompile(ctx, NULL, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end). Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(brw->gen >= 7 && brw->gen <= 9);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & GL_TEXTURE_UPDATE_BARRIER_BIT)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_DEPTH_CACHE_FLUSH |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (brw->gen == 7 && !brw->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}
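
/* Worked example (illustrative only): a glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT)
 * call reaches this hook through ctx->Driver.MemoryBarrier (installed in
 * brwInitFragProgFuncs() below) and, on Haswell and later, emits a pipe
 * control with
 *
 *    PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_NO_WRITE |
 *    PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE
 *
 * Ivybridge additionally gets the render-target flush added at the end of
 * brw_memory_barrier() above.
 */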

void
brw_add_texrect_params(struct gl_program *prog)
{
   for (int texunit = 0; texunit < BRW_MAX_TEX_UNIT; texunit++) {
      if (!(prog->TexturesUsed[texunit] & (1 << TEXTURE_RECT_INDEX)))
         continue;

      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         texunit,
         0,
         0
      };

      _mesa_add_state_reference(prog->Parameters, (gl_state_index *)tokens);
   }
}
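
/* Background sketch (an assumption about core Mesa behavior, not spelled out
 * here): the STATE_TEXRECT_SCALE parameter added above is filled in with
 * 1/width and 1/height of the rectangle texture bound to that unit, which is
 * what lets generated shader code rescale non-normalized
 * GL_TEXTURE_RECTANGLE coordinates when needed.
 */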

void
brw_get_scratch_bo(struct brw_context *brw,
                   drm_intel_bo **scratch_bo, int size)
{
   drm_intel_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      drm_intel_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the given \p thread_count.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size,
                        unsigned thread_count)
{
   if (stage_state->per_thread_scratch < per_thread_size) {
      stage_state->per_thread_scratch = per_thread_size;

      if (stage_state->scratch_bo)
         drm_intel_bo_unreference(stage_state->scratch_bo);

      stage_state->scratch_bo =
         drm_intel_bo_alloc(brw->bufmgr, "shader scratch space",
                            per_thread_size * thread_count, 4096);
   }
}
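
/* Typical use (a sketch; exact field names vary per stage): state-upload code
 * passes the per-thread scratch size the compiler reported in
 * brw_stage_prog_data::total_scratch together with the stage's hardware
 * thread count, and the BO is only reallocated when a shader needs more
 * space per thread than any previous one.
 */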

void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->NewShader = brw_new_shader;
   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
}

struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      drm_intel_bo_alloc(brw->bufmgr, "shader time",
                         max_entries * SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* Don't just subtract: the 64-bit difference would be truncated by the
    * int return type.
    */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, " : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles) %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type ID cycles spent %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to the index of the time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering. We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   drm_intel_bo_map(brw->shader_time.bo, true);
   void *bo_map = brw->shader_time.bo->virtual;

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   drm_intel_bo_unmap(brw->shader_time.bo);
}
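
/* A minimal layout sketch (illustrative; the driver never calls this): each
 * shader_time entry owns three SHADER_TIME_STRIDE-sized slots in the BO,
 * which brw_collect_shader_time() above reads back as time, written and
 * reset, in that order.
 */
static inline unsigned
example_shader_time_slot_offset(unsigned entry, unsigned slot)
{
   /* slot: 0 = time, 1 = written, 2 = reset */
   return (entry * 3 + slot) * SHADER_TIME_STRIDE;
}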

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw,
                          struct gl_shader_program *shader_prog,
                          struct gl_program *prog,
                          enum shader_time_shader_type type)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   int id = shader_prog ? shader_prog->Name : prog->Id;
   const char *name;
   if (id == 0) {
      name = "ff";
   } else if (!shader_prog) {
      name = "prog";
   } else if (shader_prog->Label) {
      name = ralloc_strdup(brw->shader_time.names, shader_prog->Label);
   } else {
      name = "glsl";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = id;

   return shader_time_index;
}
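
/* Typical call site (a rough sketch, not taken from this file): compile paths
 * only request an index when shader-time debugging is enabled, along the
 * lines of
 *
 *    int st_index = -1;
 *    if (unlikely(INTEL_DEBUG & DEBUG_SHADER_TIME))
 *       st_index = brw_get_shader_time_index(brw, shader_prog, prog, ST_VS);
 */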

void
brw_destroy_shader_time(struct brw_context *brw)
{
   drm_intel_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
   ralloc_free(prog_data->image_param);
}

void
brw_dump_ir(const char *stage, struct gl_shader_program *shader_prog,
            struct gl_linked_shader *shader, struct gl_program *prog)
{
   if (shader_prog) {
      if (shader->ir) {
         fprintf(stderr,
                 "GLSL IR for native %s shader %d:\n",
                 stage, shader_prog->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
         fprintf(stderr, "\n\n");
      }
   } else {
      fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
              stage, prog->Id, stage);
      _mesa_print_program(prog);
   }
}

void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}