/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "brw_context.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;

   va_start(args, fmt);
   GLuint msg_id = 0;
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
   va_end(args);
}
static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;

   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
   }
   va_end(args);
}
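
/* Allocates the shared brw_compiler state: register-allocation sets for the
 * scalar (FS) and vec4 backends, per-stage GLSL compiler options, and the
 * NIR compiler options used when a stage is translated to NIR.
 */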
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      compiler->scalar_vs = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;

   /* In order to help allow for better CSE at the NIR level we tell NIR
    * to split all ffma instructions during opt_algebraic and we then
    * re-combine them as a later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (compiler->scalar_vs || brw_env_var_as_boolean("INTEL_USE_NIR", false)) {
      if (compiler->scalar_vs) {
         /* If we're using the scalar backend for vertex shaders, we need to
          * configure these accordingly.
          */
         compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
         compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
         compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = false;
      }

      compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions = nir_options;
   }

   compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions = nir_options;
   compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions = nir_options;

   return compiler;
}
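
/* Driver NewShader() hook: allocates a brw_shader wrapping the core
 * gl_shader state so the backend can hang its own per-shader data off it.
 */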
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
static bool
brw_shader_precompile(struct gl_context *ctx,
                      struct gl_shader_program *sh_prog)
{
   struct gl_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
   struct gl_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct gl_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   struct gl_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];

   if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
      return false;

   if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
      return false;

   if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
      return false;

   if (cs && !brw_cs_precompile(ctx, sh_prog, cs->Program))
      return false;

   return true;
}
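
/* Returns whether the given stage is compiled with the scalar (SIMD8/16)
 * backend: always true for fragment shaders, and true for vertex shaders
 * when the compiler was created with scalar_vs set.
 */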
static inline bool
is_scalar_shader_stage(struct brw_context *brw, int stage)
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      return true;
   case MESA_SHADER_VERTEX:
      return brw->intelScreen->compiler->scalar_vs;
   default:
      return false;
   }
}
static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_stage shader_type,
                           exec_list *ir)
{
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16;

   if (is_scalar_shader_stage(brw, shader_type)) {
      ops |= LOWER_UNPACK_UNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_PACK_SNORM_4x8;
   }

   if (brw->gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16.  For AOS code, no
       * lowering is needed.  For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (is_scalar_shader_stage(brw, shader_type)) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
              | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
           | LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}
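
/* Runs the backend-specific GLSL IR lowering passes on a linked shader
 * stage, inside a temporary ralloc context so that IR discarded by the
 * optimization loop below is freed rather than kept alive by the shader.
 */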
static void
process_glsl_ir(gl_shader_stage stage,
                struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   ralloc_adopt(mem_ctx, shader->ir);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->Stage, shader->ir);
   do_mat_op_to_vec(shader->ir);
   const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0;
   lower_instructions(shader->ir,
                      MOD_TO_FLOOR |
                      DIV_TO_MUL_RCP |
                      SUB_TO_ADD_NEG |
                      EXP_TO_EXP2 |
                      LOG_TO_LOG2 |
                      bitfield_insert |
                      LDEXP_TO_ARITH |
                      CARRY_TO_ARITH |
                      BORROW_TO_ARITH);
   /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   brw_lower_texture_gradients(brw, shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   if (options->NirOptions == NULL)
      brw_do_cubemap_normalize(shader->ir);
   lower_offset_arrays(shader->ir);
   brw_do_lower_unnormalized_offset(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   bool lowered_variable_indexing =
      lower_variable_index_to_cond_assign((gl_shader_stage)stage,
                                          shader->ir,
                                          options->EmitNoIndirectInput,
                                          options->EmitNoIndirectOutput,
                                          options->EmitNoIndirectTemp,
                                          options->EmitNoIndirectUniform);

   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
      perf_debug("Unsupported form of variable indexing in %s; falling "
                 "back to very inefficient code generation\n",
                 _mesa_shader_stage_to_abbrev(shader->Stage));
   }

   lower_ubo_reference(shader, shader->ir);

   bool progress;
   do {
      progress = false;

      if (is_scalar_shader_stage(brw, shader->Stage)) {
         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);
      }

      progress = do_lower_jumps(shader->ir, true, true,
                                true, /* main return */
                                false, /* continue */
                                false /* loops */
                                ) || progress;

      progress = do_common_optimization(shader->ir, true, true,
                                        options, ctx->Const.NativeIntegers) || progress;
   } while (progress);

   if (options->NirOptions != NULL)
      lower_output_reads(stage, shader->ir);

   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR back
    * to the permanent memory context, and free the temporary one (discarding any
    * junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      fprintf(stderr, "GLSL IR for linked %s program %d:\n",
              _mesa_shader_stage_to_string(shader->Stage),
              shader_prog->Name);
      _mesa_print_ir(stderr, shader->ir, NULL);
      fprintf(stderr, "\n");
   }
}
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_shader *shader = shProg->_LinkedShaders[stage];
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
                                shader->Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      process_glsl_ir((gl_shader_stage) stage, brw, shProg, shader);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->ir) {
         ir_variable *var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      do_set_program_inouts(shader->ir, prog, shader->Stage);

      prog->SamplersUsed = shader->active_samplers;
      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->Program, prog);

      brw_add_texrect_params(prog);

      if (options->NirOptions) {
         prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
                                    is_scalar_shader_stage(brw, stage));
      }

      _mesa_reference_program(ctx, &prog, NULL);
   }

   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}
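
/* Maps a GLSL base type to the hardware register type used to hold it. */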
enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}
enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}
uint32_t
brw_texture_offset(int *offsets, unsigned num_components)
{
   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
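   /* For example, offsets of (1, -2, 3) pack to 0x1e3: 0x1 in bits 11:8,
    * -2 masked to 0xe in bits 7:4, and 0x3 in bits 3:0.
    */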
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}
const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_MOV ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_FB_WRITE_LOGICAL:
      return "fb_write_logical";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TEX_LOGICAL:
      return "tex_logical";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXD_LOGICAL:
      return "txd_logical";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXF_LOGICAL:
      return "txf_logical";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXL_LOGICAL:
      return "txl_logical";
   case SHADER_OPCODE_TXS:
      return "txs";
   case SHADER_OPCODE_TXS_LOGICAL:
      return "txs_logical";
   case FS_OPCODE_TXB:
      return "txb";
   case FS_OPCODE_TXB_LOGICAL:
      return "txb_logical";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
      return "txf_cms_logical";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
      return "txf_ums_logical";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
      return "txf_mcs_logical";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_LOD_LOGICAL:
      return "lod_logical";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_LOGICAL:
      return "tg4_logical";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      return "tg4_offset_logical";

   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      return "untyped_atomic_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      return "untyped_surface_read_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      return "untyped_surface_write_logical";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      return "typed_atomic_logical";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return "typed_surface_read_logical";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      return "typed_surface_write_logical";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:
      return "broadcast";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
      return "barrier";
   default:
      unreachable("not reached");
   }
}
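
/* Clamps an immediate to the saturation range of the given register type,
 * rewriting the packed value in place.  Returns true if the value changed.
 */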
bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->dw1.ud }, sat_imm = { 0 };

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      /* Nothing to do. */
      return false;
   case BRW_REGISTER_TYPE_UW:
      sat_imm.ud = CLAMP(imm.ud, 0, USHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_W:
      sat_imm.d = CLAMP(imm.d, SHRT_MIN, SHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->dw1.ud = sat_imm.ud;
      return true;
   }
   return false;
}
bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->dw1.d = -reg->dw1.d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->dw1.d = -(int16_t)reg->dw1.ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = -reg->dw1.f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}
bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->dw1.d = abs(reg->dw1.d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->dw1.d = abs((int16_t)reg->dw1.ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = fabsf(reg->dw1.f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * nop, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}
backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_stage_prog_data *stage_prog_data,
                               gl_shader_stage stage)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     shader(shader_prog ?
        (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
     shader_prog(shader_prog),
     prog(prog),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}
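
/* Register and immediate classification helpers shared by the scalar and
 * vec4 backends.
 */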
bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return fixed_hw_reg.dw1.d == 0;
}
bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? fixed_hw_reg.dw1.f == 1.0
          : fixed_hw_reg.dw1.d == 1;
}
bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return fixed_hw_reg.dw1.f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return fixed_hw_reg.dw1.d == -1;
   default:
      return false;
   }
}
bool
backend_reg::is_null() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_NULL;
}
bool
backend_reg::is_accumulator() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_ACCUMULATOR;
}
bool
backend_reg::in_range(const backend_reg &r, unsigned n) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset >= r.reg_offset &&
           reg_offset < r.reg_offset + n);
}
bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}
bool
backend_instruction::is_3src() const
{
   return opcode < ARRAY_SIZE(opcode_descs) && opcode_descs[opcode].nsrc == 3;
}
bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}
bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}
bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}
bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MATH:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SUBB:
   case BRW_OPCODE_XOR:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::writes_accumulator_implicitly(const struct brw_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}
bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
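
/* Helpers for keeping the basic-block instruction ranges (start_ip/end_ip)
 * consistent when instructions are inserted into or removed from the CFG.
 * inst_is_in_block() is an assert-time check that the instruction being
 * edited really lives in the block the caller passed in.
 */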
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i) {
         found = true;
         break;
      }
   }
   return found;
}
static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}
void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}
void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}
void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}
void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}
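
/* Prints the instruction stream, numbered by IP, either to stderr or to the
 * named file; the CFG form is used when it has already been calculated.
 */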
void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}
void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}
void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}
void
backend_shader::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}
/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
void
backend_shader::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
   int num_textures = _mesa_fls(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumUniformBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader_prog && shader_prog->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader_prog->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (shader && shader->base.NumImages) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumImages;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}