/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_gs.h"
#include "brw_vec4_gs.h"
#include "brw_fs.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
   if (prog) {
      prog->Name = name;
      _mesa_init_shader_program(ctx, prog);
   }
   return prog;
}
/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
static bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->precompile && !brw_fs_precompile(ctx, prog))
      return false;

   if (brw->precompile && !brw_gs_precompile(ctx, prog))
      return false;

   if (brw->precompile && !brw_vs_precompile(ctx, prog))
      return false;

   return true;
}
static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_stage shader_type,
                           exec_list *ir)
{
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16
           | LOWER_PACK_SNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_UNPACK_UNORM_4x8;

   if (brw->gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
       * lowering is needed. For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (shader_type == MESA_SHADER_FRAGMENT) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
             |  LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
          |  LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}
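/* Illustrative sketch of what the non-split lowering expands to, following
 * the GLSL 4.x spec definition of packSnorm2x16 (GLSL-style pseudocode for
 * exposition only; the pass actually builds the equivalent IR):
 *
 *    highp int x = int(round(clamp(v.x, -1.0, 1.0) * 32767.0));
 *    highp int y = int(round(clamp(v.y, -1.0, 1.0) * 32767.0));
 *    uint packed = (uint(y) << 16) | (uint(x) & 0xffffu);
 *
 * The *_TO_SPLIT variants instead break packHalf2x16/unpackHalf2x16 into
 * per-component split opcodes that map onto Gen7's f32to16/f16to32.
 */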
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_program_index_to_target(stage),
                                shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      void *mem_ctx = ralloc_context(NULL);

      bool progress;

      /* Clone the linked IR into a driver-owned list so the passes below can
       * rewrite it freely without touching shader->base.ir.
       */
      ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      /* lower_packing_builtins() inserts arithmetic instructions, so it
       * must precede lower_instructions().
       */
      brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->ir);
      do_mat_op_to_vec(shader->ir);
      const int bitfield_insert = brw->gen >= 7
                                  ? BITFIELD_INSERT_TO_BFM_BFI
                                  : 0;
      const int lrp_to_arith = brw->gen < 6 ? LRP_TO_ARITH : 0;
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         bitfield_insert |
                         lrp_to_arith |
                         LDEXP_TO_ARITH);
      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (brw->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      brw_lower_texture_gradients(brw, shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      lower_vector_insert(shader->ir, true);
      brw_do_cubemap_normalize(shader->ir);
      brw_do_lower_offset_arrays(shader->ir);
      brw_do_lower_unnormalized_offset(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = false;

      bool lowered_variable_indexing =
         lower_variable_index_to_cond_assign(shader->ir,
                                             input, output, temp, uniform);

      if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
         perf_debug("Unsupported form of variable indexing in FS; falling "
                    "back to very inefficient code generation\n");
      }
      /* FINISHME: Do this before the variable index lowering. */
      lower_ubo_reference(&shader->base, shader->ir);
      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32,
                                           &ctx->ShaderCompilerOptions[stage])
            || progress;
      } while (progress);
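      /* The do/while above runs the passes to a fixed point: progress from
       * one pass (e.g. a lowered jump) can expose new opportunities for the
       * others, so the loop only exits once a full round reports no change.
       */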
      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }
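      /* For example, a shader that reads gl_ModelViewProjectionMatrix gets a
       * STATE_MVP_MATRIX state reference added here, so the current matrix
       * is uploaded along with the other parameters at draw time.
       */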
      validate_ir_tree(shader->ir);
      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

      do_set_program_inouts(shader->ir, prog, shader->base.Stage);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      brw_add_texrect_params(prog);

      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);

      _mesa_reference_program(ctx, &prog, NULL);

      if (ctx->Shader.Flags & GLSL_DUMP) {
         printf("\n");
         printf("GLSL IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(shader->base.Stage),
                shProg->Name);
         _mesa_print_ir(shader->base.ir, NULL);
         printf("\n");
      }
   }
   if (ctx->Shader.Flags & GLSL_DUMP) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         printf("GLSL %s shader %d source for linked program %d:\n",
                _mesa_shader_stage_to_string(sh->Stage),
                i,
                shProg->Name);
         printf("%s", sh->Source);
      }
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}
enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
      assert(!"not reached");
      break;
   }

   return BRW_REGISTER_TYPE_F;
}
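/* For example: a mat4 or float[8] uniform maps to BRW_REGISTER_TYPE_F (the
 * array case recurses on the element type), an ivec2 maps to
 * BRW_REGISTER_TYPE_D, and a uvec4 maps to BRW_REGISTER_TYPE_UD.
 */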
uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      assert(!"not reached: unknown math function");
      return 0;
   }
}
uint32_t
brw_texture_offset(struct gl_context *ctx, ir_constant *offset)
{
   /* If the driver does not support GL_ARB_gpu_shader5, the offset
    * must be constant, so it will never be NULL here.
    */
   assert(offset != NULL || ctx->Extensions.ARB_gpu_shader5);

   if (!offset) return 0;  /* nonconstant offset; caller will handle it. */

   signed char offsets[3];
   for (unsigned i = 0; i < offset->type->vector_elements; i++)
      offsets[i] = (signed char) offset->value.i[i];

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < offset->type->vector_elements; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }

   return offset_bits;
}
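/* Worked example (illustrative): textureOffset(..., ivec2(1, -2)) yields
 * offsets[] = {1, -2}.  X lands in bits 11:8 and Y in bits 7:4, so
 *
 *    offset_bits = (1 << 8) | ((-2 & 0xF) << 4) = 0x100 | 0xE0 = 0x1E0
 *
 * Negative components work because of the 4-bit masking above: the hardware
 * interprets each nibble as a signed 4-bit value.
 */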
const char *
brw_instruction_name(enum opcode op)
{
   char *fallback;

   if (op < ARRAY_SIZE(opcode_descs) && opcode_descs[op].name)
      return opcode_descs[op].name;

   switch (op) {
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXS:
      return "txs";
   case FS_OPCODE_TXB:
      return "txb";
   case SHADER_OPCODE_TXF_MS:
      return "txf_ms";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";
   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2_IMMED:
      return "set_dword_2_immed";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";

   default:
      /* Yes, this leaks.  It's in debug code, it should never occur, and if
       * it does, you should just add the case to the list above.
       */
      asprintf(&fallback, "op%d", op);
      return fallback;
   }
}
bool
backend_instruction::is_tex()
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_MS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}
bool
backend_instruction::is_math()
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}
bool
backend_instruction::is_control_flow()
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_source_mods()
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}
bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return true;
   default:
      return false;
   }
}
void
backend_visitor::dump_instructions()
{
   int ip = 0;
   foreach_list(node, &this->instructions) {
      backend_instruction *inst = (backend_instruction *)node;
      printf("%d: ", ip++);
      dump_instruction(inst);
   }
}
/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
void
backend_visitor::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
   int num_textures = _mesa_fls(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumUniformBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
      next_binding_table_offset += num_textures;
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader_prog && shader_prog->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader_prog->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
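   /* Worked example (illustrative): a fragment shader with SamplersUsed =
    * 0xf (four samplers), two UBOs, shader_time disabled, no textureGather
    * and no atomic buffers, called with next_binding_table_offset = 0:
    *
    *    texture_start        = 0          (offset advances to 4)
    *    ubo_start            = 4          (offset advances to 6)
    *    shader_time_start    = 0xd0d0d0d0 (unused)
    *    gather_texture_start = 0xd0d0d0d0 (unused)
    *    abo_start            = 0xd0d0d0d0 (unused)
    *    pull_constants_start = 6          (offset advances to 7)
    */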
   /* prog_data->base.binding_table.size will be set by mark_surface_used. */
}