/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "brw_context.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
static bool
brw_shader_precompile(struct gl_context *ctx,
                      struct gl_shader_program *sh_prog)
{
   struct gl_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
   struct gl_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct gl_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
      return false;

   if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
      return false;

   if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
      return false;

   return true;
}
static inline bool
is_scalar_shader_stage(struct brw_context *brw, int stage)
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      return true;
   case MESA_SHADER_VERTEX:
      return brw->scalar_vs;
   default:
      return false;
   }
}
static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_stage shader_type,
                           exec_list *ir)
{
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16;

   if (is_scalar_shader_stage(brw, shader_type)) {
      ops |= LOWER_UNPACK_UNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_PACK_SNORM_4x8;
   }

   if (brw->gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16.  For AOS code, no
       * lowering is needed.  For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (is_scalar_shader_stage(brw, shader_type)) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
             |  LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
          |  LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}
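
/* Illustrative note, not from the original sources: with the *_TO_SPLIT
 * lowerings selected above for scalar (SOA) code on Gen7+, the GLSL IR for
 * packHalf2x16(v) is rewritten into a pack_half_2x16_split operation taking
 * v.x and v.y as separate scalar operands, and unpackHalf2x16(u) into
 * separate split_x/split_y operations; the backend then emits these through
 * the *_HALF_2x16_SPLIT* opcodes listed in brw_instruction_name() below.
 */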
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
                                shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      /* lower_packing_builtins() inserts arithmetic instructions, so it
       * must precede lower_instructions().
       */
      brw_lower_packing_builtins(brw, (gl_shader_stage) stage,
                                 shader->base.ir);
      do_mat_op_to_vec(shader->base.ir);
      const int bitfield_insert = brw->gen >= 7
                                  ? BITFIELD_INSERT_TO_BFM_BFI
                                  : 0;
      lower_instructions(shader->base.ir,
                         MOD_TO_FLOOR |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         bitfield_insert |
                         LDEXP_TO_ARITH);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (brw->gen < 6)
         lower_if_to_cond_assign(shader->base.ir, 16);

      do_lower_texture_projection(shader->base.ir);
      brw_lower_texture_gradients(brw, shader->base.ir);
      do_vec_index_to_cond_assign(shader->base.ir);
      lower_vector_insert(shader->base.ir, true);
      brw_do_cubemap_normalize(shader->base.ir);
      lower_offset_arrays(shader->base.ir);
      brw_do_lower_unnormalized_offset(shader->base.ir);
      lower_noise(shader->base.ir);
      lower_quadop_vector(shader->base.ir, false);

      bool lowered_variable_indexing =
         lower_variable_index_to_cond_assign(shader->base.ir,
                                             options->EmitNoIndirectInput,
                                             options->EmitNoIndirectOutput,
                                             options->EmitNoIndirectTemp,
                                             options->EmitNoIndirectUniform);

      if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
         perf_debug("Unsupported form of variable indexing in FS; falling "
                    "back to very inefficient code generation\n");
      }

      lower_ubo_reference(&shader->base, shader->base.ir);

      bool progress;
      do {
         progress = false;

         if (is_scalar_shader_stage(brw, stage)) {
            brw_do_channel_expressions(shader->base.ir);
            brw_do_vector_splitting(shader->base.ir);
         }

         progress = do_lower_jumps(shader->base.ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->base.ir, true, true,
                                           options, ctx->Const.NativeIntegers)
            || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->base.ir) {
         ir_variable *var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->base.ir);

      do_set_program_inouts(shader->base.ir, prog, shader->base.Stage);

      prog->SamplersUsed = shader->base.active_samplers;
      prog->ShadowSamplers = shader->base.shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      brw_add_texrect_params(prog);

      _mesa_reference_program(ctx, &prog, NULL);

      if (ctx->_Shader->Flags & GLSL_DUMP) {
         fprintf(stderr, "\n");
         fprintf(stderr, "GLSL IR for linked %s program %d:\n",
                 _mesa_shader_stage_to_string(shader->base.Stage),
                 shProg->Name);
         _mesa_print_ir(stderr, shader->base.ir, NULL);
         fprintf(stderr, "\n");
      }
   }

   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}
enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}
enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}
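
/* Illustrative example, not from the original sources: lowering the IR
 * comparison "a < b" calls brw_conditional_for_comparison(ir_binop_less),
 * which returns BRW_CONDITIONAL_L, so the generated CMP instruction carries
 * the .l conditional modifier; the remaining ir_binop_* comparisons map to
 * their conditional mods the same way.
 */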
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}
uint32_t
brw_texture_offset(struct gl_context *ctx, int *offsets,
                   unsigned num_components)
{
   /* If the driver does not support GL_ARB_gpu_shader5, the offset
    * must be constant.
    */
   assert(offsets != NULL || ctx->Extensions.ARB_gpu_shader5);

   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}
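
/* Worked example, not from the original sources: for a constant offset of
 * <2, -1, 0>, the loop above computes
 *
 *    i = 0: ( 2 << 8) & 0xF00 = 0x200
 *    i = 1: (-1 << 4) & 0x0F0 = 0x0F0
 *    i = 2: ( 0 << 0) & 0x00F = 0x000
 *
 * so the packed dword returned to the caller is 0x2F0.
 */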
const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_MOV ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXS:
      return "txs";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_OMASK:
      return "set_omask";
   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";
   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "gs_ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";

   default:
      unreachable("not reached");
   }
}
bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->dw1.ud }, sat_imm = { 0 };

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      /* Nothing to do. */
      return false;
   case BRW_REGISTER_TYPE_UW:
      sat_imm.ud = CLAMP(imm.ud, 0, USHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_W:
      sat_imm.d = CLAMP(imm.d, SHRT_MIN, SHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->dw1.ud = sat_imm.ud;
      return true;
   }
   return false;
}
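
/* Illustrative example, not from the original sources: saturating a float
 * immediate of 2.5f clamps it to 1.0f, so imm.ud differs from sat_imm.ud,
 * reg->dw1.ud is rewritten, and the function returns true to signal that the
 * saturate modifier has been folded into the immediate.  A float immediate
 * of 0.5f is already within [0.0, 1.0], so the register is left untouched
 * and false is returned.
 */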
bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->dw1.d = -reg->dw1.d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->dw1.d = -(int16_t)reg->dw1.ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = -reg->dw1.f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}
bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->dw1.d = abs(reg->dw1.d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->dw1.d = abs((int16_t)reg->dw1.ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = fabsf(reg->dw1.f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * nop, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}
backend_visitor::backend_visitor(struct brw_context *brw,
                                 struct gl_shader_program *shader_prog,
                                 struct gl_program *prog,
                                 struct brw_stage_prog_data *stage_prog_data,
                                 gl_shader_stage stage)
   : brw(brw),
     ctx(&brw->ctx),
     shader(shader_prog ?
        (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
     shader_prog(shader_prog),
     prog(prog),
     stage_prog_data(stage_prog_data),
     cfg(NULL),
     stage(stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}
bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return fixed_hw_reg.dw1.d == 0;
}
bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? fixed_hw_reg.dw1.f == 1.0
          : fixed_hw_reg.dw1.d == 1;
}
bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return fixed_hw_reg.dw1.f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return fixed_hw_reg.dw1.d == -1;
   default:
      return false;
   }
}
bool
backend_reg::is_null() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_NULL;
}
bool
backend_reg::is_accumulator() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_ACCUMULATOR;
}
bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}
bool
backend_instruction::is_3src() const
{
   return opcode < ARRAY_SIZE(opcode_descs) && opcode_descs[opcode].nsrc == 3;
}
bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}
bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}
bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}
bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MATH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SUBB:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::writes_accumulator_implicitly(struct brw_context *brw) const
{
   return writes_accumulator ||
          (brw->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}
bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_FB_WRITE:
      return true;
   default:
      return false;
   }
}
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i)
         found = true;
   }
   return found;
}
static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}
void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}
void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}
void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}
void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}
void
backend_visitor::dump_instructions()
{
   dump_instructions(NULL);
}
void
backend_visitor::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   int ip = 0;
   if (cfg) {
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      foreach_in_list(backend_instruction, inst, &instructions) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}
void
backend_visitor::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}
void
backend_visitor::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}
/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
void
backend_visitor::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
   int num_textures = _mesa_fls(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumUniformBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      if (brw->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader_prog && shader_prog->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader_prog->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (shader && shader->base.NumImages) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumImages;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}
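
/* Hypothetical layout, not from the original sources: for a stage entered
 * with next_binding_table_offset == 4, two textures (SamplersUsed == 0x3),
 * one UBO, and no shader_time, gather, atomic-buffer, or image surfaces,
 * the code above produces
 *
 *    texture_start        = 4   (offset advances to 6)
 *    ubo_start            = 6   (offset advances to 7)
 *    shader_time_start    = 0xd0d0d0d0
 *    gather_texture_start = 0xd0d0d0d0
 *    abo_start            = 0xd0d0d0d0
 *    image_start          = 0xd0d0d0d0
 *    pull_constants_start = 7   (offset advances to 8)
 *
 * which satisfies the final assert as long as 8 <= BRW_MAX_SURFACES.
 */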