/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "brw_context.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;
   GLuint msg_id = 0;

   va_start(args, fmt);
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
   va_end(args);
}

static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
   }
   va_end(args);
}

static bool
is_scalar_shader_stage(const struct brw_compiler *compiler, int stage)
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
      return true;
   case MESA_SHADER_GEOMETRY:
      return compiler->scalar_gs;
   case MESA_SHADER_VERTEX:
      return compiler->scalar_vs;
   default:
      return false;
   }
}

struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      compiler->scalar_vs = true;

   if (devinfo->gen >= 8 && brw_env_var_as_boolean("INTEL_SCALAR_GS", false))
      compiler->scalar_gs = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;

   /* In order to allow for better CSE at the NIR level we tell NIR to split
    * all ffma instructions during opt_algebraic and we then re-combine them
    * as a later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;
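
   /* Illustrative sketch (assumed NIR output, not code from this file): with
    * lower_ffma set, opt_algebraic may rewrite
    *
    *    ssa_3 = ffma ssa_0, ssa_1, ssa_2
    * as
    *    ssa_4 = fmul ssa_0, ssa_1
    *    ssa_5 = fadd ssa_4, ssa_2
    *
    * so CSE can share the separate multiply/add with other expressions; a
    * later backend pass is then free to fuse the pair back into a single
    * MAD/FMA instruction.
    */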

   /* In the vec4 backend, our dpN instruction replicates its result to all the
    * components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    *
    * For the FS backend, it should be lowered away by the scalarizing pass so
    * we should never see fdot anyway.
    */
   nir_options->fdot_replicates = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;

      bool is_scalar = is_scalar_shader_stage(compiler, i);

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;
   }

   return compiler;
}
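
/* Hypothetical usage sketch (callers live outside this file): the compiler is
 * typically created once per device/screen and then shared, e.g.
 *
 *    struct brw_compiler *compiler = brw_compiler_create(screen, devinfo);
 *
 * where `screen` is simply whatever ralloc context should own the compiler's
 * allocations (register sets, NIR options, ...).
 */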

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

void
brw_mark_surface_used(struct brw_stage_prog_data *prog_data,
                      unsigned surf_index)
{
   assert(surf_index < BRW_MAX_SURFACES);

   prog_data->binding_table.size_bytes =
      MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4);
}
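
/* Worked example (illustrative only): marking surf_index 5 ensures
 * binding_table.size_bytes is at least (5 + 1) * 4 = 24, i.e. room for
 * binding table entries 0..5 at 4 bytes each; the MAX2 keeps any larger size
 * that an earlier call already established.
 */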

enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}

enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}

uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}

uint32_t
brw_texture_offset(int *offsets, unsigned num_components)
{
   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}
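
/* Worked example (illustrative only): for a constant offset of (1, 2, 3) with
 * num_components == 3, the loop packs
 *    i == 0:  1 << 8  -> 0x100   (U, bits 11:8)
 *    i == 1:  2 << 4  -> 0x020   (V, bits  7:4)
 *    i == 2:  3 << 0  -> 0x003   (R, bits  3:0)
 * giving offset_bits == 0x123.  Negative offsets are simply masked to their
 * low four bits by the 0xF mask.
 */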

const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_MOV ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;

   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_WRITE_LOGICAL:
      return "fb_write_logical";
   case FS_OPCODE_PACK_STENCIL_REF:
      return "pack_stencil_ref";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TEX_LOGICAL:
      return "tex_logical";
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXD_LOGICAL:
      return "txd_logical";
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LOGICAL:
      return "txf_logical";
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LOGICAL:
      return "txl_logical";
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TXS_LOGICAL:
      return "txs_logical";
   case FS_OPCODE_TXB_LOGICAL:
      return "txb_logical";
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
      return "txf_cms_logical";
   case SHADER_OPCODE_TXF_UMS:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
      return "txf_ums_logical";
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
      return "txf_mcs_logical";
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_LOD_LOGICAL:
      return "lod_logical";
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_LOGICAL:
      return "tg4_logical";
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      return "tg4_offset_logical";
   case SHADER_OPCODE_SAMPLEINFO:

   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      return "untyped_atomic_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      return "untyped_surface_read_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      return "untyped_surface_write_logical";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      return "typed_atomic_logical";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return "typed_surface_read_logical";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      return "typed_surface_write_logical";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      return "gen8_urb_write_simd8_per_slot";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      return "gen8_urb_write_simd8_masked";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
      return "gen8_urb_write_simd8_masked_per_slot";
   case SHADER_OPCODE_URB_READ_SIMD8:
      return "urb_read_simd8";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:

   case VEC4_OPCODE_MOV_BYTES:
   case VEC4_OPCODE_PACK_BYTES:
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDY_COARSE:
   case FS_OPCODE_DDY_FINE:

   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:

   case FS_OPCODE_GET_BUFFER_SIZE:
      return "fs_get_buffer_size";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_GET_BUFFER_SIZE:
      return "vs_get_buffer_size";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
   case SHADER_OPCODE_MULH:
   }

   unreachable("not reached");
}

bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->dw1.ud }, sat_imm = { 0 };

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
   case BRW_REGISTER_TYPE_UW:
      sat_imm.ud = CLAMP(imm.ud, 0, USHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_W:
      sat_imm.d = CLAMP(imm.d, SHRT_MIN, SHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->dw1.ud = sat_imm.ud;
      return true;
   }

   return false;
}
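
/* Worked example (illustrative only): a BRW_REGISTER_TYPE_F immediate of 1.5f
 * becomes CLAMP(1.5f, 0.0f, 1.0f) == 1.0f; the bit pattern changes, so the
 * immediate is rewritten in place and the function reports true, which lets a
 * caller fold a saturate modifier directly into the constant.
 */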

bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->dw1.d = -reg->dw1.d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->dw1.d = -(int16_t)reg->dw1.ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = -reg->dw1.f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}

bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->dw1.d = abs(reg->dw1.d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->dw1.d = abs((int16_t)reg->dw1.ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = fabsf(reg->dw1.f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * no-op, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}

backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               const nir_shader *shader,
                               struct brw_stage_prog_data *stage_prog_data)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     nir(shader),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(shader->stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}

bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return fixed_hw_reg.dw1.d == 0;
}

bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? fixed_hw_reg.dw1.f == 1.0
          : fixed_hw_reg.dw1.d == 1;
}

bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return fixed_hw_reg.dw1.f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return fixed_hw_reg.dw1.d == -1;
   default:
      return false;
   }
}

bool
backend_reg::is_null() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_NULL;
}

bool
backend_reg::is_accumulator() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_ACCUMULATOR;
}

bool
backend_reg::in_range(const backend_reg &r, unsigned n) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset >= r.reg_offset &&
           reg_offset < r.reg_offset + n);
}

bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}

bool
backend_instruction::is_3src() const
{
   return opcode < ARRAY_SIZE(opcode_descs) && opcode_descs[opcode].nsrc == 3;
}

bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}

bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}

bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MATH:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SUBB:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::writes_accumulator_implicitly(const struct brw_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}

bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::is_volatile() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return true;
   default:
      return false;
   }
}

static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i)
         found = true;
   }
   return found;
}

static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}

void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}

void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}

void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}

void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}

void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}

void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}

void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}

void
backend_shader::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
void
brw_assign_common_binding_table_offsets(gl_shader_stage stage,
                                        const struct brw_device_info *devinfo,
                                        const struct gl_shader_program *shader_prog,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
*shader
= NULL
;
1154 int num_textures
= _mesa_fls(prog
->SamplersUsed
);
1157 shader
= shader_prog
->_LinkedShaders
[stage
];
1159 stage_prog_data
->binding_table
.texture_start
= next_binding_table_offset
;
1160 next_binding_table_offset
+= num_textures
;
1163 assert(shader
->NumUniformBlocks
<= BRW_MAX_UBO
);
1164 stage_prog_data
->binding_table
.ubo_start
= next_binding_table_offset
;
1165 next_binding_table_offset
+= shader
->NumUniformBlocks
;
1167 assert(shader
->NumShaderStorageBlocks
<= BRW_MAX_SSBO
);
1168 stage_prog_data
->binding_table
.ssbo_start
= next_binding_table_offset
;
1169 next_binding_table_offset
+= shader
->NumShaderStorageBlocks
;
1171 stage_prog_data
->binding_table
.ubo_start
= 0xd0d0d0d0;
1172 stage_prog_data
->binding_table
.ssbo_start
= 0xd0d0d0d0;
1175 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
1176 stage_prog_data
->binding_table
.shader_time_start
= next_binding_table_offset
;
1177 next_binding_table_offset
++;
1179 stage_prog_data
->binding_table
.shader_time_start
= 0xd0d0d0d0;
1182 if (prog
->UsesGather
) {
1183 if (devinfo
->gen
>= 8) {
1184 stage_prog_data
->binding_table
.gather_texture_start
=
1185 stage_prog_data
->binding_table
.texture_start
;
1187 stage_prog_data
->binding_table
.gather_texture_start
= next_binding_table_offset
;
1188 next_binding_table_offset
+= num_textures
;
1191 stage_prog_data
->binding_table
.gather_texture_start
= 0xd0d0d0d0;
1194 if (shader
&& shader
->NumAtomicBuffers
) {
1195 stage_prog_data
->binding_table
.abo_start
= next_binding_table_offset
;
1196 next_binding_table_offset
+= shader
->NumAtomicBuffers
;
1198 stage_prog_data
->binding_table
.abo_start
= 0xd0d0d0d0;
1201 if (shader
&& shader
->NumImages
) {
1202 stage_prog_data
->binding_table
.image_start
= next_binding_table_offset
;
1203 next_binding_table_offset
+= shader
->NumImages
;
1205 stage_prog_data
->binding_table
.image_start
= 0xd0d0d0d0;
1208 /* This may or may not be used depending on how the compile goes. */
1209 stage_prog_data
->binding_table
.pull_constants_start
= next_binding_table_offset
;
1210 next_binding_table_offset
++;
1212 assert(next_binding_table_offset
<= BRW_MAX_SURFACES
);
1214 /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
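
/* Worked example (illustrative only): starting at next_binding_table_offset
 * of 0, a shader with 4 samplers and 1 UBO, no SSBOs/atomics/images, no
 * gather and no shader-time debugging ends up with
 *    texture_start        = 0   (entries 0..3)
 *    ubo_start            = 4
 *    ssbo_start           = 5   (zero SSBOs, so no entries consumed)
 *    shader_time/gather/abo/image groups = 0xd0d0d0d0 (unused)
 *    pull_constants_start = 5
 * leaving next_binding_table_offset at 6, comfortably under BRW_MAX_SURFACES.
 */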

static void
setup_vec4_uniform_value(const gl_constant_value **params,
                         const gl_constant_value *values,
                         unsigned n)
{
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      params[i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      params[i] = &zero;
}

void
brw_setup_image_uniform_values(gl_shader_stage stage,
                               struct brw_stage_prog_data *stage_prog_data,
                               unsigned param_start_index,
                               const gl_uniform_storage *storage)
{
   const gl_constant_value **param =
      &stage_prog_data->param[param_start_index];

   for (unsigned i = 0; i < MAX2(storage->array_elements, 1); i++) {
      const unsigned image_idx = storage->opaque[stage].index + i;
      const brw_image_param *image_param =
         &stage_prog_data->image_param[image_idx];

      /* Upload the brw_image_param structure.  The order is expected to match
       * the BRW_IMAGE_PARAM_*_OFFSET defines.
       */
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
         (const gl_constant_value *)&image_param->surface_idx, 1);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
         (const gl_constant_value *)image_param->offset, 2);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
         (const gl_constant_value *)image_param->size, 3);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
         (const gl_constant_value *)image_param->stride, 4);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
         (const gl_constant_value *)image_param->tiling, 3);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
         (const gl_constant_value *)image_param->swizzling, 2);
      param += BRW_IMAGE_PARAM_SIZE;

      brw_mark_surface_used(
         stage_prog_data,
         stage_prog_data->binding_table.image_start + image_idx);
   }
}

/**
 * Decide which set of clip planes should be used when clipping via
 * gl_Position or gl_ClipVertex.
 */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
{
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX]) {
      /* There is currently a GLSL vertex shader, so clip according to GLSL
       * rules, which means compare gl_ClipVertex (or gl_Position, if
       * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
       * that were stored in EyeUserPlane at the time the clip planes were
       * specified.
       */
      return ctx->Transform.EyeUserPlane;
   } else {
      /* Either we are using fixed function or an ARB vertex program.  In
       * either case the clip planes are going to be compared against
       * gl_Position (which is in clip coordinates) so we have to clip using
       * _ClipUserPlane, which was transformed into clip coordinates by Mesa
       * earlier in the frame.
       */
      return ctx->Transform._ClipUserPlane;
   }
}
;