/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_gs.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_nir.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"

static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;

   va_start(args, fmt);
   GLuint msg_id = 0;
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION,
                   fmt, args);
   va_end(args);
}

static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;

   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM,
                      fmt, args);
   }

   va_end(args);
}
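
/* Note the two sinks above: INTEL_DEBUG=perf mirrors the message to stderr
 * through the va_copy'd list (a copy is needed because a va_list may only be
 * walked once), while brw->perf_debug forwards the original list to the
 * GL_KHR_debug machinery.
 */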

struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   compiler->scalar_stage[MESA_SHADER_VERTEX] =
      devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS);
   compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
      devinfo->gen >= 8 && brw_env_var_as_boolean("INTEL_SCALAR_GS", false);
   compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
   compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;
   /* In order to help allow for better CSE at the NIR level we tell NIR
    * to split all ffma instructions during opt_algebraic and we then
    * re-combine them as a later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;
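   /* Illustration: NIR may rewrite "ffma a, b, c" as "fadd (fmul a, b), c"
    * so the multiply can be CSE'd with other uses of a*b; the backend's own
    * MAD-combining pass later fuses the pair back together where that is
    * still a win.
    */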
   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    *
    * For the FS backend, it should be lowered away by the scalarizing pass so
    * we should never see fdot anyway.
    */
   nir_options->fdot_replicates = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
   }

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
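
/* Minimal usage sketch (hypothetical caller; the ralloc context and devinfo
 * normally come from the screen):
 *
 *    struct brw_compiler *c = brw_compiler_create(screen, &screen->devinfo);
 *    bool vs_scalar = c->scalar_stage[MESA_SHADER_VERTEX];
 *
 * The NIR options and register sets are ralloc-parented to the returned
 * object, so freeing it (or its parent context) releases them too.
 */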

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

void
brw_mark_surface_used(struct brw_stage_prog_data *prog_data,
                      unsigned surf_index)
{
   assert(surf_index < BRW_MAX_SURFACES);

   prog_data->binding_table.size_bytes =
      MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4);
}
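
/* For example, marking surf_index 5 on fresh prog_data raises
 * binding_table.size_bytes to (5 + 1) * 4 = 24 (six 32-bit entries);
 * marking a lower index afterwards is a no-op thanks to the MAX2.
 */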

enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}
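
/* For example, a float[4] array recurses through GLSL_TYPE_ARRAY down to
 * BRW_REGISTER_TYPE_F, while struct/sampler dereferences start out as UD and
 * are expected to be overridden once the member type is known.
 */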

enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}

uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}

uint32_t
brw_texture_offset(int *offsets, unsigned num_components)
{
   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}
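
/* Worked example (hypothetical values): offsets = {1, -2, 3} with three
 * components packs U = 0x1 into bits 11:8, V = 0xe (the low nibble of -2)
 * into bits 7:4 and R = 0x3 into bits 3:0, so the function returns 0x1e3.
 */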

const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_ILLEGAL ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_FB_WRITE_LOGICAL:
      return "fb_write_logical";
   case FS_OPCODE_PACK_STENCIL_REF:
      return "pack_stencil_ref";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TEX_LOGICAL:
      return "tex_logical";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXD_LOGICAL:
      return "txd_logical";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXF_LOGICAL:
      return "txf_logical";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXL_LOGICAL:
      return "txl_logical";
   case SHADER_OPCODE_TXS:
      return "txs";
   case SHADER_OPCODE_TXS_LOGICAL:
      return "txs_logical";
   case FS_OPCODE_TXB:
      return "txb";
   case FS_OPCODE_TXB_LOGICAL:
      return "txb_logical";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
      return "txf_cms_logical";
   case SHADER_OPCODE_TXF_CMS_W:
      return "txf_cms_w";
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      return "txf_cms_w_logical";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
      return "txf_ums_logical";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
      return "txf_mcs_logical";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_LOD_LOGICAL:
      return "lod_logical";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_LOGICAL:
      return "tg4_logical";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      return "tg4_offset_logical";
   case SHADER_OPCODE_SAMPLEINFO:
      return "sampleinfo";

   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      return "untyped_atomic_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      return "untyped_surface_read_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      return "untyped_surface_write_logical";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      return "typed_atomic_logical";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return "typed_surface_read_logical";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      return "typed_surface_write_logical";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      return "gen8_urb_write_simd8_per_slot";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      return "gen8_urb_write_simd8_masked";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
      return "gen8_urb_write_simd8_masked_per_slot";
   case SHADER_OPCODE_URB_READ_SIMD8:
      return "urb_read_simd8";
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
      return "urb_read_simd8_per_slot";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:
      return "broadcast";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_GET_BUFFER_SIZE:
      return "fs_get_buffer_size";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_GET_BUFFER_SIZE:
      return "vs_get_buffer_size";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
      return "barrier";
   case SHADER_OPCODE_MULH:
      return "mulh";
   case SHADER_OPCODE_MOV_INDIRECT:
      return "mov_indirect";
   }

   unreachable("not reached");
}

bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->ud }, sat_imm = { 0 };

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      /* Nothing to do. */
      return false;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->ud = sat_imm.ud;
      return true;
   }
   return false;
}
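
/* For example, a float immediate of 1.5f is clamped to 1.0f and the function
 * returns true, letting the caller drop the instruction's saturate modifier;
 * integer immediates return false since there is nothing to clamp.
 */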

bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->d = -reg->d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->d = -(int16_t)reg->ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->f = -reg->f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      /* A VF immediate packs four 8-bit restricted floats; bit 7 of each
       * byte is the sign bit, so XORing flips the sign of all four.
       */
      reg->ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}

bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->d = abs(reg->d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->d = abs((int16_t)reg->ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->f = fabsf(reg->f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * nop, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}

backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               const nir_shader *shader,
                               struct brw_stage_prog_data *stage_prog_data)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     nir(shader),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(shader->stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}

bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return d == 0;
}

bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? f == 1.0
          : d == 1;
}

bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return d == -1;
   default:
      return false;
   }
}

bool
backend_reg::is_null() const
{
   return file == ARF && nr == BRW_ARF_NULL;
}

bool
backend_reg::is_accumulator() const
{
   return file == ARF && nr == BRW_ARF_ACCUMULATOR;
}

bool
backend_reg::in_range(const backend_reg &r, unsigned n) const
{
   return (file == r.file &&
           nr == r.nr &&
           reg_offset >= r.reg_offset &&
           reg_offset < r.reg_offset + n);
}
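
/* Example: with r occupying n = 2 consecutive register offsets, in_range()
 * accepts reg_offset == r.reg_offset and r.reg_offset + 1, which is how
 * callers test whether an access overlaps a multi-register span.
 */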

bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}

bool
backend_instruction::is_3src() const
{
   return ::is_3src(opcode);
}

bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_CMS_W ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}

bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}

bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MATH:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SUBB:
   case BRW_OPCODE_XOR:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::writes_accumulator_implicitly(const struct brw_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}

bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::is_volatile() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return true;
   default:
      return false;
   }
}

#ifndef NDEBUG
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i) {
         found = true;
      }
   }
   return found;
}
#endif

static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}
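
/* Bookkeeping sketch: inserting one instruction shifts the [start_ip, end_ip]
 * range of every later block by +1 and removal shifts it by -1, keeping
 * instruction IPs dense across the whole CFG.
 */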

void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}

void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}

void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}

void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}

void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}

void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}

void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}

void
backend_shader::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
void
brw_assign_common_binding_table_offsets(gl_shader_stage stage,
                                        const struct brw_device_info *devinfo,
                                        const struct gl_shader_program *shader_prog,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   const struct gl_shader *shader = NULL;
   int num_textures = _mesa_fls(prog->SamplersUsed);

   if (shader_prog)
      shader = shader_prog->_LinkedShaders[stage];

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      assert(shader->NumUniformBlocks <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->NumUniformBlocks;

      assert(shader->NumShaderStorageBlocks <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += shader->NumShaderStorageBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader && shader->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (shader && shader->NumImages) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += shader->NumImages;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}
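
/* Worked example (hypothetical counts): a stage using 4 textures and 1 UBO,
 * with no SSBOs/atomics/images and no gather or shader-time debugging,
 * starting from offset 0 ends up with texture_start = 0, ubo_start = 4 and
 * pull_constants_start = 5, leaving next_binding_table_offset at 6.
 */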

static void
setup_vec4_uniform_value(const gl_constant_value **params,
                         const gl_constant_value *values,
                         unsigned n)
{
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      params[i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      params[i] = &zero;
}
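
/* E.g. with n = 2, params[0..1] point at the two supplied values and
 * params[2..3] at the shared zero constant, padding the uniform out to a
 * full vec4 slot as the constant uploader expects.
 */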

void
brw_setup_image_uniform_values(gl_shader_stage stage,
                               struct brw_stage_prog_data *stage_prog_data,
                               unsigned param_start_index,
                               const gl_uniform_storage *storage)
{
   const gl_constant_value **param =
      &stage_prog_data->param[param_start_index];

   for (unsigned i = 0; i < MAX2(storage->array_elements, 1); i++) {
      const unsigned image_idx = storage->opaque[stage].index + i;
      const brw_image_param *image_param =
         &stage_prog_data->image_param[image_idx];

      /* Upload the brw_image_param structure.  The order is expected to match
       * the BRW_IMAGE_PARAM_*_OFFSET defines.
       */
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
         (const gl_constant_value *)&image_param->surface_idx, 1);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
         (const gl_constant_value *)image_param->offset, 2);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
         (const gl_constant_value *)image_param->size, 3);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
         (const gl_constant_value *)image_param->stride, 4);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
         (const gl_constant_value *)image_param->tiling, 3);
      setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
         (const gl_constant_value *)image_param->swizzling, 2);
      param += BRW_IMAGE_PARAM_SIZE;

      brw_mark_surface_used(
         stage_prog_data,
         stage_prog_data->binding_table.image_start + image_idx);
   }
}

/**
 * Decide which set of clip planes should be used when clipping via
 * gl_Position or gl_ClipVertex.
 */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
{
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX]) {
      /* There is currently a GLSL vertex shader, so clip according to GLSL
       * rules, which means compare gl_ClipVertex (or gl_Position, if
       * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
       * that were stored in EyeUserPlane at the time the clip planes were
       * specified.
       */
      return ctx->Transform.EyeUserPlane;
   } else {
      /* Either we are using fixed function or an ARB vertex program.  In
       * either case the clip planes are going to be compared against
       * gl_Position (which is in clip coordinates) so we have to clip using
       * _ClipUserPlane, which was transformed into clip coordinates by Mesa
       * earlier in the frame.
       */
      return ctx->Transform._ClipUserPlane;
   }
}