/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_cfg.h"
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "brw_vec4_tes.h"
#include "common/gen_debug.h"
#include "main/uniforms.h"
#include "util/macros.h"
enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_DOUBLE:
      return BRW_REGISTER_TYPE_DF;
   case GLSL_TYPE_UINT64:
      return BRW_REGISTER_TYPE_UQ;
   case GLSL_TYPE_INT64:
      return BRW_REGISTER_TYPE_Q;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}
enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}
bool
brw_texture_offset(int *offsets, unsigned num_components, uint32_t *offset_bits)
{
   if (!offsets) return false;  /* nonconstant offset; caller will handle it. */

   /* offset out of bounds; caller will handle it. */
   for (unsigned i = 0; i < num_components; i++)
      if (offsets[i] > 7 || offsets[i] < -8)
         return false;

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   *offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      *offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }

   return true;
}
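
/* For example, brw_texture_offset() packs constant offsets {1, -2, 3} as
 * (1 << 8) | ((-2 & 0xF) << 4) | (3 & 0xF) = 0x1e3.
 */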
const char *
brw_instruction_name(const struct gen_device_info *devinfo, enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_ILLEGAL ... BRW_OPCODE_NOP:
      /* The DO instruction doesn't exist on Gen6+, but we use it to mark the
       * start of a loop in the IR.
       */
      if (devinfo->gen >= 6 && op == BRW_OPCODE_DO)
         return "do";

      /* The following conversion opcodes don't exist on Gen8+, but we use
       * them to mark that we want to do the conversion.
       */
      if (devinfo->gen > 7 && op == BRW_OPCODE_F32TO16)
         return "f32to16";

      if (devinfo->gen > 7 && op == BRW_OPCODE_F16TO32)
         return "f16to32";

      assert(brw_opcode_desc(devinfo, op)->name);
      return brw_opcode_desc(devinfo, op)->name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_FB_WRITE_LOGICAL:
      return "fb_write_logical";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";
   case FS_OPCODE_FB_READ:
      return "fb_read";
   case FS_OPCODE_FB_READ_LOGICAL:
      return "fb_read_logical";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TEX_LOGICAL:
      return "tex_logical";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXD_LOGICAL:
      return "txd_logical";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXF_LOGICAL:
      return "txf_logical";
   case SHADER_OPCODE_TXF_LZ:
      return "txf_lz";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXL_LOGICAL:
      return "txl_logical";
   case SHADER_OPCODE_TXL_LZ:
      return "txl_lz";
   case SHADER_OPCODE_TXS:
      return "txs";
   case SHADER_OPCODE_TXS_LOGICAL:
      return "txs_logical";
   case FS_OPCODE_TXB_LOGICAL:
      return "txb_logical";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
      return "txf_cms_logical";
   case SHADER_OPCODE_TXF_CMS_W:
      return "txf_cms_w";
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      return "txf_cms_w_logical";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
      return "txf_ums_logical";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
      return "txf_mcs_logical";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_LOD_LOGICAL:
      return "lod_logical";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_LOGICAL:
      return "tg4_logical";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      return "tg4_offset_logical";
   case SHADER_OPCODE_SAMPLEINFO:
      return "sampleinfo";
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      return "sampleinfo_logical";
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      return "untyped_atomic_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      return "untyped_surface_read_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      return "untyped_surface_write_logical";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      return "typed_atomic_logical";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return "typed_surface_read_logical";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      return "typed_surface_write_logical";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";

   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      return "gen8_urb_write_simd8_per_slot";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      return "gen8_urb_write_simd8_masked";
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
      return "gen8_urb_write_simd8_masked_per_slot";
   case SHADER_OPCODE_URB_READ_SIMD8:
      return "urb_read_simd8";
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
      return "urb_read_simd8_per_slot";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:
      return "broadcast";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";
   case VEC4_OPCODE_DOUBLE_TO_F32:
      return "double_to_f32";
   case VEC4_OPCODE_DOUBLE_TO_D32:
      return "double_to_d32";
   case VEC4_OPCODE_DOUBLE_TO_U32:
      return "double_to_u32";
   case VEC4_OPCODE_TO_DOUBLE:
      return "single_to_double";
   case VEC4_OPCODE_PICK_LOW_32BIT:
      return "pick_low_32bit";
   case VEC4_OPCODE_PICK_HIGH_32BIT:
      return "pick_high_32bit";
   case VEC4_OPCODE_SET_LOW_32BIT:
      return "set_low_32bit";
   case VEC4_OPCODE_SET_HIGH_32BIT:
      return "set_high_32bit";
   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_GET_BUFFER_SIZE:
      return "fs_get_buffer_size";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
      return "varying_pull_const_gen4";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
      return "varying_pull_const_logical";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";
   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_GET_BUFFER_SIZE:
      return "vs_get_buffer_size";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
      return "barrier";
   case SHADER_OPCODE_MULH:
      return "mulh";
   case SHADER_OPCODE_MOV_INDIRECT:
      return "mov_indirect";

   case VEC4_OPCODE_URB_READ:
      return "urb_read";
   case TCS_OPCODE_GET_INSTANCE_ID:
      return "tcs_get_instance_id";
   case TCS_OPCODE_URB_WRITE:
      return "tcs_urb_write";
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
      return "tcs_set_input_urb_offsets";
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
      return "tcs_set_output_urb_offsets";
   case TCS_OPCODE_GET_PRIMITIVE_ID:
      return "tcs_get_primitive_id";
   case TCS_OPCODE_CREATE_BARRIER_HEADER:
      return "tcs_create_barrier_header";
   case TCS_OPCODE_SRC0_010_IS_ZERO:
      return "tcs_src0<0,1,0>_is_zero";
   case TCS_OPCODE_RELEASE_INPUT:
      return "tcs_release_input";
   case TCS_OPCODE_THREAD_END:
      return "tcs_thread_end";
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
      return "tes_create_input_read_header";
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return "tes_add_indirect_urb_offset";
   case TES_OPCODE_GET_PRIMITIVE_ID:
      return "tes_get_primitive_id";
   }

   unreachable("not reached");
}
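
/* The table above only needs entries for the compiler's virtual opcodes; any
 * opcode in the BRW_OPCODE_ILLEGAL..BRW_OPCODE_NOP range falls through to
 * brw_opcode_desc(), which already knows the real hardware mnemonics.
 */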
bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
      double df;
   } imm, sat_imm = { 0 };

   const unsigned size = type_sz(type);

   /* We want to either do a 32-bit or 64-bit data copy, the type is otherwise
    * irrelevant, so just check the size of the type and copy from/to an
    * appropriately sized field.
    */
   if (size < 8)
      imm.ud = reg->ud;
   else
      imm.df = reg->df;

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      return false;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_DF:
      sat_imm.df = CLAMP(imm.df, 0.0, 1.0);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate HF immediate");
   }

   if (size < 8) {
      if (imm.ud != sat_imm.ud) {
         reg->ud = sat_imm.ud;
         return true;
      }
   } else {
      if (imm.df != sat_imm.df) {
         reg->df = sat_imm.df;
         return true;
      }
   }

   return false;
}
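
/* For instance, brw_saturate_immediate() rewrites an F immediate of 2.5f to
 * 1.0f and returns true; a value already inside [0.0, 1.0] is left untouched
 * and the function returns false, signalling that nothing changed.
 */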
bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->d = -reg->d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->d = -(int16_t)reg->ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->f = -reg->f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_DF:
      reg->df = -reg->df;
      return true;
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      reg->d64 = -reg->d64;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate HF immediate");
   }

   return false;
}
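
/* The VF case works because a VF immediate packs four 8-bit restricted floats
 * into one dword with each element's sign in the top bit of its byte, so
 * XORing 0x80808080 negates all four components at once.
 */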
bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->d = abs(reg->d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->d = abs((int16_t)reg->ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->f = fabsf(reg->f);
      return true;
   case BRW_REGISTER_TYPE_DF:
      reg->df = fabs(reg->df);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_Q:
      reg->d64 = imaxabs(reg->d64);
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * no-op, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs HF immediate");
   }

   return false;
}
/**
 * Get the appropriate atomic op for an atomic counter intrinsic.
 */
unsigned
get_atomic_counter_op(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_atomic_counter_inc:
      return BRW_AOP_INC;
   case nir_intrinsic_atomic_counter_dec:
      return BRW_AOP_PREDEC;
   case nir_intrinsic_atomic_counter_add:
      return BRW_AOP_ADD;
   case nir_intrinsic_atomic_counter_min:
      return BRW_AOP_UMIN;
   case nir_intrinsic_atomic_counter_max:
      return BRW_AOP_UMAX;
   case nir_intrinsic_atomic_counter_and:
      return BRW_AOP_AND;
   case nir_intrinsic_atomic_counter_or:
      return BRW_AOP_OR;
   case nir_intrinsic_atomic_counter_xor:
      return BRW_AOP_XOR;
   case nir_intrinsic_atomic_counter_exchange:
      return BRW_AOP_MOV;
   case nir_intrinsic_atomic_counter_comp_swap:
      return BRW_AOP_CMPWR;
   default:
      unreachable("Not reachable.");
   }
}
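
/* Note the asymmetry: atomic_counter_inc uses BRW_AOP_INC, which returns the
 * value before the increment, while atomic_counter_dec uses BRW_AOP_PREDEC,
 * which returns the value after the decrement.  That matches the return-value
 * rules of GLSL's atomicCounterIncrement() and atomicCounterDecrement().
 */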
backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               const nir_shader *shader,
                               struct brw_stage_prog_data *stage_prog_data)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     nir(shader),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(shader->info.stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}
backend_shader::~backend_shader()
{
}
bool
backend_reg::equals(const backend_reg &r) const
{
   return brw_regs_equal(this, &r) && offset == r.offset;
}
bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return f == 0;
   case BRW_REGISTER_TYPE_DF:
      return df == 0;
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      return d == 0;
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      return u64 == 0;
   default:
      return false;
   }
}
bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return f == 1.0f;
   case BRW_REGISTER_TYPE_DF:
      return df == 1.0;
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      return d == 1;
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      return u64 == 1;
   default:
      return false;
   }
}
bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return f == -1.0;
   case BRW_REGISTER_TYPE_DF:
      return df == -1.0;
   case BRW_REGISTER_TYPE_D:
      return d == -1;
   case BRW_REGISTER_TYPE_Q:
      return d64 == -1;
   default:
      return false;
   }
}
bool
backend_reg::is_null() const
{
   return file == ARF && nr == BRW_ARF_NULL;
}
bool
backend_reg::is_accumulator() const
{
   return file == ARF && nr == BRW_ARF_ACCUMULATOR;
}
bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}
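
/* SEL appears above only because SEL with a .l or .ge conditional modifier is
 * how the hardware expresses MIN and MAX, and those are commutative; a plain
 * predicated SEL is not.
 */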
bool
backend_instruction::is_3src(const struct gen_device_info *devinfo) const
{
   return ::is_3src(devinfo, opcode);
}
bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_LZ ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_CMS_W ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXL_LZ ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET ||
           opcode == SHADER_OPCODE_SAMPLEINFO);
}
bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}
bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_SUBB:
   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      return true;
   }
}
bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MATH:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SUBB:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::writes_accumulator_implicitly(const struct gen_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}
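
/* The devinfo->gen < 6 term reflects the hardware: on Gen4-5 most ordinary
 * ALU instructions update the accumulator as a side effect, which is what the
 * opcode-range test approximates; from Gen6 on, only instructions that
 * explicitly enable accumulator writes touch it.
 */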
bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_WRITE_LOGICAL:
   case SHADER_OPCODE_BARRIER:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
      return true;
   default:
      return false;
   }
}
bool
backend_instruction::is_volatile() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case VEC4_OPCODE_URB_READ:
      return true;
   default:
      return false;
   }
}
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i) {
         found = true;
      }
   }
   return found;
}
static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        block_iter;
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}
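
/* Each basic block caches the instruction-index range it covers in start_ip
 * and end_ip, so every insertion or removal below has to shift the ranges of
 * all later blocks by the same amount before touching the exec_list itself.
 */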
void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   assert(this != inst);

   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}
void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   assert(this != inst);

   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}
void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}
void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}
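
/* If removing this instruction empties the block (start_ip == end_ip means it
 * held exactly one instruction), the block itself is unlinked from the CFG
 * rather than being left behind with an inverted IP range.
 */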
void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}

void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         if (!unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
            fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}
void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}
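
/* The CFG is built lazily and allocated out of the shader's ralloc context
 * (mem_ctx), so its memory is released together with everything else the
 * visitor allocated from that context.
 */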
extern "C" const unsigned *
brw_compile_tes(const struct brw_compiler *compiler,
                void *log_data,
                void *mem_ctx,
                const struct brw_tes_prog_key *key,
                const struct brw_vue_map *input_vue_map,
                struct brw_tes_prog_data *prog_data,
                const nir_shader *src_shader,
                struct gl_program *prog,
                int shader_time_index,
                char **error_str)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
   const unsigned *assembly;

   nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
   nir->info.inputs_read = key->inputs_read;
   nir->info.patch_inputs_read = key->patch_inputs_read;

   nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
   brw_nir_lower_tes_inputs(nir, input_vue_map);
   brw_nir_lower_vue_outputs(nir, is_scalar);
   nir = brw_postprocess_nir(nir, compiler, is_scalar);
   brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
                       nir->info.outputs_written,
                       nir->info.separate_shader);

   unsigned output_size_bytes = prog_data->base.vue_map.num_slots * 4 * 4;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_DS_URB_ENTRY_SIZE_BYTES) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, "DS outputs exceed maximum size");
      return NULL;
   }

   prog_data->base.clip_distance_mask =
      ((1 << nir->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << nir->info.cull_distance_array_size) - 1) <<
         nir->info.clip_distance_array_size;
   /* URB entry sizes are stored as a multiple of 64 bytes. */
   prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   /* On Cannonlake software shall not program an allocation size that
    * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
    */
   if (devinfo->gen == 10 &&
       prog_data->base.urb_entry_size % 3 == 0)
      prog_data->base.urb_entry_size++;
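
   /* For example, a VUE map with 10 slots is 10 * 16 = 160 bytes of output,
    * which becomes ALIGN(160, 64) / 64 = 3 64-byte units; on Gen10 that
    * multiple of 3 is then bumped to 4 to satisfy the restriction above.
    */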
   prog_data->base.urb_read_length = 0;

   STATIC_ASSERT(BRW_TESS_PARTITIONING_INTEGER == TESS_SPACING_EQUAL - 1);
   STATIC_ASSERT(BRW_TESS_PARTITIONING_ODD_FRACTIONAL ==
                 TESS_SPACING_FRACTIONAL_ODD - 1);
   STATIC_ASSERT(BRW_TESS_PARTITIONING_EVEN_FRACTIONAL ==
                 TESS_SPACING_FRACTIONAL_EVEN - 1);

   prog_data->partitioning =
      (enum brw_tess_partitioning) (nir->info.tess.spacing - 1);

   switch (nir->info.tess.primitive_mode) {
   case GL_QUADS:
      prog_data->domain = BRW_TESS_DOMAIN_QUAD;
      break;
   case GL_TRIANGLES:
      prog_data->domain = BRW_TESS_DOMAIN_TRI;
      break;
   case GL_ISOLINES:
      prog_data->domain = BRW_TESS_DOMAIN_ISOLINE;
      break;
   default:
      unreachable("invalid domain shader primitive mode");
   }

   if (nir->info.tess.point_mode) {
      prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_POINT;
   } else if (nir->info.tess.primitive_mode == GL_ISOLINES) {
      prog_data->output_topology = BRW_TESS_OUTPUT_TOPOLOGY_LINE;
   } else {
      /* Hardware winding order is backwards from OpenGL */
      prog_data->output_topology =
         nir->info.tess.ccw ? BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW
                            : BRW_TESS_OUTPUT_TOPOLOGY_TRI_CCW;
   }
   if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
      fprintf(stderr, "TES Input ");
      brw_print_vue_map(stderr, input_vue_map);
      fprintf(stderr, "TES Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }
   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, (void *) key,
                   &prog_data->base.base, NULL, nir, 8,
                   shader_time_index, input_vue_map);
      if (!v.run_tes()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                     &prog_data->base.base, v.promoted_constants, false,
                     MESA_SHADER_TESS_EVAL);
      if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
         g.enable_debug(ralloc_asprintf(mem_ctx,
                                        "%s tessellation evaluation shader %s",
                                        nir->info.label ? nir->info.label
                                                        : "unnamed",
                                        nir->info.name));
      }

      g.generate_code(v.cfg, 8);

      assembly = g.get_assembly(&prog_data->base.base.program_size);
   } else {
      brw::vec4_tes_visitor v(compiler, log_data, key, prog_data,
                              nir, mem_ctx, shader_time_index);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_TES))
         v.dump_instructions();

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
                                            &prog_data->base, v.cfg,
                                            &prog_data->base.base.program_size);
   }

   return assembly;
}