2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
33 #include "main/macros.h"
34 #include "program/program.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "brw_context.h"
40 /* Return the SrcReg index of the channels that can be immediate float operands
41 * instead of usage of PROGRAM_CONSTANT values through push/pull.
44 brw_vs_arg_can_be_immediate(enum prog_opcode opcode
, int arg
)
46 int opcode_array
[] = {
66 /* These opcodes get broken down in a way that allow two
67 * args to be immediates.
69 if (opcode
== OPCODE_MAD
|| opcode
== OPCODE_LRP
) {
70 if (arg
== 1 || arg
== 2)
74 if (opcode
> ARRAY_SIZE(opcode_array
))
77 return arg
== opcode_array
[opcode
] - 1;
80 static struct brw_reg
get_tmp( struct brw_vs_compile
*c
)
82 struct brw_reg tmp
= brw_vec8_grf(c
->last_tmp
, 0);
84 if (++c
->last_tmp
> c
->prog_data
.total_grf
)
85 c
->prog_data
.total_grf
= c
->last_tmp
;
90 static void release_tmp( struct brw_vs_compile
*c
, struct brw_reg tmp
)
92 if (tmp
.nr
== c
->last_tmp
-1)
96 static void release_tmps( struct brw_vs_compile
*c
)
98 c
->last_tmp
= c
->first_tmp
;
102 get_first_reladdr_output(struct gl_vertex_program
*vp
)
105 int first_reladdr_output
= VERT_RESULT_MAX
;
107 for (i
= 0; i
< vp
->Base
.NumInstructions
; i
++) {
108 struct prog_instruction
*inst
= vp
->Base
.Instructions
+ i
;
110 if (inst
->DstReg
.File
== PROGRAM_OUTPUT
&&
111 inst
->DstReg
.RelAddr
&&
112 inst
->DstReg
.Index
< first_reladdr_output
)
113 first_reladdr_output
= inst
->DstReg
.Index
;
116 return first_reladdr_output
;
119 /* Clears the record of which vp_const_buffer elements have been
120 * loaded into our constant buffer registers, for the starts of new
121 * blocks after control flow.
124 clear_current_const(struct brw_vs_compile
*c
)
128 if (c
->vp
->use_const_buffer
) {
129 for (i
= 0; i
< 3; i
++) {
130 c
->current_const
[i
].index
= -1;
136 * Preallocate GRF register before code emit.
137 * Do things as simply as possible. Allocate and populate all regs
140 static void brw_vs_alloc_regs( struct brw_vs_compile
*c
)
142 struct intel_context
*intel
= &c
->func
.brw
->intel
;
143 GLuint i
, reg
= 0, mrf
;
144 int attributes_in_vue
;
145 int first_reladdr_output
;
147 /* Determine whether to use a real constant buffer or use a block
148 * of GRF registers for constants. The later is faster but only
149 * works if everything fits in the GRF.
150 * XXX this heuristic/check may need some fine tuning...
152 if (c
->vp
->program
.Base
.Parameters
->NumParameters
+
153 c
->vp
->program
.Base
.NumTemporaries
+ 20 > BRW_MAX_GRF
)
154 c
->vp
->use_const_buffer
= GL_TRUE
;
156 c
->vp
->use_const_buffer
= GL_FALSE
;
158 /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/
160 /* r0 -- reserved as usual
162 c
->r0
= brw_vec8_grf(reg
, 0);
165 /* User clip planes from curbe:
167 if (c
->key
.nr_userclip
) {
168 for (i
= 0; i
< c
->key
.nr_userclip
; i
++) {
169 c
->userplane
[i
] = stride( brw_vec4_grf(reg
+3+i
/2, (i
%2) * 4), 0, 4, 1);
172 /* Deal with curbe alignment:
174 reg
+= ((6 + c
->key
.nr_userclip
+ 3) / 4) * 2;
177 /* Vertex program parameters from curbe:
179 if (c
->vp
->use_const_buffer
) {
180 int max_constant
= BRW_MAX_GRF
- 20 - c
->vp
->program
.Base
.NumTemporaries
;
183 /* We've got more constants than we can load with the push
184 * mechanism. This is often correlated with reladdr loads where
185 * we should probably be using a pull mechanism anyway to avoid
186 * excessive reading. However, the pull mechanism is slow in
187 * general. So, we try to allocate as many non-reladdr-loaded
188 * constants through the push buffer as we can before giving up.
190 memset(c
->constant_map
, -1, c
->vp
->program
.Base
.Parameters
->NumParameters
);
192 i
< c
->vp
->program
.Base
.NumInstructions
&& constant
< max_constant
;
194 struct prog_instruction
*inst
= &c
->vp
->program
.Base
.Instructions
[i
];
197 for (arg
= 0; arg
< 3 && constant
< max_constant
; arg
++) {
198 if ((inst
->SrcReg
[arg
].File
!= PROGRAM_STATE_VAR
&&
199 inst
->SrcReg
[arg
].File
!= PROGRAM_CONSTANT
&&
200 inst
->SrcReg
[arg
].File
!= PROGRAM_UNIFORM
&&
201 inst
->SrcReg
[arg
].File
!= PROGRAM_ENV_PARAM
&&
202 inst
->SrcReg
[arg
].File
!= PROGRAM_LOCAL_PARAM
) ||
203 inst
->SrcReg
[arg
].RelAddr
)
206 if (c
->constant_map
[inst
->SrcReg
[arg
].Index
] == -1) {
207 c
->constant_map
[inst
->SrcReg
[arg
].Index
] = constant
++;
212 for (i
= 0; i
< constant
; i
++) {
213 c
->regs
[PROGRAM_STATE_VAR
][i
] = stride( brw_vec4_grf(reg
+i
/2,
217 reg
+= (constant
+ 1) / 2;
218 c
->prog_data
.curb_read_length
= reg
- 1;
219 /* XXX 0 causes a bug elsewhere... */
220 c
->prog_data
.nr_params
= MAX2(constant
* 4, 4);
223 /* use a section of the GRF for constants */
224 GLuint nr_params
= c
->vp
->program
.Base
.Parameters
->NumParameters
;
225 for (i
= 0; i
< nr_params
; i
++) {
226 c
->regs
[PROGRAM_STATE_VAR
][i
] = stride( brw_vec4_grf(reg
+i
/2, (i
%2) * 4), 0, 4, 1);
228 reg
+= (nr_params
+ 1) / 2;
229 c
->prog_data
.curb_read_length
= reg
- 1;
231 c
->prog_data
.nr_params
= nr_params
* 4;
234 /* Allocate input regs:
237 for (i
= 0; i
< VERT_ATTRIB_MAX
; i
++) {
238 if (c
->prog_data
.inputs_read
& (1 << i
)) {
240 c
->regs
[PROGRAM_INPUT
][i
] = brw_vec8_grf(reg
, 0);
244 /* If there are no inputs, we'll still be reading one attribute's worth
245 * because it's required -- see urb_read_length setting.
247 if (c
->nr_inputs
== 0)
250 /* Allocate outputs. The non-position outputs go straight into message regs.
253 c
->first_output
= reg
;
254 c
->first_overflow_output
= 0;
257 mrf
= 3; /* no more pos store in attribute */
258 else if (intel
->gen
== 5)
263 first_reladdr_output
= get_first_reladdr_output(&c
->vp
->program
);
264 for (i
= 0; i
< VERT_RESULT_MAX
; i
++) {
265 if (c
->prog_data
.outputs_written
& BITFIELD64_BIT(i
)) {
267 assert(i
< Elements(c
->regs
[PROGRAM_OUTPUT
]));
268 if (i
== VERT_RESULT_HPOS
) {
269 c
->regs
[PROGRAM_OUTPUT
][i
] = brw_vec8_grf(reg
, 0);
272 else if (i
== VERT_RESULT_PSIZ
) {
273 c
->regs
[PROGRAM_OUTPUT
][i
] = brw_vec8_grf(reg
, 0);
275 mrf
++; /* just a placeholder? XXX fix later stages & remove this */
278 /* Two restrictions on our compute-to-MRF here. The
279 * message length for all SEND messages is restricted to
280 * [1,15], so we can't use mrf 15, as that means a length
283 * Additionally, URB writes are aligned to URB rows, so we
284 * need to put an even number of registers of URB data in
285 * each URB write so that the later write is aligned. A
286 * message length of 15 means 1 message header reg plus 14
289 * For attributes beyond the compute-to-MRF, we compute to
290 * GRFs and they will be written in the second URB_WRITE.
292 if (first_reladdr_output
> i
&& mrf
< 15) {
293 c
->regs
[PROGRAM_OUTPUT
][i
] = brw_message_reg(mrf
);
297 if (mrf
>= 15 && !c
->first_overflow_output
)
298 c
->first_overflow_output
= i
;
299 c
->regs
[PROGRAM_OUTPUT
][i
] = brw_vec8_grf(reg
, 0);
307 /* Allocate program temporaries:
309 for (i
= 0; i
< c
->vp
->program
.Base
.NumTemporaries
; i
++) {
310 c
->regs
[PROGRAM_TEMPORARY
][i
] = brw_vec8_grf(reg
, 0);
314 /* Address reg(s). Don't try to use the internal address reg until
317 for (i
= 0; i
< c
->vp
->program
.Base
.NumAddressRegs
; i
++) {
318 c
->regs
[PROGRAM_ADDRESS
][i
] = brw_reg(BRW_GENERAL_REGISTER_FILE
,
322 BRW_VERTICAL_STRIDE_8
,
324 BRW_HORIZONTAL_STRIDE_1
,
330 if (c
->vp
->use_const_buffer
) {
331 for (i
= 0; i
< 3; i
++) {
332 c
->current_const
[i
].reg
= brw_vec8_grf(reg
, 0);
335 clear_current_const(c
);
338 for (i
= 0; i
< 128; i
++) {
339 if (c
->output_regs
[i
].used_in_src
) {
340 c
->output_regs
[i
].reg
= brw_vec8_grf(reg
, 0);
345 if (c
->needs_stack
) {
346 c
->stack
= brw_uw16_reg(BRW_GENERAL_REGISTER_FILE
, reg
, 0);
350 /* Some opcodes need an internal temporary:
353 c
->last_tmp
= reg
; /* for allocation purposes */
355 /* Each input reg holds data from two vertices. The
356 * urb_read_length is the number of registers read from *each*
357 * vertex urb, so is half the amount:
359 c
->prog_data
.urb_read_length
= (c
->nr_inputs
+ 1) / 2;
360 /* Setting this field to 0 leads to undefined behavior according to the
361 * the VS_STATE docs. Our VUEs will always have at least one attribute
362 * sitting in them, even if it's padding.
364 if (c
->prog_data
.urb_read_length
== 0)
365 c
->prog_data
.urb_read_length
= 1;
367 /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
368 * them to fit the biggest thing they need to.
370 attributes_in_vue
= MAX2(c
->nr_outputs
, c
->nr_inputs
);
372 /* See emit_vertex_write() for where the VUE's overhead on top of the
373 * attributes comes from.
376 c
->prog_data
.urb_entry_size
= (attributes_in_vue
+ 2 + 7) / 8;
377 else if (intel
->gen
== 5)
378 c
->prog_data
.urb_entry_size
= (attributes_in_vue
+ 6 + 3) / 4;
380 c
->prog_data
.urb_entry_size
= (attributes_in_vue
+ 2 + 3) / 4;
382 c
->prog_data
.total_grf
= reg
;
384 if (INTEL_DEBUG
& DEBUG_VS
) {
385 printf("%s NumAddrRegs %d\n", __FUNCTION__
, c
->vp
->program
.Base
.NumAddressRegs
);
386 printf("%s NumTemps %d\n", __FUNCTION__
, c
->vp
->program
.Base
.NumTemporaries
);
387 printf("%s reg = %d\n", __FUNCTION__
, reg
);
393 * If an instruction uses a temp reg both as a src and the dest, we
394 * sometimes need to allocate an intermediate temporary.
396 static void unalias1( struct brw_vs_compile
*c
,
399 void (*func
)( struct brw_vs_compile
*,
403 if (dst
.file
== arg0
.file
&& dst
.nr
== arg0
.nr
) {
404 struct brw_compile
*p
= &c
->func
;
405 struct brw_reg tmp
= brw_writemask(get_tmp(c
), dst
.dw1
.bits
.writemask
);
407 brw_MOV(p
, dst
, tmp
);
417 * Checks if a 2-operand instruction needs an intermediate temporary.
419 static void unalias2( struct brw_vs_compile
*c
,
423 void (*func
)( struct brw_vs_compile
*,
428 if ((dst
.file
== arg0
.file
&& dst
.nr
== arg0
.nr
) ||
429 (dst
.file
== arg1
.file
&& dst
.nr
== arg1
.nr
)) {
430 struct brw_compile
*p
= &c
->func
;
431 struct brw_reg tmp
= brw_writemask(get_tmp(c
), dst
.dw1
.bits
.writemask
);
432 func(c
, tmp
, arg0
, arg1
);
433 brw_MOV(p
, dst
, tmp
);
437 func(c
, dst
, arg0
, arg1
);
443 * Checks if a 3-operand instruction needs an intermediate temporary.
445 static void unalias3( struct brw_vs_compile
*c
,
450 void (*func
)( struct brw_vs_compile
*,
456 if ((dst
.file
== arg0
.file
&& dst
.nr
== arg0
.nr
) ||
457 (dst
.file
== arg1
.file
&& dst
.nr
== arg1
.nr
) ||
458 (dst
.file
== arg2
.file
&& dst
.nr
== arg2
.nr
)) {
459 struct brw_compile
*p
= &c
->func
;
460 struct brw_reg tmp
= brw_writemask(get_tmp(c
), dst
.dw1
.bits
.writemask
);
461 func(c
, tmp
, arg0
, arg1
, arg2
);
462 brw_MOV(p
, dst
, tmp
);
466 func(c
, dst
, arg0
, arg1
, arg2
);
470 static void emit_sop( struct brw_vs_compile
*c
,
476 struct brw_compile
*p
= &c
->func
;
478 brw_MOV(p
, dst
, brw_imm_f(0.0f
));
479 brw_CMP(p
, brw_null_reg(), cond
, arg0
, arg1
);
480 brw_MOV(p
, dst
, brw_imm_f(1.0f
));
481 brw_set_predicate_control_flag_value(p
, 0xff);
484 static void emit_seq( struct brw_vs_compile
*c
,
487 struct brw_reg arg1
)
489 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_EQ
);
492 static void emit_sne( struct brw_vs_compile
*c
,
495 struct brw_reg arg1
)
497 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_NEQ
);
499 static void emit_slt( struct brw_vs_compile
*c
,
502 struct brw_reg arg1
)
504 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_L
);
507 static void emit_sle( struct brw_vs_compile
*c
,
510 struct brw_reg arg1
)
512 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_LE
);
515 static void emit_sgt( struct brw_vs_compile
*c
,
518 struct brw_reg arg1
)
520 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_G
);
523 static void emit_sge( struct brw_vs_compile
*c
,
526 struct brw_reg arg1
)
528 emit_sop(c
, dst
, arg0
, arg1
, BRW_CONDITIONAL_GE
);
531 static void emit_cmp( struct brw_compile
*p
,
535 struct brw_reg arg2
)
537 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_L
, arg0
, brw_imm_f(0));
538 brw_SEL(p
, dst
, arg1
, arg2
);
539 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
542 static void emit_sign(struct brw_vs_compile
*c
,
546 struct brw_compile
*p
= &c
->func
;
548 brw_MOV(p
, dst
, brw_imm_f(0));
550 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_L
, arg0
, brw_imm_f(0));
551 brw_MOV(p
, dst
, brw_imm_f(-1.0));
552 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
554 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_G
, arg0
, brw_imm_f(0));
555 brw_MOV(p
, dst
, brw_imm_f(1.0));
556 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
559 static void emit_max( struct brw_compile
*p
,
562 struct brw_reg arg1
)
564 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_GE
, arg0
, arg1
);
565 brw_SEL(p
, dst
, arg0
, arg1
);
566 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
569 static void emit_min( struct brw_compile
*p
,
572 struct brw_reg arg1
)
574 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_L
, arg0
, arg1
);
575 brw_SEL(p
, dst
, arg0
, arg1
);
576 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
580 static void emit_math1( struct brw_vs_compile
*c
,
586 /* There are various odd behaviours with SEND on the simulator. In
587 * addition there are documented issues with the fact that the GEN4
588 * processor doesn't do dependency control properly on SEND
589 * results. So, on balance, this kludge to get around failures
590 * with writemasked math results looks like it might be necessary
591 * whether that turns out to be a simulator bug or not:
593 struct brw_compile
*p
= &c
->func
;
594 struct intel_context
*intel
= &p
->brw
->intel
;
595 struct brw_reg tmp
= dst
;
596 GLboolean need_tmp
= (intel
->gen
< 6 &&
597 (dst
.dw1
.bits
.writemask
!= 0xf ||
598 dst
.file
!= BRW_GENERAL_REGISTER_FILE
));
606 BRW_MATH_SATURATE_NONE
,
609 BRW_MATH_DATA_SCALAR
,
613 brw_MOV(p
, dst
, tmp
);
619 static void emit_math2( struct brw_vs_compile
*c
,
626 struct brw_compile
*p
= &c
->func
;
627 struct intel_context
*intel
= &p
->brw
->intel
;
628 struct brw_reg tmp
= dst
;
629 GLboolean need_tmp
= (intel
->gen
< 6 &&
630 (dst
.dw1
.bits
.writemask
!= 0xf ||
631 dst
.file
!= BRW_GENERAL_REGISTER_FILE
));
636 brw_MOV(p
, brw_message_reg(3), arg1
);
641 BRW_MATH_SATURATE_NONE
,
644 BRW_MATH_DATA_SCALAR
,
648 brw_MOV(p
, dst
, tmp
);
654 static void emit_exp_noalias( struct brw_vs_compile
*c
,
656 struct brw_reg arg0
)
658 struct brw_compile
*p
= &c
->func
;
661 if (dst
.dw1
.bits
.writemask
& WRITEMASK_X
) {
662 struct brw_reg tmp
= get_tmp(c
);
663 struct brw_reg tmp_d
= retype(tmp
, BRW_REGISTER_TYPE_D
);
665 /* tmp_d = floor(arg0.x) */
666 brw_RNDD(p
, tmp_d
, brw_swizzle1(arg0
, 0));
668 /* result[0] = 2.0 ^ tmp */
670 /* Adjust exponent for floating point:
673 brw_ADD(p
, brw_writemask(tmp_d
, WRITEMASK_X
), tmp_d
, brw_imm_d(127));
675 /* Install exponent and sign.
676 * Excess drops off the edge:
678 brw_SHL(p
, brw_writemask(retype(dst
, BRW_REGISTER_TYPE_D
), WRITEMASK_X
),
679 tmp_d
, brw_imm_d(23));
684 if (dst
.dw1
.bits
.writemask
& WRITEMASK_Y
) {
685 /* result[1] = arg0.x - floor(arg0.x) */
686 brw_FRC(p
, brw_writemask(dst
, WRITEMASK_Y
), brw_swizzle1(arg0
, 0));
689 if (dst
.dw1
.bits
.writemask
& WRITEMASK_Z
) {
690 /* As with the LOG instruction, we might be better off just
691 * doing a taylor expansion here, seeing as we have to do all
694 * If mathbox partial precision is too low, consider also:
695 * result[3] = result[0] * EXP(result[1])
698 BRW_MATH_FUNCTION_EXP
,
699 brw_writemask(dst
, WRITEMASK_Z
),
700 brw_swizzle1(arg0
, 0),
701 BRW_MATH_PRECISION_FULL
);
704 if (dst
.dw1
.bits
.writemask
& WRITEMASK_W
) {
705 /* result[3] = 1.0; */
706 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_W
), brw_imm_f(1));
711 static void emit_log_noalias( struct brw_vs_compile
*c
,
713 struct brw_reg arg0
)
715 struct brw_compile
*p
= &c
->func
;
716 struct brw_reg tmp
= dst
;
717 struct brw_reg tmp_ud
= retype(tmp
, BRW_REGISTER_TYPE_UD
);
718 struct brw_reg arg0_ud
= retype(arg0
, BRW_REGISTER_TYPE_UD
);
719 GLboolean need_tmp
= (dst
.dw1
.bits
.writemask
!= 0xf ||
720 dst
.file
!= BRW_GENERAL_REGISTER_FILE
);
724 tmp_ud
= retype(tmp
, BRW_REGISTER_TYPE_UD
);
727 /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mnt
730 * These almost look like they could be joined up, but not really
733 * result[0].f = (x.i & ((1<<31)-1) >> 23) - 127
734 * result[1].i = (x.i & ((1<<23)-1) + (127<<23)
736 if (dst
.dw1
.bits
.writemask
& WRITEMASK_XZ
) {
738 brw_writemask(tmp_ud
, WRITEMASK_X
),
739 brw_swizzle1(arg0_ud
, 0),
740 brw_imm_ud((1U<<31)-1));
743 brw_writemask(tmp_ud
, WRITEMASK_X
),
748 brw_writemask(tmp
, WRITEMASK_X
),
749 retype(tmp_ud
, BRW_REGISTER_TYPE_D
), /* does it matter? */
753 if (dst
.dw1
.bits
.writemask
& WRITEMASK_YZ
) {
755 brw_writemask(tmp_ud
, WRITEMASK_Y
),
756 brw_swizzle1(arg0_ud
, 0),
757 brw_imm_ud((1<<23)-1));
760 brw_writemask(tmp_ud
, WRITEMASK_Y
),
762 brw_imm_ud(127<<23));
765 if (dst
.dw1
.bits
.writemask
& WRITEMASK_Z
) {
766 /* result[2] = result[0] + LOG2(result[1]); */
768 /* Why bother? The above is just a hint how to do this with a
769 * taylor series. Maybe we *should* use a taylor series as by
770 * the time all the above has been done it's almost certainly
771 * quicker than calling the mathbox, even with low precision.
774 * - result[0] + mathbox.LOG2(result[1])
775 * - mathbox.LOG2(arg0.x)
776 * - result[0] + inline_taylor_approx(result[1])
779 BRW_MATH_FUNCTION_LOG
,
780 brw_writemask(tmp
, WRITEMASK_Z
),
781 brw_swizzle1(tmp
, 1),
782 BRW_MATH_PRECISION_FULL
);
785 brw_writemask(tmp
, WRITEMASK_Z
),
786 brw_swizzle1(tmp
, 2),
787 brw_swizzle1(tmp
, 0));
790 if (dst
.dw1
.bits
.writemask
& WRITEMASK_W
) {
791 /* result[3] = 1.0; */
792 brw_MOV(p
, brw_writemask(tmp
, WRITEMASK_W
), brw_imm_f(1));
796 brw_MOV(p
, dst
, tmp
);
802 /* Need to unalias - consider swizzles: r0 = DST r0.xxxx r1
804 static void emit_dst_noalias( struct brw_vs_compile
*c
,
809 struct brw_compile
*p
= &c
->func
;
811 /* There must be a better way to do this:
813 if (dst
.dw1
.bits
.writemask
& WRITEMASK_X
)
814 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_X
), brw_imm_f(1.0));
815 if (dst
.dw1
.bits
.writemask
& WRITEMASK_Y
)
816 brw_MUL(p
, brw_writemask(dst
, WRITEMASK_Y
), arg0
, arg1
);
817 if (dst
.dw1
.bits
.writemask
& WRITEMASK_Z
)
818 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_Z
), arg0
);
819 if (dst
.dw1
.bits
.writemask
& WRITEMASK_W
)
820 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_W
), arg1
);
824 static void emit_xpd( struct brw_compile
*p
,
829 brw_MUL(p
, brw_null_reg(), brw_swizzle(t
, 1,2,0,3), brw_swizzle(u
,2,0,1,3));
830 brw_MAC(p
, dst
, negate(brw_swizzle(t
, 2,0,1,3)), brw_swizzle(u
,1,2,0,3));
834 static void emit_lit_noalias( struct brw_vs_compile
*c
,
836 struct brw_reg arg0
)
838 struct brw_compile
*p
= &c
->func
;
839 struct brw_instruction
*if_insn
;
840 struct brw_reg tmp
= dst
;
841 GLboolean need_tmp
= (dst
.file
!= BRW_GENERAL_REGISTER_FILE
);
846 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_YZ
), brw_imm_f(0));
847 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_XW
), brw_imm_f(1));
849 /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
850 * to get all channels active inside the IF. In the clipping code
851 * we run with NoMask, so it's not an option and we can use
852 * BRW_EXECUTE_1 for all comparisons.
854 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_G
, brw_swizzle1(arg0
,0), brw_imm_f(0));
855 if_insn
= brw_IF(p
, BRW_EXECUTE_8
);
857 brw_MOV(p
, brw_writemask(dst
, WRITEMASK_Y
), brw_swizzle1(arg0
,0));
859 brw_CMP(p
, brw_null_reg(), BRW_CONDITIONAL_G
, brw_swizzle1(arg0
,1), brw_imm_f(0));
860 brw_MOV(p
, brw_writemask(tmp
, WRITEMASK_Z
), brw_swizzle1(arg0
,1));
861 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
864 BRW_MATH_FUNCTION_POW
,
865 brw_writemask(dst
, WRITEMASK_Z
),
866 brw_swizzle1(tmp
, 2),
867 brw_swizzle1(arg0
, 3),
868 BRW_MATH_PRECISION_PARTIAL
);
871 brw_ENDIF(p
, if_insn
);
876 static void emit_lrp_noalias(struct brw_vs_compile
*c
,
882 struct brw_compile
*p
= &c
->func
;
884 brw_ADD(p
, dst
, negate(arg0
), brw_imm_f(1.0));
885 brw_MUL(p
, brw_null_reg(), dst
, arg2
);
886 brw_MAC(p
, dst
, arg0
, arg1
);
889 /** 3 or 4-component vector normalization */
890 static void emit_nrm( struct brw_vs_compile
*c
,
895 struct brw_compile
*p
= &c
->func
;
896 struct brw_reg tmp
= get_tmp(c
);
898 /* tmp = dot(arg0, arg0) */
900 brw_DP3(p
, tmp
, arg0
, arg0
);
902 brw_DP4(p
, tmp
, arg0
, arg0
);
904 /* tmp = 1 / sqrt(tmp) */
905 emit_math1(c
, BRW_MATH_FUNCTION_RSQ
, tmp
, tmp
, BRW_MATH_PRECISION_FULL
);
907 /* dst = arg0 * tmp */
908 brw_MUL(p
, dst
, arg0
, tmp
);
914 static struct brw_reg
915 get_constant(struct brw_vs_compile
*c
,
916 const struct prog_instruction
*inst
,
919 const struct prog_src_register
*src
= &inst
->SrcReg
[argIndex
];
920 struct brw_compile
*p
= &c
->func
;
921 struct brw_reg const_reg
= c
->current_const
[argIndex
].reg
;
923 assert(argIndex
< 3);
925 if (c
->current_const
[argIndex
].index
!= src
->Index
) {
926 /* Keep track of the last constant loaded in this slot, for reuse. */
927 c
->current_const
[argIndex
].index
= src
->Index
;
930 printf(" fetch const[%d] for arg %d into reg %d\n",
931 src
->Index
, argIndex
, c
->current_const
[argIndex
].reg
.nr
);
933 /* need to fetch the constant now */
935 const_reg
, /* writeback dest */
936 16 * src
->Index
, /* byte offset */
937 SURF_INDEX_VERT_CONST_BUFFER
/* binding table index */
941 /* replicate lower four floats into upper half (to get XYZWXYZW) */
942 const_reg
= stride(const_reg
, 0, 4, 0);
948 static struct brw_reg
949 get_reladdr_constant(struct brw_vs_compile
*c
,
950 const struct prog_instruction
*inst
,
953 const struct prog_src_register
*src
= &inst
->SrcReg
[argIndex
];
954 struct brw_compile
*p
= &c
->func
;
955 struct brw_reg const_reg
= c
->current_const
[argIndex
].reg
;
956 struct brw_reg addrReg
= c
->regs
[PROGRAM_ADDRESS
][0];
957 struct brw_reg byte_addr_reg
= retype(get_tmp(c
), BRW_REGISTER_TYPE_D
);
959 assert(argIndex
< 3);
961 /* Can't reuse a reladdr constant load. */
962 c
->current_const
[argIndex
].index
= -1;
965 printf(" fetch const[a0.x+%d] for arg %d into reg %d\n",
966 src
->Index
, argIndex
, c
->current_const
[argIndex
].reg
.nr
);
969 brw_MUL(p
, byte_addr_reg
, addrReg
, brw_imm_ud(16));
971 /* fetch the first vec4 */
972 brw_dp_READ_4_vs_relative(p
,
973 const_reg
, /* writeback dest */
974 byte_addr_reg
, /* address register */
975 16 * src
->Index
, /* byte offset */
976 SURF_INDEX_VERT_CONST_BUFFER
/* binding table index */
984 /* TODO: relative addressing!
986 static struct brw_reg
get_reg( struct brw_vs_compile
*c
,
987 gl_register_file file
,
991 case PROGRAM_TEMPORARY
:
994 assert(c
->regs
[file
][index
].nr
!= 0);
995 return c
->regs
[file
][index
];
996 case PROGRAM_STATE_VAR
:
997 case PROGRAM_CONSTANT
:
998 case PROGRAM_UNIFORM
:
999 assert(c
->regs
[PROGRAM_STATE_VAR
][index
].nr
!= 0);
1000 return c
->regs
[PROGRAM_STATE_VAR
][index
];
1001 case PROGRAM_ADDRESS
:
1003 return c
->regs
[file
][index
];
1005 case PROGRAM_UNDEFINED
: /* undef values */
1006 return brw_null_reg();
1008 case PROGRAM_LOCAL_PARAM
:
1009 case PROGRAM_ENV_PARAM
:
1010 case PROGRAM_WRITE_ONLY
:
1013 return brw_null_reg();
1019 * Indirect addressing: get reg[[arg] + offset].
1021 static struct brw_reg
deref( struct brw_vs_compile
*c
,
1026 struct brw_compile
*p
= &c
->func
;
1027 struct brw_reg tmp
= get_tmp(c
);
1028 struct brw_reg addr_reg
= c
->regs
[PROGRAM_ADDRESS
][0];
1029 struct brw_reg vp_address
= retype(vec1(addr_reg
), BRW_REGISTER_TYPE_D
);
1030 GLuint byte_offset
= arg
.nr
* 32 + arg
.subnr
+ offset
* reg_size
;
1031 struct brw_reg indirect
= brw_vec4_indirect(0,0);
1032 struct brw_reg acc
= retype(vec1(get_tmp(c
)), BRW_REGISTER_TYPE_UW
);
1034 /* Set the vertical stride on the register access so that the first
1035 * 4 components come from a0.0 and the second 4 from a0.1.
1037 indirect
.vstride
= BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL
;
1040 brw_push_insn_state(p
);
1041 brw_set_access_mode(p
, BRW_ALIGN_1
);
1043 brw_MUL(p
, acc
, vp_address
, brw_imm_uw(reg_size
));
1044 brw_ADD(p
, brw_address_reg(0), acc
, brw_imm_uw(byte_offset
));
1046 brw_MUL(p
, acc
, suboffset(vp_address
, 4), brw_imm_uw(reg_size
));
1047 brw_ADD(p
, brw_address_reg(1), acc
, brw_imm_uw(byte_offset
));
1049 brw_MOV(p
, tmp
, indirect
);
1051 brw_pop_insn_state(p
);
1054 /* NOTE: tmp not released */
1059 move_to_reladdr_dst(struct brw_vs_compile
*c
,
1060 const struct prog_instruction
*inst
,
1063 struct brw_compile
*p
= &c
->func
;
1065 struct brw_reg addr_reg
= c
->regs
[PROGRAM_ADDRESS
][0];
1066 struct brw_reg vp_address
= retype(vec1(addr_reg
), BRW_REGISTER_TYPE_D
);
1067 struct brw_reg base
= c
->regs
[inst
->DstReg
.File
][inst
->DstReg
.Index
];
1068 GLuint byte_offset
= base
.nr
* 32 + base
.subnr
;
1069 struct brw_reg indirect
= brw_vec4_indirect(0,0);
1070 struct brw_reg acc
= retype(vec1(get_tmp(c
)), BRW_REGISTER_TYPE_UW
);
1072 /* Because destination register indirect addressing can only use
1073 * one index, we'll write each vertex's vec4 value separately.
1075 val
.width
= BRW_WIDTH_4
;
1076 val
.vstride
= BRW_VERTICAL_STRIDE_4
;
1078 brw_push_insn_state(p
);
1079 brw_set_access_mode(p
, BRW_ALIGN_1
);
1081 brw_MUL(p
, acc
, vp_address
, brw_imm_uw(reg_size
));
1082 brw_ADD(p
, brw_address_reg(0), acc
, brw_imm_uw(byte_offset
));
1083 brw_MOV(p
, indirect
, val
);
1085 brw_MUL(p
, acc
, suboffset(vp_address
, 4), brw_imm_uw(reg_size
));
1086 brw_ADD(p
, brw_address_reg(0), acc
,
1087 brw_imm_uw(byte_offset
+ reg_size
/ 2));
1088 brw_MOV(p
, indirect
, suboffset(val
, 4));
1090 brw_pop_insn_state(p
);
1094 * Get brw reg corresponding to the instruction's [argIndex] src reg.
1095 * TODO: relative addressing!
1097 static struct brw_reg
1098 get_src_reg( struct brw_vs_compile
*c
,
1099 const struct prog_instruction
*inst
,
1102 const GLuint file
= inst
->SrcReg
[argIndex
].File
;
1103 const GLint index
= inst
->SrcReg
[argIndex
].Index
;
1104 const GLboolean relAddr
= inst
->SrcReg
[argIndex
].RelAddr
;
1106 if (brw_vs_arg_can_be_immediate(inst
->Opcode
, argIndex
)) {
1107 const struct prog_src_register
*src
= &inst
->SrcReg
[argIndex
];
1109 if (src
->Swizzle
== MAKE_SWIZZLE4(SWIZZLE_ZERO
,
1113 return brw_imm_f(0.0f
);
1114 } else if (src
->Swizzle
== MAKE_SWIZZLE4(SWIZZLE_ONE
,
1119 return brw_imm_f(-1.0F
);
1121 return brw_imm_f(1.0F
);
1122 } else if (src
->File
== PROGRAM_CONSTANT
) {
1123 const struct gl_program_parameter_list
*params
;
1127 switch (src
->Swizzle
) {
1142 if (component
>= 0) {
1143 params
= c
->vp
->program
.Base
.Parameters
;
1144 f
= params
->ParameterValues
[src
->Index
][component
];
1150 return brw_imm_f(f
);
1156 case PROGRAM_TEMPORARY
:
1158 case PROGRAM_OUTPUT
:
1160 return deref(c
, c
->regs
[file
][0], index
, 32);
1163 assert(c
->regs
[file
][index
].nr
!= 0);
1164 return c
->regs
[file
][index
];
1167 case PROGRAM_STATE_VAR
:
1168 case PROGRAM_CONSTANT
:
1169 case PROGRAM_UNIFORM
:
1170 case PROGRAM_ENV_PARAM
:
1171 case PROGRAM_LOCAL_PARAM
:
1172 if (c
->vp
->use_const_buffer
) {
1173 if (!relAddr
&& c
->constant_map
[index
] != -1) {
1174 assert(c
->regs
[PROGRAM_STATE_VAR
][c
->constant_map
[index
]].nr
!= 0);
1175 return c
->regs
[PROGRAM_STATE_VAR
][c
->constant_map
[index
]];
1177 return get_reladdr_constant(c
, inst
, argIndex
);
1179 return get_constant(c
, inst
, argIndex
);
1182 return deref(c
, c
->regs
[PROGRAM_STATE_VAR
][0], index
, 16);
1185 assert(c
->regs
[PROGRAM_STATE_VAR
][index
].nr
!= 0);
1186 return c
->regs
[PROGRAM_STATE_VAR
][index
];
1188 case PROGRAM_ADDRESS
:
1190 return c
->regs
[file
][index
];
1192 case PROGRAM_UNDEFINED
:
1193 /* this is a normal case since we loop over all three src args */
1194 return brw_null_reg();
1196 case PROGRAM_WRITE_ONLY
:
1199 return brw_null_reg();
1204 * Return the brw reg for the given instruction's src argument.
1205 * Will return mangled results for SWZ op. The emit_swz() function
1206 * ignores this result and recalculates taking extended swizzles into
1209 static struct brw_reg
get_arg( struct brw_vs_compile
*c
,
1210 const struct prog_instruction
*inst
,
1213 const struct prog_src_register
*src
= &inst
->SrcReg
[argIndex
];
1216 if (src
->File
== PROGRAM_UNDEFINED
)
1217 return brw_null_reg();
1219 reg
= get_src_reg(c
, inst
, argIndex
);
1221 /* Convert 3-bit swizzle to 2-bit.
1223 if (reg
.file
!= BRW_IMMEDIATE_VALUE
) {
1224 reg
.dw1
.bits
.swizzle
= BRW_SWIZZLE4(GET_SWZ(src
->Swizzle
, 0),
1225 GET_SWZ(src
->Swizzle
, 1),
1226 GET_SWZ(src
->Swizzle
, 2),
1227 GET_SWZ(src
->Swizzle
, 3));
1230 /* Note this is ok for non-swizzle instructions:
1232 reg
.negate
= src
->Negate
? 1 : 0;
1239 * Get brw register for the given program dest register.
1241 static struct brw_reg
get_dst( struct brw_vs_compile
*c
,
1242 struct prog_dst_register dst
)
1247 case PROGRAM_TEMPORARY
:
1248 case PROGRAM_OUTPUT
:
1249 /* register-indirect addressing is only 1x1, not VxH, for
1250 * destination regs. So, for RelAddr we'll return a temporary
1251 * for the dest and do a move of the result to the RelAddr
1252 * register after the instruction emit.
1257 assert(c
->regs
[dst
.File
][dst
.Index
].nr
!= 0);
1258 reg
= c
->regs
[dst
.File
][dst
.Index
];
1261 case PROGRAM_ADDRESS
:
1262 assert(dst
.Index
== 0);
1263 reg
= c
->regs
[dst
.File
][dst
.Index
];
1265 case PROGRAM_UNDEFINED
:
1266 /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
1267 reg
= brw_null_reg();
1271 reg
= brw_null_reg();
1274 assert(reg
.type
!= BRW_IMMEDIATE_VALUE
);
1275 reg
.dw1
.bits
.writemask
= dst
.WriteMask
;
1281 static void emit_swz( struct brw_vs_compile
*c
,
1283 const struct prog_instruction
*inst
)
1285 const GLuint argIndex
= 0;
1286 const struct prog_src_register src
= inst
->SrcReg
[argIndex
];
1287 struct brw_compile
*p
= &c
->func
;
1288 GLuint zeros_mask
= 0;
1289 GLuint ones_mask
= 0;
1290 GLuint src_mask
= 0;
1292 GLboolean need_tmp
= (src
.Negate
&&
1293 dst
.file
!= BRW_GENERAL_REGISTER_FILE
);
1294 struct brw_reg tmp
= dst
;
1300 for (i
= 0; i
< 4; i
++) {
1301 if (dst
.dw1
.bits
.writemask
& (1<<i
)) {
1302 GLubyte s
= GET_SWZ(src
.Swizzle
, i
);
1321 /* Do src first, in case dst aliases src:
1324 struct brw_reg arg0
;
1326 arg0
= get_src_reg(c
, inst
, argIndex
);
1328 arg0
= brw_swizzle(arg0
,
1329 src_swz
[0], src_swz
[1],
1330 src_swz
[2], src_swz
[3]);
1332 brw_MOV(p
, brw_writemask(tmp
, src_mask
), arg0
);
1336 brw_MOV(p
, brw_writemask(tmp
, zeros_mask
), brw_imm_f(0));
1339 brw_MOV(p
, brw_writemask(tmp
, ones_mask
), brw_imm_f(1));
1342 brw_MOV(p
, brw_writemask(tmp
, src
.Negate
), negate(tmp
));
1345 brw_MOV(p
, dst
, tmp
);
1346 release_tmp(c
, tmp
);
1352 * Post-vertex-program processing. Send the results to the URB.
1354 static void emit_vertex_write( struct brw_vs_compile
*c
)
1356 struct brw_compile
*p
= &c
->func
;
1357 struct brw_context
*brw
= p
->brw
;
1358 struct intel_context
*intel
= &brw
->intel
;
1359 struct brw_reg pos
= c
->regs
[PROGRAM_OUTPUT
][VERT_RESULT_HPOS
];
1362 GLuint len_vertex_header
= 2;
1365 if (c
->key
.copy_edgeflag
) {
1367 get_reg(c
, PROGRAM_OUTPUT
, VERT_RESULT_EDGE
),
1368 get_reg(c
, PROGRAM_INPUT
, VERT_ATTRIB_EDGEFLAG
));
1371 if (intel
->gen
< 6) {
1372 /* Build ndc coords */
1374 /* ndc = 1.0 / pos.w */
1375 emit_math1(c
, BRW_MATH_FUNCTION_INV
, ndc
, brw_swizzle1(pos
, 3), BRW_MATH_PRECISION_FULL
);
1376 /* ndc.xyz = pos * ndc */
1377 brw_MUL(p
, brw_writemask(ndc
, WRITEMASK_XYZ
), pos
, ndc
);
1380 /* Update the header for point size, user clipping flags, and -ve rhw
1383 if ((c
->prog_data
.outputs_written
& BITFIELD64_BIT(VERT_RESULT_PSIZ
)) ||
1384 c
->key
.nr_userclip
|| brw
->has_negative_rhw_bug
)
1386 struct brw_reg header1
= retype(get_tmp(c
), BRW_REGISTER_TYPE_UD
);
1389 brw_MOV(p
, header1
, brw_imm_ud(0));
1391 brw_set_access_mode(p
, BRW_ALIGN_16
);
1393 if (c
->prog_data
.outputs_written
& BITFIELD64_BIT(VERT_RESULT_PSIZ
)) {
1394 struct brw_reg psiz
= c
->regs
[PROGRAM_OUTPUT
][VERT_RESULT_PSIZ
];
1395 if (intel
->gen
< 6) {
1396 brw_MUL(p
, brw_writemask(header1
, WRITEMASK_W
), brw_swizzle1(psiz
, 0), brw_imm_f(1<<11));
1397 brw_AND(p
, brw_writemask(header1
, WRITEMASK_W
), header1
, brw_imm_ud(0x7ff<<8));
1399 brw_MOV(p
, brw_writemask(header1
, WRITEMASK_W
), brw_swizzle1(psiz
, 0));
1402 for (i
= 0; i
< c
->key
.nr_userclip
; i
++) {
1403 brw_set_conditionalmod(p
, BRW_CONDITIONAL_L
);
1404 brw_DP4(p
, brw_null_reg(), pos
, c
->userplane
[i
]);
1405 brw_OR(p
, brw_writemask(header1
, WRITEMASK_W
), header1
, brw_imm_ud(1<<i
));
1406 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
1409 /* i965 clipping workaround:
1410 * 1) Test for -ve rhw
1412 * set ndc = (0,0,0,0)
1415 * Later, clipping will detect ucp[6] and ensure the primitive is
1416 * clipped against all fixed planes.
1418 if (brw
->has_negative_rhw_bug
) {
1420 vec8(brw_null_reg()),
1422 brw_swizzle1(ndc
, 3),
1425 brw_OR(p
, brw_writemask(header1
, WRITEMASK_W
), header1
, brw_imm_ud(1<<6));
1426 brw_MOV(p
, ndc
, brw_imm_f(0));
1427 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
1430 brw_set_access_mode(p
, BRW_ALIGN_1
); /* why? */
1431 brw_MOV(p
, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD
), header1
);
1432 brw_set_access_mode(p
, BRW_ALIGN_16
);
1434 release_tmp(c
, header1
);
1437 brw_MOV(p
, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD
), brw_imm_ud(0));
1440 /* Emit the (interleaved) headers for the two vertices - an 8-reg
1441 * of zeros followed by two sets of NDC coordinates:
1443 brw_set_access_mode(p
, BRW_ALIGN_1
);
1444 brw_set_acc_write_control(p
, 0);
1446 /* The VUE layout is documented in Volume 2a. */
1447 if (intel
->gen
>= 6) {
1448 /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
1449 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1450 * dword 4-7 (m2) is the 4D space position
1451 * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
1452 * enabled. We don't use it, so skip it.
1453 * m3 is the first vertex element data we fill, which is the vertex
1456 brw_MOV(p
, brw_message_reg(2), pos
);
1457 len_vertex_header
= 1;
1458 } else if (intel
->gen
== 5) {
1459 /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
1460 * dword 0-3 (m1) of the header is indices, point width, clip flags.
1461 * dword 4-7 (m2) is the ndc position (set above)
1462 * dword 8-11 (m3) of the vertex header is the 4D space position
1463 * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
1464 * m6 is a pad so that the vertex element data is aligned
1465 * m7 is the first vertex data we fill, which is the vertex position.
1467 brw_MOV(p
, brw_message_reg(2), ndc
);
1468 brw_MOV(p
, brw_message_reg(3), pos
);
1469 brw_MOV(p
, brw_message_reg(7), pos
);
1470 len_vertex_header
= 6;
1472 /* There are 8 dwords in VUE header pre-Ironlake:
1473 * dword 0-3 (m1) is indices, point width, clip flags.
1474 * dword 4-7 (m2) is ndc position (set above)
1476 * dword 8-11 (m3) is the first vertex data, which we always have be the
1479 brw_MOV(p
, brw_message_reg(2), ndc
);
1480 brw_MOV(p
, brw_message_reg(3), pos
);
1481 len_vertex_header
= 2;
1484 /* Move variable-addressed, non-overflow outputs to their MRFs. */
1485 next_mrf
= 2 + len_vertex_header
;
1486 for (i
= 0; i
< VERT_RESULT_MAX
; i
++) {
1487 if (c
->first_overflow_output
> 0 && i
>= c
->first_overflow_output
)
1489 if (!(c
->prog_data
.outputs_written
& BITFIELD64_BIT(i
)))
1492 if (i
>= VERT_RESULT_TEX0
&&
1493 c
->regs
[PROGRAM_OUTPUT
][i
].file
== BRW_GENERAL_REGISTER_FILE
) {
1494 brw_MOV(p
, brw_message_reg(next_mrf
), c
->regs
[PROGRAM_OUTPUT
][i
]);
1496 } else if (c
->regs
[PROGRAM_OUTPUT
][i
].file
== BRW_MESSAGE_REGISTER_FILE
) {
1497 next_mrf
= c
->regs
[PROGRAM_OUTPUT
][i
].nr
+ 1;
1501 eot
= (c
->first_overflow_output
== 0);
1504 brw_null_reg(), /* dest */
1505 0, /* starting mrf reg nr */
1509 MIN2(c
->nr_outputs
+ 1 + len_vertex_header
, (BRW_MAX_MRF
-1)), /* msg len */
1510 0, /* response len */
1512 eot
, /* writes complete */
1513 0, /* urb destination offset */
1514 BRW_URB_SWIZZLE_INTERLEAVE
);
1516 if (c
->first_overflow_output
> 0) {
1517 /* Not all of the vertex outputs/results fit into the MRF.
1518 * Move the overflowed attributes from the GRF to the MRF and
1519 * issue another brw_urb_WRITE().
1522 for (i
= c
->first_overflow_output
; i
< VERT_RESULT_MAX
; i
++) {
1523 if (c
->prog_data
.outputs_written
& BITFIELD64_BIT(i
)) {
1524 /* move from GRF to MRF */
1525 brw_MOV(p
, brw_message_reg(mrf
), c
->regs
[PROGRAM_OUTPUT
][i
]);
1531 brw_null_reg(), /* dest */
1532 0, /* starting mrf reg nr */
1537 0, /* response len */
1539 1, /* writes complete */
1540 14 / 2, /* urb destination offset */
1541 BRW_URB_SWIZZLE_INTERLEAVE
);
1546 accumulator_contains(struct brw_vs_compile
*c
, struct brw_reg val
)
1548 struct brw_compile
*p
= &c
->func
;
1549 struct brw_instruction
*prev_insn
= &p
->store
[p
->nr_insn
- 1];
1551 if (p
->nr_insn
== 0)
1554 if (val
.address_mode
!= BRW_ADDRESS_DIRECT
)
1557 switch (prev_insn
->header
.opcode
) {
1558 case BRW_OPCODE_MOV
:
1559 case BRW_OPCODE_MAC
:
1560 case BRW_OPCODE_MUL
:
1561 if (prev_insn
->header
.access_mode
== BRW_ALIGN_16
&&
1562 prev_insn
->header
.execution_size
== val
.width
&&
1563 prev_insn
->bits1
.da1
.dest_reg_file
== val
.file
&&
1564 prev_insn
->bits1
.da1
.dest_reg_type
== val
.type
&&
1565 prev_insn
->bits1
.da1
.dest_address_mode
== val
.address_mode
&&
1566 prev_insn
->bits1
.da1
.dest_reg_nr
== val
.nr
&&
1567 prev_insn
->bits1
.da16
.dest_subreg_nr
== val
.subnr
/ 16 &&
1568 prev_insn
->bits1
.da16
.dest_writemask
== 0xf)
1578 get_predicate(const struct prog_instruction
*inst
)
1580 if (inst
->DstReg
.CondMask
== COND_TR
)
1581 return BRW_PREDICATE_NONE
;
1583 /* All of GLSL only produces predicates for COND_NE and one channel per
1584 * vector. Fail badly if someone starts doing something else, as it might
1585 * mean infinite looping or something.
1587 * We'd like to support all the condition codes, but our hardware doesn't
1588 * quite match the Mesa IR, which is modeled after the NV extensions. For
1589 * those, the instruction may update the condition codes or not, then any
1590 * later instruction may use one of those condition codes. For gen4, the
1591 * instruction may update the flags register based on one of the condition
1592 * codes output by the instruction, and then further instructions may
1593 * predicate on that. We can probably support this, but it won't
1594 * necessarily be easy.
1596 assert(inst
->DstReg
.CondMask
== COND_NE
);
1598 switch (inst
->DstReg
.CondSwizzle
) {
1600 return BRW_PREDICATE_ALIGN16_REPLICATE_X
;
1602 return BRW_PREDICATE_ALIGN16_REPLICATE_Y
;
1604 return BRW_PREDICATE_ALIGN16_REPLICATE_Z
;
1606 return BRW_PREDICATE_ALIGN16_REPLICATE_W
;
1608 _mesa_problem(NULL
, "Unexpected predicate: 0x%08x\n",
1609 inst
->DstReg
.CondMask
);
1610 return BRW_PREDICATE_NORMAL
;
1614 /* Emit the vertex program instructions here.
1616 void brw_vs_emit(struct brw_vs_compile
*c
)
1618 #define MAX_IF_DEPTH 32
1619 #define MAX_LOOP_DEPTH 32
1620 struct brw_compile
*p
= &c
->func
;
1621 struct brw_context
*brw
= p
->brw
;
1622 struct intel_context
*intel
= &brw
->intel
;
1623 const GLuint nr_insns
= c
->vp
->program
.Base
.NumInstructions
;
1624 GLuint insn
, if_depth
= 0, loop_depth
= 0;
1625 struct brw_instruction
*if_inst
[MAX_IF_DEPTH
], *loop_inst
[MAX_LOOP_DEPTH
] = { 0 };
1626 int if_depth_in_loop
[MAX_LOOP_DEPTH
];
1627 const struct brw_indirect stack_index
= brw_indirect(0, 0);
1631 if (INTEL_DEBUG
& DEBUG_VS
) {
1632 printf("vs-mesa:\n");
1633 _mesa_fprint_program_opt(stdout
, &c
->vp
->program
.Base
, PROG_PRINT_DEBUG
,
1638 brw_set_compression_control(p
, BRW_COMPRESSION_NONE
);
1639 brw_set_access_mode(p
, BRW_ALIGN_16
);
1640 if_depth_in_loop
[loop_depth
] = 0;
1642 brw_set_acc_write_control(p
, 1);
1644 for (insn
= 0; insn
< nr_insns
; insn
++) {
1646 struct prog_instruction
*inst
= &c
->vp
->program
.Base
.Instructions
[insn
];
1648 /* Message registers can't be read, so copy the output into GRF
1649 * register if they are used in source registers
1651 for (i
= 0; i
< 3; i
++) {
1652 struct prog_src_register
*src
= &inst
->SrcReg
[i
];
1653 GLuint index
= src
->Index
;
1654 GLuint file
= src
->File
;
1655 if (file
== PROGRAM_OUTPUT
&& index
!= VERT_RESULT_HPOS
)
1656 c
->output_regs
[index
].used_in_src
= GL_TRUE
;
1659 switch (inst
->Opcode
) {
1662 c
->needs_stack
= GL_TRUE
;
1669 /* Static register allocation
1671 brw_vs_alloc_regs(c
);
1674 brw_MOV(p
, get_addr_reg(stack_index
), brw_address(c
->stack
));
1676 for (insn
= 0; insn
< nr_insns
; insn
++) {
1678 const struct prog_instruction
*inst
= &c
->vp
->program
.Base
.Instructions
[insn
];
1679 struct brw_reg args
[3], dst
;
1683 printf("%d: ", insn
);
1684 _mesa_print_instruction(inst
);
1687 /* Get argument regs. SWZ is special and does this itself.
1689 if (inst
->Opcode
!= OPCODE_SWZ
)
1690 for (i
= 0; i
< 3; i
++) {
1691 const struct prog_src_register
*src
= &inst
->SrcReg
[i
];
1694 if (file
== PROGRAM_OUTPUT
&& c
->output_regs
[index
].used_in_src
)
1695 args
[i
] = c
->output_regs
[index
].reg
;
1697 args
[i
] = get_arg(c
, inst
, i
);
1700 /* Get dest regs. Note that it is possible for a reg to be both
1701 * dst and arg, given the static allocation of registers. So
1702 * care needs to be taken emitting multi-operation instructions.
1704 index
= inst
->DstReg
.Index
;
1705 file
= inst
->DstReg
.File
;
1706 if (file
== PROGRAM_OUTPUT
&& c
->output_regs
[index
].used_in_src
)
1707 dst
= c
->output_regs
[index
].reg
;
1709 dst
= get_dst(c
, inst
->DstReg
);
1711 if (inst
->SaturateMode
!= SATURATE_OFF
) {
1712 _mesa_problem(NULL
, "Unsupported saturate %d in vertex shader",
1713 inst
->SaturateMode
);
1716 switch (inst
->Opcode
) {
1718 brw_MOV(p
, dst
, brw_abs(args
[0]));
1721 brw_ADD(p
, dst
, args
[0], args
[1]);
1724 emit_math1(c
, BRW_MATH_FUNCTION_COS
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1727 brw_DP2(p
, dst
, args
[0], args
[1]);
1730 brw_DP3(p
, dst
, args
[0], args
[1]);
1733 brw_DP4(p
, dst
, args
[0], args
[1]);
1736 brw_DPH(p
, dst
, args
[0], args
[1]);
1739 emit_nrm(c
, dst
, args
[0], 3);
1742 emit_nrm(c
, dst
, args
[0], 4);
1745 unalias2(c
, dst
, args
[0], args
[1], emit_dst_noalias
);
1748 unalias1(c
, dst
, args
[0], emit_exp_noalias
);
1751 emit_math1(c
, BRW_MATH_FUNCTION_EXP
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1754 brw_RNDD(p
, dst
, args
[0]);
1757 brw_RNDD(p
, dst
, args
[0]);
1760 brw_FRC(p
, dst
, args
[0]);
1763 unalias1(c
, dst
, args
[0], emit_log_noalias
);
1766 emit_math1(c
, BRW_MATH_FUNCTION_LOG
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1769 unalias1(c
, dst
, args
[0], emit_lit_noalias
);
1772 unalias3(c
, dst
, args
[0], args
[1], args
[2], emit_lrp_noalias
);
1775 if (!accumulator_contains(c
, args
[2]))
1776 brw_MOV(p
, brw_acc_reg(), args
[2]);
1777 brw_MAC(p
, dst
, args
[0], args
[1]);
1780 emit_cmp(p
, dst
, args
[0], args
[1], args
[2]);
1783 emit_max(p
, dst
, args
[0], args
[1]);
1786 emit_min(p
, dst
, args
[0], args
[1]);
1789 brw_MOV(p
, dst
, args
[0]);
1792 brw_MUL(p
, dst
, args
[0], args
[1]);
1795 emit_math2(c
, BRW_MATH_FUNCTION_POW
, dst
, args
[0], args
[1], BRW_MATH_PRECISION_FULL
);
1798 emit_math1(c
, BRW_MATH_FUNCTION_INV
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1801 emit_math1(c
, BRW_MATH_FUNCTION_RSQ
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1805 unalias2(c
, dst
, args
[0], args
[1], emit_seq
);
1808 emit_math1(c
, BRW_MATH_FUNCTION_SIN
, dst
, args
[0], BRW_MATH_PRECISION_FULL
);
1811 unalias2(c
, dst
, args
[0], args
[1], emit_sne
);
1814 unalias2(c
, dst
, args
[0], args
[1], emit_sge
);
1817 unalias2(c
, dst
, args
[0], args
[1], emit_sgt
);
1820 unalias2(c
, dst
, args
[0], args
[1], emit_slt
);
1823 unalias2(c
, dst
, args
[0], args
[1], emit_sle
);
1826 unalias1(c
, dst
, args
[0], emit_sign
);
1829 brw_ADD(p
, dst
, args
[0], negate(args
[1]));
1832 /* The args[0] value can't be used here as it won't have
1833 * correctly encoded the full swizzle:
1835 emit_swz(c
, dst
, inst
);
1838 /* round toward zero */
1839 brw_RNDZ(p
, dst
, args
[0]);
1842 emit_xpd(p
, dst
, args
[0], args
[1]);
1845 assert(if_depth
< MAX_IF_DEPTH
);
1846 if_inst
[if_depth
] = brw_IF(p
, BRW_EXECUTE_8
);
1847 /* Note that brw_IF smashes the predicate_control field. */
1848 if_inst
[if_depth
]->header
.predicate_control
= get_predicate(inst
);
1849 if_depth_in_loop
[loop_depth
]++;
1853 clear_current_const(c
);
1854 assert(if_depth
> 0);
1855 if_inst
[if_depth
-1] = brw_ELSE(p
, if_inst
[if_depth
-1]);
1858 clear_current_const(c
);
1859 assert(if_depth
> 0);
1860 brw_ENDIF(p
, if_inst
[--if_depth
]);
1861 if_depth_in_loop
[loop_depth
]--;
1863 case OPCODE_BGNLOOP
:
1864 clear_current_const(c
);
1865 loop_inst
[loop_depth
++] = brw_DO(p
, BRW_EXECUTE_8
);
1866 if_depth_in_loop
[loop_depth
] = 0;
1869 brw_set_predicate_control(p
, get_predicate(inst
));
1870 brw_BREAK(p
, if_depth_in_loop
[loop_depth
]);
1871 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
1874 brw_set_predicate_control(p
, get_predicate(inst
));
1875 brw_CONT(p
, if_depth_in_loop
[loop_depth
]);
1876 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
1878 case OPCODE_ENDLOOP
:
1880 clear_current_const(c
);
1881 struct brw_instruction
*inst0
, *inst1
;
1886 if (intel
->gen
== 5)
1889 inst0
= inst1
= brw_WHILE(p
, loop_inst
[loop_depth
]);
1890 /* patch all the BREAK/CONT instructions from last BEGINLOOP */
1891 while (inst0
> loop_inst
[loop_depth
]) {
1893 if (inst0
->header
.opcode
== BRW_OPCODE_BREAK
&&
1894 inst0
->bits3
.if_else
.jump_count
== 0) {
1895 inst0
->bits3
.if_else
.jump_count
= br
* (inst1
- inst0
+ 1);
1897 else if (inst0
->header
.opcode
== BRW_OPCODE_CONTINUE
&&
1898 inst0
->bits3
.if_else
.jump_count
== 0) {
1899 inst0
->bits3
.if_else
.jump_count
= br
* (inst1
- inst0
);
1905 brw_set_predicate_control(p
, get_predicate(inst
));
1906 brw_ADD(p
, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
1907 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
1910 brw_set_access_mode(p
, BRW_ALIGN_1
);
1911 brw_ADD(p
, deref_1d(stack_index
, 0), brw_ip_reg(), brw_imm_d(3*16));
1912 brw_set_access_mode(p
, BRW_ALIGN_16
);
1913 brw_ADD(p
, get_addr_reg(stack_index
),
1914 get_addr_reg(stack_index
), brw_imm_d(4));
1915 brw_save_call(p
, inst
->Comment
, p
->nr_insn
);
1916 brw_ADD(p
, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
1919 brw_ADD(p
, get_addr_reg(stack_index
),
1920 get_addr_reg(stack_index
), brw_imm_d(-4));
1921 brw_set_access_mode(p
, BRW_ALIGN_1
);
1922 brw_MOV(p
, brw_ip_reg(), deref_1d(stack_index
, 0));
1923 brw_set_access_mode(p
, BRW_ALIGN_16
);
1926 emit_vertex_write(c
);
1932 brw_save_label(p
, inst
->Comment
, p
->nr_insn
);
1938 _mesa_problem(NULL
, "Unsupported opcode %i (%s) in vertex shader",
1939 inst
->Opcode
, inst
->Opcode
< MAX_OPCODE
?
1940 _mesa_opcode_string(inst
->Opcode
) :
1944 /* Set the predication update on the last instruction of the native
1945 * instruction sequence.
1947 * This would be problematic if it was set on a math instruction,
1948 * but that shouldn't be the case with the current GLSL compiler.
1950 if (inst
->CondUpdate
) {
1951 struct brw_instruction
*hw_insn
= &p
->store
[p
->nr_insn
- 1];
1953 assert(hw_insn
->header
.destreg__conditionalmod
== 0);
1954 hw_insn
->header
.destreg__conditionalmod
= BRW_CONDITIONAL_NZ
;
1957 if ((inst
->DstReg
.File
== PROGRAM_OUTPUT
)
1958 && (inst
->DstReg
.Index
!= VERT_RESULT_HPOS
)
1959 && c
->output_regs
[inst
->DstReg
.Index
].used_in_src
) {
1960 brw_MOV(p
, get_dst(c
, inst
->DstReg
), dst
);
1963 /* Result color clamping.
1965 * When destination register is an output register and
1966 * it's primary/secondary front/back color, we have to clamp
1967 * the result to [0,1]. This is done by enabling the
1968 * saturation bit for the last instruction.
1970 * We don't use brw_set_saturate() as it modifies
1971 * p->current->header.saturate, which affects all the subsequent
1972 * instructions. Instead, we directly modify the header
1973 * of the last (already stored) instruction.
1975 if (inst
->DstReg
.File
== PROGRAM_OUTPUT
) {
1976 if ((inst
->DstReg
.Index
== VERT_RESULT_COL0
)
1977 || (inst
->DstReg
.Index
== VERT_RESULT_COL1
)
1978 || (inst
->DstReg
.Index
== VERT_RESULT_BFC0
)
1979 || (inst
->DstReg
.Index
== VERT_RESULT_BFC1
)) {
1980 p
->store
[p
->nr_insn
-1].header
.saturate
= 1;
1984 if (inst
->DstReg
.RelAddr
) {
1985 assert(inst
->DstReg
.File
== PROGRAM_TEMPORARY
||
1986 inst
->DstReg
.File
== PROGRAM_OUTPUT
);
1987 move_to_reladdr_dst(c
, inst
, dst
);
1993 brw_resolve_cals(p
);
1997 if (INTEL_DEBUG
& DEBUG_VS
) {
2000 printf("vs-native:\n");
2001 for (i
= 0; i
< p
->nr_insn
; i
++)
2002 brw_disasm(stdout
, &p
->store
[i
], intel
->gen
);