/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
23 #include <gtest/gtest.h>
25 #include "nir_builder.h"
27 class comparison_pre_test
: public ::testing::Test
{
31 static const nir_shader_compiler_options options
= { };
32 nir_builder_init_simple_shader(&bld
, NULL
, MESA_SHADER_VERTEX
, &options
);
34 v1
= nir_imm_vec4(&bld
, -2.0, -1.0, 1.0, 2.0);
35 v2
= nir_imm_vec4(&bld
, 2.0, 1.0, -1.0, -2.0);
36 v3
= nir_imm_vec4(&bld
, 3.0, 4.0, 5.0, 6.0);
39 ~comparison_pre_test()
41 ralloc_free(bld
.shader
);
44 struct nir_builder bld
;
50 const uint8_t xxxx
[4] = { 0, 0, 0, 0 };
51 const uint8_t wwww
[4] = { 3, 3, 3, 3 };
54 TEST_F(comparison_pre_test
, a_lt_b_vs_neg_a_plus_b
)
58 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
59 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
60 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
61 * vec1 32 ssa_3 = load_const ( 1.0)
62 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
63 * vec1 32 ssa_5 = mov ssa_4.x
64 * vec1 1 ssa_6 = flt ssa_5, ssa_3
67 * vec1 32 ssa_7 = fneg ssa_5
68 * vec1 32 ssa_8 = fadd ssa_7, ssa_3
74 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
75 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
76 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
77 * vec1 32 ssa_3 = load_const ( 1.0)
78 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
79 * vec1 32 ssa_5 = mov ssa_4.x
80 * vec1 32 ssa_9 = fneg ssa_5
81 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
82 * vec1 32 ssa_11 = load_const (0.0)
83 * vec1 1 ssa_12 = flt ssa_11, ssa_10
84 * vec1 32 ssa_13 = mov ssa_10
85 * vec1 1 ssa_14 = mov ssa_12
88 * vec1 32 ssa_7 = fneg ssa_5
92 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
93 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
95 nir_ssa_def
*flt
= nir_flt(&bld
, a
, one
);
97 nir_if
*nif
= nir_push_if(&bld
, flt
);
99 nir_fadd(&bld
, nir_fneg(&bld
, a
), one
);
101 nir_pop_if(&bld
, nif
);
103 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
106 TEST_F(comparison_pre_test
, a_lt_b_vs_a_minus_b
)
110 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
111 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
112 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
113 * vec1 32 ssa_3 = load_const ( 1.0)
114 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
115 * vec1 32 ssa_5 = mov ssa_4.x
116 * vec1 1 ssa_6 = flt ssa_3, ssa_5
119 * vec1 32 ssa_7 = fneg ssa_5
120 * vec1 32 ssa_8 = fadd ssa_3, ssa_7
126 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
127 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
128 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
129 * vec1 32 ssa_3 = load_const ( 1.0)
130 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
131 * vec1 32 ssa_5 = mov ssa_4.x
132 * vec1 32 ssa_9 = fneg ssa_5
133 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
134 * vec1 32 ssa_11 = load_const (0.0)
135 * vec1 1 ssa_12 = flt ssa_10, ssa_11
136 * vec1 32 ssa_13 = mov ssa_10
137 * vec1 1 ssa_14 = mov ssa_12
140 * vec1 32 ssa_7 = fneg ssa_5
144 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
145 nir_ssa_def
*b
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
147 nir_ssa_def
*flt
= nir_flt(&bld
, one
, b
);
149 nir_if
*nif
= nir_push_if(&bld
, flt
);
151 nir_fadd(&bld
, one
, nir_fneg(&bld
, b
));
153 nir_pop_if(&bld
, nif
);
155 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
158 TEST_F(comparison_pre_test
, neg_a_lt_b_vs_a_plus_b
)
162 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
163 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
164 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
165 * vec1 32 ssa_3 = load_const ( 1.0)
166 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
167 * vec1 32 ssa_5 = mov ssa_4.x
168 * vec1 32 ssa_6 = fneg ssa_5
169 * vec1 1 ssa_7 = flt ssa_6, ssa_3
172 * vec1 32 ssa_8 = fadd ssa_5, ssa_3
178 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
179 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
180 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
181 * vec1 32 ssa_3 = load_const ( 1.0)
182 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
183 * vec1 32 ssa_5 = mov ssa_4.x
184 * vec1 32 ssa_9 = fneg ssa_5
185 * vec1 32 ssa_9 = fneg ssa_6
186 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
187 * vec1 32 ssa_11 = load_const ( 0.0)
188 * vec1 1 ssa_12 = flt ssa_11, ssa_10
189 * vec1 32 ssa_13 = mov ssa_10
190 * vec1 1 ssa_14 = mov ssa_12
197 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
198 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
200 nir_ssa_def
*flt
= nir_flt(&bld
, nir_fneg(&bld
, a
), one
);
202 nir_if
*nif
= nir_push_if(&bld
, flt
);
204 nir_fadd(&bld
, a
, one
);
206 nir_pop_if(&bld
, nif
);
208 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
211 TEST_F(comparison_pre_test
, a_lt_neg_b_vs_a_plus_b
)
215 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
216 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
217 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
218 * vec1 32 ssa_3 = load_const ( 1.0)
219 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
220 * vec1 32 ssa_5 = mov ssa_4.x
221 * vec1 32 ssa_6 = fneg ssa_5
222 * vec1 1 ssa_7 = flt ssa_3, ssa_6
225 * vec1 32 ssa_8 = fadd ssa_3, ssa_5
231 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
232 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
233 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
234 * vec1 32 ssa_3 = load_const ( 1.0)
235 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
236 * vec1 32 ssa_5 = mov ssa_4.x
237 * vec1 32 ssa_9 = fneg ssa_5
238 * vec1 32 ssa_9 = fneg ssa_6
239 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
240 * vec1 32 ssa_11 = load_const ( 0.0)
241 * vec1 1 ssa_12 = flt ssa_10, ssa_11
242 * vec1 32 ssa_13 = mov ssa_10
243 * vec1 1 ssa_14 = mov ssa_12
249 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
250 nir_ssa_def
*b
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
252 nir_ssa_def
*flt
= nir_flt(&bld
, one
, nir_fneg(&bld
, b
));
254 nir_if
*nif
= nir_push_if(&bld
, flt
);
256 nir_fadd(&bld
, one
, b
);
258 nir_pop_if(&bld
, nif
);
260 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
263 TEST_F(comparison_pre_test
, imm_lt_b_vs_neg_imm_plus_b
)
267 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
268 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
269 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
270 * vec1 32 ssa_3 = load_const ( 1.0)
271 * vec1 32 ssa_4 = load_const (-1.0)
272 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
273 * vec1 32 ssa_6 = mov ssa_5.x
274 * vec1 1 ssa_7 = flt ssa_3, ssa_6
277 * vec1 32 ssa_8 = fadd ssa_4, ssa_6
283 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
284 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
285 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
286 * vec1 32 ssa_3 = load_const ( 1.0)
287 * vec1 32 ssa_4 = load_const (-1.0)
288 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
289 * vec1 32 ssa_6 = mov ssa_5.x
290 * vec1 32 ssa_9 = fneg ssa_3
291 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
292 * vec1 32 ssa_11 = load_const ( 0.0)
293 * vec1 1 ssa_12 = flt ssa_11, ssa_10
294 * vec1 32 ssa_13 = mov ssa_10
295 * vec1 1 ssa_14 = mov ssa_12
301 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
302 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
303 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
305 nir_ssa_def
*flt
= nir_flt(&bld
, one
, a
);
307 nir_if
*nif
= nir_push_if(&bld
, flt
);
309 nir_fadd(&bld
, neg_one
, a
);
311 nir_pop_if(&bld
, nif
);
313 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
316 TEST_F(comparison_pre_test
, a_lt_imm_vs_a_minus_imm
)
320 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
321 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
322 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
323 * vec1 32 ssa_3 = load_const ( 1.0)
324 * vec1 32 ssa_4 = load_const (-1.0)
325 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
326 * vec1 32 ssa_6 = mov ssa_5.x
327 * vec1 1 ssa_7 = flt ssa_6, ssa_3
330 * vec1 32 ssa_8 = fadd ssa_6, ssa_4
336 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
337 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
338 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
339 * vec1 32 ssa_3 = load_const ( 1.0)
340 * vec1 32 ssa_4 = load_const (-1.0)
341 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
342 * vec1 32 ssa_6 = mov ssa_5.x
343 * vec1 32 ssa_9 = fneg ssa_3
344 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
345 * vec1 32 ssa_11 = load_const ( 0.0)
346 * vec1 1 ssa_12 = flt ssa_10, ssa_11
347 * vec1 32 ssa_13 = mov ssa_10
348 * vec1 1 ssa_14 = mov ssa_12
354 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
355 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
356 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
358 nir_ssa_def
*flt
= nir_flt(&bld
, a
, one
);
360 nir_if
*nif
= nir_push_if(&bld
, flt
);
362 nir_fadd(&bld
, a
, neg_one
);
364 nir_pop_if(&bld
, nif
);
366 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
369 TEST_F(comparison_pre_test
, neg_imm_lt_a_vs_a_plus_imm
)
373 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
374 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
375 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
376 * vec1 32 ssa_3 = load_const ( 1.0)
377 * vec1 32 ssa_4 = load_const (-1.0)
378 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
379 * vec1 32 ssa_6 = mov ssa_5.x
380 * vec1 1 ssa_7 = flt ssa_4, ssa_6
383 * vec1 32 ssa_8 = fadd ssa_6, ssa_3
389 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
390 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
391 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
392 * vec1 32 ssa_3 = load_const ( 1.0)
393 * vec1 32 ssa_4 = load_const (-1.0)
394 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
395 * vec1 32 ssa_6 = mov ssa_5.x
396 * vec1 32 ssa_9 = fneg ssa_4
397 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
398 * vec1 32 ssa_11 = load_const ( 0.0)
399 * vec1 1 ssa_12 = flt ssa_11, ssa_10
400 * vec1 32 ssa_13 = mov ssa_10
401 * vec1 1 ssa_14 = mov ssa_12
408 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
409 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
410 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
412 nir_ssa_def
*flt
= nir_flt(&bld
, neg_one
, a
);
414 nir_if
*nif
= nir_push_if(&bld
, flt
);
416 nir_fadd(&bld
, a
, one
);
418 nir_pop_if(&bld
, nif
);
420 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
423 TEST_F(comparison_pre_test
, a_lt_neg_imm_vs_a_plus_imm
)
427 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
428 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
429 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
430 * vec1 32 ssa_3 = load_const ( 1.0)
431 * vec1 32 ssa_4 = load_const (-1.0)
432 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
433 * vec1 32 ssa_6 = mov ssa_5.x
434 * vec1 1 ssa_7 = flt ssa_6, ssa_4
437 * vec1 32 ssa_8 = fadd ssa_6, ssa_3
443 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
444 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
445 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
446 * vec1 32 ssa_3 = load_const ( 1.0)
447 * vec1 32 ssa_4 = load_const (-1.0)
448 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
449 * vec1 32 ssa_6 = mov ssa_5.x
450 * vec1 32 ssa_9 = fneg ssa_4
451 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
452 * vec1 32 ssa_11 = load_const ( 0.0)
453 * vec1 1 ssa_12 = flt ssa_10, ssa_11
454 * vec1 32 ssa_13 = mov ssa_10
455 * vec1 1 ssa_14 = mov ssa_12
461 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
462 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
463 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
465 nir_ssa_def
*flt
= nir_flt(&bld
, a
, neg_one
);
467 nir_if
*nif
= nir_push_if(&bld
, flt
);
469 nir_fadd(&bld
, a
, one
);
471 nir_pop_if(&bld
, nif
);
473 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
476 TEST_F(comparison_pre_test
, swizzle_of_same_immediate_vector
)
480 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
481 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
482 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
483 * vec4 32 ssa_3 = fadd ssa_0, ssa_2
484 * vec1 1 ssa_4 = flt ssa_0.x, ssa_3.x
487 * vec1 32 ssa_5 = fadd ssa_0.w, ssa_3.x
491 nir_ssa_def
*a
= nir_fadd(&bld
, v1
, v3
);
493 nir_alu_instr
*flt
= nir_alu_instr_create(bld
.shader
, nir_op_flt
);
495 flt
->src
[0].src
= nir_src_for_ssa(v1
);
496 flt
->src
[1].src
= nir_src_for_ssa(a
);
498 memcpy(&flt
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
499 memcpy(&flt
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
501 nir_builder_alu_instr_finish_and_insert(&bld
, flt
);
503 flt
->dest
.dest
.ssa
.num_components
= 1;
504 flt
->dest
.write_mask
= 1;
506 nir_if
*nif
= nir_push_if(&bld
, &flt
->dest
.dest
.ssa
);
508 nir_alu_instr
*fadd
= nir_alu_instr_create(bld
.shader
, nir_op_fadd
);
510 fadd
->src
[0].src
= nir_src_for_ssa(v1
);
511 fadd
->src
[1].src
= nir_src_for_ssa(a
);
513 memcpy(&fadd
->src
[0].swizzle
, wwww
, sizeof(wwww
));
514 memcpy(&fadd
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
516 nir_builder_alu_instr_finish_and_insert(&bld
, fadd
);
518 fadd
->dest
.dest
.ssa
.num_components
= 1;
519 fadd
->dest
.write_mask
= 1;
521 nir_pop_if(&bld
, nif
);
523 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
526 TEST_F(comparison_pre_test
, non_scalar_add_result
)
528 /* The optimization pass should not do anything because the result of the
529 * fadd is not a scalar.
533 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
534 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
535 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
536 * vec4 32 ssa_3 = fadd ssa_0, ssa_2
537 * vec1 1 ssa_4 = flt ssa_0.x, ssa_3.x
540 * vec2 32 ssa_5 = fadd ssa_1.xx, ssa_3.xx
548 nir_ssa_def
*a
= nir_fadd(&bld
, v1
, v3
);
550 nir_alu_instr
*flt
= nir_alu_instr_create(bld
.shader
, nir_op_flt
);
552 flt
->src
[0].src
= nir_src_for_ssa(v1
);
553 flt
->src
[1].src
= nir_src_for_ssa(a
);
555 memcpy(&flt
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
556 memcpy(&flt
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
558 nir_builder_alu_instr_finish_and_insert(&bld
, flt
);
560 flt
->dest
.dest
.ssa
.num_components
= 1;
561 flt
->dest
.write_mask
= 1;
563 nir_if
*nif
= nir_push_if(&bld
, &flt
->dest
.dest
.ssa
);
565 nir_alu_instr
*fadd
= nir_alu_instr_create(bld
.shader
, nir_op_fadd
);
567 fadd
->src
[0].src
= nir_src_for_ssa(v2
);
568 fadd
->src
[1].src
= nir_src_for_ssa(a
);
570 memcpy(&fadd
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
571 memcpy(&fadd
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
573 nir_builder_alu_instr_finish_and_insert(&bld
, fadd
);
575 fadd
->dest
.dest
.ssa
.num_components
= 2;
576 fadd
->dest
.write_mask
= 3;
578 nir_pop_if(&bld
, nif
);
580 EXPECT_FALSE(nir_opt_comparison_pre_impl(bld
.impl
));