/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <gtest/gtest.h>

#include "nir_builder.h"
27 class comparison_pre_test
: public ::testing::Test
{
31 glsl_type_singleton_init_or_ref();
33 static const nir_shader_compiler_options options
= { };
34 nir_builder_init_simple_shader(&bld
, NULL
, MESA_SHADER_VERTEX
, &options
);
36 v1
= nir_imm_vec4(&bld
, -2.0, -1.0, 1.0, 2.0);
37 v2
= nir_imm_vec4(&bld
, 2.0, 1.0, -1.0, -2.0);
38 v3
= nir_imm_vec4(&bld
, 3.0, 4.0, 5.0, 6.0);
41 ~comparison_pre_test()
43 ralloc_free(bld
.shader
);
44 glsl_type_singleton_decref();
47 struct nir_builder bld
;
53 const uint8_t xxxx
[4] = { 0, 0, 0, 0 };
54 const uint8_t wwww
[4] = { 3, 3, 3, 3 };
57 TEST_F(comparison_pre_test
, a_lt_b_vs_neg_a_plus_b
)
61 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
62 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
63 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
64 * vec1 32 ssa_3 = load_const ( 1.0)
65 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
66 * vec1 32 ssa_5 = mov ssa_4.x
67 * vec1 1 ssa_6 = flt ssa_5, ssa_3
70 * vec1 32 ssa_7 = fneg ssa_5
71 * vec1 32 ssa_8 = fadd ssa_7, ssa_3
77 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
78 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
79 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
80 * vec1 32 ssa_3 = load_const ( 1.0)
81 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
82 * vec1 32 ssa_5 = mov ssa_4.x
83 * vec1 32 ssa_9 = fneg ssa_5
84 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
85 * vec1 32 ssa_11 = load_const (0.0)
86 * vec1 1 ssa_12 = flt ssa_11, ssa_10
87 * vec1 32 ssa_13 = mov ssa_10
88 * vec1 1 ssa_14 = mov ssa_12
91 * vec1 32 ssa_7 = fneg ssa_5
95 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
96 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
98 nir_ssa_def
*flt
= nir_flt(&bld
, a
, one
);
100 nir_if
*nif
= nir_push_if(&bld
, flt
);
102 nir_fadd(&bld
, nir_fneg(&bld
, a
), one
);
104 nir_pop_if(&bld
, nif
);
106 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
109 TEST_F(comparison_pre_test
, a_lt_b_vs_a_minus_b
)
113 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
114 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
115 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
116 * vec1 32 ssa_3 = load_const ( 1.0)
117 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
118 * vec1 32 ssa_5 = mov ssa_4.x
119 * vec1 1 ssa_6 = flt ssa_3, ssa_5
122 * vec1 32 ssa_7 = fneg ssa_5
123 * vec1 32 ssa_8 = fadd ssa_3, ssa_7
129 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
130 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
131 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
132 * vec1 32 ssa_3 = load_const ( 1.0)
133 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
134 * vec1 32 ssa_5 = mov ssa_4.x
135 * vec1 32 ssa_9 = fneg ssa_5
136 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
137 * vec1 32 ssa_11 = load_const (0.0)
138 * vec1 1 ssa_12 = flt ssa_10, ssa_11
139 * vec1 32 ssa_13 = mov ssa_10
140 * vec1 1 ssa_14 = mov ssa_12
143 * vec1 32 ssa_7 = fneg ssa_5
147 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
148 nir_ssa_def
*b
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
150 nir_ssa_def
*flt
= nir_flt(&bld
, one
, b
);
152 nir_if
*nif
= nir_push_if(&bld
, flt
);
154 nir_fadd(&bld
, one
, nir_fneg(&bld
, b
));
156 nir_pop_if(&bld
, nif
);
158 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
161 TEST_F(comparison_pre_test
, neg_a_lt_b_vs_a_plus_b
)
165 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
166 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
167 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
168 * vec1 32 ssa_3 = load_const ( 1.0)
169 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
170 * vec1 32 ssa_5 = mov ssa_4.x
171 * vec1 32 ssa_6 = fneg ssa_5
172 * vec1 1 ssa_7 = flt ssa_6, ssa_3
175 * vec1 32 ssa_8 = fadd ssa_5, ssa_3
181 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
182 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
183 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
184 * vec1 32 ssa_3 = load_const ( 1.0)
185 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
186 * vec1 32 ssa_5 = mov ssa_4.x
187 * vec1 32 ssa_9 = fneg ssa_5
188 * vec1 32 ssa_9 = fneg ssa_6
189 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
190 * vec1 32 ssa_11 = load_const ( 0.0)
191 * vec1 1 ssa_12 = flt ssa_11, ssa_10
192 * vec1 32 ssa_13 = mov ssa_10
193 * vec1 1 ssa_14 = mov ssa_12
200 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
201 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
203 nir_ssa_def
*flt
= nir_flt(&bld
, nir_fneg(&bld
, a
), one
);
205 nir_if
*nif
= nir_push_if(&bld
, flt
);
207 nir_fadd(&bld
, a
, one
);
209 nir_pop_if(&bld
, nif
);
211 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
214 TEST_F(comparison_pre_test
, a_lt_neg_b_vs_a_plus_b
)
218 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
219 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
220 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
221 * vec1 32 ssa_3 = load_const ( 1.0)
222 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
223 * vec1 32 ssa_5 = mov ssa_4.x
224 * vec1 32 ssa_6 = fneg ssa_5
225 * vec1 1 ssa_7 = flt ssa_3, ssa_6
228 * vec1 32 ssa_8 = fadd ssa_3, ssa_5
234 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
235 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
236 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
237 * vec1 32 ssa_3 = load_const ( 1.0)
238 * vec4 32 ssa_4 = fadd ssa_0, ssa_2
239 * vec1 32 ssa_5 = mov ssa_4.x
240 * vec1 32 ssa_9 = fneg ssa_5
241 * vec1 32 ssa_9 = fneg ssa_6
242 * vec1 32 ssa_10 = fadd ssa_3, ssa_9
243 * vec1 32 ssa_11 = load_const ( 0.0)
244 * vec1 1 ssa_12 = flt ssa_10, ssa_11
245 * vec1 32 ssa_13 = mov ssa_10
246 * vec1 1 ssa_14 = mov ssa_12
252 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
253 nir_ssa_def
*b
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
255 nir_ssa_def
*flt
= nir_flt(&bld
, one
, nir_fneg(&bld
, b
));
257 nir_if
*nif
= nir_push_if(&bld
, flt
);
259 nir_fadd(&bld
, one
, b
);
261 nir_pop_if(&bld
, nif
);
263 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
266 TEST_F(comparison_pre_test
, imm_lt_b_vs_neg_imm_plus_b
)
270 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
271 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
272 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
273 * vec1 32 ssa_3 = load_const ( 1.0)
274 * vec1 32 ssa_4 = load_const (-1.0)
275 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
276 * vec1 32 ssa_6 = mov ssa_5.x
277 * vec1 1 ssa_7 = flt ssa_3, ssa_6
280 * vec1 32 ssa_8 = fadd ssa_4, ssa_6
286 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
287 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
288 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
289 * vec1 32 ssa_3 = load_const ( 1.0)
290 * vec1 32 ssa_4 = load_const (-1.0)
291 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
292 * vec1 32 ssa_6 = mov ssa_5.x
293 * vec1 32 ssa_9 = fneg ssa_3
294 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
295 * vec1 32 ssa_11 = load_const ( 0.0)
296 * vec1 1 ssa_12 = flt ssa_11, ssa_10
297 * vec1 32 ssa_13 = mov ssa_10
298 * vec1 1 ssa_14 = mov ssa_12
304 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
305 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
306 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
308 nir_ssa_def
*flt
= nir_flt(&bld
, one
, a
);
310 nir_if
*nif
= nir_push_if(&bld
, flt
);
312 nir_fadd(&bld
, neg_one
, a
);
314 nir_pop_if(&bld
, nif
);
316 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
319 TEST_F(comparison_pre_test
, a_lt_imm_vs_a_minus_imm
)
323 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
324 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
325 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
326 * vec1 32 ssa_3 = load_const ( 1.0)
327 * vec1 32 ssa_4 = load_const (-1.0)
328 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
329 * vec1 32 ssa_6 = mov ssa_5.x
330 * vec1 1 ssa_7 = flt ssa_6, ssa_3
333 * vec1 32 ssa_8 = fadd ssa_6, ssa_4
339 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
340 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
341 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
342 * vec1 32 ssa_3 = load_const ( 1.0)
343 * vec1 32 ssa_4 = load_const (-1.0)
344 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
345 * vec1 32 ssa_6 = mov ssa_5.x
346 * vec1 32 ssa_9 = fneg ssa_3
347 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
348 * vec1 32 ssa_11 = load_const ( 0.0)
349 * vec1 1 ssa_12 = flt ssa_10, ssa_11
350 * vec1 32 ssa_13 = mov ssa_10
351 * vec1 1 ssa_14 = mov ssa_12
357 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
358 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
359 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
361 nir_ssa_def
*flt
= nir_flt(&bld
, a
, one
);
363 nir_if
*nif
= nir_push_if(&bld
, flt
);
365 nir_fadd(&bld
, a
, neg_one
);
367 nir_pop_if(&bld
, nif
);
369 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
372 TEST_F(comparison_pre_test
, neg_imm_lt_a_vs_a_plus_imm
)
376 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
377 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
378 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
379 * vec1 32 ssa_3 = load_const ( 1.0)
380 * vec1 32 ssa_4 = load_const (-1.0)
381 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
382 * vec1 32 ssa_6 = mov ssa_5.x
383 * vec1 1 ssa_7 = flt ssa_4, ssa_6
386 * vec1 32 ssa_8 = fadd ssa_6, ssa_3
392 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
393 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
394 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
395 * vec1 32 ssa_3 = load_const ( 1.0)
396 * vec1 32 ssa_4 = load_const (-1.0)
397 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
398 * vec1 32 ssa_6 = mov ssa_5.x
399 * vec1 32 ssa_9 = fneg ssa_4
400 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
401 * vec1 32 ssa_11 = load_const ( 0.0)
402 * vec1 1 ssa_12 = flt ssa_11, ssa_10
403 * vec1 32 ssa_13 = mov ssa_10
404 * vec1 1 ssa_14 = mov ssa_12
411 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
412 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
413 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
415 nir_ssa_def
*flt
= nir_flt(&bld
, neg_one
, a
);
417 nir_if
*nif
= nir_push_if(&bld
, flt
);
419 nir_fadd(&bld
, a
, one
);
421 nir_pop_if(&bld
, nif
);
423 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
426 TEST_F(comparison_pre_test
, a_lt_neg_imm_vs_a_plus_imm
)
430 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
431 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
432 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
433 * vec1 32 ssa_3 = load_const ( 1.0)
434 * vec1 32 ssa_4 = load_const (-1.0)
435 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
436 * vec1 32 ssa_6 = mov ssa_5.x
437 * vec1 1 ssa_7 = flt ssa_6, ssa_4
440 * vec1 32 ssa_8 = fadd ssa_6, ssa_3
446 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
447 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
448 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
449 * vec1 32 ssa_3 = load_const ( 1.0)
450 * vec1 32 ssa_4 = load_const (-1.0)
451 * vec4 32 ssa_5 = fadd ssa_0, ssa_2
452 * vec1 32 ssa_6 = mov ssa_5.x
453 * vec1 32 ssa_9 = fneg ssa_4
454 * vec1 32 ssa_10 = fadd ssa_6, ssa_9
455 * vec1 32 ssa_11 = load_const ( 0.0)
456 * vec1 1 ssa_12 = flt ssa_10, ssa_11
457 * vec1 32 ssa_13 = mov ssa_10
458 * vec1 1 ssa_14 = mov ssa_12
464 nir_ssa_def
*one
= nir_imm_float(&bld
, 1.0f
);
465 nir_ssa_def
*neg_one
= nir_imm_float(&bld
, -1.0f
);
466 nir_ssa_def
*a
= nir_channel(&bld
, nir_fadd(&bld
, v1
, v3
), 0);
468 nir_ssa_def
*flt
= nir_flt(&bld
, a
, neg_one
);
470 nir_if
*nif
= nir_push_if(&bld
, flt
);
472 nir_fadd(&bld
, a
, one
);
474 nir_pop_if(&bld
, nif
);
476 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
479 TEST_F(comparison_pre_test
, swizzle_of_same_immediate_vector
)
483 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
484 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
485 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
486 * vec4 32 ssa_3 = fadd ssa_0, ssa_2
487 * vec1 1 ssa_4 = flt ssa_0.x, ssa_3.x
490 * vec1 32 ssa_5 = fadd ssa_0.w, ssa_3.x
494 nir_ssa_def
*a
= nir_fadd(&bld
, v1
, v3
);
496 nir_alu_instr
*flt
= nir_alu_instr_create(bld
.shader
, nir_op_flt
);
498 flt
->src
[0].src
= nir_src_for_ssa(v1
);
499 flt
->src
[1].src
= nir_src_for_ssa(a
);
501 memcpy(&flt
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
502 memcpy(&flt
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
504 nir_builder_alu_instr_finish_and_insert(&bld
, flt
);
506 flt
->dest
.dest
.ssa
.num_components
= 1;
507 flt
->dest
.write_mask
= 1;
509 nir_if
*nif
= nir_push_if(&bld
, &flt
->dest
.dest
.ssa
);
511 nir_alu_instr
*fadd
= nir_alu_instr_create(bld
.shader
, nir_op_fadd
);
513 fadd
->src
[0].src
= nir_src_for_ssa(v1
);
514 fadd
->src
[1].src
= nir_src_for_ssa(a
);
516 memcpy(&fadd
->src
[0].swizzle
, wwww
, sizeof(wwww
));
517 memcpy(&fadd
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
519 nir_builder_alu_instr_finish_and_insert(&bld
, fadd
);
521 fadd
->dest
.dest
.ssa
.num_components
= 1;
522 fadd
->dest
.write_mask
= 1;
524 nir_pop_if(&bld
, nif
);
526 EXPECT_TRUE(nir_opt_comparison_pre_impl(bld
.impl
));
529 TEST_F(comparison_pre_test
, non_scalar_add_result
)
531 /* The optimization pass should not do anything because the result of the
532 * fadd is not a scalar.
536 * vec4 32 ssa_0 = load_const (-2.0, -1.0, 1.0, 2.0)
537 * vec4 32 ssa_1 = load_const ( 2.0, 1.0, -1.0, -2.0)
538 * vec4 32 ssa_2 = load_const ( 3.0, 4.0, 5.0, 6.0)
539 * vec4 32 ssa_3 = fadd ssa_0, ssa_2
540 * vec1 1 ssa_4 = flt ssa_0.x, ssa_3.x
543 * vec2 32 ssa_5 = fadd ssa_1.xx, ssa_3.xx
551 nir_ssa_def
*a
= nir_fadd(&bld
, v1
, v3
);
553 nir_alu_instr
*flt
= nir_alu_instr_create(bld
.shader
, nir_op_flt
);
555 flt
->src
[0].src
= nir_src_for_ssa(v1
);
556 flt
->src
[1].src
= nir_src_for_ssa(a
);
558 memcpy(&flt
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
559 memcpy(&flt
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
561 nir_builder_alu_instr_finish_and_insert(&bld
, flt
);
563 flt
->dest
.dest
.ssa
.num_components
= 1;
564 flt
->dest
.write_mask
= 1;
566 nir_if
*nif
= nir_push_if(&bld
, &flt
->dest
.dest
.ssa
);
568 nir_alu_instr
*fadd
= nir_alu_instr_create(bld
.shader
, nir_op_fadd
);
570 fadd
->src
[0].src
= nir_src_for_ssa(v2
);
571 fadd
->src
[1].src
= nir_src_for_ssa(a
);
573 memcpy(&fadd
->src
[0].swizzle
, xxxx
, sizeof(xxxx
));
574 memcpy(&fadd
->src
[1].swizzle
, xxxx
, sizeof(xxxx
));
576 nir_builder_alu_instr_finish_and_insert(&bld
, fadd
);
578 fadd
->dest
.dest
.ssa
.num_components
= 2;
579 fadd
->dest
.write_mask
= 3;
581 nir_pop_if(&bld
, nif
);
583 EXPECT_FALSE(nir_opt_comparison_pre_impl(bld
.impl
));