From b08d7040518cdf76792952ceef72cadaa54d0179 Mon Sep 17 00:00:00 2001
From: Ian Romanick
Date: Mon, 17 Jun 2019 16:27:37 -0700
Subject: [PATCH] nir: Add unit tests for nir_opt_comparison_pre

Each test has a comment with the expected before and after NIR.  The
tests don't actually check this; they only check whether or not the
optimization pass reported progress.  I couldn't think of a robust,
future-proof way to check the before and after code.

Reviewed-by: Matt Turner
---
 src/compiler/nir/meson.build              |  12 +
 src/compiler/nir/nir.h                    |   3 +
 src/compiler/nir/nir_opt_comparison_pre.c |   2 +-
 .../nir/tests/comparison_pre_tests.cpp    | 318 ++++++++++++++++++
 4 files changed, 334 insertions(+), 1 deletion(-)
 create mode 100644 src/compiler/nir/tests/comparison_pre_tests.cpp

diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
index 01ddcdf0ea0..169c31aacd5 100644
--- a/src/compiler/nir/meson.build
+++ b/src/compiler/nir/meson.build
@@ -306,4 +306,16 @@ if with_tests
       link_with : libmesa_util,
     )
   )
+
+  test(
+    'comparison_pre',
+    executable(
+      'comparison_pre',
+      files('tests/comparison_pre_tests.cpp'),
+      c_args : [c_vis_args, c_msvc_compat_args, no_override_init_args],
+      include_directories : [inc_common],
+      dependencies : [dep_thread, idep_gtest, idep_nir],
+      link_with : libmesa_util,
+    )
+  )
 endif
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 55a39a493e9..92dad1c4bb3 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3625,6 +3625,9 @@ bool nir_lower_phis_to_regs_block(nir_block *block);
 bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
 bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
 
+/* This is here for unit tests. */
+bool nir_opt_comparison_pre_impl(nir_function_impl *impl);
+
 bool nir_opt_comparison_pre(nir_shader *shader);
 
 bool nir_opt_algebraic(nir_shader *shader);
diff --git a/src/compiler/nir/nir_opt_comparison_pre.c b/src/compiler/nir/nir_opt_comparison_pre.c
index 221379b3a23..33295e5eea6 100644
--- a/src/compiler/nir/nir_opt_comparison_pre.c
+++ b/src/compiler/nir/nir_opt_comparison_pre.c
@@ -346,7 +346,7 @@ comparison_pre_block(nir_block *block, struct block_queue *bq, nir_builder *bld)
    return progress;
 }
 
-static bool
+bool
 nir_opt_comparison_pre_impl(nir_function_impl *impl)
 {
    struct block_queue bq;
diff --git a/src/compiler/nir/tests/comparison_pre_tests.cpp b/src/compiler/nir/tests/comparison_pre_tests.cpp
new file mode 100644
index 00000000000..f31879be6c4
--- /dev/null
+++ b/src/compiler/nir/tests/comparison_pre_tests.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <gtest/gtest.h>
+#include "nir.h"
+#include "nir_builder.h"
+
+class comparison_pre_test : public ::testing::Test {
+protected:
+   comparison_pre_test()
+   {
+      static const nir_shader_compiler_options options = { };
+      nir_builder_init_simple_shader(&bld, NULL, MESA_SHADER_VERTEX, &options);
+
+      v1 = nir_imm_vec4(&bld, -2.0, -1.0,  1.0,  2.0);
+      v2 = nir_imm_vec4(&bld,  2.0,  1.0, -1.0, -2.0);
+      v3 = nir_imm_vec4(&bld,  3.0,  4.0,  5.0,  6.0);
+   }
+
+   ~comparison_pre_test()
+   {
+      ralloc_free(bld.shader);
+   }
+
+   struct nir_builder bld;
+
+   nir_ssa_def *v1;
+   nir_ssa_def *v2;
+   nir_ssa_def *v3;
+
+   const uint8_t xxxx[4] = { 0, 0, 0, 0 };
+   const uint8_t wwww[4] = { 3, 3, 3, 3 };
+};
+
+TEST_F(comparison_pre_test, a_lt_b_vs_neg_a_plus_b)
+{
+   /* Before:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1  1 ssa_6 = flt ssa_5, ssa_3
+    *
+    * if ssa_6 {
+    *    vec1 32 ssa_7 = fneg ssa_5
+    *    vec1 32 ssa_8 = fadd ssa_7, ssa_3
+    * } else {
+    * }
+    *
+    * After:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_9 = fneg ssa_5
+    * vec1 32 ssa_10 = fadd ssa_3, ssa_9
+    * vec1 32 ssa_11 = load_const ( 0.0)
+    * vec1  1 ssa_12 = flt ssa_11, ssa_10
+    * vec1 32 ssa_13 = mov ssa_10
+    * vec1  1 ssa_14 = mov ssa_12
+    *
+    * if ssa_14 {
+    *    vec1 32 ssa_7 = fneg ssa_5
+    * } else {
+    * }
+    */
+   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
+   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+
+   nir_ssa_def *flt = nir_flt(&bld, a, one);
+
+   nir_if *nif = nir_push_if(&bld, flt);
+
+   nir_fadd(&bld, nir_fneg(&bld, a), one);
+
+   nir_pop_if(&bld, nif);
+
+   EXPECT_TRUE(nir_opt_comparison_pre_impl(bld.impl));
+}
+
+TEST_F(comparison_pre_test, a_lt_b_vs_a_minus_b)
+{
+   /* Before:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1  1 ssa_6 = flt ssa_3, ssa_5
+    *
+    * if ssa_6 {
+    *    vec1 32 ssa_7 = fneg ssa_5
+    *    vec1 32 ssa_8 = fadd ssa_3, ssa_7
+    * } else {
+    * }
+    *
+    * After:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_9 = fneg ssa_5
+    * vec1 32 ssa_10 = fadd ssa_3, ssa_9
+    * vec1 32 ssa_11 = load_const ( 0.0)
+    * vec1  1 ssa_12 = flt ssa_10, ssa_11
+    * vec1 32 ssa_13 = mov ssa_10
+    * vec1  1 ssa_14 = mov ssa_12
+    *
+    * if ssa_14 {
+    *    vec1 32 ssa_7 = fneg ssa_5
+    * } else {
+    * }
+    */
+   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
+   nir_ssa_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+
+   nir_ssa_def *flt = nir_flt(&bld, one, b);
+
+   nir_if *nif = nir_push_if(&bld, flt);
+
+   nir_fadd(&bld, one, nir_fneg(&bld, b));
+
+   nir_pop_if(&bld, nif);
+
+   EXPECT_TRUE(nir_opt_comparison_pre_impl(bld.impl));
+}
+
+TEST_F(comparison_pre_test, neg_a_lt_b_vs_a_plus_b)
+{
+   /* Before:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_6 = fneg ssa_5
+    * vec1  1 ssa_7 = flt ssa_6, ssa_3
+    *
+    * if ssa_7 {
+    *    vec1 32 ssa_8 = fadd ssa_5, ssa_3
+    * } else {
+    * }
+    *
+    * After:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_6 = fneg ssa_5
+    * vec1 32 ssa_9 = fneg ssa_6
+    * vec1 32 ssa_10 = fadd ssa_3, ssa_9
+    * vec1 32 ssa_11 = load_const ( 0.0)
+    * vec1  1 ssa_12 = flt ssa_11, ssa_10
+    * vec1 32 ssa_13 = mov ssa_10
+    * vec1  1 ssa_14 = mov ssa_12
+    *
+    * if ssa_14 {
+    * } else {
+    * }
+    */
+   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
+   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+
+   nir_ssa_def *flt = nir_flt(&bld, nir_fneg(&bld, a), one);
+
+   nir_if *nif = nir_push_if(&bld, flt);
+
+   nir_fadd(&bld, a, one);
+
+   nir_pop_if(&bld, nif);
+
+   EXPECT_TRUE(nir_opt_comparison_pre_impl(bld.impl));
+}
+
+TEST_F(comparison_pre_test, a_lt_neg_b_vs_a_plus_b)
+{
+   /* Before:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_6 = fneg ssa_5
+    * vec1  1 ssa_7 = flt ssa_3, ssa_6
+    *
+    * if ssa_7 {
+    *    vec1 32 ssa_8 = fadd ssa_3, ssa_5
+    * } else {
+    * }
+    *
+    * After:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec1 32 ssa_3 = load_const ( 1.0)
+    * vec4 32 ssa_4 = fadd ssa_0, ssa_2
+    * vec1 32 ssa_5 = mov ssa_4.x
+    * vec1 32 ssa_6 = fneg ssa_5
+    * vec1 32 ssa_9 = fneg ssa_6
+    * vec1 32 ssa_10 = fadd ssa_3, ssa_9
+    * vec1 32 ssa_11 = load_const ( 0.0)
+    * vec1  1 ssa_12 = flt ssa_10, ssa_11
+    * vec1 32 ssa_13 = mov ssa_10
+    * vec1  1 ssa_14 = mov ssa_12
+    *
+    * if ssa_14 {
+    * } else {
+    * }
+    */
+   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
+   nir_ssa_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+
+   nir_ssa_def *flt = nir_flt(&bld, one, nir_fneg(&bld, b));
+
+   nir_if *nif = nir_push_if(&bld, flt);
+
+   nir_fadd(&bld, one, b);
+
+   nir_pop_if(&bld, nif);
+
+   EXPECT_TRUE(nir_opt_comparison_pre_impl(bld.impl));
+}
+
+TEST_F(comparison_pre_test, non_scalar_add_result)
+{
+   /* The optimization pass should not do anything because the result of the
+    * fadd is not a scalar.
+    *
+    * Before:
+    *
+    * vec4 32 ssa_0 = load_const (-2.0, -1.0,  1.0,  2.0)
+    * vec4 32 ssa_1 = load_const ( 2.0,  1.0, -1.0, -2.0)
+    * vec4 32 ssa_2 = load_const ( 3.0,  4.0,  5.0,  6.0)
+    * vec4 32 ssa_3 = fadd ssa_0, ssa_2
+    * vec1  1 ssa_4 = flt ssa_0.x, ssa_3.x
+    *
+    * if ssa_4 {
+    *    vec2 32 ssa_5 = fadd ssa_1.xx, ssa_3.xx
+    * } else {
+    * }
+    *
+    * After:
+    *
+    * No change.
+ */ + nir_ssa_def *a = nir_fadd(&bld, v1, v3); + + nir_alu_instr *flt = nir_alu_instr_create(bld.shader, nir_op_flt); + + flt->src[0].src = nir_src_for_ssa(v1); + flt->src[1].src = nir_src_for_ssa(a); + + memcpy(&flt->src[0].swizzle, xxxx, sizeof(xxxx)); + memcpy(&flt->src[1].swizzle, xxxx, sizeof(xxxx)); + + nir_builder_alu_instr_finish_and_insert(&bld, flt); + + flt->dest.dest.ssa.num_components = 1; + flt->dest.write_mask = 1; + + nir_if *nif = nir_push_if(&bld, &flt->dest.dest.ssa); + + nir_alu_instr *fadd = nir_alu_instr_create(bld.shader, nir_op_fadd); + + fadd->src[0].src = nir_src_for_ssa(v2); + fadd->src[1].src = nir_src_for_ssa(a); + + memcpy(&fadd->src[0].swizzle, xxxx, sizeof(xxxx)); + memcpy(&fadd->src[1].swizzle, xxxx, sizeof(xxxx)); + + nir_builder_alu_instr_finish_and_insert(&bld, fadd); + + fadd->dest.dest.ssa.num_components = 2; + fadd->dest.write_mask = 3; + + nir_pop_if(&bld, nif); + + EXPECT_FALSE(nir_opt_comparison_pre_impl(bld.impl)); +} -- 2.30.2