intel/nir: Pass the nir_builder by reference in lower_alpha_to_coverage
[mesa.git] src/intel/compiler/brw_nir_lower_alpha_to_coverage.c
/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "brw_nir.h"

/**
 * We need to compute alpha-to-coverage dithering manually in the shader
 * and replace the sample mask store with the bitwise AND of the sample mask
 * and the alpha-to-coverage dither mask.
 *
 * The following formula is used to compute the final sample mask:
 *  m = int(16.0 * clamp(src0_alpha, 0.0, 1.0))
 *  dither_mask = 0x1111 * ((0xfea80 >> (m & ~3)) & 0xf) |
 *     0x0808 * (m & 2) | 0x0100 * (m & 1)
 *  sample_mask = sample_mask & dither_mask
 *
 * It gives a number of ones proportional to the alpha for 2, 4, 8 or 16
 * least significant bits of the result:
 *  0.0000 0000000000000000
 *  0.0625 0000000100000000
 *  0.1250 0001000000010000
 *  0.1875 0001000100010000
 *  0.2500 1000100010001000
 *  0.3125 1000100110001000
 *  0.3750 1001100010011000
 *  0.4375 1001100110011000
 *  0.5000 1010101010101010
 *  0.5625 1010101110101010
 *  0.6250 1011101010111010
 *  0.6875 1011101110111010
 *  0.7500 1110111011101110
 *  0.8125 1110111111101110
 *  0.8750 1111111011111110
 *  0.9375 1111111111111110
 *  1.0000 1111111111111111
 */
static nir_ssa_def *
build_dither_mask(nir_builder *b, nir_intrinsic_instr *store_instr)
{
   nir_ssa_def *alpha =
      nir_channel(b, nir_ssa_for_src(b, store_instr->src[0], 4), 3);

   nir_ssa_def *m =
      nir_f2i32(b, nir_fmul_imm(b, nir_fsat(b, alpha), 16.0));

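   /* part_a is the 4-bit pattern that the 0x1111 multiplier below replicates
    * into every nibble; part_b and part_c contribute the extra bits for the
    * two least significant bits of m, per the dither_mask formula above.
    */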
   nir_ssa_def *part_a =
      nir_iand(b,
               nir_imm_int(b, 0xf),
               nir_ushr(b,
                        nir_imm_int(b, 0xfea80),
                        nir_iand(b, m, nir_imm_int(b, ~3))));

   nir_ssa_def *part_b = nir_iand(b, m, nir_imm_int(b, 2));

   nir_ssa_def *part_c = nir_iand(b, m, nir_imm_int(b, 1));

   return nir_ior(b,
                  nir_imul_imm(b, part_a, 0x1111),
                  nir_ior(b,
                          nir_imul_imm(b, part_b, 0x0808),
                          nir_imul_imm(b, part_c, 0x0100)));
}

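/* Sketch of a hypothetical call site (not part of this file): a driver would
 * run this pass on the fragment shader NIR when alpha-to-coverage is enabled,
 * e.g.
 *
 *    if (key->alpha_to_coverage)
 *       brw_nir_lower_alpha_to_coverage(nir);
 *
 * The actual caller and the name of the enable flag depend on the backend.
 */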
void
brw_nir_lower_alpha_to_coverage(nir_shader *shader)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

   /* Bail out early if we don't have gl_SampleMask */
   if (!nir_find_variable_with_location(shader, nir_var_shader_out,
                                        FRAG_RESULT_SAMPLE_MASK))
      return;

   nir_foreach_function(function, shader) {
      nir_function_impl *impl = function->impl;
      nir_builder b;
      nir_builder_init(&b, impl);

      nir_foreach_block(block, impl) {
         nir_intrinsic_instr *sample_mask_instr = NULL;
         nir_intrinsic_instr *store_instr = NULL;

         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
               nir_variable *out = NULL;

               switch (intr->intrinsic) {
               case nir_intrinsic_store_output:
                  out = nir_find_variable_with_driver_location(shader, nir_var_shader_out,
                                                               nir_intrinsic_base(intr));
                  assert(out->data.mode == nir_var_shader_out);

                  /* save gl_SampleMask instruction pointer */
                  if (out->data.location == FRAG_RESULT_SAMPLE_MASK) {
                     assert(!sample_mask_instr);
                     sample_mask_instr = intr;
                  }

                  /* save out_color[0] instruction pointer */
                  if ((out->data.location == FRAG_RESULT_COLOR ||
                       out->data.location == FRAG_RESULT_DATA0)) {
                     nir_src *offset_src = nir_get_io_offset_src(intr);
                     if (nir_src_is_const(*offset_src) && nir_src_as_uint(*offset_src) == 0) {
                        assert(!store_instr);
                        store_instr = intr;
                     }
                  }
                  break;
               default:
                  continue;
               }
            }
         }

         if (sample_mask_instr && store_instr) {
            b.cursor = nir_before_instr(&store_instr->instr);
            nir_ssa_def *dither_mask = build_dither_mask(&b, store_instr);

            /* AND the dither mask into the gl_SampleMask value and move the
             * gl_SampleMask store so that it comes after the render target 0
             * store instruction.
             */
            nir_instr_remove(&sample_mask_instr->instr);
            dither_mask = nir_iand(&b, sample_mask_instr->src[0].ssa, dither_mask);
            nir_instr_insert_after(&store_instr->instr, &sample_mask_instr->instr);
            nir_instr_rewrite_src(&sample_mask_instr->instr,
                                  &sample_mask_instr->src[0],
                                  nir_src_for_ssa(dither_mask));
         }
      }
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }
}