panfrost/midgard: Allow fp16 in scalar ALU
src/gallium/drivers/panfrost/midgard/midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */
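
/* For example: a 32-bit fmov whose writemask covers only component 0 --
 * mask 0b11, since 32-bit components occupy pairs of mask bits -- carries
 * no more information than a scalar move of one component, so it can be
 * rewritten losslessly. (Illustrative reading of the code below, not a
 * statement about what the scheduler actually emits.) */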

static int
component_from_mask(unsigned mask, bool full)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;

                /* Full (32-bit) components take a pair of mask bits each,
                 * so skip the odd bit and only test even positions */
                if (full)
                        c++;
        }

        assert(0);
        return 0;
}
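
/* Worked example for component_from_mask: in full mode, a mask of 0b1100
 * (component 1 written, as a bit pair) skips bit 0, matches bit 2, and
 * returns 2 -- i.e. the raw bit index, which is deliberately component 1
 * shifted left by one, matching the component << 1 encoding the scalar
 * source/output fields use for 32-bit operands. */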

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        /* A scalar source reads a single component, so take the swizzle's
         * first lane */
        unsigned component = v.swizzle & 3;
        bool upper = false; /* TODO */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

        if (s.full)
                s.component = component << 1;
        else
                s.component = component + (upper << 2);

        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

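        /* midgard_scalar_alu_src packs into six bits (assuming the packed
         * layout in midgard.h: abs:1, negate:1, full:1, component:3), so
         * round-trip the struct through an integer and mask off everything
         * above bit 5 */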
        unsigned o = 0;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full),
                .src2 = vector_to_scalar_source(v.src2, is_int, is_full),
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = component_from_mask(v.mask, is_full),
        };

        /* Inline constant is passed along rather than trying to extract it
         * from v */

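        /* The scalar encoding permutes the inline constant's bits relative
         * to the vector encoding; the shifts below implement (reading off
         * the code, not a spec) the mapping, with bit indices into lower_11:
         *
         *   imm[1:0]  = lower_11[10:9]
         *   imm[2]    = lower_11[8]
         *   imm[5:3]  = lower_11[7:5]
         *   imm[11:6] = lower_11[5:0]
         *
         * Note bit 5 lands in two places as written. */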
        if (ins->ssa_args.inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch || ins->prepacked_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

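        /* When present, the four 32-bit constants form the trailing 128-bit
         * word of the bundle; the bundle's size class (its ALU tag) is
         * presumed to already account for them. */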
        if (bundle->has_embedded_constants) {
                util_dynarray_append(emission, float, bundle->constants[0]);
                util_dynarray_append(emission, float, bundle->constants[1]);
                util_dynarray_append(emission, float, bundle->constants[2]);
                util_dynarray_append(emission, float, bundle->constants[3]);
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;
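
        /* The shift above slots the next bundle's tag into bits 7:4 of the
         * first control word, next to this bundle's own tag in bits 3:0 --
         * as far as the ISA is understood, this is what lets the hardware
         * prefetch the following bundle. */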

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

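                /* midgard_load_store packs into a single 128-bit word: a
                 * type nibble for this bundle, a next_type nibble for
                 * lookahead, then the two instruction payloads (word1/word2,
                 * assuming the packed layout in midgard.h). An unused second
                 * slot is filled with LDST_NOP above. */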
                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                ctx->texture_op_count--;

                if (ins->texture.op == TEXTURE_OP_NORMAL) {
                        bool continues = ctx->texture_op_count > 0;
                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}