intel/compiler: add ASSERTED annotation to avoid "unused variable" warning
[mesa.git] / src / intel / compiler / brw_eu.cpp
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 #include <sys/stat.h>
33 #include <fcntl.h>
34
35 #include "brw_eu_defines.h"
36 #include "brw_eu.h"
37 #include "brw_shader.h"
38 #include "dev/gen_debug.h"
39
40 #include "util/ralloc.h"
41
42 /* Returns a conditional modifier that negates the condition. */
43 enum brw_conditional_mod
44 brw_negate_cmod(enum brw_conditional_mod cmod)
45 {
46 switch (cmod) {
47 case BRW_CONDITIONAL_Z:
48 return BRW_CONDITIONAL_NZ;
49 case BRW_CONDITIONAL_NZ:
50 return BRW_CONDITIONAL_Z;
51 case BRW_CONDITIONAL_G:
52 return BRW_CONDITIONAL_LE;
53 case BRW_CONDITIONAL_GE:
54 return BRW_CONDITIONAL_L;
55 case BRW_CONDITIONAL_L:
56 return BRW_CONDITIONAL_GE;
57 case BRW_CONDITIONAL_LE:
58 return BRW_CONDITIONAL_G;
59 default:
60 unreachable("Can't negate this cmod");
61 }
62 }
63
64 /* Returns the corresponding conditional mod for swapping src0 and
65 * src1 in e.g. CMP.
66 */
67 enum brw_conditional_mod
68 brw_swap_cmod(enum brw_conditional_mod cmod)
69 {
70 switch (cmod) {
71 case BRW_CONDITIONAL_Z:
72 case BRW_CONDITIONAL_NZ:
73 return cmod;
74 case BRW_CONDITIONAL_G:
75 return BRW_CONDITIONAL_L;
76 case BRW_CONDITIONAL_GE:
77 return BRW_CONDITIONAL_LE;
78 case BRW_CONDITIONAL_L:
79 return BRW_CONDITIONAL_G;
80 case BRW_CONDITIONAL_LE:
81 return BRW_CONDITIONAL_GE;
82 default:
83 return BRW_CONDITIONAL_NONE;
84 }
85 }
86
87 /**
88 * Get the least significant bit offset of the i+1-th component of immediate
89 * type \p type. For \p i equal to the two's complement of j, return the
90 * offset of the j-th component starting from the end of the vector. For
91 * scalar register types return zero.
92 */
93 static unsigned
94 imm_shift(enum brw_reg_type type, unsigned i)
95 {
96 assert(type != BRW_REGISTER_TYPE_UV && type != BRW_REGISTER_TYPE_V &&
97 "Not implemented.");
98
99 if (type == BRW_REGISTER_TYPE_VF)
100 return 8 * (i & 3);
101 else
102 return 0;
103 }
104
/**
 * Swizzle an arbitrary immediate \p x of the given type according to the
 * permutation specified as \p swz.
 */
uint32_t
brw_swizzle_immediate(enum brw_reg_type type, uint32_t x, unsigned swz)
{
   /* imm_shift(type, 1) is non-zero only for vector immediate types (VF),
    * where it equals the bit width of one component.
    */
   if (imm_shift(type, 1)) {
      /* Number of components packed into the 32-bit immediate. */
      const unsigned n = 32 / imm_shift(type, 1);
      uint32_t y = 0;

      for (unsigned i = 0; i < n; i++) {
         /* Shift the specified component all the way to the right and left to
          * discard any undesired L/MSBs, then shift it right into component i.
          *
          * imm_shift(type, ~0u) is the offset of the last component, so the
          * "<< ... >> ..." pair parks the selected component at the top of
          * the word before dropping it into slot i.
          */
         y |= x >> imm_shift(type, (i & ~3) + BRW_GET_SWZ(swz, i & 3))
                << imm_shift(type, ~0u)
                >> imm_shift(type, ~0u - i);
      }

      return y;
   } else {
      /* Scalar immediates carry one value; swizzling is a no-op. */
      return x;
   }
}
130
131 unsigned
132 brw_get_default_exec_size(struct brw_codegen *p)
133 {
134 return p->current->exec_size;
135 }
136
137 unsigned
138 brw_get_default_group(struct brw_codegen *p)
139 {
140 return p->current->group;
141 }
142
143 unsigned
144 brw_get_default_access_mode(struct brw_codegen *p)
145 {
146 return p->current->access_mode;
147 }
148
149 tgl_swsb
150 brw_get_default_swsb(struct brw_codegen *p)
151 {
152 return p->current->swsb;
153 }
154
155 void
156 brw_set_default_exec_size(struct brw_codegen *p, unsigned value)
157 {
158 p->current->exec_size = value;
159 }
160
161 void brw_set_default_predicate_control(struct brw_codegen *p, enum brw_predicate pc)
162 {
163 p->current->predicate = pc;
164 }
165
166 void brw_set_default_predicate_inverse(struct brw_codegen *p, bool predicate_inverse)
167 {
168 p->current->pred_inv = predicate_inverse;
169 }
170
171 void brw_set_default_flag_reg(struct brw_codegen *p, int reg, int subreg)
172 {
173 assert(subreg < 2);
174 p->current->flag_subreg = reg * 2 + subreg;
175 }
176
177 void brw_set_default_access_mode( struct brw_codegen *p, unsigned access_mode )
178 {
179 p->current->access_mode = access_mode;
180 }
181
182 void
183 brw_set_default_compression_control(struct brw_codegen *p,
184 enum brw_compression compression_control)
185 {
186 switch (compression_control) {
187 case BRW_COMPRESSION_NONE:
188 /* This is the "use the first set of bits of dmask/vmask/arf
189 * according to execsize" option.
190 */
191 p->current->group = 0;
192 break;
193 case BRW_COMPRESSION_2NDHALF:
194 /* For SIMD8, this is "use the second set of 8 bits." */
195 p->current->group = 8;
196 break;
197 case BRW_COMPRESSION_COMPRESSED:
198 /* For SIMD16 instruction compression, use the first set of 16 bits
199 * since we don't do SIMD32 dispatch.
200 */
201 p->current->group = 0;
202 break;
203 default:
204 unreachable("not reached");
205 }
206
207 if (p->devinfo->gen <= 6) {
208 p->current->compressed =
209 (compression_control == BRW_COMPRESSION_COMPRESSED);
210 }
211 }
212
213 /**
214 * Enable or disable instruction compression on the given instruction leaving
215 * the currently selected channel enable group untouched.
216 */
217 void
218 brw_inst_set_compression(const struct gen_device_info *devinfo,
219 brw_inst *inst, bool on)
220 {
221 if (devinfo->gen >= 6) {
222 /* No-op, the EU will figure out for us whether the instruction needs to
223 * be compressed.
224 */
225 } else {
226 /* The channel group and compression controls are non-orthogonal, there
227 * are two possible representations for uncompressed instructions and we
228 * may need to preserve the current one to avoid changing the selected
229 * channel group inadvertently.
230 */
231 if (on)
232 brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_COMPRESSED);
233 else if (brw_inst_qtr_control(devinfo, inst)
234 == BRW_COMPRESSION_COMPRESSED)
235 brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
236 }
237 }
238
239 void
240 brw_set_default_compression(struct brw_codegen *p, bool on)
241 {
242 p->current->compressed = on;
243 }
244
/**
 * Apply the range of channel enable signals given by
 * [group, group + exec_size) to the instruction passed as argument.
 */
void
brw_inst_set_group(const struct gen_device_info *devinfo,
                   brw_inst *inst, unsigned group)
{
   if (devinfo->gen >= 7) {
      /* Gen7+ supports 4-channel granularity: quarter control picks the
       * 8-wide quarter, nibble control the 4-wide half within it.
       */
      assert(group % 4 == 0 && group < 32);
      brw_inst_set_qtr_control(devinfo, inst, group / 8);
      brw_inst_set_nib_control(devinfo, inst, (group / 4) % 2);

   } else if (devinfo->gen == 6) {
      /* Gen6 only has quarter (8-channel) granularity. */
      assert(group % 8 == 0 && group < 32);
      brw_inst_set_qtr_control(devinfo, inst, group / 8);

   } else {
      assert(group % 8 == 0 && group < 16);
      /* The channel group and compression controls are non-orthogonal, there
       * are two possible representations for group zero and we may need to
       * preserve the current one to avoid changing the selected compression
       * enable inadvertently.
       */
      if (group == 8)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_2NDHALF);
      else if (brw_inst_qtr_control(devinfo, inst) == BRW_COMPRESSION_2NDHALF)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   }
}
275
276 void
277 brw_set_default_group(struct brw_codegen *p, unsigned group)
278 {
279 p->current->group = group;
280 }
281
282 void brw_set_default_mask_control( struct brw_codegen *p, unsigned value )
283 {
284 p->current->mask_control = value;
285 }
286
287 void brw_set_default_saturate( struct brw_codegen *p, bool enable )
288 {
289 p->current->saturate = enable;
290 }
291
292 void brw_set_default_acc_write_control(struct brw_codegen *p, unsigned value)
293 {
294 p->current->acc_wr_control = value;
295 }
296
297 void brw_set_default_swsb(struct brw_codegen *p, tgl_swsb value)
298 {
299 p->current->swsb = value;
300 }
301
302 void brw_push_insn_state( struct brw_codegen *p )
303 {
304 assert(p->current != &p->stack[BRW_EU_MAX_INSN_STACK-1]);
305 *(p->current + 1) = *p->current;
306 p->current++;
307 }
308
309 void brw_pop_insn_state( struct brw_codegen *p )
310 {
311 assert(p->current != p->stack);
312 p->current--;
313 }
314
315
/***********************************************************************
 * Code generator initialization.
 */

/* Initialize \p p for emitting instructions for \p devinfo; all allocations
 * are made from \p mem_ctx (ralloc context), which owns the storage.
 */
void
brw_init_codegen(const struct gen_device_info *devinfo,
                 struct brw_codegen *p, void *mem_ctx)
{
   memset(p, 0, sizeof(*p));

   p->devinfo = devinfo;
   p->automatic_exec_sizes = true;
   /*
    * Set the initial instruction store array size to 1024, if found that
    * isn't enough, then it will double the store size at brw_next_insn()
    * until out of memory.
    */
   p->store_size = 1024;
   p->store = rzalloc_array(mem_ctx, brw_inst, p->store_size);
   p->nr_insn = 0;
   p->current = p->stack;
   memset(p->current, 0, sizeof(p->current[0]));

   p->mem_ctx = mem_ctx;

   /* Initial default instruction state. */
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   /* NOTE(review): BRW_MASK_ENABLE as the initial mask control is
    * long-standing here; confirm intent against brw_eu_defines.h.
    */
   brw_set_default_mask_control(p, BRW_MASK_ENABLE);
   brw_set_default_saturate(p, 0);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);

   /* Set up control flow stack */
   p->if_stack_depth = 0;
   p->if_stack_array_size = 16;
   p->if_stack = rzalloc_array(mem_ctx, int, p->if_stack_array_size);

   p->loop_stack_depth = 0;
   p->loop_stack_array_size = 16;
   p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
   p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
}
356
357
358 const unsigned *brw_get_program( struct brw_codegen *p,
359 unsigned *sz )
360 {
361 *sz = p->next_insn_offset;
362 return (const unsigned *)p->store;
363 }
364
365 bool brw_try_override_assembly(struct brw_codegen *p, int start_offset,
366 const char *identifier)
367 {
368 const char *read_path = getenv("INTEL_SHADER_ASM_READ_PATH");
369 if (!read_path) {
370 return false;
371 }
372
373 char *name = ralloc_asprintf(NULL, "%s/%s.bin", read_path, identifier);
374
375 int fd = open(name, O_RDONLY);
376 ralloc_free(name);
377
378 if (fd == -1) {
379 return false;
380 }
381
382 struct stat sb;
383 if (fstat(fd, &sb) != 0 || (!S_ISREG(sb.st_mode))) {
384 close(fd);
385 return false;
386 }
387
388 p->nr_insn -= (p->next_insn_offset - start_offset) / sizeof(brw_inst);
389 p->nr_insn += sb.st_size / sizeof(brw_inst);
390
391 p->next_insn_offset = start_offset + sb.st_size;
392 p->store_size = (start_offset + sb.st_size) / sizeof(brw_inst);
393 p->store = (brw_inst *)reralloc_size(p->mem_ctx, p->store, p->next_insn_offset);
394 assert(p->store);
395
396 read(fd, p->store + start_offset, sb.st_size);
397 close(fd);
398
399 ASSERTED bool valid =
400 brw_validate_instructions(p->devinfo, p->store,
401 start_offset, p->next_insn_offset,
402 0);
403 assert(valid);
404
405 return true;
406 }
407
/**
 * Disassemble the byte range [start, end) of \p assembly to \p out,
 * prefixing each instruction with its raw hex encoding when
 * INTEL_DEBUG=hex is set.
 */
void
brw_disassemble(const struct gen_device_info *devinfo,
                const void *assembly, int start, int end, FILE *out)
{
   bool dump_hex = (INTEL_DEBUG & DEBUG_HEX) != 0;

   for (int offset = start; offset < end;) {
      const brw_inst *insn = (const brw_inst *)((char *)assembly + offset);
      brw_inst uncompacted;
      bool compacted = brw_inst_cmpt_control(devinfo, insn);
      /* Disabled debugging aid: prefix each line with its byte offset. */
      if (0)
         fprintf(out, "0x%08x: ", offset);

      if (compacted) {
         /* NOTE: this pointer intentionally shadows the bool above for the
          * duration of this block.
          */
         brw_compact_inst *compacted = (brw_compact_inst *)insn;
         if (dump_hex) {
            /* Compacted instructions are 8 bytes long. */
            unsigned char * insn_ptr = ((unsigned char *)&insn[0]);
            const unsigned int blank_spaces = 24;
            for (int i = 0 ; i < 8; i = i + 4) {
               fprintf(out, "%02x %02x %02x %02x ",
                       insn_ptr[i],
                       insn_ptr[i + 1],
                       insn_ptr[i + 2],
                       insn_ptr[i + 3]);
            }
            /* Make compacted instructions hex value output vertically aligned
             * with uncompacted instructions hex value
             */
            fprintf(out, "%*c", blank_spaces, ' ');
         }

         /* Expand to the full form before handing it to the disassembler. */
         brw_uncompact_instruction(devinfo, &uncompacted, compacted);
         insn = &uncompacted;
         offset += 8;
      } else {
         if (dump_hex) {
            /* Native instructions are 16 bytes long. */
            unsigned char * insn_ptr = ((unsigned char *)&insn[0]);
            for (int i = 0 ; i < 16; i = i + 4) {
               fprintf(out, "%02x %02x %02x %02x ",
                       insn_ptr[i],
                       insn_ptr[i + 1],
                       insn_ptr[i + 2],
                       insn_ptr[i + 3]);
            }
         }
         offset += 16;
      }

      brw_disassemble_inst(out, devinfo, insn, compacted);
   }
}
459
/* Bitset of hardware generations, used to tag each opcode_descs[] entry
 * below with the generations that support it.
 */
enum gen {
   GEN4   = (1 << 0),
   GEN45  = (1 << 1),
   GEN5   = (1 << 2),
   GEN6   = (1 << 3),
   GEN7   = (1 << 4),
   GEN75  = (1 << 5),
   GEN8   = (1 << 6),
   GEN9   = (1 << 7),
   GEN10  = (1 << 8),
   GEN11  = (1 << 9),
   GEN12  = (1 << 10),
   GEN_ALL = ~0
};

/* Masks of all generations strictly before / at-or-after / at-or-before the
 * given generation bit.
 */
#define GEN_LT(gen) ((gen) - 1)
#define GEN_GE(gen) (~GEN_LT(gen))
#define GEN_LE(gen) (GEN_LT(gen) | (gen))
478
/* Table of all opcodes: IR opcode, hardware encoding, mnemonic, number of
 * sources, number of destinations, and the set of generations (enum gen
 * bitset) that support the encoding.  The same IR opcode may appear twice
 * with different HW encodings for disjoint generation sets (e.g. pre- and
 * post-Gen12), and the same HW encoding may be reused by different opcodes
 * on disjoint generations (e.g. "iff" and "brc").
 */
static const struct opcode_desc opcode_descs[] = {
   /* IR, HW, name, nsrc, ndst, gens */
   { BRW_OPCODE_ILLEGAL, 0, "illegal", 0, 0, GEN_ALL },
   { BRW_OPCODE_SYNC, 1, "sync", 1, 0, GEN_GE(GEN12) },
   { BRW_OPCODE_MOV, 1, "mov", 1, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_MOV, 97, "mov", 1, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_SEL, 2, "sel", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_SEL, 98, "sel", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_MOVI, 3, "movi", 2, 1, GEN_GE(GEN45) & GEN_LT(GEN12) },
   { BRW_OPCODE_MOVI, 99, "movi", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_NOT, 4, "not", 1, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_NOT, 100, "not", 1, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_AND, 5, "and", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_AND, 101, "and", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_OR, 6, "or", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_OR, 102, "or", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_XOR, 7, "xor", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_XOR, 103, "xor", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_SHR, 8, "shr", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_SHR, 104, "shr", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_SHL, 9, "shl", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_SHL, 105, "shl", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_DIM, 10, "dim", 1, 1, GEN75 },
   { BRW_OPCODE_SMOV, 10, "smov", 0, 0, GEN_GE(GEN8) & GEN_LT(GEN12) },
   { BRW_OPCODE_SMOV, 106, "smov", 0, 0, GEN_GE(GEN12) },
   { BRW_OPCODE_ASR, 12, "asr", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_ASR, 108, "asr", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_ROR, 14, "ror", 2, 1, GEN11 },
   { BRW_OPCODE_ROR, 110, "ror", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_ROL, 15, "rol", 2, 1, GEN11 },
   { BRW_OPCODE_ROL, 111, "rol", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_CMP, 16, "cmp", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_CMP, 112, "cmp", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_CMPN, 17, "cmpn", 2, 1, GEN_LT(GEN12) },
   { BRW_OPCODE_CMPN, 113, "cmpn", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_CSEL, 18, "csel", 3, 1, GEN_GE(GEN8) & GEN_LT(GEN12) },
   { BRW_OPCODE_CSEL, 114, "csel", 3, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_F32TO16, 19, "f32to16", 1, 1, GEN7 | GEN75 },
   { BRW_OPCODE_F16TO32, 20, "f16to32", 1, 1, GEN7 | GEN75 },
   { BRW_OPCODE_BFREV, 23, "bfrev", 1, 1, GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFREV, 119, "bfrev", 1, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_BFE, 24, "bfe", 3, 1, GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFE, 120, "bfe", 3, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_BFI1, 25, "bfi1", 2, 1, GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFI1, 121, "bfi1", 2, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_BFI2, 26, "bfi2", 3, 1, GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFI2, 122, "bfi2", 3, 1, GEN_GE(GEN12) },
   { BRW_OPCODE_JMPI, 32, "jmpi", 0, 0, GEN_ALL },
   { BRW_OPCODE_BRD, 33, "brd", 0, 0, GEN_GE(GEN7) },
   { BRW_OPCODE_IF, 34, "if", 0, 0, GEN_ALL },
   { BRW_OPCODE_IFF, 35, "iff", 0, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_BRC, 35, "brc", 0, 0, GEN_GE(GEN7) },
   { BRW_OPCODE_ELSE, 36, "else", 0, 0, GEN_ALL },
   { BRW_OPCODE_ENDIF, 37, "endif", 0, 0, GEN_ALL },
   { BRW_OPCODE_DO, 38, "do", 0, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_CASE, 38, "case", 0, 0, GEN6 },
   { BRW_OPCODE_WHILE, 39, "while", 0, 0, GEN_ALL },
   { BRW_OPCODE_BREAK, 40, "break", 0, 0, GEN_ALL },
   { BRW_OPCODE_CONTINUE, 41, "cont", 0, 0, GEN_ALL },
   { BRW_OPCODE_HALT, 42, "halt", 0, 0, GEN_ALL },
   { BRW_OPCODE_CALLA, 43, "calla", 0, 0, GEN_GE(GEN75) },
   { BRW_OPCODE_MSAVE, 44, "msave", 0, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_CALL, 44, "call", 0, 0, GEN_GE(GEN6) },
   { BRW_OPCODE_MREST, 45, "mrest", 0, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_RET, 45, "ret", 0, 0, GEN_GE(GEN6) },
   { BRW_OPCODE_PUSH, 46, "push", 0, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_FORK, 46, "fork", 0, 0, GEN6 },
   { BRW_OPCODE_GOTO, 46, "goto", 0, 0, GEN_GE(GEN8) },
   { BRW_OPCODE_POP, 47, "pop", 2, 0, GEN_LE(GEN5) },
   { BRW_OPCODE_WAIT, 48, "wait", 1, 0, GEN_LT(GEN12) },
   { BRW_OPCODE_SEND, 49, "send", 1, 1, GEN_ALL },
   { BRW_OPCODE_SENDC, 50, "sendc", 1, 1, GEN_ALL },
   { BRW_OPCODE_SENDS, 51, "sends", 2, 1, GEN_GE(GEN9) & GEN_LT(GEN12) },
   { BRW_OPCODE_SENDSC, 52, "sendsc", 2, 1, GEN_GE(GEN9) & GEN_LT(GEN12) },
   { BRW_OPCODE_MATH, 56, "math", 2, 1, GEN_GE(GEN6) },
   { BRW_OPCODE_ADD, 64, "add", 2, 1, GEN_ALL },
   { BRW_OPCODE_MUL, 65, "mul", 2, 1, GEN_ALL },
   { BRW_OPCODE_AVG, 66, "avg", 2, 1, GEN_ALL },
   { BRW_OPCODE_FRC, 67, "frc", 1, 1, GEN_ALL },
   { BRW_OPCODE_RNDU, 68, "rndu", 1, 1, GEN_ALL },
   { BRW_OPCODE_RNDD, 69, "rndd", 1, 1, GEN_ALL },
   { BRW_OPCODE_RNDE, 70, "rnde", 1, 1, GEN_ALL },
   { BRW_OPCODE_RNDZ, 71, "rndz", 1, 1, GEN_ALL },
   { BRW_OPCODE_MAC, 72, "mac", 2, 1, GEN_ALL },
   { BRW_OPCODE_MACH, 73, "mach", 2, 1, GEN_ALL },
   { BRW_OPCODE_LZD, 74, "lzd", 1, 1, GEN_ALL },
   { BRW_OPCODE_FBH, 75, "fbh", 1, 1, GEN_GE(GEN7) },
   { BRW_OPCODE_FBL, 76, "fbl", 1, 1, GEN_GE(GEN7) },
   { BRW_OPCODE_CBIT, 77, "cbit", 1, 1, GEN_GE(GEN7) },
   { BRW_OPCODE_ADDC, 78, "addc", 2, 1, GEN_GE(GEN7) },
   { BRW_OPCODE_SUBB, 79, "subb", 2, 1, GEN_GE(GEN7) },
   { BRW_OPCODE_SAD2, 80, "sad2", 2, 1, GEN_ALL },
   { BRW_OPCODE_SADA2, 81, "sada2", 2, 1, GEN_ALL },
   { BRW_OPCODE_DP4, 84, "dp4", 2, 1, GEN_LT(GEN11) },
   { BRW_OPCODE_DPH, 85, "dph", 2, 1, GEN_LT(GEN11) },
   { BRW_OPCODE_DP3, 86, "dp3", 2, 1, GEN_LT(GEN11) },
   { BRW_OPCODE_DP2, 87, "dp2", 2, 1, GEN_LT(GEN11) },
   { BRW_OPCODE_LINE, 89, "line", 2, 1, GEN_LE(GEN10) },
   { BRW_OPCODE_PLN, 90, "pln", 2, 1, GEN_GE(GEN45) & GEN_LE(GEN10) },
   { BRW_OPCODE_MAD, 91, "mad", 3, 1, GEN_GE(GEN6) },
   { BRW_OPCODE_LRP, 92, "lrp", 3, 1, GEN_GE(GEN6) & GEN_LE(GEN10) },
   { BRW_OPCODE_MADM, 93, "madm", 3, 1, GEN_GE(GEN8) },
   { BRW_OPCODE_NENOP, 125, "nenop", 0, 0, GEN45 },
   { BRW_OPCODE_NOP, 126, "nop", 0, 0, GEN_LT(GEN12) },
   { BRW_OPCODE_NOP, 96, "nop", 0, 0, GEN_GE(GEN12) }
};
585
586 static enum gen
587 gen_from_devinfo(const struct gen_device_info *devinfo)
588 {
589 switch (devinfo->gen) {
590 case 4: return devinfo->is_g4x ? GEN45 : GEN4;
591 case 5: return GEN5;
592 case 6: return GEN6;
593 case 7: return devinfo->is_haswell ? GEN75 : GEN7;
594 case 8: return GEN8;
595 case 9: return GEN9;
596 case 10: return GEN10;
597 case 11: return GEN11;
598 case 12: return GEN12;
599 default:
600 unreachable("not reached");
601 }
602 }
603
/**
 * Look up the opcode_descs[] entry with \p key member matching \p k which is
 * supported by the device specified by \p devinfo, or NULL if there is no
 * matching entry.
 *
 * This is implemented by using an index data structure (storage for which is
 * provided by the caller as \p index_gen and \p index_descs) in order to
 * provide efficient constant-time look-up.
 */
static const opcode_desc *
lookup_opcode_desc(gen *index_gen,
                   const opcode_desc **index_descs,
                   unsigned index_size,
                   unsigned opcode_desc::*key,
                   const gen_device_info *devinfo,
                   unsigned k)
{
   /* Rebuild the index whenever it was last built for a different
    * generation than the one requested now.
    */
   if (*index_gen != gen_from_devinfo(devinfo)) {
      *index_gen = gen_from_devinfo(devinfo);

      for (unsigned l = 0; l < index_size; l++)
         index_descs[l] = NULL;

      /* Populate the index from every table row supported by this
       * generation, keyed by the C++ member pointer \p key (either the IR
       * opcode or the HW encoding).  The assert guarantees at most one
       * entry per key for a given generation.
       */
      for (unsigned i = 0; i < ARRAY_SIZE(opcode_descs); i++) {
         if (opcode_descs[i].gens & *index_gen) {
            const unsigned l = opcode_descs[i].*key;
            assert(l < index_size && !index_descs[l]);
            index_descs[l] = &opcode_descs[i];
         }
      }
   }

   if (k < index_size)
      return index_descs[k];
   else
      return NULL;
}
641
/**
 * Return the matching opcode_desc for the specified IR opcode and hardware
 * generation, or NULL if the opcode is not supported by the device.
 */
const struct opcode_desc *
brw_opcode_desc(const struct gen_device_info *devinfo, enum opcode opcode)
{
   /* Per-thread cached index keyed by IR opcode; lookup_opcode_desc()
    * rebuilds it whenever the generation changes.  __thread storage makes
    * this safe without locking.
    */
   static __thread gen index_gen = {};
   static __thread const opcode_desc *index_descs[NUM_BRW_OPCODES];
   return lookup_opcode_desc(&index_gen, index_descs, ARRAY_SIZE(index_descs),
                             &opcode_desc::ir, devinfo, opcode);
}
654
/**
 * Return the matching opcode_desc for the specified HW opcode and hardware
 * generation, or NULL if the opcode is not supported by the device.
 */
const struct opcode_desc *
brw_opcode_desc_from_hw(const struct gen_device_info *devinfo, unsigned hw)
{
   /* Per-thread cached index keyed by HW encoding; 128 entries covers every
    * encoding used in opcode_descs[] (largest is 126).
    */
   static __thread gen index_gen = {};
   static __thread const opcode_desc *index_descs[128];
   return lookup_opcode_desc(&index_gen, index_descs, ARRAY_SIZE(index_descs),
                             &opcode_desc::hw, devinfo, hw);
}