intel/disasm: brw_label and support functions
[mesa.git] src/intel/compiler/brw_eu.cpp
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h> /* for read() and close() */

#include "brw_eu_defines.h"
#include "brw_eu.h"
#include "brw_shader.h"
#include "brw_gen_enum.h"
#include "dev/gen_debug.h"

#include "util/ralloc.h"

/* Returns a conditional modifier that negates the condition. */
enum brw_conditional_mod
brw_negate_cmod(enum brw_conditional_mod cmod)
{
   switch (cmod) {
   case BRW_CONDITIONAL_Z:
      return BRW_CONDITIONAL_NZ;
   case BRW_CONDITIONAL_NZ:
      return BRW_CONDITIONAL_Z;
   case BRW_CONDITIONAL_G:
      return BRW_CONDITIONAL_LE;
   case BRW_CONDITIONAL_GE:
      return BRW_CONDITIONAL_L;
   case BRW_CONDITIONAL_L:
      return BRW_CONDITIONAL_GE;
   case BRW_CONDITIONAL_LE:
      return BRW_CONDITIONAL_G;
   default:
      unreachable("Can't negate this cmod");
   }
}

/* Returns the corresponding conditional mod for swapping src0 and
 * src1 in e.g. CMP.
 */
enum brw_conditional_mod
brw_swap_cmod(enum brw_conditional_mod cmod)
{
   switch (cmod) {
   case BRW_CONDITIONAL_Z:
   case BRW_CONDITIONAL_NZ:
      return cmod;
   case BRW_CONDITIONAL_G:
      return BRW_CONDITIONAL_L;
   case BRW_CONDITIONAL_GE:
      return BRW_CONDITIONAL_LE;
   case BRW_CONDITIONAL_L:
      return BRW_CONDITIONAL_G;
   case BRW_CONDITIONAL_LE:
      return BRW_CONDITIONAL_GE;
   default:
      return BRW_CONDITIONAL_NONE;
   }
}
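
/* Illustrative examples (not from the original source): negating a modifier
 * inverts the sense of the comparison, while swapping accounts for exchanged
 * operands, so for a greater-or-equal compare:
 *
 *    brw_negate_cmod(BRW_CONDITIONAL_GE) == BRW_CONDITIONAL_L
 *    brw_swap_cmod(BRW_CONDITIONAL_GE)   == BRW_CONDITIONAL_LE
 *
 * i.e. "a >= b" negates to "a < b" but is equivalent to "b <= a".
 */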

/**
 * Get the least significant bit offset of the i+1-th component of immediate
 * type \p type. For \p i equal to the two's complement of j, return the
 * offset of the j-th component starting from the end of the vector. For
 * scalar register types return zero.
 */
static unsigned
imm_shift(enum brw_reg_type type, unsigned i)
{
   assert(type != BRW_REGISTER_TYPE_UV && type != BRW_REGISTER_TYPE_V &&
          "Not implemented.");

   if (type == BRW_REGISTER_TYPE_VF)
      return 8 * (i & 3);
   else
      return 0;
}
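
/* Worked example (illustrative): a VF immediate packs four 8-bit restricted
 * floats into 32 bits, so imm_shift(BRW_REGISTER_TYPE_VF, 2) == 16 -- the
 * third component starts at bit 16 -- while any scalar type such as
 * BRW_REGISTER_TYPE_F yields 0.
 */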

/**
 * Swizzle an arbitrary immediate \p x of the given type according to the
 * permutation specified as \p swz.
 */
uint32_t
brw_swizzle_immediate(enum brw_reg_type type, uint32_t x, unsigned swz)
{
   if (imm_shift(type, 1)) {
      const unsigned n = 32 / imm_shift(type, 1);
      uint32_t y = 0;

      for (unsigned i = 0; i < n; i++) {
         /* Shift the specified component all the way to the right and left to
          * discard any undesired L/MSBs, then shift it right into component i.
          */
         y |= x >> imm_shift(type, (i & ~3) + BRW_GET_SWZ(swz, i & 3))
                << imm_shift(type, ~0u)
                >> imm_shift(type, ~0u - i);
      }

      return y;
   } else {
      return x;
   }
}
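
/* Worked example (illustrative): for a VF immediate 0x44332211 (components
 * x=0x11, y=0x22, z=0x33, w=0x44, least significant byte first), replicating
 * the w component gives
 *
 *    brw_swizzle_immediate(BRW_REGISTER_TYPE_VF, 0x44332211,
 *                          BRW_SWIZZLE_WWWW) == 0x44444444
 *
 * Scalar types fall through the "else" branch and are returned unchanged.
 */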

unsigned
brw_get_default_exec_size(struct brw_codegen *p)
{
   return p->current->exec_size;
}

unsigned
brw_get_default_group(struct brw_codegen *p)
{
   return p->current->group;
}

unsigned
brw_get_default_access_mode(struct brw_codegen *p)
{
   return p->current->access_mode;
}

tgl_swsb
brw_get_default_swsb(struct brw_codegen *p)
{
   return p->current->swsb;
}

void
brw_set_default_exec_size(struct brw_codegen *p, unsigned value)
{
   p->current->exec_size = value;
}

void brw_set_default_predicate_control(struct brw_codegen *p, enum brw_predicate pc)
{
   p->current->predicate = pc;
}

void brw_set_default_predicate_inverse(struct brw_codegen *p, bool predicate_inverse)
{
   p->current->pred_inv = predicate_inverse;
}

void brw_set_default_flag_reg(struct brw_codegen *p, int reg, int subreg)
{
   assert(subreg < 2);
   p->current->flag_subreg = reg * 2 + subreg;
}

void brw_set_default_access_mode(struct brw_codegen *p, unsigned access_mode)
{
   p->current->access_mode = access_mode;
}

void
brw_set_default_compression_control(struct brw_codegen *p,
                                    enum brw_compression compression_control)
{
   switch (compression_control) {
   case BRW_COMPRESSION_NONE:
      /* This is the "use the first set of bits of dmask/vmask/arf
       * according to execsize" option.
       */
      p->current->group = 0;
      break;
   case BRW_COMPRESSION_2NDHALF:
      /* For SIMD8, this is "use the second set of 8 bits." */
      p->current->group = 8;
      break;
   case BRW_COMPRESSION_COMPRESSED:
      /* For SIMD16 instruction compression, use the first set of 16 bits
       * since we don't do SIMD32 dispatch.
       */
      p->current->group = 0;
      break;
   default:
      unreachable("not reached");
   }

   if (p->devinfo->gen <= 6) {
      p->current->compressed =
         (compression_control == BRW_COMPRESSION_COMPRESSED);
   }
}

/**
 * Enable or disable instruction compression on the given instruction leaving
 * the currently selected channel enable group untouched.
 */
void
brw_inst_set_compression(const struct gen_device_info *devinfo,
                         brw_inst *inst, bool on)
{
   if (devinfo->gen >= 6) {
      /* No-op, the EU will figure out for us whether the instruction needs to
       * be compressed.
       */
   } else {
      /* The channel group and compression controls are non-orthogonal, there
       * are two possible representations for uncompressed instructions and we
       * may need to preserve the current one to avoid changing the selected
       * channel group inadvertently.
       */
      if (on)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_COMPRESSED);
      else if (brw_inst_qtr_control(devinfo, inst)
               == BRW_COMPRESSION_COMPRESSED)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   }
}

void
brw_set_default_compression(struct brw_codegen *p, bool on)
{
   p->current->compressed = on;
}

/**
 * Apply the range of channel enable signals given by
 * [group, group + exec_size) to the instruction passed as argument.
 */
void
brw_inst_set_group(const struct gen_device_info *devinfo,
                   brw_inst *inst, unsigned group)
{
   if (devinfo->gen >= 7) {
      assert(group % 4 == 0 && group < 32);
      brw_inst_set_qtr_control(devinfo, inst, group / 8);
      brw_inst_set_nib_control(devinfo, inst, (group / 4) % 2);

   } else if (devinfo->gen == 6) {
      assert(group % 8 == 0 && group < 32);
      brw_inst_set_qtr_control(devinfo, inst, group / 8);

   } else {
      assert(group % 8 == 0 && group < 16);
      /* The channel group and compression controls are non-orthogonal, there
       * are two possible representations for group zero and we may need to
       * preserve the current one to avoid changing the selected compression
       * enable inadvertently.
       */
      if (group == 8)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_2NDHALF);
      else if (brw_inst_qtr_control(devinfo, inst) == BRW_COMPRESSION_2NDHALF)
         brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   }
}
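
/* Example (illustrative): a SIMD8 instruction acting on channels 8..15 of a
 * SIMD16 dispatch uses group == 8; on Gen7+ the code above encodes that as
 * qtr_control = 1 (second quarter) and nib_control = 0, while group == 4
 * would set qtr_control = 0 and nib_control = 1.
 */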

void
brw_set_default_group(struct brw_codegen *p, unsigned group)
{
   p->current->group = group;
}

void brw_set_default_mask_control(struct brw_codegen *p, unsigned value)
{
   p->current->mask_control = value;
}

void brw_set_default_saturate(struct brw_codegen *p, bool enable)
{
   p->current->saturate = enable;
}

void brw_set_default_acc_write_control(struct brw_codegen *p, unsigned value)
{
   p->current->acc_wr_control = value;
}

void brw_set_default_swsb(struct brw_codegen *p, tgl_swsb value)
{
   p->current->swsb = value;
}

void brw_push_insn_state(struct brw_codegen *p)
{
   assert(p->current != &p->stack[BRW_EU_MAX_INSN_STACK-1]);
   *(p->current + 1) = *p->current;
   p->current++;
}

void brw_pop_insn_state(struct brw_codegen *p)
{
   assert(p->current != p->stack);
   p->current--;
}
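
/* Typical usage sketch (illustrative; brw_MOV() and the register helpers are
 * declared in brw_eu.h, and "dst"/"src" stand in for whatever registers the
 * caller is working with): temporarily switch to a scalar, unmasked move and
 * then restore the previous defaults.
 *
 *    brw_push_insn_state(p);
 *    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
 *    brw_set_default_exec_size(p, BRW_EXECUTE_1);
 *    brw_MOV(p, dst, src);
 *    brw_pop_insn_state(p);
 */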


/***********************************************************************
 */
void
brw_init_codegen(const struct gen_device_info *devinfo,
                 struct brw_codegen *p, void *mem_ctx)
{
   memset(p, 0, sizeof(*p));

   p->devinfo = devinfo;
   p->automatic_exec_sizes = true;
   /*
    * Set the initial instruction store array size to 1024; if that turns out
    * not to be enough, brw_next_insn() will keep doubling the store size
    * until we run out of memory.
    */
   p->store_size = 1024;
   p->store = rzalloc_array(mem_ctx, brw_inst, p->store_size);
   p->nr_insn = 0;
   p->current = p->stack;
   memset(p->current, 0, sizeof(p->current[0]));

   p->mem_ctx = mem_ctx;

   /* Set up reasonable default instruction state. */
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_set_default_mask_control(p, BRW_MASK_ENABLE); /* normal per-channel execution masking */
   brw_set_default_saturate(p, 0);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);

   /* Set up control flow stack */
   p->if_stack_depth = 0;
   p->if_stack_array_size = 16;
   p->if_stack = rzalloc_array(mem_ctx, int, p->if_stack_array_size);

   p->loop_stack_depth = 0;
   p->loop_stack_array_size = 16;
   p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
   p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
}


const unsigned *brw_get_program(struct brw_codegen *p,
                                unsigned *sz)
{
   *sz = p->next_insn_offset;
   return (const unsigned *)p->store;
}
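
/* Minimal lifecycle sketch (illustrative; assumes a ralloc memory context,
 * the caller's gen_device_info in "devinfo", and the emission helpers from
 * brw_eu_emit.c such as brw_NOP()):
 *
 *    void *mem_ctx = ralloc_context(NULL);
 *    struct brw_codegen p;
 *    brw_init_codegen(devinfo, &p, mem_ctx);
 *    brw_NOP(&p);                                  // emit instructions
 *    unsigned sz;
 *    const unsigned *program = brw_get_program(&p, &sz);
 *    // "program" points at sz bytes of native code owned by mem_ctx.
 */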

bool brw_try_override_assembly(struct brw_codegen *p, int start_offset,
                               const char *identifier)
{
   const char *read_path = getenv("INTEL_SHADER_ASM_READ_PATH");
   if (!read_path) {
      return false;
   }

   char *name = ralloc_asprintf(NULL, "%s/%s.bin", read_path, identifier);

   int fd = open(name, O_RDONLY);
   ralloc_free(name);

   if (fd == -1) {
      return false;
   }

   struct stat sb;
   if (fstat(fd, &sb) != 0 || (!S_ISREG(sb.st_mode))) {
      close(fd);
      return false;
   }

   p->nr_insn -= (p->next_insn_offset - start_offset) / sizeof(brw_inst);
   p->nr_insn += sb.st_size / sizeof(brw_inst);

   p->next_insn_offset = start_offset + sb.st_size;
   p->store_size = (start_offset + sb.st_size) / sizeof(brw_inst);
   p->store = (brw_inst *)reralloc_size(p->mem_ctx, p->store, p->next_insn_offset);
   assert(p->store);

   /* start_offset is a byte offset, so do the pointer arithmetic in bytes
    * rather than in units of brw_inst.
    */
   ssize_t ret = read(fd, (char *)p->store + start_offset, sb.st_size);
   close(fd);
   if (ret != sb.st_size) {
      return false;
   }

   ASSERTED bool valid =
      brw_validate_instructions(p->devinfo, p->store,
                                start_offset, p->next_insn_offset,
                                NULL);
   assert(valid);

   return true;
}
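
/* Example of the override flow this implements (illustrative; the identifier
 * string is chosen by the caller, "FS8_0" is just a stand-in): with
 *
 *    INTEL_SHADER_ASM_READ_PATH=/tmp/shader_override
 *
 * in the environment and an identifier of "FS8_0", the freshly generated
 * assembly starting at start_offset is replaced by the contents of
 * /tmp/shader_override/FS8_0.bin, provided the file exists, is a regular
 * file and passes brw_validate_instructions().
 */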

const struct brw_label *
brw_find_label(const struct brw_label *root, int offset)
{
   const struct brw_label *curr = root;

   if (curr != NULL)
   {
      do {
         if (curr->offset == offset)
            return curr;

         curr = curr->next;
      } while (curr != NULL);
   }

   return curr;
}

void
brw_create_label(struct brw_label **labels, int offset, void *mem_ctx)
{
   if (*labels != NULL) {
      struct brw_label *curr = *labels;
      struct brw_label *prev;

      do {
         prev = curr;

         if (curr->offset == offset)
            return;

         curr = curr->next;
      } while (curr != NULL);

      curr = ralloc(mem_ctx, struct brw_label);
      curr->offset = offset;
      curr->number = prev->number + 1;
      curr->next = NULL;
      prev->next = curr;
   } else {
      struct brw_label *root = ralloc(mem_ctx, struct brw_label);
      root->number = 0;
      root->offset = offset;
      root->next = NULL;
      *labels = root;
   }
}
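
/* Usage sketch (illustrative): record every jump target found while scanning
 * a program, then resolve offsets back to label numbers when printing.
 *
 *    struct brw_label *labels = NULL;
 *    brw_create_label(&labels, target_offset, mem_ctx);
 *    ...
 *    const struct brw_label *l = brw_find_label(labels, target_offset);
 *    if (l != NULL)
 *       fprintf(out, "LABEL%d", l->number);
 */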

void
brw_disassemble(const struct gen_device_info *devinfo,
                const void *assembly, int start, int end, FILE *out)
{
   bool dump_hex = (INTEL_DEBUG & DEBUG_HEX) != 0;

   for (int offset = start; offset < end;) {
      const brw_inst *insn = (const brw_inst *)((char *)assembly + offset);
      brw_inst uncompacted;
      bool compacted = brw_inst_cmpt_control(devinfo, insn);
      if (0)
         fprintf(out, "0x%08x: ", offset);

      if (compacted) {
         brw_compact_inst *compacted = (brw_compact_inst *)insn;
         if (dump_hex) {
            unsigned char *insn_ptr = ((unsigned char *)&insn[0]);
            const unsigned int blank_spaces = 24;
            for (int i = 0; i < 8; i = i + 4) {
               fprintf(out, "%02x %02x %02x %02x ",
                       insn_ptr[i],
                       insn_ptr[i + 1],
                       insn_ptr[i + 2],
                       insn_ptr[i + 3]);
            }
            /* Make compacted instructions hex value output vertically aligned
             * with uncompacted instructions hex value
             */
            fprintf(out, "%*c", blank_spaces, ' ');
         }

         brw_uncompact_instruction(devinfo, &uncompacted, compacted);
         insn = &uncompacted;
         offset += 8;
      } else {
         if (dump_hex) {
            unsigned char *insn_ptr = ((unsigned char *)&insn[0]);
            for (int i = 0; i < 16; i = i + 4) {
               fprintf(out, "%02x %02x %02x %02x ",
                       insn_ptr[i],
                       insn_ptr[i + 1],
                       insn_ptr[i + 2],
                       insn_ptr[i + 3]);
            }
         }
         offset += 16;
      }

      brw_disassemble_inst(out, devinfo, insn, compacted);
   }
}
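
/* Usage sketch (illustrative): given a struct brw_codegen *p that has
 * finished emitting, dump everything to stderr.  Setting the "hex" flag in
 * INTEL_DEBUG additionally prints the raw instruction bytes alongside the
 * disassembly.
 *
 *    unsigned sz;
 *    const unsigned *prog = brw_get_program(p, &sz);
 *    brw_disassemble(p->devinfo, prog, 0, sz, stderr);
 */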

static const struct opcode_desc opcode_descs[] = {
   /* IR,                 HW,  name,      nsrc, ndst, gens */
   { BRW_OPCODE_ILLEGAL,  0,   "illegal", 0,    0,    GEN_ALL },
   { BRW_OPCODE_SYNC,     1,   "sync",    1,    0,    GEN_GE(GEN12) },
   { BRW_OPCODE_MOV,      1,   "mov",     1,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_MOV,      97,  "mov",     1,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_SEL,      2,   "sel",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_SEL,      98,  "sel",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_MOVI,     3,   "movi",    2,    1,    GEN_GE(GEN45) & GEN_LT(GEN12) },
   { BRW_OPCODE_MOVI,     99,  "movi",    2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_NOT,      4,   "not",     1,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_NOT,      100, "not",     1,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_AND,      5,   "and",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_AND,      101, "and",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_OR,       6,   "or",      2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_OR,       102, "or",      2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_XOR,      7,   "xor",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_XOR,      103, "xor",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_SHR,      8,   "shr",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_SHR,      104, "shr",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_SHL,      9,   "shl",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_SHL,      105, "shl",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_DIM,      10,  "dim",     1,    1,    GEN75 },
   { BRW_OPCODE_SMOV,     10,  "smov",    0,    0,    GEN_GE(GEN8) & GEN_LT(GEN12) },
   { BRW_OPCODE_SMOV,     106, "smov",    0,    0,    GEN_GE(GEN12) },
   { BRW_OPCODE_ASR,      12,  "asr",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_ASR,      108, "asr",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_ROR,      14,  "ror",     2,    1,    GEN11 },
   { BRW_OPCODE_ROR,      110, "ror",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_ROL,      15,  "rol",     2,    1,    GEN11 },
   { BRW_OPCODE_ROL,      111, "rol",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_CMP,      16,  "cmp",     2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_CMP,      112, "cmp",     2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_CMPN,     17,  "cmpn",    2,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_CMPN,     113, "cmpn",    2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_CSEL,     18,  "csel",    3,    1,    GEN_GE(GEN8) & GEN_LT(GEN12) },
   { BRW_OPCODE_CSEL,     114, "csel",    3,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_F32TO16,  19,  "f32to16", 1,    1,    GEN7 | GEN75 },
   { BRW_OPCODE_F16TO32,  20,  "f16to32", 1,    1,    GEN7 | GEN75 },
   { BRW_OPCODE_BFREV,    23,  "bfrev",   1,    1,    GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFREV,    119, "bfrev",   1,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_BFE,      24,  "bfe",     3,    1,    GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFE,      120, "bfe",     3,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_BFI1,     25,  "bfi1",    2,    1,    GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFI1,     121, "bfi1",    2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_BFI2,     26,  "bfi2",    3,    1,    GEN_GE(GEN7) & GEN_LT(GEN12) },
   { BRW_OPCODE_BFI2,     122, "bfi2",    3,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_JMPI,     32,  "jmpi",    0,    0,    GEN_ALL },
   { BRW_OPCODE_BRD,      33,  "brd",     0,    0,    GEN_GE(GEN7) },
   { BRW_OPCODE_IF,       34,  "if",      0,    0,    GEN_ALL },
   { BRW_OPCODE_IFF,      35,  "iff",     0,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_BRC,      35,  "brc",     0,    0,    GEN_GE(GEN7) },
   { BRW_OPCODE_ELSE,     36,  "else",    0,    0,    GEN_ALL },
   { BRW_OPCODE_ENDIF,    37,  "endif",   0,    0,    GEN_ALL },
   { BRW_OPCODE_DO,       38,  "do",      0,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_CASE,     38,  "case",    0,    0,    GEN6 },
   { BRW_OPCODE_WHILE,    39,  "while",   0,    0,    GEN_ALL },
   { BRW_OPCODE_BREAK,    40,  "break",   0,    0,    GEN_ALL },
   { BRW_OPCODE_CONTINUE, 41,  "cont",    0,    0,    GEN_ALL },
   { BRW_OPCODE_HALT,     42,  "halt",    0,    0,    GEN_ALL },
   { BRW_OPCODE_CALLA,    43,  "calla",   0,    0,    GEN_GE(GEN75) },
   { BRW_OPCODE_MSAVE,    44,  "msave",   0,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_CALL,     44,  "call",    0,    0,    GEN_GE(GEN6) },
   { BRW_OPCODE_MREST,    45,  "mrest",   0,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_RET,      45,  "ret",     0,    0,    GEN_GE(GEN6) },
   { BRW_OPCODE_PUSH,     46,  "push",    0,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_FORK,     46,  "fork",    0,    0,    GEN6 },
   { BRW_OPCODE_GOTO,     46,  "goto",    0,    0,    GEN_GE(GEN8) },
   { BRW_OPCODE_POP,      47,  "pop",     2,    0,    GEN_LE(GEN5) },
   { BRW_OPCODE_WAIT,     48,  "wait",    1,    0,    GEN_LT(GEN12) },
   { BRW_OPCODE_SEND,     49,  "send",    1,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_SENDC,    50,  "sendc",   1,    1,    GEN_LT(GEN12) },
   { BRW_OPCODE_SEND,     49,  "send",    2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_SENDC,    50,  "sendc",   2,    1,    GEN_GE(GEN12) },
   { BRW_OPCODE_SENDS,    51,  "sends",   2,    1,    GEN_GE(GEN9) & GEN_LT(GEN12) },
   { BRW_OPCODE_SENDSC,   52,  "sendsc",  2,    1,    GEN_GE(GEN9) & GEN_LT(GEN12) },
   { BRW_OPCODE_MATH,     56,  "math",    2,    1,    GEN_GE(GEN6) },
   { BRW_OPCODE_ADD,      64,  "add",     2,    1,    GEN_ALL },
   { BRW_OPCODE_MUL,      65,  "mul",     2,    1,    GEN_ALL },
   { BRW_OPCODE_AVG,      66,  "avg",     2,    1,    GEN_ALL },
   { BRW_OPCODE_FRC,      67,  "frc",     1,    1,    GEN_ALL },
   { BRW_OPCODE_RNDU,     68,  "rndu",    1,    1,    GEN_ALL },
   { BRW_OPCODE_RNDD,     69,  "rndd",    1,    1,    GEN_ALL },
   { BRW_OPCODE_RNDE,     70,  "rnde",    1,    1,    GEN_ALL },
   { BRW_OPCODE_RNDZ,     71,  "rndz",    1,    1,    GEN_ALL },
   { BRW_OPCODE_MAC,      72,  "mac",     2,    1,    GEN_ALL },
   { BRW_OPCODE_MACH,     73,  "mach",    2,    1,    GEN_ALL },
   { BRW_OPCODE_LZD,      74,  "lzd",     1,    1,    GEN_ALL },
   { BRW_OPCODE_FBH,      75,  "fbh",     1,    1,    GEN_GE(GEN7) },
   { BRW_OPCODE_FBL,      76,  "fbl",     1,    1,    GEN_GE(GEN7) },
   { BRW_OPCODE_CBIT,     77,  "cbit",    1,    1,    GEN_GE(GEN7) },
   { BRW_OPCODE_ADDC,     78,  "addc",    2,    1,    GEN_GE(GEN7) },
   { BRW_OPCODE_SUBB,     79,  "subb",    2,    1,    GEN_GE(GEN7) },
   { BRW_OPCODE_SAD2,     80,  "sad2",    2,    1,    GEN_ALL },
   { BRW_OPCODE_SADA2,    81,  "sada2",   2,    1,    GEN_ALL },
   { BRW_OPCODE_DP4,      84,  "dp4",     2,    1,    GEN_LT(GEN11) },
   { BRW_OPCODE_DPH,      85,  "dph",     2,    1,    GEN_LT(GEN11) },
   { BRW_OPCODE_DP3,      86,  "dp3",     2,    1,    GEN_LT(GEN11) },
   { BRW_OPCODE_DP2,      87,  "dp2",     2,    1,    GEN_LT(GEN11) },
   { BRW_OPCODE_LINE,     89,  "line",    2,    1,    GEN_LE(GEN10) },
   { BRW_OPCODE_PLN,      90,  "pln",     2,    1,    GEN_GE(GEN45) & GEN_LE(GEN10) },
   { BRW_OPCODE_MAD,      91,  "mad",     3,    1,    GEN_GE(GEN6) },
   { BRW_OPCODE_LRP,      92,  "lrp",     3,    1,    GEN_GE(GEN6) & GEN_LE(GEN10) },
   { BRW_OPCODE_MADM,     93,  "madm",    3,    1,    GEN_GE(GEN8) },
   { BRW_OPCODE_NENOP,    125, "nenop",   0,    0,    GEN45 },
   { BRW_OPCODE_NOP,      126, "nop",     0,    0,    GEN_LT(GEN12) },
   { BRW_OPCODE_NOP,      96,  "nop",     0,    0,    GEN_GE(GEN12) }
};

/**
 * Look up the opcode_descs[] entry with \p key member matching \p k which is
 * supported by the device specified by \p devinfo, or NULL if there is no
 * matching entry.
 *
 * This is implemented by using an index data structure (storage for which is
 * provided by the caller as \p index_gen and \p index_descs) in order to
 * provide efficient constant-time look-up.
 */
static const opcode_desc *
lookup_opcode_desc(gen *index_gen,
                   const opcode_desc **index_descs,
                   unsigned index_size,
                   unsigned opcode_desc::*key,
                   const gen_device_info *devinfo,
                   unsigned k)
{
   if (*index_gen != gen_from_devinfo(devinfo)) {
      *index_gen = gen_from_devinfo(devinfo);

      for (unsigned l = 0; l < index_size; l++)
         index_descs[l] = NULL;

      for (unsigned i = 0; i < ARRAY_SIZE(opcode_descs); i++) {
         if (opcode_descs[i].gens & *index_gen) {
            const unsigned l = opcode_descs[i].*key;
            assert(l < index_size && !index_descs[l]);
            index_descs[l] = &opcode_descs[i];
         }
      }
   }

   if (k < index_size)
      return index_descs[k];
   else
      return NULL;
}

/**
 * Return the matching opcode_desc for the specified IR opcode and hardware
 * generation, or NULL if the opcode is not supported by the device.
 */
const struct opcode_desc *
brw_opcode_desc(const struct gen_device_info *devinfo, enum opcode opcode)
{
   static __thread gen index_gen = {};
   static __thread const opcode_desc *index_descs[NUM_BRW_OPCODES];
   return lookup_opcode_desc(&index_gen, index_descs, ARRAY_SIZE(index_descs),
                             &opcode_desc::ir, devinfo, opcode);
}

/**
 * Return the matching opcode_desc for the specified HW opcode and hardware
 * generation, or NULL if the opcode is not supported by the device.
 */
const struct opcode_desc *
brw_opcode_desc_from_hw(const struct gen_device_info *devinfo, unsigned hw)
{
   static __thread gen index_gen = {};
   static __thread const opcode_desc *index_descs[128];
   return lookup_opcode_desc(&index_gen, index_descs, ARRAY_SIZE(index_descs),
683 }