/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
35 #include "brw_eu_defines.h"
37 #include "brw_shader.h"
38 #include "brw_gen_enum.h"
39 #include "dev/gen_debug.h"
41 #include "util/ralloc.h"
43 /* Returns a conditional modifier that negates the condition. */
44 enum brw_conditional_mod
45 brw_negate_cmod(enum brw_conditional_mod cmod
)
48 case BRW_CONDITIONAL_Z
:
49 return BRW_CONDITIONAL_NZ
;
50 case BRW_CONDITIONAL_NZ
:
51 return BRW_CONDITIONAL_Z
;
52 case BRW_CONDITIONAL_G
:
53 return BRW_CONDITIONAL_LE
;
54 case BRW_CONDITIONAL_GE
:
55 return BRW_CONDITIONAL_L
;
56 case BRW_CONDITIONAL_L
:
57 return BRW_CONDITIONAL_GE
;
58 case BRW_CONDITIONAL_LE
:
59 return BRW_CONDITIONAL_G
;
61 unreachable("Can't negate this cmod");
65 /* Returns the corresponding conditional mod for swapping src0 and
68 enum brw_conditional_mod
69 brw_swap_cmod(enum brw_conditional_mod cmod
)
72 case BRW_CONDITIONAL_Z
:
73 case BRW_CONDITIONAL_NZ
:
75 case BRW_CONDITIONAL_G
:
76 return BRW_CONDITIONAL_L
;
77 case BRW_CONDITIONAL_GE
:
78 return BRW_CONDITIONAL_LE
;
79 case BRW_CONDITIONAL_L
:
80 return BRW_CONDITIONAL_G
;
81 case BRW_CONDITIONAL_LE
:
82 return BRW_CONDITIONAL_GE
;
84 return BRW_CONDITIONAL_NONE
;
89 * Get the least significant bit offset of the i+1-th component of immediate
90 * type \p type. For \p i equal to the two's complement of j, return the
91 * offset of the j-th component starting from the end of the vector. For
92 * scalar register types return zero.
95 imm_shift(enum brw_reg_type type
, unsigned i
)
97 assert(type
!= BRW_REGISTER_TYPE_UV
&& type
!= BRW_REGISTER_TYPE_V
&&
100 if (type
== BRW_REGISTER_TYPE_VF
)
107 * Swizzle an arbitrary immediate \p x of the given type according to the
108 * permutation specified as \p swz.
111 brw_swizzle_immediate(enum brw_reg_type type
, uint32_t x
, unsigned swz
)
113 if (imm_shift(type
, 1)) {
114 const unsigned n
= 32 / imm_shift(type
, 1);
117 for (unsigned i
= 0; i
< n
; i
++) {
118 /* Shift the specified component all the way to the right and left to
119 * discard any undesired L/MSBs, then shift it right into component i.
121 y
|= x
>> imm_shift(type
, (i
& ~3) + BRW_GET_SWZ(swz
, i
& 3))
122 << imm_shift(type
, ~0u)
123 >> imm_shift(type
, ~0u - i
);
133 brw_get_default_exec_size(struct brw_codegen
*p
)
135 return p
->current
->exec_size
;
139 brw_get_default_group(struct brw_codegen
*p
)
141 return p
->current
->group
;
145 brw_get_default_access_mode(struct brw_codegen
*p
)
147 return p
->current
->access_mode
;
151 brw_get_default_swsb(struct brw_codegen
*p
)
153 return p
->current
->swsb
;
157 brw_set_default_exec_size(struct brw_codegen
*p
, unsigned value
)
159 p
->current
->exec_size
= value
;
162 void brw_set_default_predicate_control(struct brw_codegen
*p
, enum brw_predicate pc
)
164 p
->current
->predicate
= pc
;
167 void brw_set_default_predicate_inverse(struct brw_codegen
*p
, bool predicate_inverse
)
169 p
->current
->pred_inv
= predicate_inverse
;
172 void brw_set_default_flag_reg(struct brw_codegen
*p
, int reg
, int subreg
)
175 p
->current
->flag_subreg
= reg
* 2 + subreg
;
178 void brw_set_default_access_mode( struct brw_codegen
*p
, unsigned access_mode
)
180 p
->current
->access_mode
= access_mode
;
184 brw_set_default_compression_control(struct brw_codegen
*p
,
185 enum brw_compression compression_control
)
187 switch (compression_control
) {
188 case BRW_COMPRESSION_NONE
:
189 /* This is the "use the first set of bits of dmask/vmask/arf
190 * according to execsize" option.
192 p
->current
->group
= 0;
194 case BRW_COMPRESSION_2NDHALF
:
195 /* For SIMD8, this is "use the second set of 8 bits." */
196 p
->current
->group
= 8;
198 case BRW_COMPRESSION_COMPRESSED
:
199 /* For SIMD16 instruction compression, use the first set of 16 bits
200 * since we don't do SIMD32 dispatch.
202 p
->current
->group
= 0;
205 unreachable("not reached");
208 if (p
->devinfo
->gen
<= 6) {
209 p
->current
->compressed
=
210 (compression_control
== BRW_COMPRESSION_COMPRESSED
);
215 * Enable or disable instruction compression on the given instruction leaving
216 * the currently selected channel enable group untouched.
219 brw_inst_set_compression(const struct gen_device_info
*devinfo
,
220 brw_inst
*inst
, bool on
)
222 if (devinfo
->gen
>= 6) {
223 /* No-op, the EU will figure out for us whether the instruction needs to
227 /* The channel group and compression controls are non-orthogonal, there
228 * are two possible representations for uncompressed instructions and we
229 * may need to preserve the current one to avoid changing the selected
230 * channel group inadvertently.
233 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_COMPRESSED
);
234 else if (brw_inst_qtr_control(devinfo
, inst
)
235 == BRW_COMPRESSION_COMPRESSED
)
236 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_NONE
);
241 brw_set_default_compression(struct brw_codegen
*p
, bool on
)
243 p
->current
->compressed
= on
;
247 * Apply the range of channel enable signals given by
248 * [group, group + exec_size) to the instruction passed as argument.
251 brw_inst_set_group(const struct gen_device_info
*devinfo
,
252 brw_inst
*inst
, unsigned group
)
254 if (devinfo
->gen
>= 7) {
255 assert(group
% 4 == 0 && group
< 32);
256 brw_inst_set_qtr_control(devinfo
, inst
, group
/ 8);
257 brw_inst_set_nib_control(devinfo
, inst
, (group
/ 4) % 2);
259 } else if (devinfo
->gen
== 6) {
260 assert(group
% 8 == 0 && group
< 32);
261 brw_inst_set_qtr_control(devinfo
, inst
, group
/ 8);
264 assert(group
% 8 == 0 && group
< 16);
265 /* The channel group and compression controls are non-orthogonal, there
266 * are two possible representations for group zero and we may need to
267 * preserve the current one to avoid changing the selected compression
268 * enable inadvertently.
271 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_2NDHALF
);
272 else if (brw_inst_qtr_control(devinfo
, inst
) == BRW_COMPRESSION_2NDHALF
)
273 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_NONE
);
278 brw_set_default_group(struct brw_codegen
*p
, unsigned group
)
280 p
->current
->group
= group
;
283 void brw_set_default_mask_control( struct brw_codegen
*p
, unsigned value
)
285 p
->current
->mask_control
= value
;
288 void brw_set_default_saturate( struct brw_codegen
*p
, bool enable
)
290 p
->current
->saturate
= enable
;
293 void brw_set_default_acc_write_control(struct brw_codegen
*p
, unsigned value
)
295 p
->current
->acc_wr_control
= value
;
298 void brw_set_default_swsb(struct brw_codegen
*p
, tgl_swsb value
)
300 p
->current
->swsb
= value
;
303 void brw_push_insn_state( struct brw_codegen
*p
)
305 assert(p
->current
!= &p
->stack
[BRW_EU_MAX_INSN_STACK
-1]);
306 *(p
->current
+ 1) = *p
->current
;
310 void brw_pop_insn_state( struct brw_codegen
*p
)
312 assert(p
->current
!= p
->stack
);
317 /***********************************************************************
320 brw_init_codegen(const struct gen_device_info
*devinfo
,
321 struct brw_codegen
*p
, void *mem_ctx
)
323 memset(p
, 0, sizeof(*p
));
325 p
->devinfo
= devinfo
;
326 p
->automatic_exec_sizes
= true;
328 * Set the initial instruction store array size to 1024, if found that
329 * isn't enough, then it will double the store size at brw_next_insn()
330 * until out of memory.
332 p
->store_size
= 1024;
333 p
->store
= rzalloc_array(mem_ctx
, brw_inst
, p
->store_size
);
335 p
->current
= p
->stack
;
336 memset(p
->current
, 0, sizeof(p
->current
[0]));
338 p
->mem_ctx
= mem_ctx
;
342 brw_set_default_exec_size(p
, BRW_EXECUTE_8
);
343 brw_set_default_mask_control(p
, BRW_MASK_ENABLE
); /* what does this do? */
344 brw_set_default_saturate(p
, 0);
345 brw_set_default_compression_control(p
, BRW_COMPRESSION_NONE
);
347 /* Set up control flow stack */
348 p
->if_stack_depth
= 0;
349 p
->if_stack_array_size
= 16;
350 p
->if_stack
= rzalloc_array(mem_ctx
, int, p
->if_stack_array_size
);
352 p
->loop_stack_depth
= 0;
353 p
->loop_stack_array_size
= 16;
354 p
->loop_stack
= rzalloc_array(mem_ctx
, int, p
->loop_stack_array_size
);
355 p
->if_depth_in_loop
= rzalloc_array(mem_ctx
, int, p
->loop_stack_array_size
);
359 const unsigned *brw_get_program( struct brw_codegen
*p
,
362 *sz
= p
->next_insn_offset
;
363 return (const unsigned *)p
->store
;
366 bool brw_try_override_assembly(struct brw_codegen
*p
, int start_offset
,
367 const char *identifier
)
369 const char *read_path
= getenv("INTEL_SHADER_ASM_READ_PATH");
374 char *name
= ralloc_asprintf(NULL
, "%s/%s.bin", read_path
, identifier
);
376 int fd
= open(name
, O_RDONLY
);
384 if (fstat(fd
, &sb
) != 0 || (!S_ISREG(sb
.st_mode
))) {
389 p
->nr_insn
-= (p
->next_insn_offset
- start_offset
) / sizeof(brw_inst
);
390 p
->nr_insn
+= sb
.st_size
/ sizeof(brw_inst
);
392 p
->next_insn_offset
= start_offset
+ sb
.st_size
;
393 p
->store_size
= (start_offset
+ sb
.st_size
) / sizeof(brw_inst
);
394 p
->store
= (brw_inst
*)reralloc_size(p
->mem_ctx
, p
->store
, p
->next_insn_offset
);
397 ssize_t ret
= read(fd
, p
->store
+ start_offset
, sb
.st_size
);
399 if (ret
!= sb
.st_size
) {
403 ASSERTED
bool valid
=
404 brw_validate_instructions(p
->devinfo
, p
->store
,
405 start_offset
, p
->next_insn_offset
,
413 brw_disassemble(const struct gen_device_info
*devinfo
,
414 const void *assembly
, int start
, int end
, FILE *out
)
416 bool dump_hex
= (INTEL_DEBUG
& DEBUG_HEX
) != 0;
418 for (int offset
= start
; offset
< end
;) {
419 const brw_inst
*insn
= (const brw_inst
*)((char *)assembly
+ offset
);
420 brw_inst uncompacted
;
421 bool compacted
= brw_inst_cmpt_control(devinfo
, insn
);
423 fprintf(out
, "0x%08x: ", offset
);
426 brw_compact_inst
*compacted
= (brw_compact_inst
*)insn
;
428 unsigned char * insn_ptr
= ((unsigned char *)&insn
[0]);
429 const unsigned int blank_spaces
= 24;
430 for (int i
= 0 ; i
< 8; i
= i
+ 4) {
431 fprintf(out
, "%02x %02x %02x %02x ",
437 /* Make compacted instructions hex value output vertically aligned
438 * with uncompacted instructions hex value
440 fprintf(out
, "%*c", blank_spaces
, ' ');
443 brw_uncompact_instruction(devinfo
, &uncompacted
, compacted
);
448 unsigned char * insn_ptr
= ((unsigned char *)&insn
[0]);
449 for (int i
= 0 ; i
< 16; i
= i
+ 4) {
450 fprintf(out
, "%02x %02x %02x %02x ",
460 brw_disassemble_inst(out
, devinfo
, insn
, compacted
);
464 static const struct opcode_desc opcode_descs
[] = {
465 /* IR, HW, name, nsrc, ndst, gens */
466 { BRW_OPCODE_ILLEGAL
, 0, "illegal", 0, 0, GEN_ALL
},
467 { BRW_OPCODE_SYNC
, 1, "sync", 1, 0, GEN_GE(GEN12
) },
468 { BRW_OPCODE_MOV
, 1, "mov", 1, 1, GEN_LT(GEN12
) },
469 { BRW_OPCODE_MOV
, 97, "mov", 1, 1, GEN_GE(GEN12
) },
470 { BRW_OPCODE_SEL
, 2, "sel", 2, 1, GEN_LT(GEN12
) },
471 { BRW_OPCODE_SEL
, 98, "sel", 2, 1, GEN_GE(GEN12
) },
472 { BRW_OPCODE_MOVI
, 3, "movi", 2, 1, GEN_GE(GEN45
) & GEN_LT(GEN12
) },
473 { BRW_OPCODE_MOVI
, 99, "movi", 2, 1, GEN_GE(GEN12
) },
474 { BRW_OPCODE_NOT
, 4, "not", 1, 1, GEN_LT(GEN12
) },
475 { BRW_OPCODE_NOT
, 100, "not", 1, 1, GEN_GE(GEN12
) },
476 { BRW_OPCODE_AND
, 5, "and", 2, 1, GEN_LT(GEN12
) },
477 { BRW_OPCODE_AND
, 101, "and", 2, 1, GEN_GE(GEN12
) },
478 { BRW_OPCODE_OR
, 6, "or", 2, 1, GEN_LT(GEN12
) },
479 { BRW_OPCODE_OR
, 102, "or", 2, 1, GEN_GE(GEN12
) },
480 { BRW_OPCODE_XOR
, 7, "xor", 2, 1, GEN_LT(GEN12
) },
481 { BRW_OPCODE_XOR
, 103, "xor", 2, 1, GEN_GE(GEN12
) },
482 { BRW_OPCODE_SHR
, 8, "shr", 2, 1, GEN_LT(GEN12
) },
483 { BRW_OPCODE_SHR
, 104, "shr", 2, 1, GEN_GE(GEN12
) },
484 { BRW_OPCODE_SHL
, 9, "shl", 2, 1, GEN_LT(GEN12
) },
485 { BRW_OPCODE_SHL
, 105, "shl", 2, 1, GEN_GE(GEN12
) },
486 { BRW_OPCODE_DIM
, 10, "dim", 1, 1, GEN75
},
487 { BRW_OPCODE_SMOV
, 10, "smov", 0, 0, GEN_GE(GEN8
) & GEN_LT(GEN12
) },
488 { BRW_OPCODE_SMOV
, 106, "smov", 0, 0, GEN_GE(GEN12
) },
489 { BRW_OPCODE_ASR
, 12, "asr", 2, 1, GEN_LT(GEN12
) },
490 { BRW_OPCODE_ASR
, 108, "asr", 2, 1, GEN_GE(GEN12
) },
491 { BRW_OPCODE_ROR
, 14, "ror", 2, 1, GEN11
},
492 { BRW_OPCODE_ROR
, 110, "ror", 2, 1, GEN_GE(GEN12
) },
493 { BRW_OPCODE_ROL
, 15, "rol", 2, 1, GEN11
},
494 { BRW_OPCODE_ROL
, 111, "rol", 2, 1, GEN_GE(GEN12
) },
495 { BRW_OPCODE_CMP
, 16, "cmp", 2, 1, GEN_LT(GEN12
) },
496 { BRW_OPCODE_CMP
, 112, "cmp", 2, 1, GEN_GE(GEN12
) },
497 { BRW_OPCODE_CMPN
, 17, "cmpn", 2, 1, GEN_LT(GEN12
) },
498 { BRW_OPCODE_CMPN
, 113, "cmpn", 2, 1, GEN_GE(GEN12
) },
499 { BRW_OPCODE_CSEL
, 18, "csel", 3, 1, GEN_GE(GEN8
) & GEN_LT(GEN12
) },
500 { BRW_OPCODE_CSEL
, 114, "csel", 3, 1, GEN_GE(GEN12
) },
501 { BRW_OPCODE_F32TO16
, 19, "f32to16", 1, 1, GEN7
| GEN75
},
502 { BRW_OPCODE_F16TO32
, 20, "f16to32", 1, 1, GEN7
| GEN75
},
503 { BRW_OPCODE_BFREV
, 23, "bfrev", 1, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
504 { BRW_OPCODE_BFREV
, 119, "bfrev", 1, 1, GEN_GE(GEN12
) },
505 { BRW_OPCODE_BFE
, 24, "bfe", 3, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
506 { BRW_OPCODE_BFE
, 120, "bfe", 3, 1, GEN_GE(GEN12
) },
507 { BRW_OPCODE_BFI1
, 25, "bfi1", 2, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
508 { BRW_OPCODE_BFI1
, 121, "bfi1", 2, 1, GEN_GE(GEN12
) },
509 { BRW_OPCODE_BFI2
, 26, "bfi2", 3, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
510 { BRW_OPCODE_BFI2
, 122, "bfi2", 3, 1, GEN_GE(GEN12
) },
511 { BRW_OPCODE_JMPI
, 32, "jmpi", 0, 0, GEN_ALL
},
512 { BRW_OPCODE_BRD
, 33, "brd", 0, 0, GEN_GE(GEN7
) },
513 { BRW_OPCODE_IF
, 34, "if", 0, 0, GEN_ALL
},
514 { BRW_OPCODE_IFF
, 35, "iff", 0, 0, GEN_LE(GEN5
) },
515 { BRW_OPCODE_BRC
, 35, "brc", 0, 0, GEN_GE(GEN7
) },
516 { BRW_OPCODE_ELSE
, 36, "else", 0, 0, GEN_ALL
},
517 { BRW_OPCODE_ENDIF
, 37, "endif", 0, 0, GEN_ALL
},
518 { BRW_OPCODE_DO
, 38, "do", 0, 0, GEN_LE(GEN5
) },
519 { BRW_OPCODE_CASE
, 38, "case", 0, 0, GEN6
},
520 { BRW_OPCODE_WHILE
, 39, "while", 0, 0, GEN_ALL
},
521 { BRW_OPCODE_BREAK
, 40, "break", 0, 0, GEN_ALL
},
522 { BRW_OPCODE_CONTINUE
, 41, "cont", 0, 0, GEN_ALL
},
523 { BRW_OPCODE_HALT
, 42, "halt", 0, 0, GEN_ALL
},
524 { BRW_OPCODE_CALLA
, 43, "calla", 0, 0, GEN_GE(GEN75
) },
525 { BRW_OPCODE_MSAVE
, 44, "msave", 0, 0, GEN_LE(GEN5
) },
526 { BRW_OPCODE_CALL
, 44, "call", 0, 0, GEN_GE(GEN6
) },
527 { BRW_OPCODE_MREST
, 45, "mrest", 0, 0, GEN_LE(GEN5
) },
528 { BRW_OPCODE_RET
, 45, "ret", 0, 0, GEN_GE(GEN6
) },
529 { BRW_OPCODE_PUSH
, 46, "push", 0, 0, GEN_LE(GEN5
) },
530 { BRW_OPCODE_FORK
, 46, "fork", 0, 0, GEN6
},
531 { BRW_OPCODE_GOTO
, 46, "goto", 0, 0, GEN_GE(GEN8
) },
532 { BRW_OPCODE_POP
, 47, "pop", 2, 0, GEN_LE(GEN5
) },
533 { BRW_OPCODE_WAIT
, 48, "wait", 1, 0, GEN_LT(GEN12
) },
534 { BRW_OPCODE_SEND
, 49, "send", 1, 1, GEN_LT(GEN12
) },
535 { BRW_OPCODE_SENDC
, 50, "sendc", 1, 1, GEN_LT(GEN12
) },
536 { BRW_OPCODE_SEND
, 49, "send", 2, 1, GEN_GE(GEN12
) },
537 { BRW_OPCODE_SENDC
, 50, "sendc", 2, 1, GEN_GE(GEN12
) },
538 { BRW_OPCODE_SENDS
, 51, "sends", 2, 1, GEN_GE(GEN9
) & GEN_LT(GEN12
) },
539 { BRW_OPCODE_SENDSC
, 52, "sendsc", 2, 1, GEN_GE(GEN9
) & GEN_LT(GEN12
) },
540 { BRW_OPCODE_MATH
, 56, "math", 2, 1, GEN_GE(GEN6
) },
541 { BRW_OPCODE_ADD
, 64, "add", 2, 1, GEN_ALL
},
542 { BRW_OPCODE_MUL
, 65, "mul", 2, 1, GEN_ALL
},
543 { BRW_OPCODE_AVG
, 66, "avg", 2, 1, GEN_ALL
},
544 { BRW_OPCODE_FRC
, 67, "frc", 1, 1, GEN_ALL
},
545 { BRW_OPCODE_RNDU
, 68, "rndu", 1, 1, GEN_ALL
},
546 { BRW_OPCODE_RNDD
, 69, "rndd", 1, 1, GEN_ALL
},
547 { BRW_OPCODE_RNDE
, 70, "rnde", 1, 1, GEN_ALL
},
548 { BRW_OPCODE_RNDZ
, 71, "rndz", 1, 1, GEN_ALL
},
549 { BRW_OPCODE_MAC
, 72, "mac", 2, 1, GEN_ALL
},
550 { BRW_OPCODE_MACH
, 73, "mach", 2, 1, GEN_ALL
},
551 { BRW_OPCODE_LZD
, 74, "lzd", 1, 1, GEN_ALL
},
552 { BRW_OPCODE_FBH
, 75, "fbh", 1, 1, GEN_GE(GEN7
) },
553 { BRW_OPCODE_FBL
, 76, "fbl", 1, 1, GEN_GE(GEN7
) },
554 { BRW_OPCODE_CBIT
, 77, "cbit", 1, 1, GEN_GE(GEN7
) },
555 { BRW_OPCODE_ADDC
, 78, "addc", 2, 1, GEN_GE(GEN7
) },
556 { BRW_OPCODE_SUBB
, 79, "subb", 2, 1, GEN_GE(GEN7
) },
557 { BRW_OPCODE_SAD2
, 80, "sad2", 2, 1, GEN_ALL
},
558 { BRW_OPCODE_SADA2
, 81, "sada2", 2, 1, GEN_ALL
},
559 { BRW_OPCODE_DP4
, 84, "dp4", 2, 1, GEN_LT(GEN11
) },
560 { BRW_OPCODE_DPH
, 85, "dph", 2, 1, GEN_LT(GEN11
) },
561 { BRW_OPCODE_DP3
, 86, "dp3", 2, 1, GEN_LT(GEN11
) },
562 { BRW_OPCODE_DP2
, 87, "dp2", 2, 1, GEN_LT(GEN11
) },
563 { BRW_OPCODE_LINE
, 89, "line", 2, 1, GEN_LE(GEN10
) },
564 { BRW_OPCODE_PLN
, 90, "pln", 2, 1, GEN_GE(GEN45
) & GEN_LE(GEN10
) },
565 { BRW_OPCODE_MAD
, 91, "mad", 3, 1, GEN_GE(GEN6
) },
566 { BRW_OPCODE_LRP
, 92, "lrp", 3, 1, GEN_GE(GEN6
) & GEN_LE(GEN10
) },
567 { BRW_OPCODE_MADM
, 93, "madm", 3, 1, GEN_GE(GEN8
) },
568 { BRW_OPCODE_NENOP
, 125, "nenop", 0, 0, GEN45
},
569 { BRW_OPCODE_NOP
, 126, "nop", 0, 0, GEN_LT(GEN12
) },
570 { BRW_OPCODE_NOP
, 96, "nop", 0, 0, GEN_GE(GEN12
) }
574 * Look up the opcode_descs[] entry with \p key member matching \p k which is
575 * supported by the device specified by \p devinfo, or NULL if there is no
578 * This is implemented by using an index data structure (storage for which is
579 * provided by the caller as \p index_gen and \p index_descs) in order to
580 * provide efficient constant-time look-up.
582 static const opcode_desc
*
583 lookup_opcode_desc(gen
*index_gen
,
584 const opcode_desc
**index_descs
,
586 unsigned opcode_desc::*key
,
587 const gen_device_info
*devinfo
,
590 if (*index_gen
!= gen_from_devinfo(devinfo
)) {
591 *index_gen
= gen_from_devinfo(devinfo
);
593 for (unsigned l
= 0; l
< index_size
; l
++)
594 index_descs
[l
] = NULL
;
596 for (unsigned i
= 0; i
< ARRAY_SIZE(opcode_descs
); i
++) {
597 if (opcode_descs
[i
].gens
& *index_gen
) {
598 const unsigned l
= opcode_descs
[i
].*key
;
599 assert(l
< index_size
&& !index_descs
[l
]);
600 index_descs
[l
] = &opcode_descs
[i
];
606 return index_descs
[k
];
612 * Return the matching opcode_desc for the specified IR opcode and hardware
613 * generation, or NULL if the opcode is not supported by the device.
615 const struct opcode_desc
*
616 brw_opcode_desc(const struct gen_device_info
*devinfo
, enum opcode opcode
)
618 static __thread gen index_gen
= {};
619 static __thread
const opcode_desc
*index_descs
[NUM_BRW_OPCODES
];
620 return lookup_opcode_desc(&index_gen
, index_descs
, ARRAY_SIZE(index_descs
),
621 &opcode_desc::ir
, devinfo
, opcode
);
625 * Return the matching opcode_desc for the specified HW opcode and hardware
626 * generation, or NULL if the opcode is not supported by the device.
628 const struct opcode_desc
*
629 brw_opcode_desc_from_hw(const struct gen_device_info
*devinfo
, unsigned hw
)
631 static __thread gen index_gen
= {};
632 static __thread
const opcode_desc
*index_descs
[128];
633 return lookup_opcode_desc(&index_gen
, index_descs
, ARRAY_SIZE(index_descs
),
634 &opcode_desc::hw
, devinfo
, hw
);