/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include <stdlib.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include "brw_eu_defines.h"
#include "brw_eu.h"
#include "brw_shader.h"
#include "dev/gen_debug.h"

#include "util/ralloc.h"
42 /* Returns a conditional modifier that negates the condition. */
43 enum brw_conditional_mod
44 brw_negate_cmod(enum brw_conditional_mod cmod
)
47 case BRW_CONDITIONAL_Z
:
48 return BRW_CONDITIONAL_NZ
;
49 case BRW_CONDITIONAL_NZ
:
50 return BRW_CONDITIONAL_Z
;
51 case BRW_CONDITIONAL_G
:
52 return BRW_CONDITIONAL_LE
;
53 case BRW_CONDITIONAL_GE
:
54 return BRW_CONDITIONAL_L
;
55 case BRW_CONDITIONAL_L
:
56 return BRW_CONDITIONAL_GE
;
57 case BRW_CONDITIONAL_LE
:
58 return BRW_CONDITIONAL_G
;
60 unreachable("Can't negate this cmod");
64 /* Returns the corresponding conditional mod for swapping src0 and
67 enum brw_conditional_mod
68 brw_swap_cmod(enum brw_conditional_mod cmod
)
71 case BRW_CONDITIONAL_Z
:
72 case BRW_CONDITIONAL_NZ
:
74 case BRW_CONDITIONAL_G
:
75 return BRW_CONDITIONAL_L
;
76 case BRW_CONDITIONAL_GE
:
77 return BRW_CONDITIONAL_LE
;
78 case BRW_CONDITIONAL_L
:
79 return BRW_CONDITIONAL_G
;
80 case BRW_CONDITIONAL_LE
:
81 return BRW_CONDITIONAL_GE
;
83 return BRW_CONDITIONAL_NONE
;
88 * Get the least significant bit offset of the i+1-th component of immediate
89 * type \p type. For \p i equal to the two's complement of j, return the
90 * offset of the j-th component starting from the end of the vector. For
91 * scalar register types return zero.
94 imm_shift(enum brw_reg_type type
, unsigned i
)
96 assert(type
!= BRW_REGISTER_TYPE_UV
&& type
!= BRW_REGISTER_TYPE_V
&&
99 if (type
== BRW_REGISTER_TYPE_VF
)
106 * Swizzle an arbitrary immediate \p x of the given type according to the
107 * permutation specified as \p swz.
110 brw_swizzle_immediate(enum brw_reg_type type
, uint32_t x
, unsigned swz
)
112 if (imm_shift(type
, 1)) {
113 const unsigned n
= 32 / imm_shift(type
, 1);
116 for (unsigned i
= 0; i
< n
; i
++) {
117 /* Shift the specified component all the way to the right and left to
118 * discard any undesired L/MSBs, then shift it right into component i.
120 y
|= x
>> imm_shift(type
, (i
& ~3) + BRW_GET_SWZ(swz
, i
& 3))
121 << imm_shift(type
, ~0u)
122 >> imm_shift(type
, ~0u - i
);
132 brw_get_default_exec_size(struct brw_codegen
*p
)
134 return p
->current
->exec_size
;
138 brw_get_default_group(struct brw_codegen
*p
)
140 return p
->current
->group
;
144 brw_get_default_access_mode(struct brw_codegen
*p
)
146 return p
->current
->access_mode
;
150 brw_set_default_exec_size(struct brw_codegen
*p
, unsigned value
)
152 p
->current
->exec_size
= value
;
155 void brw_set_default_predicate_control(struct brw_codegen
*p
, enum brw_predicate pc
)
157 p
->current
->predicate
= pc
;
160 void brw_set_default_predicate_inverse(struct brw_codegen
*p
, bool predicate_inverse
)
162 p
->current
->pred_inv
= predicate_inverse
;
165 void brw_set_default_flag_reg(struct brw_codegen
*p
, int reg
, int subreg
)
168 p
->current
->flag_subreg
= reg
* 2 + subreg
;
171 void brw_set_default_access_mode( struct brw_codegen
*p
, unsigned access_mode
)
173 p
->current
->access_mode
= access_mode
;
177 brw_set_default_compression_control(struct brw_codegen
*p
,
178 enum brw_compression compression_control
)
180 switch (compression_control
) {
181 case BRW_COMPRESSION_NONE
:
182 /* This is the "use the first set of bits of dmask/vmask/arf
183 * according to execsize" option.
185 p
->current
->group
= 0;
187 case BRW_COMPRESSION_2NDHALF
:
188 /* For SIMD8, this is "use the second set of 8 bits." */
189 p
->current
->group
= 8;
191 case BRW_COMPRESSION_COMPRESSED
:
192 /* For SIMD16 instruction compression, use the first set of 16 bits
193 * since we don't do SIMD32 dispatch.
195 p
->current
->group
= 0;
198 unreachable("not reached");
201 if (p
->devinfo
->gen
<= 6) {
202 p
->current
->compressed
=
203 (compression_control
== BRW_COMPRESSION_COMPRESSED
);
208 * Enable or disable instruction compression on the given instruction leaving
209 * the currently selected channel enable group untouched.
212 brw_inst_set_compression(const struct gen_device_info
*devinfo
,
213 brw_inst
*inst
, bool on
)
215 if (devinfo
->gen
>= 6) {
216 /* No-op, the EU will figure out for us whether the instruction needs to
220 /* The channel group and compression controls are non-orthogonal, there
221 * are two possible representations for uncompressed instructions and we
222 * may need to preserve the current one to avoid changing the selected
223 * channel group inadvertently.
226 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_COMPRESSED
);
227 else if (brw_inst_qtr_control(devinfo
, inst
)
228 == BRW_COMPRESSION_COMPRESSED
)
229 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_NONE
);
234 brw_set_default_compression(struct brw_codegen
*p
, bool on
)
236 p
->current
->compressed
= on
;
240 * Apply the range of channel enable signals given by
241 * [group, group + exec_size) to the instruction passed as argument.
244 brw_inst_set_group(const struct gen_device_info
*devinfo
,
245 brw_inst
*inst
, unsigned group
)
247 if (devinfo
->gen
>= 7) {
248 assert(group
% 4 == 0 && group
< 32);
249 brw_inst_set_qtr_control(devinfo
, inst
, group
/ 8);
250 brw_inst_set_nib_control(devinfo
, inst
, (group
/ 4) % 2);
252 } else if (devinfo
->gen
== 6) {
253 assert(group
% 8 == 0 && group
< 32);
254 brw_inst_set_qtr_control(devinfo
, inst
, group
/ 8);
257 assert(group
% 8 == 0 && group
< 16);
258 /* The channel group and compression controls are non-orthogonal, there
259 * are two possible representations for group zero and we may need to
260 * preserve the current one to avoid changing the selected compression
261 * enable inadvertently.
264 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_2NDHALF
);
265 else if (brw_inst_qtr_control(devinfo
, inst
) == BRW_COMPRESSION_2NDHALF
)
266 brw_inst_set_qtr_control(devinfo
, inst
, BRW_COMPRESSION_NONE
);
271 brw_set_default_group(struct brw_codegen
*p
, unsigned group
)
273 p
->current
->group
= group
;
276 void brw_set_default_mask_control( struct brw_codegen
*p
, unsigned value
)
278 p
->current
->mask_control
= value
;
281 void brw_set_default_saturate( struct brw_codegen
*p
, bool enable
)
283 p
->current
->saturate
= enable
;
286 void brw_set_default_acc_write_control(struct brw_codegen
*p
, unsigned value
)
288 p
->current
->acc_wr_control
= value
;
291 void brw_push_insn_state( struct brw_codegen
*p
)
293 assert(p
->current
!= &p
->stack
[BRW_EU_MAX_INSN_STACK
-1]);
294 *(p
->current
+ 1) = *p
->current
;
298 void brw_pop_insn_state( struct brw_codegen
*p
)
300 assert(p
->current
!= p
->stack
);
305 /***********************************************************************
308 brw_init_codegen(const struct gen_device_info
*devinfo
,
309 struct brw_codegen
*p
, void *mem_ctx
)
311 memset(p
, 0, sizeof(*p
));
313 p
->devinfo
= devinfo
;
314 p
->automatic_exec_sizes
= true;
316 * Set the initial instruction store array size to 1024, if found that
317 * isn't enough, then it will double the store size at brw_next_insn()
318 * until out of memory.
320 p
->store_size
= 1024;
321 p
->store
= rzalloc_array(mem_ctx
, brw_inst
, p
->store_size
);
323 p
->current
= p
->stack
;
324 memset(p
->current
, 0, sizeof(p
->current
[0]));
326 p
->mem_ctx
= mem_ctx
;
330 brw_set_default_exec_size(p
, BRW_EXECUTE_8
);
331 brw_set_default_mask_control(p
, BRW_MASK_ENABLE
); /* what does this do? */
332 brw_set_default_saturate(p
, 0);
333 brw_set_default_compression_control(p
, BRW_COMPRESSION_NONE
);
335 /* Set up control flow stack */
336 p
->if_stack_depth
= 0;
337 p
->if_stack_array_size
= 16;
338 p
->if_stack
= rzalloc_array(mem_ctx
, int, p
->if_stack_array_size
);
340 p
->loop_stack_depth
= 0;
341 p
->loop_stack_array_size
= 16;
342 p
->loop_stack
= rzalloc_array(mem_ctx
, int, p
->loop_stack_array_size
);
343 p
->if_depth_in_loop
= rzalloc_array(mem_ctx
, int, p
->loop_stack_array_size
);
347 const unsigned *brw_get_program( struct brw_codegen
*p
,
350 *sz
= p
->next_insn_offset
;
351 return (const unsigned *)p
->store
;
354 bool brw_try_override_assembly(struct brw_codegen
*p
, int start_offset
,
355 const char *identifier
)
357 const char *read_path
= getenv("INTEL_SHADER_ASM_READ_PATH");
362 char *name
= ralloc_asprintf(NULL
, "%s/%s.bin", read_path
, identifier
);
364 int fd
= open(name
, O_RDONLY
);
372 if (fstat(fd
, &sb
) != 0 || (!S_ISREG(sb
.st_mode
))) {
377 p
->nr_insn
-= (p
->next_insn_offset
- start_offset
) / sizeof(brw_inst
);
378 p
->nr_insn
+= sb
.st_size
/ sizeof(brw_inst
);
380 p
->next_insn_offset
= start_offset
+ sb
.st_size
;
381 p
->store_size
= (start_offset
+ sb
.st_size
) / sizeof(brw_inst
);
382 p
->store
= (brw_inst
*)reralloc_size(p
->mem_ctx
, p
->store
, p
->next_insn_offset
);
385 read(fd
, p
->store
+ start_offset
, sb
.st_size
);
388 bool valid
= brw_validate_instructions(p
->devinfo
, p
->store
,
389 start_offset
, p
->next_insn_offset
,
397 brw_disassemble(const struct gen_device_info
*devinfo
,
398 const void *assembly
, int start
, int end
, FILE *out
)
400 bool dump_hex
= (INTEL_DEBUG
& DEBUG_HEX
) != 0;
402 for (int offset
= start
; offset
< end
;) {
403 const brw_inst
*insn
= (const brw_inst
*)((char *)assembly
+ offset
);
404 brw_inst uncompacted
;
405 bool compacted
= brw_inst_cmpt_control(devinfo
, insn
);
407 fprintf(out
, "0x%08x: ", offset
);
410 brw_compact_inst
*compacted
= (brw_compact_inst
*)insn
;
412 unsigned char * insn_ptr
= ((unsigned char *)&insn
[0]);
413 const unsigned int blank_spaces
= 24;
414 for (int i
= 0 ; i
< 8; i
= i
+ 4) {
415 fprintf(out
, "%02x %02x %02x %02x ",
421 /* Make compacted instructions hex value output vertically aligned
422 * with uncompacted instructions hex value
424 fprintf(out
, "%*c", blank_spaces
, ' ');
427 brw_uncompact_instruction(devinfo
, &uncompacted
, compacted
);
432 unsigned char * insn_ptr
= ((unsigned char *)&insn
[0]);
433 for (int i
= 0 ; i
< 16; i
= i
+ 4) {
434 fprintf(out
, "%02x %02x %02x %02x ",
444 brw_disassemble_inst(out
, devinfo
, insn
, compacted
);
463 #define GEN_LT(gen) ((gen) - 1)
464 #define GEN_GE(gen) (~GEN_LT(gen))
465 #define GEN_LE(gen) (GEN_LT(gen) | (gen))
467 static const struct opcode_desc opcode_descs
[] = {
468 /* IR, HW, name, nsrc, ndst, gens */
469 { BRW_OPCODE_ILLEGAL
, 0, "illegal", 0, 0, GEN_ALL
},
470 { BRW_OPCODE_MOV
, 1, "mov", 1, 1, GEN_LT(GEN12
) },
471 { BRW_OPCODE_MOV
, 97, "mov", 1, 1, GEN_GE(GEN12
) },
472 { BRW_OPCODE_SEL
, 2, "sel", 2, 1, GEN_LT(GEN12
) },
473 { BRW_OPCODE_SEL
, 98, "sel", 2, 1, GEN_GE(GEN12
) },
474 { BRW_OPCODE_MOVI
, 3, "movi", 2, 1, GEN_GE(GEN45
) & GEN_LT(GEN12
) },
475 { BRW_OPCODE_MOVI
, 99, "movi", 2, 1, GEN_GE(GEN12
) },
476 { BRW_OPCODE_NOT
, 4, "not", 1, 1, GEN_LT(GEN12
) },
477 { BRW_OPCODE_NOT
, 100, "not", 1, 1, GEN_GE(GEN12
) },
478 { BRW_OPCODE_AND
, 5, "and", 2, 1, GEN_LT(GEN12
) },
479 { BRW_OPCODE_AND
, 101, "and", 2, 1, GEN_GE(GEN12
) },
480 { BRW_OPCODE_OR
, 6, "or", 2, 1, GEN_LT(GEN12
) },
481 { BRW_OPCODE_OR
, 102, "or", 2, 1, GEN_GE(GEN12
) },
482 { BRW_OPCODE_XOR
, 7, "xor", 2, 1, GEN_LT(GEN12
) },
483 { BRW_OPCODE_XOR
, 103, "xor", 2, 1, GEN_GE(GEN12
) },
484 { BRW_OPCODE_SHR
, 8, "shr", 2, 1, GEN_LT(GEN12
) },
485 { BRW_OPCODE_SHR
, 104, "shr", 2, 1, GEN_GE(GEN12
) },
486 { BRW_OPCODE_SHL
, 9, "shl", 2, 1, GEN_LT(GEN12
) },
487 { BRW_OPCODE_SHL
, 105, "shl", 2, 1, GEN_GE(GEN12
) },
488 { BRW_OPCODE_DIM
, 10, "dim", 1, 1, GEN75
},
489 { BRW_OPCODE_SMOV
, 10, "smov", 0, 0, GEN_GE(GEN8
) & GEN_LT(GEN12
) },
490 { BRW_OPCODE_SMOV
, 106, "smov", 0, 0, GEN_GE(GEN12
) },
491 { BRW_OPCODE_ASR
, 12, "asr", 2, 1, GEN_LT(GEN12
) },
492 { BRW_OPCODE_ASR
, 108, "asr", 2, 1, GEN_GE(GEN12
) },
493 { BRW_OPCODE_ROR
, 14, "ror", 2, 1, GEN11
},
494 { BRW_OPCODE_ROR
, 110, "ror", 2, 1, GEN_GE(GEN12
) },
495 { BRW_OPCODE_ROL
, 15, "rol", 2, 1, GEN11
},
496 { BRW_OPCODE_ROL
, 111, "rol", 2, 1, GEN_GE(GEN12
) },
497 { BRW_OPCODE_CMP
, 16, "cmp", 2, 1, GEN_LT(GEN12
) },
498 { BRW_OPCODE_CMP
, 112, "cmp", 2, 1, GEN_GE(GEN12
) },
499 { BRW_OPCODE_CMPN
, 17, "cmpn", 2, 1, GEN_LT(GEN12
) },
500 { BRW_OPCODE_CMPN
, 113, "cmpn", 2, 1, GEN_GE(GEN12
) },
501 { BRW_OPCODE_CSEL
, 18, "csel", 3, 1, GEN_GE(GEN8
) & GEN_LT(GEN12
) },
502 { BRW_OPCODE_CSEL
, 114, "csel", 3, 1, GEN_GE(GEN12
) },
503 { BRW_OPCODE_F32TO16
, 19, "f32to16", 1, 1, GEN7
| GEN75
},
504 { BRW_OPCODE_F16TO32
, 20, "f16to32", 1, 1, GEN7
| GEN75
},
505 { BRW_OPCODE_BFREV
, 23, "bfrev", 1, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
506 { BRW_OPCODE_BFREV
, 119, "bfrev", 1, 1, GEN_GE(GEN12
) },
507 { BRW_OPCODE_BFE
, 24, "bfe", 3, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
508 { BRW_OPCODE_BFE
, 120, "bfe", 3, 1, GEN_GE(GEN12
) },
509 { BRW_OPCODE_BFI1
, 25, "bfi1", 2, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
510 { BRW_OPCODE_BFI1
, 121, "bfi1", 2, 1, GEN_GE(GEN12
) },
511 { BRW_OPCODE_BFI2
, 26, "bfi2", 3, 1, GEN_GE(GEN7
) & GEN_LT(GEN12
) },
512 { BRW_OPCODE_BFI2
, 122, "bfi2", 3, 1, GEN_GE(GEN12
) },
513 { BRW_OPCODE_JMPI
, 32, "jmpi", 0, 0, GEN_ALL
},
514 { BRW_OPCODE_BRD
, 33, "brd", 0, 0, GEN_GE(GEN7
) },
515 { BRW_OPCODE_IF
, 34, "if", 0, 0, GEN_ALL
},
516 { BRW_OPCODE_IFF
, 35, "iff", 0, 0, GEN_LE(GEN5
) },
517 { BRW_OPCODE_BRC
, 35, "brc", 0, 0, GEN_GE(GEN7
) },
518 { BRW_OPCODE_ELSE
, 36, "else", 0, 0, GEN_ALL
},
519 { BRW_OPCODE_ENDIF
, 37, "endif", 0, 0, GEN_ALL
},
520 { BRW_OPCODE_DO
, 38, "do", 0, 0, GEN_LE(GEN5
) },
521 { BRW_OPCODE_CASE
, 38, "case", 0, 0, GEN6
},
522 { BRW_OPCODE_WHILE
, 39, "while", 0, 0, GEN_ALL
},
523 { BRW_OPCODE_BREAK
, 40, "break", 0, 0, GEN_ALL
},
524 { BRW_OPCODE_CONTINUE
, 41, "cont", 0, 0, GEN_ALL
},
525 { BRW_OPCODE_HALT
, 42, "halt", 0, 0, GEN_ALL
},
526 { BRW_OPCODE_CALLA
, 43, "calla", 0, 0, GEN_GE(GEN75
) },
527 { BRW_OPCODE_MSAVE
, 44, "msave", 0, 0, GEN_LE(GEN5
) },
528 { BRW_OPCODE_CALL
, 44, "call", 0, 0, GEN_GE(GEN6
) },
529 { BRW_OPCODE_MREST
, 45, "mrest", 0, 0, GEN_LE(GEN5
) },
530 { BRW_OPCODE_RET
, 45, "ret", 0, 0, GEN_GE(GEN6
) },
531 { BRW_OPCODE_PUSH
, 46, "push", 0, 0, GEN_LE(GEN5
) },
532 { BRW_OPCODE_FORK
, 46, "fork", 0, 0, GEN6
},
533 { BRW_OPCODE_GOTO
, 46, "goto", 0, 0, GEN_GE(GEN8
) },
534 { BRW_OPCODE_POP
, 47, "pop", 2, 0, GEN_LE(GEN5
) },
535 { BRW_OPCODE_WAIT
, 48, "wait", 1, 0, GEN_LT(GEN12
) },
536 { BRW_OPCODE_SEND
, 49, "send", 1, 1, GEN_ALL
},
537 { BRW_OPCODE_SENDC
, 50, "sendc", 1, 1, GEN_ALL
},
538 { BRW_OPCODE_SENDS
, 51, "sends", 2, 1, GEN_GE(GEN9
) & GEN_LT(GEN12
) },
539 { BRW_OPCODE_SENDSC
, 52, "sendsc", 2, 1, GEN_GE(GEN9
) & GEN_LT(GEN12
) },
540 { BRW_OPCODE_MATH
, 56, "math", 2, 1, GEN_GE(GEN6
) },
541 { BRW_OPCODE_ADD
, 64, "add", 2, 1, GEN_ALL
},
542 { BRW_OPCODE_MUL
, 65, "mul", 2, 1, GEN_ALL
},
543 { BRW_OPCODE_AVG
, 66, "avg", 2, 1, GEN_ALL
},
544 { BRW_OPCODE_FRC
, 67, "frc", 1, 1, GEN_ALL
},
545 { BRW_OPCODE_RNDU
, 68, "rndu", 1, 1, GEN_ALL
},
546 { BRW_OPCODE_RNDD
, 69, "rndd", 1, 1, GEN_ALL
},
547 { BRW_OPCODE_RNDE
, 70, "rnde", 1, 1, GEN_ALL
},
548 { BRW_OPCODE_RNDZ
, 71, "rndz", 1, 1, GEN_ALL
},
549 { BRW_OPCODE_MAC
, 72, "mac", 2, 1, GEN_ALL
},
550 { BRW_OPCODE_MACH
, 73, "mach", 2, 1, GEN_ALL
},
551 { BRW_OPCODE_LZD
, 74, "lzd", 1, 1, GEN_ALL
},
552 { BRW_OPCODE_FBH
, 75, "fbh", 1, 1, GEN_GE(GEN7
) },
553 { BRW_OPCODE_FBL
, 76, "fbl", 1, 1, GEN_GE(GEN7
) },
554 { BRW_OPCODE_CBIT
, 77, "cbit", 1, 1, GEN_GE(GEN7
) },
555 { BRW_OPCODE_ADDC
, 78, "addc", 2, 1, GEN_GE(GEN7
) },
556 { BRW_OPCODE_SUBB
, 79, "subb", 2, 1, GEN_GE(GEN7
) },
557 { BRW_OPCODE_SAD2
, 80, "sad2", 2, 1, GEN_ALL
},
558 { BRW_OPCODE_SADA2
, 81, "sada2", 2, 1, GEN_ALL
},
559 { BRW_OPCODE_DP4
, 84, "dp4", 2, 1, GEN_LT(GEN11
) },
560 { BRW_OPCODE_DPH
, 85, "dph", 2, 1, GEN_LT(GEN11
) },
561 { BRW_OPCODE_DP3
, 86, "dp3", 2, 1, GEN_LT(GEN11
) },
562 { BRW_OPCODE_DP2
, 87, "dp2", 2, 1, GEN_LT(GEN11
) },
563 { BRW_OPCODE_LINE
, 89, "line", 2, 1, GEN_LE(GEN10
) },
564 { BRW_OPCODE_PLN
, 90, "pln", 2, 1, GEN_GE(GEN45
) & GEN_LE(GEN10
) },
565 { BRW_OPCODE_MAD
, 91, "mad", 3, 1, GEN_GE(GEN6
) },
566 { BRW_OPCODE_LRP
, 92, "lrp", 3, 1, GEN_GE(GEN6
) & GEN_LE(GEN10
) },
567 { BRW_OPCODE_MADM
, 93, "madm", 3, 1, GEN_GE(GEN8
) },
568 { BRW_OPCODE_NENOP
, 125, "nenop", 0, 0, GEN45
},
569 { BRW_OPCODE_NOP
, 126, "nop", 0, 0, GEN_LT(GEN12
) },
570 { BRW_OPCODE_NOP
, 96, "nop", 0, 0, GEN_GE(GEN12
) }
574 gen_from_devinfo(const struct gen_device_info
*devinfo
)
576 switch (devinfo
->gen
) {
577 case 4: return devinfo
->is_g4x
? GEN45
: GEN4
;
580 case 7: return devinfo
->is_haswell
? GEN75
: GEN7
;
583 case 10: return GEN10
;
584 case 11: return GEN11
;
585 case 12: return GEN12
;
587 unreachable("not reached");
592 * Look up the opcode_descs[] entry with \p key member matching \p k which is
593 * supported by the device specified by \p devinfo, or NULL if there is no
596 * This is implemented by using an index data structure (storage for which is
597 * provided by the caller as \p index_gen and \p index_descs) in order to
598 * provide efficient constant-time look-up.
600 static const opcode_desc
*
601 lookup_opcode_desc(gen
*index_gen
,
602 const opcode_desc
**index_descs
,
604 unsigned opcode_desc::*key
,
605 const gen_device_info
*devinfo
,
608 if (*index_gen
!= gen_from_devinfo(devinfo
)) {
609 *index_gen
= gen_from_devinfo(devinfo
);
611 for (unsigned l
= 0; l
< index_size
; l
++)
612 index_descs
[l
] = NULL
;
614 for (unsigned i
= 0; i
< ARRAY_SIZE(opcode_descs
); i
++) {
615 if (opcode_descs
[i
].gens
& *index_gen
) {
616 const unsigned l
= opcode_descs
[i
].*key
;
617 assert(l
< index_size
&& !index_descs
[l
]);
618 index_descs
[l
] = &opcode_descs
[i
];
624 return index_descs
[k
];
630 * Return the matching opcode_desc for the specified IR opcode and hardware
631 * generation, or NULL if the opcode is not supported by the device.
633 const struct opcode_desc
*
634 brw_opcode_desc(const struct gen_device_info
*devinfo
, enum opcode opcode
)
636 static __thread gen index_gen
= {};
637 static __thread
const opcode_desc
*index_descs
[NUM_BRW_OPCODES
];
638 return lookup_opcode_desc(&index_gen
, index_descs
, ARRAY_SIZE(index_descs
),
639 &opcode_desc::ir
, devinfo
, opcode
);
643 * Return the matching opcode_desc for the specified HW opcode and hardware
644 * generation, or NULL if the opcode is not supported by the device.
646 const struct opcode_desc
*
647 brw_opcode_desc_from_hw(const struct gen_device_info
*devinfo
, unsigned hw
)
649 static __thread gen index_gen
= {};
650 static __thread
const opcode_desc
*index_descs
[128];
651 return lookup_opcode_desc(&index_gen
, index_descs
, ARRAY_SIZE(index_descs
),
652 &opcode_desc::hw
, devinfo
, hw
);