intel/compiler: Validate some instruction word encodings
src/intel/compiler/brw_eu_validate.c
1 /*
2 * Copyright © 2015-2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_eu_validate.c
25 *
26 * This file implements a pass that validates shader assembly.
27 *
28 * The restrictions implemented herein are intended to verify that instructions
29 * in shader assembly do not violate restrictions documented in the graphics
30 * programming reference manuals.
31 *
32 * The restrictions are difficult for humans to quickly verify due to their
33 * complexity and abundance.
34 *
35 * It is critical that this code is thoroughly unit tested because false
36 * results will lead developers astray, which is worse than having no validator
37 * at all. Functional changes to this file without corresponding unit tests (in
38 * test_eu_validate.cpp) will be rejected.
39 */
40
41 #include "brw_eu.h"
42
43 /* We're going to do lots of string concatenation, so this should help. */
44 struct string {
45 char *str;
46 size_t len;
47 };
48
49 static void
50 cat(struct string *dest, const struct string src)
51 {
52 dest->str = realloc(dest->str, dest->len + src.len + 1);
53 memcpy(dest->str + dest->len, src.str, src.len);
54 dest->str[dest->len + src.len] = '\0';
55 dest->len = dest->len + src.len;
56 }
57 #define CAT(dest, src) cat(&dest, (struct string){src, strlen(src)})
58
59 static bool
60 contains(const struct string haystack, const struct string needle)
61 {
62 return haystack.str && memmem(haystack.str, haystack.len,
63 needle.str, needle.len) != NULL;
64 }
65 #define CONTAINS(haystack, needle) \
66 contains(haystack, (struct string){needle, strlen(needle)})
67
68 #define error(str) "\tERROR: " str "\n"
69 #define ERROR_INDENT "\t "
70
71 #define ERROR(msg) ERROR_IF(true, msg)
72 #define ERROR_IF(cond, msg) \
73 do { \
74 if ((cond) && !CONTAINS(error_msg, error(msg))) { \
75 CAT(error_msg, error(msg)); \
76 } \
77 } while(0)
78
79 #define CHECK(func, args...) \
80 do { \
81 struct string __msg = func(devinfo, inst, ##args); \
82 if (__msg.str) { \
83 cat(&error_msg, __msg); \
84 free(__msg.str); \
85 } \
86 } while (0)
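/* Typical usage (illustrative): a check function below declares a local
 * `struct string error_msg` and records a violation with, e.g.,
 *
 *    ERROR_IF(exec_size > 8, "Align16 mixed float mode is limited to SIMD8");
 *
 * which appends "\tERROR: ...\n" to error_msg at most once per message, while
 * CHECK() is how a caller runs a whole check function, concatenating and then
 * freeing whatever string it returned.
 */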
87
88 #define STRIDE(stride) (stride != 0 ? 1 << ((stride) - 1) : 0)
89 #define WIDTH(width) (1 << (width))
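/* For reference: as the macros above imply, the stride fields encode
 * log2(stride) + 1 with 0 meaning a stride of 0, and the width field encodes
 * log2(width).  So, for example, STRIDE(3) == 4 and WIDTH(2) == 4, which is
 * how the vertical stride and width of a <4;4,1> region decode from the raw
 * instruction words.
 */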
90
91 static bool
92 inst_is_send(const struct gen_device_info *devinfo, const brw_inst *inst)
93 {
94 switch (brw_inst_opcode(devinfo, inst)) {
95 case BRW_OPCODE_SEND:
96 case BRW_OPCODE_SENDC:
97 case BRW_OPCODE_SENDS:
98 case BRW_OPCODE_SENDSC:
99 return true;
100 default:
101 return false;
102 }
103 }
104
105 static bool
106 inst_is_split_send(const struct gen_device_info *devinfo, const brw_inst *inst)
107 {
108 if (devinfo->gen >= 12) {
109 return inst_is_send(devinfo, inst);
110 } else {
111 switch (brw_inst_opcode(devinfo, inst)) {
112 case BRW_OPCODE_SENDS:
113 case BRW_OPCODE_SENDSC:
114 return true;
115 default:
116 return false;
117 }
118 }
119 }
120
121 static unsigned
122 signed_type(unsigned type)
123 {
124 switch (type) {
125 case BRW_REGISTER_TYPE_UD: return BRW_REGISTER_TYPE_D;
126 case BRW_REGISTER_TYPE_UW: return BRW_REGISTER_TYPE_W;
127 case BRW_REGISTER_TYPE_UB: return BRW_REGISTER_TYPE_B;
128 case BRW_REGISTER_TYPE_UQ: return BRW_REGISTER_TYPE_Q;
129 default: return type;
130 }
131 }
132
133 static enum brw_reg_type
134 inst_dst_type(const struct gen_device_info *devinfo, const brw_inst *inst)
135 {
136 return (devinfo->gen < 12 || !inst_is_send(devinfo, inst)) ?
137 brw_inst_dst_type(devinfo, inst) : BRW_REGISTER_TYPE_D;
138 }
139
140 static bool
141 inst_is_raw_move(const struct gen_device_info *devinfo, const brw_inst *inst)
142 {
143 unsigned dst_type = signed_type(inst_dst_type(devinfo, inst));
144 unsigned src_type = signed_type(brw_inst_src0_type(devinfo, inst));
145
146 if (brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE) {
147 /* FIXME: not strictly true */
148 if (brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_VF ||
149 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_UV ||
150 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_V) {
151 return false;
152 }
153 } else if (brw_inst_src0_negate(devinfo, inst) ||
154 brw_inst_src0_abs(devinfo, inst)) {
155 return false;
156 }
157
158 return brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MOV &&
159 brw_inst_saturate(devinfo, inst) == 0 &&
160 dst_type == src_type;
161 }
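/* As an illustrative (hypothetical) example, mov(8) r10<1>:UD r2<8;8,1>:D is
 * a raw move in the sense above: MOV, no saturate, no source modifiers, and
 * the types differ only in signedness.  A MOV with abs/negate, saturate, or a
 * VF/V/UV immediate source is not.
 */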
162
163 static bool
164 dst_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
165 {
166 return brw_inst_dst_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
167 brw_inst_dst_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
168 }
169
170 static bool
171 src0_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
172 {
173 return brw_inst_src0_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT &&
174 brw_inst_src0_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
175 brw_inst_src0_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
176 }
177
178 static bool
179 src1_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
180 {
181 return brw_inst_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
182 brw_inst_src1_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
183 }
184
185 static bool
186 src0_is_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
187 {
188 return brw_inst_src0_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
189 (brw_inst_src0_da_reg_nr(devinfo, inst) & 0xF0) == BRW_ARF_ACCUMULATOR;
190 }
191
192 static bool
193 src1_is_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
194 {
195 return brw_inst_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
196 (brw_inst_src1_da_reg_nr(devinfo, inst) & 0xF0) == BRW_ARF_ACCUMULATOR;
197 }
198
199 static bool
200 src0_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
201 {
202 return brw_inst_src0_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
203 brw_inst_src0_width(devinfo, inst) == BRW_WIDTH_1 &&
204 brw_inst_src0_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
205 }
206
207 static bool
208 src1_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
209 {
210 return brw_inst_src1_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
211 brw_inst_src1_width(devinfo, inst) == BRW_WIDTH_1 &&
212 brw_inst_src1_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
213 }
214
215 static unsigned
216 num_sources_from_inst(const struct gen_device_info *devinfo,
217 const brw_inst *inst)
218 {
219 const struct opcode_desc *desc =
220 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
221 unsigned math_function;
222
223 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {
224 math_function = brw_inst_math_function(devinfo, inst);
225 } else if (devinfo->gen < 6 &&
226 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND) {
227 if (brw_inst_sfid(devinfo, inst) == BRW_SFID_MATH) {
228 /* src1 must be a descriptor (including the information to determine
229 * that the SEND is doing an extended math operation), but src0 can
230 * actually be null since it serves as the source of the implicit GRF
231 * to MRF move.
232 *
233 * If we stop using that functionality, we'll have to revisit this.
234 */
235 return 2;
236 } else {
237 /* Send instructions are allowed to have null sources since they use
238 * the base_mrf field to specify which message register source.
239 */
240 return 0;
241 }
242 } else {
243 assert(desc->nsrc < 4);
244 return desc->nsrc;
245 }
246
247 switch (math_function) {
248 case BRW_MATH_FUNCTION_INV:
249 case BRW_MATH_FUNCTION_LOG:
250 case BRW_MATH_FUNCTION_EXP:
251 case BRW_MATH_FUNCTION_SQRT:
252 case BRW_MATH_FUNCTION_RSQ:
253 case BRW_MATH_FUNCTION_SIN:
254 case BRW_MATH_FUNCTION_COS:
255 case BRW_MATH_FUNCTION_SINCOS:
256 case GEN8_MATH_FUNCTION_INVM:
257 case GEN8_MATH_FUNCTION_RSQRTM:
258 return 1;
259 case BRW_MATH_FUNCTION_FDIV:
260 case BRW_MATH_FUNCTION_POW:
261 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
262 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
263 case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
264 return 2;
265 default:
266 unreachable("not reached");
267 }
268 }
269
270 static struct string
271 invalid_values(const struct gen_device_info *devinfo, const brw_inst *inst)
272 {
273 unsigned num_sources = num_sources_from_inst(devinfo, inst);
274 struct string error_msg = { .str = NULL, .len = 0 };
275
276 switch ((enum brw_execution_size) brw_inst_exec_size(devinfo, inst)) {
277 case BRW_EXECUTE_1:
278 case BRW_EXECUTE_2:
279 case BRW_EXECUTE_4:
280 case BRW_EXECUTE_8:
281 case BRW_EXECUTE_16:
282 case BRW_EXECUTE_32:
283 break;
284 default:
285 ERROR("invalid execution size");
286 break;
287 }
288
289 if (inst_is_send(devinfo, inst))
290 return error_msg;
291
292 if (num_sources == 3) {
293 /* Nothing to test:
294 * No 3-src instructions on Gen4-5
295 * No reg file bits on Gen6-10 (align16)
296 * No invalid encodings on Gen10-12 (align1)
297 */
298 } else {
299 if (devinfo->gen > 6) {
300 ERROR_IF(brw_inst_dst_reg_file(devinfo, inst) == MRF ||
301 (num_sources > 0 &&
302 brw_inst_src0_reg_file(devinfo, inst) == MRF) ||
303 (num_sources > 1 &&
304 brw_inst_src1_reg_file(devinfo, inst) == MRF),
305 "invalid register file encoding");
306 }
307 }
308
309 if (error_msg.str)
310 return error_msg;
311
312 if (num_sources == 3) {
313 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
314 if (devinfo->gen >= 10) {
315 ERROR_IF(brw_inst_3src_a1_dst_type (devinfo, inst) == INVALID_REG_TYPE ||
316 brw_inst_3src_a1_src0_type(devinfo, inst) == INVALID_REG_TYPE ||
317 brw_inst_3src_a1_src1_type(devinfo, inst) == INVALID_REG_TYPE ||
318 brw_inst_3src_a1_src2_type(devinfo, inst) == INVALID_REG_TYPE,
319 "invalid register type encoding");
320 } else {
321 ERROR("Align1 mode not allowed on Gen < 10");
322 }
323 } else {
324 ERROR_IF(brw_inst_3src_a16_dst_type(devinfo, inst) == INVALID_REG_TYPE ||
325 brw_inst_3src_a16_src_type(devinfo, inst) == INVALID_REG_TYPE,
326 "invalid register type encoding");
327 }
328 } else {
329 ERROR_IF(brw_inst_dst_type (devinfo, inst) == INVALID_REG_TYPE ||
330 (num_sources > 0 &&
331 brw_inst_src0_type(devinfo, inst) == INVALID_REG_TYPE) ||
332 (num_sources > 1 &&
333 brw_inst_src1_type(devinfo, inst) == INVALID_REG_TYPE),
334 "invalid register type encoding");
335 }
336
337 return error_msg;
338 }
339
340 static struct string
341 sources_not_null(const struct gen_device_info *devinfo,
342 const brw_inst *inst)
343 {
344 unsigned num_sources = num_sources_from_inst(devinfo, inst);
345 struct string error_msg = { .str = NULL, .len = 0 };
346
347 /* Nothing to test. 3-src instructions can only have GRF sources, and
348 * there's no bit to control the file.
349 */
350 if (num_sources == 3)
351 return (struct string){};
352
353 /* Nothing to test. Split sends can only encode a file in sources that are
354 * allowed to be NULL.
355 */
356 if (inst_is_split_send(devinfo, inst))
357 return (struct string){};
358
359 if (num_sources >= 1 && brw_inst_opcode(devinfo, inst) != BRW_OPCODE_SYNC)
360 ERROR_IF(src0_is_null(devinfo, inst), "src0 is null");
361
362 if (num_sources == 2)
363 ERROR_IF(src1_is_null(devinfo, inst), "src1 is null");
364
365 return error_msg;
366 }
367
368 static struct string
369 alignment_supported(const struct gen_device_info *devinfo,
370 const brw_inst *inst)
371 {
372 struct string error_msg = { .str = NULL, .len = 0 };
373
374 ERROR_IF(devinfo->gen >= 11 && brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16,
375 "Align16 not supported");
376
377 return error_msg;
378 }
379
380 static bool
381 inst_uses_src_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
382 {
383 /* Check instructions that use implicit accumulator sources */
384 switch (brw_inst_opcode(devinfo, inst)) {
385 case BRW_OPCODE_MAC:
386 case BRW_OPCODE_MACH:
387 case BRW_OPCODE_SADA2:
388 return true;
389 default:
390 break;
391 }
392
393 /* FIXME: support 3-src instructions */
394 unsigned num_sources = num_sources_from_inst(devinfo, inst);
395 assert(num_sources < 3);
396
397 return src0_is_acc(devinfo, inst) || (num_sources > 1 && src1_is_acc(devinfo, inst));
398 }
399
400 static struct string
401 send_restrictions(const struct gen_device_info *devinfo,
402 const brw_inst *inst)
403 {
404 struct string error_msg = { .str = NULL, .len = 0 };
405
406 if (inst_is_split_send(devinfo, inst)) {
407 ERROR_IF(brw_inst_send_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
408 brw_inst_send_src1_reg_nr(devinfo, inst) != BRW_ARF_NULL,
409 "src1 of split send must be a GRF or NULL");
410
411 ERROR_IF(brw_inst_eot(devinfo, inst) &&
412 brw_inst_src0_da_reg_nr(devinfo, inst) < 112,
413 "send with EOT must use g112-g127");
414 ERROR_IF(brw_inst_eot(devinfo, inst) &&
415 brw_inst_send_src1_reg_file(devinfo, inst) == BRW_GENERAL_REGISTER_FILE &&
416 brw_inst_send_src1_reg_nr(devinfo, inst) < 112,
417 "send with EOT must use g112-g127");
418
419 if (brw_inst_send_src1_reg_file(devinfo, inst) == BRW_GENERAL_REGISTER_FILE) {
420 /* Assume minimums if we don't know */
421 unsigned mlen = 1;
422 if (!brw_inst_send_sel_reg32_desc(devinfo, inst)) {
423 const uint32_t desc = brw_inst_send_desc(devinfo, inst);
424 mlen = brw_message_desc_mlen(devinfo, desc);
425 }
426
427 unsigned ex_mlen = 1;
428 if (!brw_inst_send_sel_reg32_ex_desc(devinfo, inst)) {
429 const uint32_t ex_desc = brw_inst_sends_ex_desc(devinfo, inst);
430 ex_mlen = brw_message_ex_desc_ex_mlen(devinfo, ex_desc);
431 }
432 const unsigned src0_reg_nr = brw_inst_src0_da_reg_nr(devinfo, inst);
433 const unsigned src1_reg_nr = brw_inst_send_src1_reg_nr(devinfo, inst);
434 ERROR_IF((src0_reg_nr <= src1_reg_nr &&
435 src1_reg_nr < src0_reg_nr + mlen) ||
436 (src1_reg_nr <= src0_reg_nr &&
437 src0_reg_nr < src1_reg_nr + ex_mlen),
438 "split send payloads must not overlap");
439 }
440 } else if (inst_is_send(devinfo, inst)) {
441 ERROR_IF(brw_inst_src0_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT,
442 "send must use direct addressing");
443
444 if (devinfo->gen >= 7) {
445 ERROR_IF(brw_inst_send_src0_reg_file(devinfo, inst) != BRW_GENERAL_REGISTER_FILE,
446 "send from non-GRF");
447 ERROR_IF(brw_inst_eot(devinfo, inst) &&
448 brw_inst_src0_da_reg_nr(devinfo, inst) < 112,
449 "send with EOT must use g112-g127");
450 }
451
452 if (devinfo->gen >= 8) {
453 ERROR_IF(!dst_is_null(devinfo, inst) &&
454 (brw_inst_dst_da_reg_nr(devinfo, inst) +
455 brw_inst_rlen(devinfo, inst) > 127) &&
456 (brw_inst_src0_da_reg_nr(devinfo, inst) +
457 brw_inst_mlen(devinfo, inst) >
458 brw_inst_dst_da_reg_nr(devinfo, inst)),
459 "r127 must not be used for return address when there is "
460 "a src and dest overlap");
461 }
462 }
463
464 return error_msg;
465 }
466
467 static bool
468 is_unsupported_inst(const struct gen_device_info *devinfo,
469 const brw_inst *inst)
470 {
471 return brw_inst_opcode(devinfo, inst) == BRW_OPCODE_ILLEGAL;
472 }
473
474 /**
475 * Returns whether a combination of two types would qualify as mixed float
476 * operation mode
477 */
478 static inline bool
479 types_are_mixed_float(enum brw_reg_type t0, enum brw_reg_type t1)
480 {
481 return (t0 == BRW_REGISTER_TYPE_F && t1 == BRW_REGISTER_TYPE_HF) ||
482 (t1 == BRW_REGISTER_TYPE_F && t0 == BRW_REGISTER_TYPE_HF);
483 }
484
485 static enum brw_reg_type
486 execution_type_for_type(enum brw_reg_type type)
487 {
488 switch (type) {
489 case BRW_REGISTER_TYPE_NF:
490 case BRW_REGISTER_TYPE_DF:
491 case BRW_REGISTER_TYPE_F:
492 case BRW_REGISTER_TYPE_HF:
493 return type;
494
495 case BRW_REGISTER_TYPE_VF:
496 return BRW_REGISTER_TYPE_F;
497
498 case BRW_REGISTER_TYPE_Q:
499 case BRW_REGISTER_TYPE_UQ:
500 return BRW_REGISTER_TYPE_Q;
501
502 case BRW_REGISTER_TYPE_D:
503 case BRW_REGISTER_TYPE_UD:
504 return BRW_REGISTER_TYPE_D;
505
506 case BRW_REGISTER_TYPE_W:
507 case BRW_REGISTER_TYPE_UW:
508 case BRW_REGISTER_TYPE_B:
509 case BRW_REGISTER_TYPE_UB:
510 case BRW_REGISTER_TYPE_V:
511 case BRW_REGISTER_TYPE_UV:
512 return BRW_REGISTER_TYPE_W;
513 }
514 unreachable("not reached");
515 }
516
517 /**
518 * Returns the execution type of an instruction \p inst
519 */
520 static enum brw_reg_type
521 execution_type(const struct gen_device_info *devinfo, const brw_inst *inst)
522 {
523 unsigned num_sources = num_sources_from_inst(devinfo, inst);
524 enum brw_reg_type src0_exec_type, src1_exec_type;
525
526 /* Execution data type is independent of destination data type, except in
527 * mixed F/HF instructions.
528 */
529 enum brw_reg_type dst_exec_type = inst_dst_type(devinfo, inst);
530
531 src0_exec_type = execution_type_for_type(brw_inst_src0_type(devinfo, inst));
532 if (num_sources == 1) {
533 if (src0_exec_type == BRW_REGISTER_TYPE_HF)
534 return dst_exec_type;
535 return src0_exec_type;
536 }
537
538 src1_exec_type = execution_type_for_type(brw_inst_src1_type(devinfo, inst));
539 if (types_are_mixed_float(src0_exec_type, src1_exec_type) ||
540 types_are_mixed_float(src0_exec_type, dst_exec_type) ||
541 types_are_mixed_float(src1_exec_type, dst_exec_type)) {
542 return BRW_REGISTER_TYPE_F;
543 }
544
545 if (src0_exec_type == src1_exec_type)
546 return src0_exec_type;
547
548 if (src0_exec_type == BRW_REGISTER_TYPE_NF ||
549 src1_exec_type == BRW_REGISTER_TYPE_NF)
550 return BRW_REGISTER_TYPE_NF;
551
552 /* On Gen < 6, if the operand types are mixed and one of them is float, the
553 * execution type is float (such mixing is not allowed on later platforms)
554 */
555 if (devinfo->gen < 6 &&
556 (src0_exec_type == BRW_REGISTER_TYPE_F ||
557 src1_exec_type == BRW_REGISTER_TYPE_F))
558 return BRW_REGISTER_TYPE_F;
559
560 if (src0_exec_type == BRW_REGISTER_TYPE_Q ||
561 src1_exec_type == BRW_REGISTER_TYPE_Q)
562 return BRW_REGISTER_TYPE_Q;
563
564 if (src0_exec_type == BRW_REGISTER_TYPE_D ||
565 src1_exec_type == BRW_REGISTER_TYPE_D)
566 return BRW_REGISTER_TYPE_D;
567
568 if (src0_exec_type == BRW_REGISTER_TYPE_W ||
569 src1_exec_type == BRW_REGISTER_TYPE_W)
570 return BRW_REGISTER_TYPE_W;
571
572 if (src0_exec_type == BRW_REGISTER_TYPE_DF ||
573 src1_exec_type == BRW_REGISTER_TYPE_DF)
574 return BRW_REGISTER_TYPE_DF;
575
576 unreachable("not reached");
577 }
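/* As a hypothetical example of the above: add(8) with a UW destination, a UB
 * src0 and a D src1 has source execution types W and D, so the instruction's
 * execution type is D; the destination type only comes into play when F/HF
 * mixing is involved.
 */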
578
579 /**
580 * Returns whether a region is packed
581 *
582 * A region is packed if its elements are adjacent in memory, with no
583 * intervening space, no overlap, and no replicated values.
584 */
585 static bool
586 is_packed(unsigned vstride, unsigned width, unsigned hstride)
587 {
588 if (vstride == width) {
589 if (vstride == 1) {
590 return hstride == 0;
591 } else {
592 return hstride == 1;
593 }
594 }
595
596 return false;
597 }
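/* For instance, <4;4,1>, <2;2,1> and <1;1,0> regions are packed under this
 * definition, while a scalar <0;1,0> region and a strided <8;4,2> region are
 * not.
 */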
598
599 /**
600 * Returns whether an instruction is an explicit or implicit conversion
601 * to/from half-float.
602 */
603 static bool
604 is_half_float_conversion(const struct gen_device_info *devinfo,
605 const brw_inst *inst)
606 {
607 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
608
609 unsigned num_sources = num_sources_from_inst(devinfo, inst);
610 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
611
612 if (dst_type != src0_type &&
613 (dst_type == BRW_REGISTER_TYPE_HF || src0_type == BRW_REGISTER_TYPE_HF)) {
614 return true;
615 } else if (num_sources > 1) {
616 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
617 return dst_type != src1_type &&
618 (dst_type == BRW_REGISTER_TYPE_HF ||
619 src1_type == BRW_REGISTER_TYPE_HF);
620 }
621
622 return false;
623 }
624
625 /*
626 * Returns whether an instruction is using mixed float operation mode
627 */
628 static bool
629 is_mixed_float(const struct gen_device_info *devinfo, const brw_inst *inst)
630 {
631 if (devinfo->gen < 8)
632 return false;
633
634 if (inst_is_send(devinfo, inst))
635 return false;
636
637 unsigned opcode = brw_inst_opcode(devinfo, inst);
638 const struct opcode_desc *desc = brw_opcode_desc(devinfo, opcode);
639 if (desc->ndst == 0)
640 return false;
641
642 /* FIXME: support 3-src instructions */
643 unsigned num_sources = num_sources_from_inst(devinfo, inst);
644 assert(num_sources < 3);
645
646 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
647 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
648
649 if (num_sources == 1)
650 return types_are_mixed_float(src0_type, dst_type);
651
652 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
653
654 return types_are_mixed_float(src0_type, src1_type) ||
655 types_are_mixed_float(src0_type, dst_type) ||
656 types_are_mixed_float(src1_type, dst_type);
657 }
658
659 /**
660 * Returns whether an instruction is an explicit or implicit conversion
661 * to/from byte.
662 */
663 static bool
664 is_byte_conversion(const struct gen_device_info *devinfo,
665 const brw_inst *inst)
666 {
667 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
668
669 unsigned num_sources = num_sources_from_inst(devinfo, inst);
670 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
671
672 if (dst_type != src0_type &&
673 (type_sz(dst_type) == 1 || type_sz(src0_type) == 1)) {
674 return true;
675 } else if (num_sources > 1) {
676 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
677 return dst_type != src1_type &&
678 (type_sz(dst_type) == 1 || type_sz(src1_type) == 1);
679 }
680
681 return false;
682 }
683
684 /**
685 * Checks restrictions listed in "General Restrictions Based on Operand Types"
686 * in the "Register Region Restrictions" section.
687 */
688 static struct string
689 general_restrictions_based_on_operand_types(const struct gen_device_info *devinfo,
690 const brw_inst *inst)
691 {
692 const struct opcode_desc *desc =
693 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
694 unsigned num_sources = num_sources_from_inst(devinfo, inst);
695 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
696 struct string error_msg = { .str = NULL, .len = 0 };
697
698 if (devinfo->gen >= 11) {
699 if (num_sources == 3) {
700 ERROR_IF(brw_reg_type_to_size(brw_inst_3src_a1_src1_type(devinfo, inst)) == 1 ||
701 brw_reg_type_to_size(brw_inst_3src_a1_src2_type(devinfo, inst)) == 1,
702 "Byte data type is not supported for src1/2 register regioning. This includes "
703 "byte broadcast as well.");
704 }
705 if (num_sources == 2) {
706 ERROR_IF(brw_reg_type_to_size(brw_inst_src1_type(devinfo, inst)) == 1,
707 "Byte data type is not supported for src1 register regioning. This includes "
708 "byte broadcast as well.");
709 }
710 }
711
712 if (num_sources == 3)
713 return error_msg;
714
715 if (inst_is_send(devinfo, inst))
716 return error_msg;
717
718 if (exec_size == 1)
719 return error_msg;
720
721 if (desc->ndst == 0)
722 return error_msg;
723
724 /* The PRMs say:
725 *
726 * Where n is the largest element size in bytes for any source or
727 * destination operand type, ExecSize * n must be <= 64.
728 *
729 * But we do not attempt to enforce it, because it is implied by other
730 * rules:
731 *
732 * - that the destination stride must match the execution data type
733 * - sources may not span more than two adjacent GRF registers
734 * - destination may not span more than two adjacent GRF registers
735 *
736 * In fact, checking it would weaken testing of the other rules.
737 */
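   /* For instance, an ExecSize-16 DF operation would give 16 * 8 = 128 > 64,
    * but the destination-stride and two-GRF span rules above already make such
    * an instruction invalid, so a separate check would be redundant.
    */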
738
739 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
740 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
741 bool dst_type_is_byte =
742 inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_B ||
743 inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_UB;
744
745 if (dst_type_is_byte) {
746 if (is_packed(exec_size * dst_stride, exec_size, dst_stride)) {
747 if (!inst_is_raw_move(devinfo, inst))
748 ERROR("Only raw MOV supports a packed-byte destination");
749 return error_msg;
750 }
751 }
752
753 unsigned exec_type = execution_type(devinfo, inst);
754 unsigned exec_type_size = brw_reg_type_to_size(exec_type);
755 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
756
757 /* On IVB/BYT, region parameters and execution size for DF are in terms of
758 * 32-bit elements, so they are doubled. For evaluating the validity of an
759 * instruction, we halve them.
760 */
761 if (devinfo->gen == 7 && !devinfo->is_haswell &&
762 exec_type_size == 8 && dst_type_size == 4)
763 dst_type_size = 8;
764
765 if (is_byte_conversion(devinfo, inst)) {
766 /* From the BDW+ PRM, Volume 2a, Command Reference, Instructions - MOV:
767 *
768 * "There is no direct conversion from B/UB to DF or DF to B/UB.
769 * There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB."
770 *
771 * Even if these restrictions are listed for the MOV instruction, we
772 * validate this more generally, since there is the possibility
773 * of implicit conversions from other instructions.
774 */
775 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
776 enum brw_reg_type src1_type = num_sources > 1 ?
777 brw_inst_src1_type(devinfo, inst) : 0;
778
779 ERROR_IF(type_sz(dst_type) == 1 &&
780 (type_sz(src0_type) == 8 ||
781 (num_sources > 1 && type_sz(src1_type) == 8)),
782 "There are no direct conversions between 64-bit types and B/UB");
783
784 ERROR_IF(type_sz(dst_type) == 8 &&
785 (type_sz(src0_type) == 1 ||
786 (num_sources > 1 && type_sz(src1_type) == 1)),
787 "There are no direct conversions between 64-bit types and B/UB");
788 }
789
790 if (is_half_float_conversion(devinfo, inst)) {
791 /**
792 * The restriction below is from the BDW+ PRM, Volume 2a, Command
793 * Reference, Instructions - MOV:
794 *
795 * "There is no direct conversion from HF to DF or DF to HF.
796 * There is no direct conversion from HF to Q/UQ or Q/UQ to HF."
797 *
798 * Even if these restrictions are listed for the MOV instruction, we
799 * validate this more generally, since there is the possibility
800 * of implicit conversions from other instructions, such as implicit
801 * conversion from integer to HF with the ADD instruction in SKL+.
802 */
803 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
804 enum brw_reg_type src1_type = num_sources > 1 ?
805 brw_inst_src1_type(devinfo, inst) : 0;
806 ERROR_IF(dst_type == BRW_REGISTER_TYPE_HF &&
807 (type_sz(src0_type) == 8 ||
808 (num_sources > 1 && type_sz(src1_type) == 8)),
809 "There are no direct conversions between 64-bit types and HF");
810
811 ERROR_IF(type_sz(dst_type) == 8 &&
812 (src0_type == BRW_REGISTER_TYPE_HF ||
813 (num_sources > 1 && src1_type == BRW_REGISTER_TYPE_HF)),
814 "There are no direct conversions between 64-bit types and HF");
815
816 /* From the BDW+ PRM:
817 *
818 * "Conversion between Integer and HF (Half Float) must be
819 * DWord-aligned and strided by a DWord on the destination."
820 *
821 * Also, the above restriction seems to be expanded on CHV and SKL+ by:
822 *
823 * "There is a relaxed alignment rule for word destinations. When
824 * the destination type is word (UW, W, HF), destination data types
825 * can be aligned to either the lowest word or the second lowest
826 * word of the execution channel. This means the destination data
827 * words can be either all in the even word locations or all in the
828 * odd word locations."
829 *
830 * We do not implement the second rule as is though, since empirical
831 * testing shows inconsistencies:
832 * - It suggests that packed 16-bit is not allowed, which is not true.
833 * - It suggests that conversions from Q/DF to W (which need to be
834 * 64-bit aligned on the destination) are not possible, which is
835 * not true.
836 *
837 * So from this rule we only validate the implication that conversions
838 * from F to HF need to be DWord strided (except in Align1 mixed
839 * float mode where packed fp16 destination is allowed so long as the
840 * destination is oword-aligned).
841 *
842 * Finally, we only validate this for Align1 because Align16 always
843 * requires packed destinations, so these restrictions can't possibly
844 * apply to Align16 mode.
845 */
846 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
847 if ((dst_type == BRW_REGISTER_TYPE_HF &&
848 (brw_reg_type_is_integer(src0_type) ||
849 (num_sources > 1 && brw_reg_type_is_integer(src1_type)))) ||
850 (brw_reg_type_is_integer(dst_type) &&
851 (src0_type == BRW_REGISTER_TYPE_HF ||
852 (num_sources > 1 && src1_type == BRW_REGISTER_TYPE_HF)))) {
853 ERROR_IF(dst_stride * dst_type_size != 4,
854 "Conversions between integer and half-float must be "
855 "strided by a DWord on the destination");
856
857 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
858 ERROR_IF(subreg % 4 != 0,
859 "Conversions between integer and half-float must be "
860 "aligned to a DWord on the destination");
861 } else if ((devinfo->is_cherryview || devinfo->gen >= 9) &&
862 dst_type == BRW_REGISTER_TYPE_HF) {
863 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
864 ERROR_IF(dst_stride != 2 &&
865 !(is_mixed_float(devinfo, inst) &&
866 dst_stride == 1 && subreg % 16 == 0),
867 "Conversions to HF must have either all words in even "
868 "word locations or all words in odd word locations or "
869 "be mixed-float with Oword-aligned packed destination");
870 }
871 }
872 }
873
874 /* There are special regioning rules for mixed-float mode in CHV and SKL that
875 * override the general rule for the ratio of sizes of the destination type
876 * and the execution type. We will add validation for those in a later patch.
877 */
878 bool validate_dst_size_and_exec_size_ratio =
879 !is_mixed_float(devinfo, inst) ||
880 !(devinfo->is_cherryview || devinfo->gen >= 9);
881
882 if (validate_dst_size_and_exec_size_ratio &&
883 exec_type_size > dst_type_size) {
884 if (!(dst_type_is_byte && inst_is_raw_move(devinfo, inst))) {
885 ERROR_IF(dst_stride * dst_type_size != exec_type_size,
886 "Destination stride must be equal to the ratio of the sizes "
887 "of the execution data type to the destination type");
888 }
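      /* A hypothetical example of the rule above: mov(8) r10.0<1>:W
       * r2.0<8;8,1>:D has a 4-byte execution type and a 2-byte destination
       * type, so the destination stride must be 2; with <1>:W the ERROR_IF
       * fires.
       */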
889
890 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
891
892 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 &&
893 brw_inst_dst_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT) {
894 /* The i965 PRM says:
895 *
896 * Implementation Restriction: The relaxed alignment rule for byte
897 * destination (#10.5) is not supported.
898 */
899 if ((devinfo->gen > 4 || devinfo->is_g4x) && dst_type_is_byte) {
900 ERROR_IF(subreg % exec_type_size != 0 &&
901 subreg % exec_type_size != 1,
902 "Destination subreg must be aligned to the size of the "
903 "execution data type (or to the next lowest byte for byte "
904 "destinations)");
905 } else {
906 ERROR_IF(subreg % exec_type_size != 0,
907 "Destination subreg must be aligned to the size of the "
908 "execution data type");
909 }
910 }
911 }
912
913 return error_msg;
914 }
915
916 /**
917 * Checks restrictions listed in "General Restrictions on Regioning Parameters"
918 * in the "Register Region Restrictions" section.
919 */
920 static struct string
921 general_restrictions_on_region_parameters(const struct gen_device_info *devinfo,
922 const brw_inst *inst)
923 {
924 const struct opcode_desc *desc =
925 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
926 unsigned num_sources = num_sources_from_inst(devinfo, inst);
927 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
928 struct string error_msg = { .str = NULL, .len = 0 };
929
930 if (num_sources == 3)
931 return (struct string){};
932
933 /* Split sends don't have the bits in the instruction to encode regions so
934 * there's nothing to check.
935 */
936 if (inst_is_split_send(devinfo, inst))
937 return (struct string){};
938
939 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16) {
940 if (desc->ndst != 0 && !dst_is_null(devinfo, inst))
941 ERROR_IF(brw_inst_dst_hstride(devinfo, inst) != BRW_HORIZONTAL_STRIDE_1,
942 "Destination Horizontal Stride must be 1");
943
944 if (num_sources >= 1) {
945 if (devinfo->is_haswell || devinfo->gen >= 8) {
946 ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
947 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
948 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
949 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
950 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
951 } else {
952 ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
953 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
954 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
955 "In Align16 mode, only VertStride of 0 or 4 is allowed");
956 }
957 }
958
959 if (num_sources == 2) {
960 if (devinfo->is_haswell || devinfo->gen >= 8) {
961 ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
962 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
963 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
964 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
965 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
966 } else {
967 ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
968 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
969 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
970 "In Align16 mode, only VertStride of 0 or 4 is allowed");
971 }
972 }
973
974 return error_msg;
975 }
976
977 for (unsigned i = 0; i < num_sources; i++) {
978 unsigned vstride, width, hstride, element_size, subreg;
979 enum brw_reg_type type;
980
981 #define DO_SRC(n) \
982 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
983 BRW_IMMEDIATE_VALUE) \
984 continue; \
985 \
986 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
987 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
988 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
989 type = brw_inst_src ## n ## _type(devinfo, inst); \
990 element_size = brw_reg_type_to_size(type); \
991 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst)
992
993 if (i == 0) {
994 DO_SRC(0);
995 } else {
996 DO_SRC(1);
997 }
998 #undef DO_SRC
999
1000 /* On IVB/BYT, region parameters and execution size for DF are in terms of
1001 * 32-bit elements, so they are doubled. For evaluating the validity of an
1002 * instruction, we halve them.
1003 */
1004 if (devinfo->gen == 7 && !devinfo->is_haswell &&
1005 element_size == 8)
1006 element_size = 4;
1007
1008 /* ExecSize must be greater than or equal to Width. */
1009 ERROR_IF(exec_size < width, "ExecSize must be greater than or equal "
1010 "to Width");
1011
1012 /* If ExecSize = Width and HorzStride ≠ 0,
1013 * VertStride must be set to Width * HorzStride.
1014 */
1015 if (exec_size == width && hstride != 0) {
1016 ERROR_IF(vstride != width * hstride,
1017 "If ExecSize = Width and HorzStride ≠ 0, "
1018 "VertStride must be set to Width * HorzStride");
1019 }
1020
1021 /* If Width = 1, HorzStride must be 0 regardless of the values of
1022 * ExecSize and VertStride.
1023 */
1024 if (width == 1) {
1025 ERROR_IF(hstride != 0,
1026 "If Width = 1, HorzStride must be 0 regardless "
1027 "of the values of ExecSize and VertStride");
1028 }
1029
1030 /* If ExecSize = Width = 1, both VertStride and HorzStride must be 0. */
1031 if (exec_size == 1 && width == 1) {
1032 ERROR_IF(vstride != 0 || hstride != 0,
1033 "If ExecSize = Width = 1, both VertStride "
1034 "and HorzStride must be 0");
1035 }
1036
1037 /* If VertStride = HorzStride = 0, Width must be 1 regardless of the
1038 * value of ExecSize.
1039 */
1040 if (vstride == 0 && hstride == 0) {
1041 ERROR_IF(width != 1,
1042 "If VertStride = HorzStride = 0, Width must be "
1043 "1 regardless of the value of ExecSize");
1044 }
1045
1046 /* VertStride must be used to cross GRF register boundaries. This rule
1047 * implies that elements within a 'Width' cannot cross GRF boundaries.
1048 */
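      /* E.g. (hypothetical): a :D source at byte offset 28 with a <8;4,1>
       * region places the elements of the first Width at bytes 28, 32, 36 and
       * 40, crossing a GRF boundary within the row, which the loop below
       * flags.
       */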
1049 const uint64_t mask = (1ULL << element_size) - 1;
1050 unsigned rowbase = subreg;
1051
1052 for (int y = 0; y < exec_size / width; y++) {
1053 uint64_t access_mask = 0;
1054 unsigned offset = rowbase;
1055
1056 for (int x = 0; x < width; x++) {
1057 access_mask |= mask << (offset % 64);
1058 offset += hstride * element_size;
1059 }
1060
1061 rowbase += vstride * element_size;
1062
1063 if ((uint32_t)access_mask != 0 && (access_mask >> 32) != 0) {
1064 ERROR("VertStride must be used to cross GRF register boundaries");
1065 break;
1066 }
1067 }
1068 }
1069
1070 /* Dst.HorzStride must not be 0. */
1071 if (desc->ndst != 0 && !dst_is_null(devinfo, inst)) {
1072 ERROR_IF(brw_inst_dst_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0,
1073 "Destination Horizontal Stride must not be 0");
1074 }
1075
1076 return error_msg;
1077 }
1078
1079 static struct string
1080 special_restrictions_for_mixed_float_mode(const struct gen_device_info *devinfo,
1081 const brw_inst *inst)
1082 {
1083 struct string error_msg = { .str = NULL, .len = 0 };
1084
1085 const unsigned opcode = brw_inst_opcode(devinfo, inst);
1086 const unsigned num_sources = num_sources_from_inst(devinfo, inst);
1087 if (num_sources >= 3)
1088 return error_msg;
1089
1090 if (!is_mixed_float(devinfo, inst))
1091 return error_msg;
1092
1093 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
1094 bool is_align16 = brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16;
1095
1096 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
1097 enum brw_reg_type src1_type = num_sources > 1 ?
1098 brw_inst_src1_type(devinfo, inst) : 0;
1099 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
1100
1101 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1102 bool dst_is_packed = is_packed(exec_size * dst_stride, exec_size, dst_stride);
1103
1104 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1105 * Float Operations:
1106 *
1107 * "Indirect addressing on source is not supported when source and
1108 * destination data types are mixed float."
1109 */
1110 ERROR_IF(brw_inst_src0_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT ||
1111 (num_sources > 1 &&
1112 brw_inst_src1_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT),
1113 "Indirect addressing on source is not supported when source and "
1114 "destination data types are mixed float");
1115
1116 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1117 * Float Operations:
1118 *
1119 * "No SIMD16 in mixed mode when destination is f32. Instruction
1120 * execution size must be no more than 8."
1121 */
1122 ERROR_IF(exec_size > 8 && dst_type == BRW_REGISTER_TYPE_F,
1123 "Mixed float mode with 32-bit float destination is limited "
1124 "to SIMD8");
1125
1126 if (is_align16) {
1127 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1128 * Float Operations:
1129 *
1130 * "In Align16 mode, when half float and float data types are mixed
1131 * between source operands OR between source and destination operands,
1132 * the register content are assumed to be packed."
1133 *
1134 * Since Align16 doesn't have a concept of horizontal stride (or width),
1135 * it means that vertical stride must always be 4, since 0 and 2 would
1136 * lead to replicated data, and any other value is disallowed in Align16.
1137 */
1138 ERROR_IF(brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
1139 "Align16 mixed float mode assumes packed data (vstride must be 4");
1140
1141 ERROR_IF(num_sources >= 2 &&
1142 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
1143 "Align16 mixed float mode assumes packed data (vstride must be 4");
1144
1145 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1146 * Float Operations:
1147 *
1148 * "For Align16 mixed mode, both input and output packed f16 data
1149 * must be oword aligned, no oword crossing in packed f16."
1150 *
1151 * The previous rule requires that Align16 operands are always packed,
1152 * and since there is only one bit for Align16 subnr, which represents
1153 * offsets 0B and 16B, this rule is always enforced and we don't need to
1154 * validate it.
1155 */
1156
1157 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1158 * Float Operations:
1159 *
1160 * "No SIMD16 in mixed mode when destination is packed f16 for both
1161 * Align1 and Align16."
1162 *
1163 * And:
1164 *
1165 * "In Align16 mode, when half float and float data types are mixed
1166 * between source operands OR between source and destination operands,
1167 * the register content are assumed to be packed."
1168 *
1169 * Which implies that SIMD16 is not available in Align16. This is further
1170 * confirmed by:
1171 *
1172 * "For Align16 mixed mode, both input and output packed f16 data
1173 * must be oword aligned, no oword crossing in packed f16"
1174 *
1175 * Since oword-aligned packed f16 data would cross oword boundaries when
1176 * the execution size is larger than 8.
1177 */
1178 ERROR_IF(exec_size > 8, "Align16 mixed float mode is limited to SIMD8");
1179
1180 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1181 * Float Operations:
1182 *
1183 * "No accumulator read access for Align16 mixed float."
1184 */
1185 ERROR_IF(inst_uses_src_acc(devinfo, inst),
1186 "No accumulator read access for Align16 mixed float");
1187 } else {
1188 assert(!is_align16);
1189
1190 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1191 * Float Operations:
1192 *
1193 * "No SIMD16 in mixed mode when destination is packed f16 for both
1194 * Align1 and Align16."
1195 */
1196 ERROR_IF(exec_size > 8 && dst_is_packed &&
1197 dst_type == BRW_REGISTER_TYPE_HF,
1198 "Align1 mixed float mode is limited to SIMD8 when destination "
1199 "is packed half-float");
1200
1201 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1202 * Float Operations:
1203 *
1204 * "Math operations for mixed mode:
1205 * - In Align1, f16 inputs need to be strided"
1206 */
1207 if (opcode == BRW_OPCODE_MATH) {
1208 if (src0_type == BRW_REGISTER_TYPE_HF) {
1209 ERROR_IF(STRIDE(brw_inst_src0_hstride(devinfo, inst)) <= 1,
1210 "Align1 mixed mode math needs strided half-float inputs");
1211 }
1212
1213 if (num_sources >= 2 && src1_type == BRW_REGISTER_TYPE_HF) {
1214 ERROR_IF(STRIDE(brw_inst_src1_hstride(devinfo, inst)) <= 1,
1215 "Align1 mixed mode math needs strided half-float inputs");
1216 }
1217 }
1218
1219 if (dst_type == BRW_REGISTER_TYPE_HF && dst_stride == 1) {
1220 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1221 * Float Operations:
1222 *
1223 * "In Align1, destination stride can be smaller than execution
1224 * type. When destination is stride of 1, 16 bit packed data is
1225 * updated on the destination. However, output packed f16 data
1226 * must be oword aligned, no oword crossing in packed f16."
1227 *
1228 * The requirement of not crossing oword boundaries for 16-bit oword
1229 * aligned data means that execution size is limited to 8.
1230 */
1231 unsigned subreg;
1232 if (brw_inst_dst_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT)
1233 subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1234 else
1235 subreg = brw_inst_dst_ia_subreg_nr(devinfo, inst);
1236 ERROR_IF(subreg % 16 != 0,
1237 "Align1 mixed mode packed half-float output must be "
1238 "oword aligned");
1239 ERROR_IF(exec_size > 8,
1240 "Align1 mixed mode packed half-float output must not "
1241 "cross oword boundaries (max exec size is 8)");
1242
1243 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1244 * Float Operations:
1245 *
1246 * "When source is float or half float from accumulator register and
1247 * destination is half float with a stride of 1, the source must
1248 * register aligned. i.e., source must have offset zero."
1249 *
1250 * Align16 mixed float mode doesn't allow accumulator access on sources,
1251 * so we only need to check this for Align1.
1252 */
1253 if (src0_is_acc(devinfo, inst) &&
1254 (src0_type == BRW_REGISTER_TYPE_F ||
1255 src0_type == BRW_REGISTER_TYPE_HF)) {
1256 ERROR_IF(brw_inst_src0_da1_subreg_nr(devinfo, inst) != 0,
1257 "Mixed float mode requires register-aligned accumulator "
1258 "source reads when destination is packed half-float");
1259
1260 }
1261
1262 if (num_sources > 1 &&
1263 src1_is_acc(devinfo, inst) &&
1264 (src1_type == BRW_REGISTER_TYPE_F ||
1265 src1_type == BRW_REGISTER_TYPE_HF)) {
1266 ERROR_IF(brw_inst_src1_da1_subreg_nr(devinfo, inst) != 0,
1267 "Mixed float mode requires register-aligned accumulator "
1268 "source reads when destination is packed half-float");
1269 }
1270 }
1271
1272 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1273 * Float Operations:
1274 *
1275 * "No swizzle is allowed when an accumulator is used as an implicit
1276 * source or an explicit source in an instruction. i.e. when
1277 * destination is half float with an implicit accumulator source,
1278 * destination stride needs to be 2."
1279 *
1280 * FIXME: it is not quite clear what the first sentence actually means
1281 * or its link to the implication described after it, so we only
1282 * validate the explicit implication, which is clearly described.
1283 */
1284 if (dst_type == BRW_REGISTER_TYPE_HF &&
1285 inst_uses_src_acc(devinfo, inst)) {
1286 ERROR_IF(dst_stride != 2,
1287 "Mixed float mode with implicit/explicit accumulator "
1288 "source and half-float destination requires a stride "
1289 "of 2 on the destination");
1290 }
1291 }
1292
1293 return error_msg;
1294 }
1295
1296 /**
1297 * Creates an \p access_mask for an \p exec_size, \p element_size, and a region
1298 *
1299 * An \p access_mask is a 32-element array of uint64_t, where each uint64_t is
1300 * a bitmask of bytes accessed by the region.
1301 *
1302 * For instance the access mask of the source gX.1<4,2,2>F in an exec_size = 4
1303 * instruction would be
1304 *
1305 * access_mask[0] = 0x00000000000000F0
1306 * access_mask[1] = 0x000000000000F000
1307 * access_mask[2] = 0x0000000000F00000
1308 * access_mask[3] = 0x00000000F0000000
1309 * access_mask[4-31] = 0
1310 *
1311 * because the first execution channel accesses bytes 7-4 and the second
1312 * execution channel accesses bytes 15-12, etc.
1313 */
1314 static void
1315 align1_access_mask(uint64_t access_mask[static 32],
1316 unsigned exec_size, unsigned element_size, unsigned subreg,
1317 unsigned vstride, unsigned width, unsigned hstride)
1318 {
1319 const uint64_t mask = (1ULL << element_size) - 1;
1320 unsigned rowbase = subreg;
1321 unsigned element = 0;
1322
1323 for (int y = 0; y < exec_size / width; y++) {
1324 unsigned offset = rowbase;
1325
1326 for (int x = 0; x < width; x++) {
1327 access_mask[element++] = mask << (offset % 64);
1328 offset += hstride * element_size;
1329 }
1330
1331 rowbase += vstride * element_size;
1332 }
1333
1334 assert(element == 0 || element == exec_size);
1335 }
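/* Tying the gX.1<4,2,2>F example above to the code: element_size = 4, so
 * mask = 0xF and subreg = 4 bytes.  Row 0 covers byte offsets 4 and 12
 * (hstride 2 * 4 bytes), giving 0xF0 and 0xF000; row 1 starts at
 * 4 + vstride 4 * 4 = 20, giving 0xF00000 and 0xF0000000.
 */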
1336
1337 /**
1338 * Returns the number of registers accessed according to the \p access_mask
1339 */
1340 static int
1341 registers_read(const uint64_t access_mask[static 32])
1342 {
1343 int regs_read = 0;
1344
1345 for (unsigned i = 0; i < 32; i++) {
1346 if (access_mask[i] > 0xFFFFFFFF) {
1347 return 2;
1348 } else if (access_mask[i]) {
1349 regs_read = 1;
1350 }
1351 }
1352
1353 return regs_read;
1354 }
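/* In the gX.1<4,2,2>F example every per-channel mask fits in the low 32 bits
 * (the first 32-byte GRF of the window), so this returns 1; any channel that
 * lands at byte 32 or beyond makes its mask exceed 0xFFFFFFFF and the result
 * becomes 2.
 */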
1355
1356 /**
1357 * Checks restrictions listed in "Region Alignment Rules" in the "Register
1358 * Region Restrictions" section.
1359 */
1360 static struct string
1361 region_alignment_rules(const struct gen_device_info *devinfo,
1362 const brw_inst *inst)
1363 {
1364 const struct opcode_desc *desc =
1365 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
1366 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1367 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
1368 uint64_t dst_access_mask[32], src0_access_mask[32], src1_access_mask[32];
1369 struct string error_msg = { .str = NULL, .len = 0 };
1370
1371 if (num_sources == 3)
1372 return (struct string){};
1373
1374 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16)
1375 return (struct string){};
1376
1377 if (inst_is_send(devinfo, inst))
1378 return (struct string){};
1379
1380 memset(dst_access_mask, 0, sizeof(dst_access_mask));
1381 memset(src0_access_mask, 0, sizeof(src0_access_mask));
1382 memset(src1_access_mask, 0, sizeof(src1_access_mask));
1383
1384 for (unsigned i = 0; i < num_sources; i++) {
1385 unsigned vstride, width, hstride, element_size, subreg;
1386 enum brw_reg_type type;
1387
1388 /* In Direct Addressing mode, a source cannot span more than 2 adjacent
1389 * GRF registers.
1390 */
1391
1392 #define DO_SRC(n) \
1393 if (brw_inst_src ## n ## _address_mode(devinfo, inst) != \
1394 BRW_ADDRESS_DIRECT) \
1395 continue; \
1396 \
1397 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
1398 BRW_IMMEDIATE_VALUE) \
1399 continue; \
1400 \
1401 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1402 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1403 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1404 type = brw_inst_src ## n ## _type(devinfo, inst); \
1405 element_size = brw_reg_type_to_size(type); \
1406 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1407 align1_access_mask(src ## n ## _access_mask, \
1408 exec_size, element_size, subreg, \
1409 vstride, width, hstride)
1410
1411 if (i == 0) {
1412 DO_SRC(0);
1413 } else {
1414 DO_SRC(1);
1415 }
1416 #undef DO_SRC
1417
1418 unsigned num_vstride = exec_size / width;
1419 unsigned num_hstride = width;
1420 unsigned vstride_elements = (num_vstride - 1) * vstride;
1421 unsigned hstride_elements = (num_hstride - 1) * hstride;
1422 unsigned offset = (vstride_elements + hstride_elements) * element_size +
1423 subreg;
1424 ERROR_IF(offset >= 64,
1425 "A source cannot span more than 2 adjacent GRF registers");
1426 }
1427
1428 if (desc->ndst == 0 || dst_is_null(devinfo, inst))
1429 return error_msg;
1430
1431 unsigned stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1432 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1433 unsigned element_size = brw_reg_type_to_size(dst_type);
1434 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1435 unsigned offset = ((exec_size - 1) * stride * element_size) + subreg;
1436 ERROR_IF(offset >= 64,
1437 "A destination cannot span more than 2 adjacent GRF registers");
1438
1439 if (error_msg.str)
1440 return error_msg;
1441
1442 /* On IVB/BYT, region parameters and execution size for DF are in terms of
1443 * 32-bit elements, so they are doubled. For evaluating the validity of an
1444 * instruction, we halve them.
1445 */
1446 if (devinfo->gen == 7 && !devinfo->is_haswell &&
1447 element_size == 8)
1448 element_size = 4;
1449
1450 align1_access_mask(dst_access_mask, exec_size, element_size, subreg,
1451 exec_size == 1 ? 0 : exec_size * stride,
1452 exec_size == 1 ? 1 : exec_size,
1453 exec_size == 1 ? 0 : stride);
1454
1455 unsigned dst_regs = registers_read(dst_access_mask);
1456 unsigned src0_regs = registers_read(src0_access_mask);
1457 unsigned src1_regs = registers_read(src1_access_mask);
1458
1459 /* The SNB, IVB, HSW, BDW, and CHV PRMs say:
1460 *
1461 * When an instruction has a source region spanning two registers and a
1462 * destination region contained in one register, the number of elements
1463 * must be the same between two sources and one of the following must be
1464 * true:
1465 *
1466 * 1. The destination region is entirely contained in the lower OWord
1467 * of a register.
1468 * 2. The destination region is entirely contained in the upper OWord
1469 * of a register.
1470 * 3. The destination elements are evenly split between the two OWords
1471 * of a register.
1472 */
1473 if (devinfo->gen <= 8) {
1474 if (dst_regs == 1 && (src0_regs == 2 || src1_regs == 2)) {
1475 unsigned upper_oword_writes = 0, lower_oword_writes = 0;
1476
1477 for (unsigned i = 0; i < exec_size; i++) {
1478 if (dst_access_mask[i] > 0x0000FFFF) {
1479 upper_oword_writes++;
1480 } else {
1481 assert(dst_access_mask[i] != 0);
1482 lower_oword_writes++;
1483 }
1484 }
1485
1486 ERROR_IF(lower_oword_writes != 0 &&
1487 upper_oword_writes != 0 &&
1488 upper_oword_writes != lower_oword_writes,
1489 "Writes must be to only one OWord or "
1490 "evenly split between OWords");
1491 }
1492 }
1493
1494 /* The IVB and HSW PRMs say:
1495 *
1496 * When an instruction has a source region that spans two registers and
1497 * the destination spans two registers, the destination elements must be
1498 * evenly split between the two registers [...]
1499 *
1500 * The SNB PRM contains similar wording (but written in a much more
1501 * confusing manner).
1502 *
1503 * The BDW PRM says:
1504 *
1505 * When destination spans two registers, the source may be one or two
1506 * registers. The destination elements must be evenly split between the
1507 * two registers.
1508 *
1509 * The SKL PRM says:
1510 *
1511 * When destination of MATH instruction spans two registers, the
1512 * destination elements must be evenly split between the two registers.
1513 *
1514 * It is not known whether this restriction applies to KBL or other Gens after
1515 * SKL.
1516 */
1517 if (devinfo->gen <= 8 ||
1518 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {
1519
1520 /* Nothing explicitly states that on Gen < 8 elements must be evenly
1521 * split between two destination registers in the two exceptional
1522 * source-region-spans-one-register cases, but since Broadwell requires
1523 * evenly split writes regardless of source region, we assume that it was
1524 * an oversight and require it.
1525 */
1526 if (dst_regs == 2) {
1527 unsigned upper_reg_writes = 0, lower_reg_writes = 0;
1528
1529 for (unsigned i = 0; i < exec_size; i++) {
1530 if (dst_access_mask[i] > 0xFFFFFFFF) {
1531 upper_reg_writes++;
1532 } else {
1533 assert(dst_access_mask[i] != 0);
1534 lower_reg_writes++;
1535 }
1536 }
1537
1538 ERROR_IF(upper_reg_writes != lower_reg_writes,
1539 "Writes must be evenly split between the two "
1540 "destination registers");
1541 }
1542 }
1543
1544 /* The IVB and HSW PRMs say:
1545 *
1546 * When an instruction has a source region that spans two registers and
1547 * the destination spans two registers, the destination elements must be
1548 * evenly split between the two registers and each destination register
1549 * must be entirely derived from one source register.
1550 *
1551 * Note: In such cases, the regioning parameters must ensure that the
1552 * offset from the two source registers is the same.
1553 *
1554 * The SNB PRM contains similar wording (but written in a much more
1555 * confusing manner).
1556 *
1557 * There are effectively three rules stated here:
1558 *
1559 * For an instruction with a source and a destination spanning two
1560 * registers,
1561 *
1562 * (1) destination elements must be evenly split between the two
1563 * registers
1564 * (2) all destination elements in a register must be derived
1565 * from one source register
1566 * (3) the offset (i.e. the starting location in each of the two
1567 * registers spanned by a region) must be the same in the two
1568 * registers spanned by a region
1569 *
1570 * It is impossible to violate rule (1) without violating (2) or (3), so we
1571 * do not attempt to validate it.
1572 */
1573 if (devinfo->gen <= 7 && dst_regs == 2) {
1574 for (unsigned i = 0; i < num_sources; i++) {
1575 #define DO_SRC(n) \
1576 if (src ## n ## _regs <= 1) \
1577 continue; \
1578 \
1579 for (unsigned i = 0; i < exec_size; i++) { \
1580 if ((dst_access_mask[i] > 0xFFFFFFFF) != \
1581 (src ## n ## _access_mask[i] > 0xFFFFFFFF)) { \
1582 ERROR("Each destination register must be entirely derived " \
1583 "from one source register"); \
1584 break; \
1585 } \
1586 } \
1587 \
1588 unsigned offset_0 = \
1589 brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1590 unsigned offset_1 = offset_0; \
1591 \
1592 for (unsigned i = 0; i < exec_size; i++) { \
1593 if (src ## n ## _access_mask[i] > 0xFFFFFFFF) { \
1594 offset_1 = __builtin_ctzll(src ## n ## _access_mask[i]) - 32; \
1595 break; \
1596 } \
1597 } \
1598 \
1599 ERROR_IF(num_sources == 2 && offset_0 != offset_1, \
1600 "The offset from the two source registers " \
1601 "must be the same")
1602
1603 if (i == 0) {
1604 DO_SRC(0);
1605 } else {
1606 DO_SRC(1);
1607 }
1608 #undef DO_SRC
1609 }
1610 }
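/* Illustrative example (a sketch, not from the PRM): on Gen7,
 *
 *    add(16)  r10.0<1>:d  r2.0<8;8,1>:d  r4.0<8;8,1>:d
 *
 * has both sources and the destination spanning two registers; r10 is
 * derived only from r2/r4 and r11 only from r3/r5, and the starting
 * offsets match, so rules (2) and (3) are satisfied.  Starting a
 * source at, say, r2.1 instead would make channel 7 of r10 read from
 * r3 and trip the "entirely derived from one source register" check.
 */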
1611
1612 /* The IVB and HSW PRMs say:
1613 *
1614 * When destination spans two registers, the source MUST span two
1615 * registers. The exception to the above rule:
1616 * 1. When source is scalar, the source registers are not
1617 * incremented.
1618 * 2. When source is packed integer Word and destination is packed
1619 * integer DWord, the source register is not incremented but the
1620 * source sub register is incremented.
1621 *
1622 * The SNB PRM does not contain this rule, but the internal documentation
1623 * indicates that it applies to SNB as well. We assume that the rule applies
1624 * to Gen <= 5 although their PRMs do not state it.
1625 *
1626 * While the documentation explicitly says in exception (2) that the
1627 * destination must be an integer DWord, the hardware allows at least a
1628 * float destination type as well. We emit such instructions from
1629 *
1630 * fs_visitor::emit_interpolation_setup_gen6
1631 * fs_visitor::emit_fragcoord_interpolation
1632 *
1633 * and have for years with no ill effects.
1634 *
1635 * Additionally the simulator source code indicates that the real condition
1636 * is that the size of the destination type is 4 bytes.
1637 */
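/* Illustrative example (a sketch, not from the PRM): on Gen7,
 *
 *    mov(16)  r10.0<1>:d  r2.0<8;8,1>:w
 *
 * has a destination spanning r10-r11 (16 x 4 bytes) while the packed
 * word source fits entirely in r2 (16 x 2 bytes).  Exception (2)
 * permits this; a source with a <0;1,0> scalar region would be
 * covered by exception (1) instead.
 */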
1638 if (devinfo->gen <= 7 && dst_regs == 2) {
1639 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1640 bool dst_is_packed_dword =
1641 is_packed(exec_size * stride, exec_size, stride) &&
1642 brw_reg_type_to_size(dst_type) == 4;
1643
1644 for (unsigned i = 0; i < num_sources; i++) {
1645 #define DO_SRC(n) \
1646 unsigned vstride, width, hstride; \
1647 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1648 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1649 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1650 bool src ## n ## _is_packed_word = \
1651 is_packed(vstride, width, hstride) && \
1652 (brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_W || \
1653 brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_UW); \
1654 \
1655 ERROR_IF(src ## n ## _regs == 1 && \
1656 !src ## n ## _has_scalar_region(devinfo, inst) && \
1657 !(dst_is_packed_dword && src ## n ## _is_packed_word), \
1658 "When the destination spans two registers, the source must " \
1659 "span two registers\n" ERROR_INDENT "(exceptions for scalar " \
1660 "source and packed-word to packed-dword expansion)")
1661
1662 if (i == 0) {
1663 DO_SRC(0);
1664 } else {
1665 DO_SRC(1);
1666 }
1667 #undef DO_SRC
1668 }
1669 }
1670
1671 return error_msg;
1672 }
1673
1674 static struct string
1675 vector_immediate_restrictions(const struct gen_device_info *devinfo,
1676 const brw_inst *inst)
1677 {
1678 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1679 struct string error_msg = { .str = NULL, .len = 0 };
1680
1681 if (num_sources == 3 || num_sources == 0)
1682 return (struct string){};
1683
1684 unsigned file = num_sources == 1 ?
1685 brw_inst_src0_reg_file(devinfo, inst) :
1686 brw_inst_src1_reg_file(devinfo, inst);
1687 if (file != BRW_IMMEDIATE_VALUE)
1688 return (struct string){};
1689
1690 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1691 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
1692 unsigned dst_subreg = brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 ?
1693 brw_inst_dst_da1_subreg_nr(devinfo, inst) : 0;
1694 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1695 enum brw_reg_type type = num_sources == 1 ?
1696 brw_inst_src0_type(devinfo, inst) :
1697 brw_inst_src1_type(devinfo, inst);
1698
1699 /* The PRMs say:
1700 *
1701 * When an immediate vector is used in an instruction, the destination
1702 * must be 128-bit aligned with destination horizontal stride equivalent
1703 * to a word for an immediate integer vector (v) and equivalent to a
1704 * DWord for an immediate float vector (vf).
1705 *
1706 * The text has not been updated for the addition of the immediate unsigned
1707 * integer vector type (uv) on SNB, but presumably the same restriction
1708 * applies.
1709 */
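/* For example (illustrative arithmetic, not PRM text): with a
 * word-typed destination and HorzStride 1, dst_type_size * dst_stride
 * is 2 * 1 == 2, which satisfies the word-stride requirement for :v
 * and :uv immediates; a :vf immediate instead needs a 4-byte effective
 * stride, e.g. a float or dword destination with HorzStride 1
 * (4 * 1 == 4).
 */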
1710 switch (type) {
1711 case BRW_REGISTER_TYPE_V:
1712 case BRW_REGISTER_TYPE_UV:
1713 case BRW_REGISTER_TYPE_VF:
1714 ERROR_IF(dst_subreg % (128 / 8) != 0,
1715 "Destination must be 128-bit aligned in order to use immediate "
1716 "vector types");
1717
1718 if (type == BRW_REGISTER_TYPE_VF) {
1719 ERROR_IF(dst_type_size * dst_stride != 4,
1720 "Destination must have stride equivalent to dword in order "
1721 "to use the VF type");
1722 } else {
1723 ERROR_IF(dst_type_size * dst_stride != 2,
1724 "Destination must have stride equivalent to word in order "
1725 "to use the V or UV type");
1726 }
1727 break;
1728 default:
1729 break;
1730 }
1731
1732 return error_msg;
1733 }
1734
1735 static struct string
1736 special_requirements_for_handling_double_precision_data_types(
1737 const struct gen_device_info *devinfo,
1738 const brw_inst *inst)
1739 {
1740 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1741 struct string error_msg = { .str = NULL, .len = 0 };
1742
1743 if (num_sources == 3 || num_sources == 0)
1744 return (struct string){};
1745
1746 /* Split sends don't have types, so there are no doubles there. */
1747 if (inst_is_split_send(devinfo, inst))
1748 return (struct string){};
1749
1750 enum brw_reg_type exec_type = execution_type(devinfo, inst);
1751 unsigned exec_type_size = brw_reg_type_to_size(exec_type);
1752
1753 enum brw_reg_file dst_file = brw_inst_dst_reg_file(devinfo, inst);
1754 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1755 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
1756 unsigned dst_hstride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1757 unsigned dst_reg = brw_inst_dst_da_reg_nr(devinfo, inst);
1758 unsigned dst_subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1759 unsigned dst_address_mode = brw_inst_dst_address_mode(devinfo, inst);
1760
1761 bool is_integer_dword_multiply =
1762 devinfo->gen >= 8 &&
1763 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MUL &&
1764 (brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_D ||
1765 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_UD) &&
1766 (brw_inst_src1_type(devinfo, inst) == BRW_REGISTER_TYPE_D ||
1767 brw_inst_src1_type(devinfo, inst) == BRW_REGISTER_TYPE_UD);
1768
1769 if (dst_type_size != 8 && exec_type_size != 8 && !is_integer_dword_multiply)
1770 return (struct string){};
1771
1772 for (unsigned i = 0; i < num_sources; i++) {
1773 unsigned vstride, width, hstride, type_size, reg, subreg, address_mode;
1774 bool is_scalar_region;
1775 enum brw_reg_file file;
1776 enum brw_reg_type type;
1777
1778 #define DO_SRC(n) \
1779 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
1780 BRW_IMMEDIATE_VALUE) \
1781 continue; \
1782 \
1783 is_scalar_region = src ## n ## _has_scalar_region(devinfo, inst); \
1784 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1785 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1786 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1787 file = brw_inst_src ## n ## _reg_file(devinfo, inst); \
1788 type = brw_inst_src ## n ## _type(devinfo, inst); \
1789 type_size = brw_reg_type_to_size(type); \
1790 reg = brw_inst_src ## n ## _da_reg_nr(devinfo, inst); \
1791 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1792 address_mode = brw_inst_src ## n ## _address_mode(devinfo, inst)
1793
1794 if (i == 0) {
1795 DO_SRC(0);
1796 } else {
1797 DO_SRC(1);
1798 }
1799 #undef DO_SRC
1800
1801 /* The PRMs say that for CHV, BXT:
1802 *
1803 * When source or destination datatype is 64b or operation is integer
1804 * DWord multiply, regioning in Align1 must follow these rules:
1805 *
1806 * 1. Source and Destination horizontal stride must be aligned to the
1807 * same qword.
1808 * 2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
1809 * 3. Source and Destination offset must be the same, except the case
1810 * of scalar source.
1811 *
1812 * We assume that the restriction applies to GLK as well.
1813 */
1814 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 &&
1815 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
1816 unsigned src_stride = hstride * type_size;
1817 unsigned dst_stride = dst_hstride * dst_type_size;
1818
1819 ERROR_IF(!is_scalar_region &&
1820 (src_stride % 8 != 0 ||
1821 dst_stride % 8 != 0 ||
1822 src_stride != dst_stride),
1823 "Source and destination horizontal stride must be equal and a "
1824 "multiple of a qword when the execution type is 64-bit");
1825
1826 ERROR_IF(vstride != width * hstride,
1827 "Vstride must be Width * Hstride when the execution type is "
1828 "64-bit");
1829
1830 ERROR_IF(!is_scalar_region && dst_subreg != subreg,
1831 "Source and destination offset must be the same when the "
1832 "execution type is 64-bit");
1833 }
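/* Illustrative example (a sketch, not from the PRM): on CHV,
 *
 *    mov(4)  r10.0<1>:df  r2.0<2;2,1>:df
 *
 * satisfies all three rules: both strides are 1 * 8 == 8 bytes (equal
 * and qword aligned), Vstride equals Width * Hstride (2 == 2 * 1), and
 * the source and destination subregister offsets are both 0.
 */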
1834
1835 /* The PRMs say that for CHV, BXT:
1836 *
1837 * When source or destination datatype is 64b or operation is integer
1838 * DWord multiply, indirect addressing must not be used.
1839 *
1840 * We assume that the restriction applies to GLK as well.
1841 */
1842 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1843 ERROR_IF(BRW_ADDRESS_REGISTER_INDIRECT_REGISTER == address_mode ||
1844 BRW_ADDRESS_REGISTER_INDIRECT_REGISTER == dst_address_mode,
1845 "Indirect addressing is not allowed when the execution type "
1846 "is 64-bit");
1847 }
1848
1849 /* The PRMs say that for CHV, BXT:
1850 *
1851 * ARF registers must never be used with 64b datatype or when
1852 * operation is integer DWord multiply.
1853 *
1854 * We assume that the restriction applies to GLK as well.
1855 *
1856 * We assume that the restriction does not apply to the null register.
1857 */
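/* MAC and AccWrEn are included in the check below because they
 * implicitly read or write the accumulator, which is itself an
 * architecture register (an explanatory note, not PRM text).
 */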
1858 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1859 ERROR_IF(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MAC ||
1860 brw_inst_acc_wr_control(devinfo, inst) ||
1861 (BRW_ARCHITECTURE_REGISTER_FILE == file &&
1862 reg != BRW_ARF_NULL) ||
1863 (BRW_ARCHITECTURE_REGISTER_FILE == dst_file &&
1864 dst_reg != BRW_ARF_NULL),
1865 "Architecture registers cannot be used when the execution "
1866 "type is 64-bit");
1867 }
1868 }
1869
1870 /* The PRMs say that for BDW, SKL:
1871 *
1872 * If Align16 is required for an operation with QW destination and non-QW
1873 * source datatypes, the execution size cannot exceed 2.
1874 *
1875 * We assume that the restriction applies to all Gen8+ parts.
1876 */
1877 if (devinfo->gen >= 8) {
1878 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
1879 enum brw_reg_type src1_type =
1880 num_sources > 1 ? brw_inst_src1_type(devinfo, inst) : src0_type;
1881 unsigned src0_type_size = brw_reg_type_to_size(src0_type);
1882 unsigned src1_type_size = brw_reg_type_to_size(src1_type);
1883
1884 ERROR_IF(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16 &&
1885 dst_type_size == 8 &&
1886 (src0_type_size != 8 || src1_type_size != 8) &&
1887 brw_inst_exec_size(devinfo, inst) > BRW_EXECUTE_2,
1888 "In Align16, exec size cannot exceed 2 with a QWord destination "
1889 "and a non-QWord source");
1890 }
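/* For example (illustrative): an Align16 mov with a :df destination
 * and a :f source may use at most exec size 2 on Gen8+; the same
 * conversion in Align1 is not limited by this particular rule.
 */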
1891
1892 /* The PRMs say that for CHV, BXT:
1893 *
1894 * When source or destination datatype is 64b or operation is integer
1895 * DWord multiply, DepCtrl must not be used.
1896 *
1897 * We assume that the restriction applies to GLK as well.
1898 */
1899 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1900 ERROR_IF(brw_inst_no_dd_check(devinfo, inst) ||
1901 brw_inst_no_dd_clear(devinfo, inst),
1902 "DepCtrl is not allowed when the execution type is 64-bit");
1903 }
1904
1905 return error_msg;
1906 }
1907
1908 static struct string
1909 instruction_restrictions(const struct gen_device_info *devinfo,
1910 const brw_inst *inst)
1911 {
1912 struct string error_msg = { .str = NULL, .len = 0 };
1913
1914 /* From GEN:BUG:1604601757:
1915 *
1916 * "When multiplying a DW and any lower precision integer, source modifier
1917 * is not supported."
1918 */
1919 if (devinfo->gen >= 12 &&
1920 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MUL) {
1921 enum brw_reg_type exec_type = execution_type(devinfo, inst);
1922 const bool src0_valid = type_sz(brw_inst_src0_type(devinfo, inst)) == 4 ||
1923 brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE ||
1924 !(brw_inst_src0_negate(devinfo, inst) ||
1925 brw_inst_src0_abs(devinfo, inst));
1926 const bool src1_valid = type_sz(brw_inst_src1_type(devinfo, inst)) == 4 ||
1927 brw_inst_src1_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE ||
1928 !(brw_inst_src1_negate(devinfo, inst) ||
1929 brw_inst_src1_abs(devinfo, inst));
1930
1931 ERROR_IF(!brw_reg_type_is_floating_point(exec_type) &&
1932 type_sz(exec_type) == 4 && !(src0_valid && src1_valid),
1933 "When multiplying a DW and any lower precision integer, source "
1934 "modifier is not supported.");
1935 }
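/* Illustrative example (a sketch, not from the workaround text): on
 * Gen12,
 *
 *    mul(8)  r10.0<1>:d  r2.0<8;8,1>:d  -r4.0<8;8,1>:w
 *
 * multiplies a DWord by a Word while negating the Word source, so it
 * is flagged; dropping the negate or widening src1 to :d makes the
 * instruction acceptable as far as this rule is concerned.
 */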
1936
1937 return error_msg;
1938 }
1939
1940 static bool
1941 brw_validate_instruction(const struct gen_device_info *devinfo,
1942 const brw_inst *inst, int offset,
1943 struct disasm_info *disasm)
1944 {
1945 struct string error_msg = { .str = NULL, .len = 0 };
1946
1947 if (is_unsupported_inst(devinfo, inst)) {
1948 ERROR("Instruction not supported on this Gen");
1949 } else {
1950 CHECK(invalid_values);
1951
1952 if (error_msg.str == NULL) {
1953 CHECK(sources_not_null);
1954 CHECK(send_restrictions);
1955 CHECK(alignment_supported);
1956 CHECK(general_restrictions_based_on_operand_types);
1957 CHECK(general_restrictions_on_region_parameters);
1958 CHECK(special_restrictions_for_mixed_float_mode);
1959 CHECK(region_alignment_rules);
1960 CHECK(vector_immediate_restrictions);
1961 CHECK(special_requirements_for_handling_double_precision_data_types);
1962 CHECK(instruction_restrictions);
1963 }
1964 }
1965
1966 if (error_msg.str && disasm) {
1967 disasm_insert_error(disasm, offset, error_msg.str);
1968 }
1969 free(error_msg.str);
1970
1971 return error_msg.len == 0;
1972 }
1973
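/* Example usage (an illustrative sketch of a caller, not part of this
 * file's API documentation):
 *
 *    if (!brw_validate_instructions(devinfo, p->store, start_offset,
 *                                   p->next_insn_offset, disasm))
 *       valid = false;
 *
 * where p stands in for a hypothetical brw_codegen pointer owning the
 * assembled instruction store, and disasm may be NULL when annotated
 * disassembly is not needed; when it is non-NULL, error text is
 * attached to the disassembly via disasm_insert_error().
 */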
1974 bool
1975 brw_validate_instructions(const struct gen_device_info *devinfo,
1976 const void *assembly, int start_offset, int end_offset,
1977 struct disasm_info *disasm)
1978 {
1979 bool valid = true;
1980
1981 for (int src_offset = start_offset; src_offset < end_offset;) {
1982 const brw_inst *inst = assembly + src_offset;
1983 bool is_compact = brw_inst_cmpt_control(devinfo, inst);
1984 unsigned inst_size = is_compact ? sizeof(brw_compact_inst)
1985 : sizeof(brw_inst);
1986 brw_inst uncompacted;
1987
1988 if (is_compact) {
1989 brw_compact_inst *compacted = (void *)inst;
1990 brw_uncompact_instruction(devinfo, &uncompacted, compacted);
1991 inst = &uncompacted;
1992 }
1993
1994 bool v = brw_validate_instruction(devinfo, inst, src_offset, disasm);
1995 valid = valid && v;
1996
1997 src_offset += inst_size;
1998 }
1999
2000 return valid;
2001 }