intel/compiler: Remove emit_alpha_to_coverage workaround from backend
[mesa.git] / src / intel / compiler / brw_eu_validate.c
1 /*
2 * Copyright © 2015-2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_eu_validate.c
25 *
26 * This file implements a pass that validates shader assembly.
27 *
28 * The restrictions implemented herein are intended to verify that instructions
29 * in shader assembly do not violate restrictions documented in the graphics
30 * programming reference manuals.
31 *
32 * The restrictions are difficult for humans to quickly verify due to their
33 * complexity and abundance.
34 *
35 * It is critical that this code is thoroughly unit tested because false
36 * results will lead developers astray, which is worse than having no validator
37 * at all. Functional changes to this file without corresponding unit tests (in
38 * test_eu_validate.cpp) will be rejected.
39 */
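/* A note on how this pass is driven (assumption, based on the usual layout of
 * this file): the public entry point brw_validate_instructions(), defined
 * further down, is expected to walk the assembled brw_inst stream and run the
 * CHECK'd rule functions below on every instruction, accumulating the error
 * strings so they can be reported next to the disassembly of the offending
 * instruction.
 */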
40
41 #include "brw_eu.h"
42
43 /* We're going to do lots of string concatenation, so this should help. */
44 struct string {
45 char *str;
46 size_t len;
47 };
48
49 static void
50 cat(struct string *dest, const struct string src)
51 {
52 dest->str = realloc(dest->str, dest->len + src.len + 1);
53 memcpy(dest->str + dest->len, src.str, src.len);
54 dest->str[dest->len + src.len] = '\0';
55 dest->len = dest->len + src.len;
56 }
57 #define CAT(dest, src) cat(&dest, (struct string){src, strlen(src)})
58
59 static bool
60 contains(const struct string haystack, const struct string needle)
61 {
62 return haystack.str && memmem(haystack.str, haystack.len,
63 needle.str, needle.len) != NULL;
64 }
65 #define CONTAINS(haystack, needle) \
66 contains(haystack, (struct string){needle, strlen(needle)})
67
68 #define error(str) "\tERROR: " str "\n"
69 #define ERROR_INDENT "\t "
70
71 #define ERROR(msg) ERROR_IF(true, msg)
72 #define ERROR_IF(cond, msg) \
73 do { \
74 if ((cond) && !CONTAINS(error_msg, error(msg))) { \
75 CAT(error_msg, error(msg)); \
76 } \
77 } while(0)
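/* Illustrative use of the machinery above (hypothetical condition and text):
 *
 *    ERROR_IF(exec_size > 8, "execution size must not exceed 8");
 *
 * appends "\tERROR: execution size must not exceed 8\n" to the local error_msg
 * at most once, since the CONTAINS() guard suppresses duplicate messages.
 */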
78
79 #define CHECK(func, args...) \
80 do { \
81 struct string __msg = func(devinfo, inst, ##args); \
82 if (__msg.str) { \
83 cat(&error_msg, __msg); \
84 free(__msg.str); \
85 } \
86 } while (0)
87
88 #define STRIDE(stride) (stride != 0 ? 1 << ((stride) - 1) : 0)
89 #define WIDTH(width) (1 << (width))
90
91 static bool
92 inst_is_send(const struct gen_device_info *devinfo, const brw_inst *inst)
93 {
94 switch (brw_inst_opcode(devinfo, inst)) {
95 case BRW_OPCODE_SEND:
96 case BRW_OPCODE_SENDC:
97 case BRW_OPCODE_SENDS:
98 case BRW_OPCODE_SENDSC:
99 return true;
100 default:
101 return false;
102 }
103 }
104
105 static bool
106 inst_is_split_send(const struct gen_device_info *devinfo, const brw_inst *inst)
107 {
108 if (devinfo->gen >= 12) {
109 return inst_is_send(devinfo, inst);
110 } else {
111 switch (brw_inst_opcode(devinfo, inst)) {
112 case BRW_OPCODE_SENDS:
113 case BRW_OPCODE_SENDSC:
114 return true;
115 default:
116 return false;
117 }
118 }
119 }
120
121 static unsigned
122 signed_type(unsigned type)
123 {
124 switch (type) {
125 case BRW_REGISTER_TYPE_UD: return BRW_REGISTER_TYPE_D;
126 case BRW_REGISTER_TYPE_UW: return BRW_REGISTER_TYPE_W;
127 case BRW_REGISTER_TYPE_UB: return BRW_REGISTER_TYPE_B;
128 case BRW_REGISTER_TYPE_UQ: return BRW_REGISTER_TYPE_Q;
129 default: return type;
130 }
131 }
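/* signed_type() folds each unsigned type onto its signed counterpart so that,
 * for example, a MOV between UD and D (with no source modifiers) still counts
 * as a bit-for-bit raw move in inst_is_raw_move() below.
 */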
132
133 static enum brw_reg_type
134 inst_dst_type(const struct gen_device_info *devinfo, const brw_inst *inst)
135 {
136 return (devinfo->gen < 12 || !inst_is_send(devinfo, inst)) ?
137 brw_inst_dst_type(devinfo, inst) : BRW_REGISTER_TYPE_D;
138 }
139
140 static bool
141 inst_is_raw_move(const struct gen_device_info *devinfo, const brw_inst *inst)
142 {
143 unsigned dst_type = signed_type(inst_dst_type(devinfo, inst));
144 unsigned src_type = signed_type(brw_inst_src0_type(devinfo, inst));
145
146 if (brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE) {
147 /* FIXME: not strictly true */
148 if (brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_VF ||
149 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_UV ||
150 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_V) {
151 return false;
152 }
153 } else if (brw_inst_src0_negate(devinfo, inst) ||
154 brw_inst_src0_abs(devinfo, inst)) {
155 return false;
156 }
157
158 return brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MOV &&
159 brw_inst_saturate(devinfo, inst) == 0 &&
160 dst_type == src_type;
161 }
162
163 static bool
164 dst_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
165 {
166 return brw_inst_dst_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
167 brw_inst_dst_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
168 }
169
170 static bool
171 src0_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
172 {
173 return brw_inst_src0_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT &&
174 brw_inst_src0_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
175 brw_inst_src0_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
176 }
177
178 static bool
179 src1_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
180 {
181 return brw_inst_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
182 brw_inst_src1_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
183 }
184
185 static bool
186 src0_is_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
187 {
188 return brw_inst_src0_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
189 (brw_inst_src0_da_reg_nr(devinfo, inst) & 0xF0) == BRW_ARF_ACCUMULATOR;
190 }
191
192 static bool
193 src1_is_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
194 {
195 return brw_inst_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
196 (brw_inst_src1_da_reg_nr(devinfo, inst) & 0xF0) == BRW_ARF_ACCUMULATOR;
197 }
198
199 static bool
200 src0_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
201 {
202 return brw_inst_src0_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
203 brw_inst_src0_width(devinfo, inst) == BRW_WIDTH_1 &&
204 brw_inst_src0_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
205 }
206
207 static bool
208 src1_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
209 {
210 return brw_inst_src1_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
211 brw_inst_src1_width(devinfo, inst) == BRW_WIDTH_1 &&
212 brw_inst_src1_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
213 }
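/* In region notation the two helpers above match <0;1,0>, i.e. a scalar source
 * whose single element is broadcast to every execution channel.
 */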
214
215 static unsigned
216 num_sources_from_inst(const struct gen_device_info *devinfo,
217 const brw_inst *inst)
218 {
219 const struct opcode_desc *desc =
220 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
221 unsigned math_function;
222
223 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {
224 math_function = brw_inst_math_function(devinfo, inst);
225 } else if (devinfo->gen < 6 &&
226 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND) {
227 if (brw_inst_sfid(devinfo, inst) == BRW_SFID_MATH) {
228 /* src1 must be a descriptor (including the information to determine
229 * that the SEND is doing an extended math operation), but src0 can
230 * actually be null since it serves as the source of the implicit GRF
231 * to MRF move.
232 *
233 * If we stop using that functionality, we'll have to revisit this.
234 */
235 return 2;
236 } else {
237 /* Send instructions are allowed to have null sources since they use
238 * the base_mrf field to specify which message register is the source.
239 */
240 return 0;
241 }
242 } else {
243 assert(desc->nsrc < 4);
244 return desc->nsrc;
245 }
246
247 switch (math_function) {
248 case BRW_MATH_FUNCTION_INV:
249 case BRW_MATH_FUNCTION_LOG:
250 case BRW_MATH_FUNCTION_EXP:
251 case BRW_MATH_FUNCTION_SQRT:
252 case BRW_MATH_FUNCTION_RSQ:
253 case BRW_MATH_FUNCTION_SIN:
254 case BRW_MATH_FUNCTION_COS:
255 case BRW_MATH_FUNCTION_SINCOS:
256 case GEN8_MATH_FUNCTION_INVM:
257 case GEN8_MATH_FUNCTION_RSQRTM:
258 return 1;
259 case BRW_MATH_FUNCTION_FDIV:
260 case BRW_MATH_FUNCTION_POW:
261 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
262 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
263 case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
264 return 2;
265 default:
266 unreachable("not reached");
267 }
268 }
269
270 static struct string
271 sources_not_null(const struct gen_device_info *devinfo,
272 const brw_inst *inst)
273 {
274 unsigned num_sources = num_sources_from_inst(devinfo, inst);
275 struct string error_msg = { .str = NULL, .len = 0 };
276
277 /* Nothing to test. 3-src instructions can only have GRF sources, and
278 * there's no bit to control the file.
279 */
280 if (num_sources == 3)
281 return (struct string){};
282
283 /* Nothing to test. Split sends can only encode a file in sources that are
284 * allowed to be NULL.
285 */
286 if (inst_is_split_send(devinfo, inst))
287 return (struct string){};
288
289 if (num_sources >= 1 && brw_inst_opcode(devinfo, inst) != BRW_OPCODE_SYNC)
290 ERROR_IF(src0_is_null(devinfo, inst), "src0 is null");
291
292 if (num_sources == 2)
293 ERROR_IF(src1_is_null(devinfo, inst), "src1 is null");
294
295 return error_msg;
296 }
297
298 static struct string
299 alignment_supported(const struct gen_device_info *devinfo,
300 const brw_inst *inst)
301 {
302 struct string error_msg = { .str = NULL, .len = 0 };
303
304 ERROR_IF(devinfo->gen >= 11 && brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16,
305 "Align16 not supported");
306
307 return error_msg;
308 }
309
310 static bool
311 inst_uses_src_acc(const struct gen_device_info *devinfo, const brw_inst *inst)
312 {
313 /* Check instructions that use implicit accumulator sources */
314 switch (brw_inst_opcode(devinfo, inst)) {
315 case BRW_OPCODE_MAC:
316 case BRW_OPCODE_MACH:
317 case BRW_OPCODE_SADA2:
318 return true;
319 default:
320 break;
321 }
322
323 /* FIXME: support 3-src instructions */
324 unsigned num_sources = num_sources_from_inst(devinfo, inst);
325 assert(num_sources < 3);
326
327 return src0_is_acc(devinfo, inst) || (num_sources > 1 && src1_is_acc(devinfo, inst));
328 }
329
330 static struct string
331 send_restrictions(const struct gen_device_info *devinfo,
332 const brw_inst *inst)
333 {
334 struct string error_msg = { .str = NULL, .len = 0 };
335
336 if (inst_is_split_send(devinfo, inst)) {
337 ERROR_IF(brw_inst_send_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
338 brw_inst_send_src1_reg_nr(devinfo, inst) != BRW_ARF_NULL,
339 "src1 of split send must be a GRF or NULL");
340
341 ERROR_IF(brw_inst_eot(devinfo, inst) &&
342 brw_inst_src0_da_reg_nr(devinfo, inst) < 112,
343 "send with EOT must use g112-g127");
344 ERROR_IF(brw_inst_eot(devinfo, inst) &&
345 brw_inst_send_src1_reg_file(devinfo, inst) == BRW_GENERAL_REGISTER_FILE &&
346 brw_inst_send_src1_reg_nr(devinfo, inst) < 112,
347 "send with EOT must use g112-g127");
348
349 if (brw_inst_send_src1_reg_file(devinfo, inst) == BRW_GENERAL_REGISTER_FILE) {
350 /* Assume minimums if we don't know */
351 unsigned mlen = 1;
352 if (!brw_inst_send_sel_reg32_desc(devinfo, inst)) {
353 const uint32_t desc = brw_inst_send_desc(devinfo, inst);
354 mlen = brw_message_desc_mlen(devinfo, desc);
355 }
356
357 unsigned ex_mlen = 1;
358 if (!brw_inst_send_sel_reg32_ex_desc(devinfo, inst)) {
359 const uint32_t ex_desc = brw_inst_sends_ex_desc(devinfo, inst);
360 ex_mlen = brw_message_ex_desc_ex_mlen(devinfo, ex_desc);
361 }
362 const unsigned src0_reg_nr = brw_inst_src0_da_reg_nr(devinfo, inst);
363 const unsigned src1_reg_nr = brw_inst_send_src1_reg_nr(devinfo, inst);
364 ERROR_IF((src0_reg_nr <= src1_reg_nr &&
365 src1_reg_nr < src0_reg_nr + mlen) ||
366 (src1_reg_nr <= src0_reg_nr &&
367 src0_reg_nr < src1_reg_nr + ex_mlen),
368 "split send payloads must not overlap");
369 }
370 } else if (inst_is_send(devinfo, inst)) {
371 ERROR_IF(brw_inst_src0_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT,
372 "send must use direct addressing");
373
374 if (devinfo->gen >= 7) {
375 ERROR_IF(brw_inst_send_src0_reg_file(devinfo, inst) != BRW_GENERAL_REGISTER_FILE,
376 "send from non-GRF");
377 ERROR_IF(brw_inst_eot(devinfo, inst) &&
378 brw_inst_src0_da_reg_nr(devinfo, inst) < 112,
379 "send with EOT must use g112-g127");
380 }
381
382 if (devinfo->gen >= 8) {
383 ERROR_IF(!dst_is_null(devinfo, inst) &&
384 (brw_inst_dst_da_reg_nr(devinfo, inst) +
385 brw_inst_rlen(devinfo, inst) > 127) &&
386 (brw_inst_src0_da_reg_nr(devinfo, inst) +
387 brw_inst_mlen(devinfo, inst) >
388 brw_inst_dst_da_reg_nr(devinfo, inst)),
389 "r127 must not be used for return address when there is "
390 "a src and dest overlap");
391 }
392 }
393
394 return error_msg;
395 }
396
397 static bool
398 is_unsupported_inst(const struct gen_device_info *devinfo,
399 const brw_inst *inst)
400 {
401 return brw_inst_opcode(devinfo, inst) == BRW_OPCODE_ILLEGAL;
402 }
403
404 /**
405 * Returns whether a combination of two types would qualify as mixed float
406 * operation mode
407 */
408 static inline bool
409 types_are_mixed_float(enum brw_reg_type t0, enum brw_reg_type t1)
410 {
411 return (t0 == BRW_REGISTER_TYPE_F && t1 == BRW_REGISTER_TYPE_HF) ||
412 (t1 == BRW_REGISTER_TYPE_F && t0 == BRW_REGISTER_TYPE_HF);
413 }
414
415 static enum brw_reg_type
416 execution_type_for_type(enum brw_reg_type type)
417 {
418 switch (type) {
419 case BRW_REGISTER_TYPE_NF:
420 case BRW_REGISTER_TYPE_DF:
421 case BRW_REGISTER_TYPE_F:
422 case BRW_REGISTER_TYPE_HF:
423 return type;
424
425 case BRW_REGISTER_TYPE_VF:
426 return BRW_REGISTER_TYPE_F;
427
428 case BRW_REGISTER_TYPE_Q:
429 case BRW_REGISTER_TYPE_UQ:
430 return BRW_REGISTER_TYPE_Q;
431
432 case BRW_REGISTER_TYPE_D:
433 case BRW_REGISTER_TYPE_UD:
434 return BRW_REGISTER_TYPE_D;
435
436 case BRW_REGISTER_TYPE_W:
437 case BRW_REGISTER_TYPE_UW:
438 case BRW_REGISTER_TYPE_B:
439 case BRW_REGISTER_TYPE_UB:
440 case BRW_REGISTER_TYPE_V:
441 case BRW_REGISTER_TYPE_UV:
442 return BRW_REGISTER_TYPE_W;
443 }
444 unreachable("not reached");
445 }
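/* For example, execution_type_for_type(BRW_REGISTER_TYPE_UB) and
 * execution_type_for_type(BRW_REGISTER_TYPE_UV) both return BRW_REGISTER_TYPE_W,
 * while VF is promoted to F and the 64-bit integer types collapse to Q.
 */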
446
447 /**
448 * Returns the execution type of an instruction \p inst
449 */
450 static enum brw_reg_type
451 execution_type(const struct gen_device_info *devinfo, const brw_inst *inst)
452 {
453 unsigned num_sources = num_sources_from_inst(devinfo, inst);
454 enum brw_reg_type src0_exec_type, src1_exec_type;
455
456 /* Execution data type is independent of destination data type, except in
457 * mixed F/HF instructions.
458 */
459 enum brw_reg_type dst_exec_type = inst_dst_type(devinfo, inst);
460
461 src0_exec_type = execution_type_for_type(brw_inst_src0_type(devinfo, inst));
462 if (num_sources == 1) {
463 if (src0_exec_type == BRW_REGISTER_TYPE_HF)
464 return dst_exec_type;
465 return src0_exec_type;
466 }
467
468 src1_exec_type = execution_type_for_type(brw_inst_src1_type(devinfo, inst));
469 if (types_are_mixed_float(src0_exec_type, src1_exec_type) ||
470 types_are_mixed_float(src0_exec_type, dst_exec_type) ||
471 types_are_mixed_float(src1_exec_type, dst_exec_type)) {
472 return BRW_REGISTER_TYPE_F;
473 }
474
475 if (src0_exec_type == src1_exec_type)
476 return src0_exec_type;
477
478 /* On Gen < 6, mixed operand types where one operand is float yield a float
479 * execution type (such mixing is not allowed on later platforms).
480 */
481 if (devinfo->gen < 6 &&
482 (src0_exec_type == BRW_REGISTER_TYPE_F ||
483 src1_exec_type == BRW_REGISTER_TYPE_F))
484 return BRW_REGISTER_TYPE_F;
485
486 if (src0_exec_type == BRW_REGISTER_TYPE_Q ||
487 src1_exec_type == BRW_REGISTER_TYPE_Q)
488 return BRW_REGISTER_TYPE_Q;
489
490 if (src0_exec_type == BRW_REGISTER_TYPE_D ||
491 src1_exec_type == BRW_REGISTER_TYPE_D)
492 return BRW_REGISTER_TYPE_D;
493
494 if (src0_exec_type == BRW_REGISTER_TYPE_W ||
495 src1_exec_type == BRW_REGISTER_TYPE_W)
496 return BRW_REGISTER_TYPE_W;
497
498 if (src0_exec_type == BRW_REGISTER_TYPE_DF ||
499 src1_exec_type == BRW_REGISTER_TYPE_DF)
500 return BRW_REGISTER_TYPE_DF;
501
502 unreachable("not reached");
503 }
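/* For example, an instruction with W and D sources executes as D, while mixing
 * F and HF across the sources or the destination selects F (mixed-float mode).
 */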
504
505 /**
506 * Returns whether a region is packed
507 *
508 * A region is packed if its elements are adjacent in memory, with no
509 * intervening space, no overlap, and no replicated values.
510 */
511 static bool
512 is_packed(unsigned vstride, unsigned width, unsigned hstride)
513 {
514 if (vstride == width) {
515 if (vstride == 1) {
516 return hstride == 0;
517 } else {
518 return hstride == 1;
519 }
520 }
521
522 return false;
523 }
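/* Examples: is_packed(8, 8, 1) (an <8;8,1> region) and is_packed(1, 1, 0) (a
 * lone element) are packed, while is_packed(4, 4, 2) is not, since a horizontal
 * stride of 2 leaves a gap between neighbouring elements.
 */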
524
525 /**
526 * Returns whether an instruction is an explicit or implicit conversion
527 * to/from half-float.
528 */
529 static bool
530 is_half_float_conversion(const struct gen_device_info *devinfo,
531 const brw_inst *inst)
532 {
533 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
534
535 unsigned num_sources = num_sources_from_inst(devinfo, inst);
536 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
537
538 if (dst_type != src0_type &&
539 (dst_type == BRW_REGISTER_TYPE_HF || src0_type == BRW_REGISTER_TYPE_HF)) {
540 return true;
541 } else if (num_sources > 1) {
542 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
543 return dst_type != src1_type &&
544 (dst_type == BRW_REGISTER_TYPE_HF ||
545 src1_type == BRW_REGISTER_TYPE_HF);
546 }
547
548 return false;
549 }
550
551 /*
552 * Returns whether an instruction is using mixed float operation mode
553 */
554 static bool
555 is_mixed_float(const struct gen_device_info *devinfo, const brw_inst *inst)
556 {
557 if (devinfo->gen < 8)
558 return false;
559
560 if (inst_is_send(devinfo, inst))
561 return false;
562
563 unsigned opcode = brw_inst_opcode(devinfo, inst);
564 const struct opcode_desc *desc = brw_opcode_desc(devinfo, opcode);
565 if (desc->ndst == 0)
566 return false;
567
568 /* FIXME: support 3-src instructions */
569 unsigned num_sources = num_sources_from_inst(devinfo, inst);
570 assert(num_sources < 3);
571
572 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
573 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
574
575 if (num_sources == 1)
576 return types_are_mixed_float(src0_type, dst_type);
577
578 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
579
580 return types_are_mixed_float(src0_type, src1_type) ||
581 types_are_mixed_float(src0_type, dst_type) ||
582 types_are_mixed_float(src1_type, dst_type);
583 }
584
585 /**
586 * Returns whether an instruction is an explicit or implicit conversion
587 * to/from byte.
588 */
589 static bool
590 is_byte_conversion(const struct gen_device_info *devinfo,
591 const brw_inst *inst)
592 {
593 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
594
595 unsigned num_sources = num_sources_from_inst(devinfo, inst);
596 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
597
598 if (dst_type != src0_type &&
599 (type_sz(dst_type) == 1 || type_sz(src0_type) == 1)) {
600 return true;
601 } else if (num_sources > 1) {
602 enum brw_reg_type src1_type = brw_inst_src1_type(devinfo, inst);
603 return dst_type != src1_type &&
604 (type_sz(dst_type) == 1 || type_sz(src1_type) == 1);
605 }
606
607 return false;
608 }
609
610 /**
611 * Checks restrictions listed in "General Restrictions Based on Operand Types"
612 * in the "Register Region Restrictions" section.
613 */
614 static struct string
615 general_restrictions_based_on_operand_types(const struct gen_device_info *devinfo,
616 const brw_inst *inst)
617 {
618 const struct opcode_desc *desc =
619 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
620 unsigned num_sources = num_sources_from_inst(devinfo, inst);
621 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
622 struct string error_msg = { .str = NULL, .len = 0 };
623
624 if (devinfo->gen >= 11) {
625 if (num_sources == 3) {
626 ERROR_IF(brw_reg_type_to_size(brw_inst_3src_a1_src1_type(devinfo, inst)) == 1 ||
627 brw_reg_type_to_size(brw_inst_3src_a1_src2_type(devinfo, inst)) == 1,
628 "Byte data type is not supported for src1/2 register regioning. This includes "
629 "byte broadcast as well.");
630 }
631 if (num_sources == 2) {
632 ERROR_IF(brw_reg_type_to_size(brw_inst_src1_type(devinfo, inst)) == 1,
633 "Byte data type is not supported for src1 register regioning. This includes "
634 "byte broadcast as well.");
635 }
636 }
637
638 if (num_sources == 3)
639 return error_msg;
640
641 if (inst_is_send(devinfo, inst))
642 return error_msg;
643
644 if (exec_size == 1)
645 return error_msg;
646
647 if (desc->ndst == 0)
648 return error_msg;
649
650 /* The PRMs say:
651 *
652 * Where n is the largest element size in bytes for any source or
653 * destination operand type, ExecSize * n must be <= 64.
654 *
655 * But we do not attempt to enforce it, because it is implied by other
656 * rules:
657 *
658 * - that the destination stride must match the execution data type
659 * - sources may not span more than two adjacent GRF registers
660 * - destination may not span more than two adjacent GRF registers
661 *
662 * In fact, checking it would weaken testing of the other rules.
663 */
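/* Arithmetic example: a SIMD16 operation whose widest operand is DF (8 bytes)
 * would give ExecSize * n = 16 * 8 = 128 > 64; such an instruction necessarily
 * trips the two-GRF span checks elsewhere in this pass, which is why the rule
 * can stay implicit here.
 */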
664
665 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
666 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
667 bool dst_type_is_byte =
668 inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_B ||
669 inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_UB;
670
671 if (dst_type_is_byte) {
672 if (is_packed(exec_size * dst_stride, exec_size, dst_stride)) {
673 if (!inst_is_raw_move(devinfo, inst))
674 ERROR("Only raw MOV supports a packed-byte destination");
675 return error_msg;
676 }
677 }
678
679 unsigned exec_type = execution_type(devinfo, inst);
680 unsigned exec_type_size = brw_reg_type_to_size(exec_type);
681 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
682
683 /* On IVB/BYT, region parameters and execution size for DF are in terms of
684 * 32-bit elements, so they are doubled. For evaluating the validity of an
685 * instruction, we halve them.
686 */
687 if (devinfo->gen == 7 && !devinfo->is_haswell &&
688 exec_type_size == 8 && dst_type_size == 4)
689 dst_type_size = 8;
690
691 if (is_byte_conversion(devinfo, inst)) {
692 /* From the BDW+ PRM, Volume 2a, Command Reference, Instructions - MOV:
693 *
694 * "There is no direct conversion from B/UB to DF or DF to B/UB.
695 * There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB."
696 *
697 * Even if these restrictions are listed for the MOV instruction, we
698 * validate this more generally, since there is the possibility
699 * of implicit conversions from other instructions.
700 */
701 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
702 enum brw_reg_type src1_type = num_sources > 1 ?
703 brw_inst_src1_type(devinfo, inst) : 0;
704
705 ERROR_IF(type_sz(dst_type) == 1 &&
706 (type_sz(src0_type) == 8 ||
707 (num_sources > 1 && type_sz(src1_type) == 8)),
708 "There are no direct conversions between 64-bit types and B/UB");
709
710 ERROR_IF(type_sz(dst_type) == 8 &&
711 (type_sz(src0_type) == 1 ||
712 (num_sources > 1 && type_sz(src1_type) == 1)),
713 "There are no direct conversions between 64-bit types and B/UB");
714 }
715
716 if (is_half_float_conversion(devinfo, inst)) {
717 /**
718 * Validate the following restriction from the BDW+ PRM, Volume 2a, Command
719 * Reference, Instructions - MOV:
720 *
721 * "There is no direct conversion from HF to DF or DF to HF.
722 * There is no direct conversion from HF to Q/UQ or Q/UQ to HF."
723 *
724 * Even if these restrictions are listed for the MOV instruction, we
725 * validate this more generally, since there is the possibility
726 * of implicit conversions from other instructions, such as implicit
727 * conversion from integer to HF with the ADD instruction in SKL+.
728 */
729 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
730 enum brw_reg_type src1_type = num_sources > 1 ?
731 brw_inst_src1_type(devinfo, inst) : 0;
732 ERROR_IF(dst_type == BRW_REGISTER_TYPE_HF &&
733 (type_sz(src0_type) == 8 ||
734 (num_sources > 1 && type_sz(src1_type) == 8)),
735 "There are no direct conversions between 64-bit types and HF");
736
737 ERROR_IF(type_sz(dst_type) == 8 &&
738 (src0_type == BRW_REGISTER_TYPE_HF ||
739 (num_sources > 1 && src1_type == BRW_REGISTER_TYPE_HF)),
740 "There are no direct conversions between 64-bit types and HF");
741
742 /* From the BDW+ PRM:
743 *
744 * "Conversion between Integer and HF (Half Float) must be
745 * DWord-aligned and strided by a DWord on the destination."
746 *
747 * Also, the above restriction seems to be expanded on CHV and SKL+ by:
748 *
749 * "There is a relaxed alignment rule for word destinations. When
750 * the destination type is word (UW, W, HF), destination data types
751 * can be aligned to either the lowest word or the second lowest
752 * word of the execution channel. This means the destination data
753 * words can be either all in the even word locations or all in the
754 * odd word locations."
755 *
756 * We do not implement the second rule as is though, since empirical
757 * testing shows inconsistencies:
758 * - It suggests that packed 16-bit is not allowed, which is not true.
759 * - It suggests that conversions from Q/DF to W (which need to be
760 * 64-bit aligned on the destination) are not possible, which is
761 * not true.
762 *
763 * So from this rule we only validate the implication that conversions
764 * from F to HF need to be DWord strided (except in Align1 mixed
765 * float mode where packed fp16 destination is allowed so long as the
766 * destination is oword-aligned).
767 *
768 * Finally, we only validate this for Align1 because Align16 always
769 * requires packed destinations, so these restrictions can't possibly
770 * apply to Align16 mode.
771 */
772 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
773 if ((dst_type == BRW_REGISTER_TYPE_HF &&
774 (brw_reg_type_is_integer(src0_type) ||
775 (num_sources > 1 && brw_reg_type_is_integer(src1_type)))) ||
776 (brw_reg_type_is_integer(dst_type) &&
777 (src0_type == BRW_REGISTER_TYPE_HF ||
778 (num_sources > 1 && src1_type == BRW_REGISTER_TYPE_HF)))) {
779 ERROR_IF(dst_stride * dst_type_size != 4,
780 "Conversions between integer and half-float must be "
781 "strided by a DWord on the destination");
782
783 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
784 ERROR_IF(subreg % 4 != 0,
785 "Conversions between integer and half-float must be "
786 "aligned to a DWord on the destination");
787 } else if ((devinfo->is_cherryview || devinfo->gen >= 9) &&
788 dst_type == BRW_REGISTER_TYPE_HF) {
789 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
790 ERROR_IF(dst_stride != 2 &&
791 !(is_mixed_float(devinfo, inst) &&
792 dst_stride == 1 && subreg % 16 == 0),
793 "Conversions to HF must have either all words in even "
794 "word locations or all words in odd word locations or "
795 "be mixed-float with Oword-aligned packed destination");
796 }
797 }
798 }
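/* Worked example of the integer/HF rule above (illustrative): a MOV from W to
 * HF needs dst_stride == 2 so that dst_stride times the 2-byte HF size equals
 * one DWord, and the destination subregister byte offset must be a multiple
 * of 4.
 */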
799
800 /* There are special regioning rules for mixed-float mode in CHV and SKL that
801 * override the general rule for the ratio of sizes of the destination type
802 * and the execution type. We will add validation for those in a later patch.
803 */
804 bool validate_dst_size_and_exec_size_ratio =
805 !is_mixed_float(devinfo, inst) ||
806 !(devinfo->is_cherryview || devinfo->gen >= 9);
807
808 if (validate_dst_size_and_exec_size_ratio &&
809 exec_type_size > dst_type_size) {
810 if (!(dst_type_is_byte && inst_is_raw_move(devinfo, inst))) {
811 ERROR_IF(dst_stride * dst_type_size != exec_type_size,
812 "Destination stride must be equal to the ratio of the sizes "
813 "of the execution data type to the destination type");
814 }
815
816 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
817
818 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 &&
819 brw_inst_dst_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT) {
820 /* The i965 PRM says:
821 *
822 * Implementation Restriction: The relaxed alignment rule for byte
823 * destination (#10.5) is not supported.
824 */
825 if ((devinfo->gen > 4 || devinfo->is_g4x) && dst_type_is_byte) {
826 ERROR_IF(subreg % exec_type_size != 0 &&
827 subreg % exec_type_size != 1,
828 "Destination subreg must be aligned to the size of the "
829 "execution data type (or to the next lowest byte for byte "
830 "destinations)");
831 } else {
832 ERROR_IF(subreg % exec_type_size != 0,
833 "Destination subreg must be aligned to the size of the "
834 "execution data type");
835 }
836 }
837 }
838
839 return error_msg;
840 }
841
842 /**
843 * Checks restrictions listed in "General Restrictions on Regioning Parameters"
844 * in the "Register Region Restrictions" section.
845 */
846 static struct string
847 general_restrictions_on_region_parameters(const struct gen_device_info *devinfo,
848 const brw_inst *inst)
849 {
850 const struct opcode_desc *desc =
851 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
852 unsigned num_sources = num_sources_from_inst(devinfo, inst);
853 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
854 struct string error_msg = { .str = NULL, .len = 0 };
855
856 if (num_sources == 3)
857 return (struct string){};
858
859 /* Split sends don't have the bits in the instruction to encode regions so
860 * there's nothing to check.
861 */
862 if (inst_is_split_send(devinfo, inst))
863 return (struct string){};
864
865 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16) {
866 if (desc->ndst != 0 && !dst_is_null(devinfo, inst))
867 ERROR_IF(brw_inst_dst_hstride(devinfo, inst) != BRW_HORIZONTAL_STRIDE_1,
868 "Destination Horizontal Stride must be 1");
869
870 if (num_sources >= 1) {
871 if (devinfo->is_haswell || devinfo->gen >= 8) {
872 ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
873 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
874 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
875 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
876 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
877 } else {
878 ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
879 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
880 brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
881 "In Align16 mode, only VertStride of 0 or 4 is allowed");
882 }
883 }
884
885 if (num_sources == 2) {
886 if (devinfo->is_haswell || devinfo->gen >= 8) {
887 ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
888 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
889 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
890 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
891 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
892 } else {
893 ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
894 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
895 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
896 "In Align16 mode, only VertStride of 0 or 4 is allowed");
897 }
898 }
899
900 return error_msg;
901 }
902
903 for (unsigned i = 0; i < num_sources; i++) {
904 unsigned vstride, width, hstride, element_size, subreg;
905 enum brw_reg_type type;
906
907 #define DO_SRC(n) \
908 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
909 BRW_IMMEDIATE_VALUE) \
910 continue; \
911 \
912 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
913 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
914 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
915 type = brw_inst_src ## n ## _type(devinfo, inst); \
916 element_size = brw_reg_type_to_size(type); \
917 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst)
918
919 if (i == 0) {
920 DO_SRC(0);
921 } else {
922 DO_SRC(1);
923 }
924 #undef DO_SRC
925
926 /* On IVB/BYT, region parameters and execution size for DF are in terms of
927 * 32-bit elements, so they are doubled. For evaluating the validity of an
928 * instruction, we halve them.
929 */
930 if (devinfo->gen == 7 && !devinfo->is_haswell &&
931 element_size == 8)
932 element_size = 4;
933
934 /* ExecSize must be greater than or equal to Width. */
935 ERROR_IF(exec_size < width, "ExecSize must be greater than or equal "
936 "to Width");
937
938 /* If ExecSize = Width and HorzStride ≠ 0,
939 * VertStride must be set to Width * HorzStride.
940 */
941 if (exec_size == width && hstride != 0) {
942 ERROR_IF(vstride != width * hstride,
943 "If ExecSize = Width and HorzStride ≠ 0, "
944 "VertStride must be set to Width * HorzStride");
945 }
946
947 /* If Width = 1, HorzStride must be 0 regardless of the values of
948 * ExecSize and VertStride.
949 */
950 if (width == 1) {
951 ERROR_IF(hstride != 0,
952 "If Width = 1, HorzStride must be 0 regardless "
953 "of the values of ExecSize and VertStride");
954 }
955
956 /* If ExecSize = Width = 1, both VertStride and HorzStride must be 0. */
957 if (exec_size == 1 && width == 1) {
958 ERROR_IF(vstride != 0 || hstride != 0,
959 "If ExecSize = Width = 1, both VertStride "
960 "and HorzStride must be 0");
961 }
962
963 /* If VertStride = HorzStride = 0, Width must be 1 regardless of the
964 * value of ExecSize.
965 */
966 if (vstride == 0 && hstride == 0) {
967 ERROR_IF(width != 1,
968 "If VertStride = HorzStride = 0, Width must be "
969 "1 regardless of the value of ExecSize");
970 }
971
972 /* VertStride must be used to cross GRF register boundaries. This rule
973 * implies that elements within a 'Width' cannot cross GRF boundaries.
974 */
975 const uint64_t mask = (1ULL << element_size) - 1;
976 unsigned rowbase = subreg;
977
978 for (int y = 0; y < exec_size / width; y++) {
979 uint64_t access_mask = 0;
980 unsigned offset = rowbase;
981
982 for (int x = 0; x < width; x++) {
983 access_mask |= mask << offset;
984 offset += hstride * element_size;
985 }
986
987 rowbase += vstride * element_size;
988
989 if ((uint32_t)access_mask != 0 && (access_mask >> 32) != 0) {
990 ERROR("VertStride must be used to cross GRF register boundaries");
991 break;
992 }
993 }
994 }
995
996 /* Dst.HorzStride must not be 0. */
997 if (desc->ndst != 0 && !dst_is_null(devinfo, inst)) {
998 ERROR_IF(brw_inst_dst_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0,
999 "Destination Horizontal Stride must not be 0");
1000 }
1001
1002 return error_msg;
1003 }
1004
1005 static struct string
1006 special_restrictions_for_mixed_float_mode(const struct gen_device_info *devinfo,
1007 const brw_inst *inst)
1008 {
1009 struct string error_msg = { .str = NULL, .len = 0 };
1010
1011 const unsigned opcode = brw_inst_opcode(devinfo, inst);
1012 const unsigned num_sources = num_sources_from_inst(devinfo, inst);
1013 if (num_sources >= 3)
1014 return error_msg;
1015
1016 if (!is_mixed_float(devinfo, inst))
1017 return error_msg;
1018
1019 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
1020 bool is_align16 = brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16;
1021
1022 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
1023 enum brw_reg_type src1_type = num_sources > 1 ?
1024 brw_inst_src1_type(devinfo, inst) : 0;
1025 enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
1026
1027 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1028 bool dst_is_packed = is_packed(exec_size * dst_stride, exec_size, dst_stride);
1029
1030 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1031 * Float Operations:
1032 *
1033 * "Indirect addressing on source is not supported when source and
1034 * destination data types are mixed float."
1035 */
1036 ERROR_IF(brw_inst_src0_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT ||
1037 (num_sources > 1 &&
1038 brw_inst_src1_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT),
1039 "Indirect addressing on source is not supported when source and "
1040 "destination data types are mixed float");
1041
1042 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1043 * Float Operations:
1044 *
1045 * "No SIMD16 in mixed mode when destination is f32. Instruction
1046 * execution size must be no more than 8."
1047 */
1048 ERROR_IF(exec_size > 8 && dst_type == BRW_REGISTER_TYPE_F,
1049 "Mixed float mode with 32-bit float destination is limited "
1050 "to SIMD8");
1051
1052 if (is_align16) {
1053 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1054 * Float Operations:
1055 *
1056 * "In Align16 mode, when half float and float data types are mixed
1057 * between source operands OR between source and destination operands,
1058 * the register content are assumed to be packed."
1059 *
1060 * Since Align16 doesn't have a concept of horizontal stride (or width),
1061 * it means that vertical stride must always be 4, since 0 and 2 would
1062 * lead to replicated data, and any other value is disallowed in Align16.
1063 */
1064 ERROR_IF(brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
1065 "Align16 mixed float mode assumes packed data (vstride must be 4");
1066
1067 ERROR_IF(num_sources >= 2 &&
1068 brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
1069 "Align16 mixed float mode assumes packed data (vstride must be 4");
1070
1071 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1072 * Float Operations:
1073 *
1074 * "For Align16 mixed mode, both input and output packed f16 data
1075 * must be oword aligned, no oword crossing in packed f16."
1076 *
1077 * The previous rule requires that Align16 operands are always packed,
1078 * and since there is only one bit for Align16 subnr, which represents
1079 * offsets 0B and 16B, this rule is always enforced and we don't need to
1080 * validate it.
1081 */
1082
1083 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1084 * Float Operations:
1085 *
1086 * "No SIMD16 in mixed mode when destination is packed f16 for both
1087 * Align1 and Align16."
1088 *
1089 * And:
1090 *
1091 * "In Align16 mode, when half float and float data types are mixed
1092 * between source operands OR between source and destination operands,
1093 * the register content are assumed to be packed."
1094 *
1095 * Which implies that SIMD16 is not available in Align16. This is further
1096 * confirmed by:
1097 *
1098 * "For Align16 mixed mode, both input and output packed f16 data
1099 * must be oword aligned, no oword crossing in packed f16"
1100 *
1101 * Since oword-aligned packed f16 data would cross oword boundaries when
1102 * the execution size is larger than 8.
1103 */
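/* (16 packed half-floats occupy 32 bytes, so any execution size above 8
 * necessarily crosses a 16-byte oword boundary.)
 */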
1104 ERROR_IF(exec_size > 8, "Align16 mixed float mode is limited to SIMD8");
1105
1106 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1107 * Float Operations:
1108 *
1109 * "No accumulator read access for Align16 mixed float."
1110 */
1111 ERROR_IF(inst_uses_src_acc(devinfo, inst),
1112 "No accumulator read access for Align16 mixed float");
1113 } else {
1114 assert(!is_align16);
1115
1116 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1117 * Float Operations:
1118 *
1119 * "No SIMD16 in mixed mode when destination is packed f16 for both
1120 * Align1 and Align16."
1121 */
1122 ERROR_IF(exec_size > 8 && dst_is_packed &&
1123 dst_type == BRW_REGISTER_TYPE_HF,
1124 "Align1 mixed float mode is limited to SIMD8 when destination "
1125 "is packed half-float");
1126
1127 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1128 * Float Operations:
1129 *
1130 * "Math operations for mixed mode:
1131 * - In Align1, f16 inputs need to be strided"
1132 */
1133 if (opcode == BRW_OPCODE_MATH) {
1134 if (src0_type == BRW_REGISTER_TYPE_HF) {
1135 ERROR_IF(STRIDE(brw_inst_src0_hstride(devinfo, inst)) <= 1,
1136 "Align1 mixed mode math needs strided half-float inputs");
1137 }
1138
1139 if (num_sources >= 2 && src1_type == BRW_REGISTER_TYPE_HF) {
1140 ERROR_IF(STRIDE(brw_inst_src1_hstride(devinfo, inst)) <= 1,
1141 "Align1 mixed mode math needs strided half-float inputs");
1142 }
1143 }
1144
1145 if (dst_type == BRW_REGISTER_TYPE_HF && dst_stride == 1) {
1146 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1147 * Float Operations:
1148 *
1149 * "In Align1, destination stride can be smaller than execution
1150 * type. When destination is stride of 1, 16 bit packed data is
1151 * updated on the destination. However, output packed f16 data
1152 * must be oword aligned, no oword crossing in packed f16."
1153 *
1154 * The requirement of not crossing oword boundaries for 16-bit oword
1155 * aligned data means that execution size is limited to 8.
1156 */
1157 unsigned subreg;
1158 if (brw_inst_dst_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT)
1159 subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1160 else
1161 subreg = brw_inst_dst_ia_subreg_nr(devinfo, inst);
1162 ERROR_IF(subreg % 16 != 0,
1163 "Align1 mixed mode packed half-float output must be "
1164 "oword aligned");
1165 ERROR_IF(exec_size > 8,
1166 "Align1 mixed mode packed half-float output must not "
1167 "cross oword boundaries (max exec size is 8)");
1168
1169 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1170 * Float Operations:
1171 *
1172 * "When source is float or half float from accumulator register and
1173 * destination is half float with a stride of 1, the source must
1174 * be register aligned. i.e., source must have offset zero."
1175 *
1176 * Align16 mixed float mode doesn't allow accumulator access on sources,
1177 * so we only need to check this for Align1.
1178 */
1179 if (src0_is_acc(devinfo, inst) &&
1180 (src0_type == BRW_REGISTER_TYPE_F ||
1181 src0_type == BRW_REGISTER_TYPE_HF)) {
1182 ERROR_IF(brw_inst_src0_da1_subreg_nr(devinfo, inst) != 0,
1183 "Mixed float mode requires register-aligned accumulator "
1184 "source reads when destination is packed half-float");
1185
1186 }
1187
1188 if (num_sources > 1 &&
1189 src1_is_acc(devinfo, inst) &&
1190 (src1_type == BRW_REGISTER_TYPE_F ||
1191 src1_type == BRW_REGISTER_TYPE_HF)) {
1192 ERROR_IF(brw_inst_src1_da1_subreg_nr(devinfo, inst) != 0,
1193 "Mixed float mode requires register-aligned accumulator "
1194 "source reads when destination is packed half-float");
1195 }
1196 }
1197
1198 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1199 * Float Operations:
1200 *
1201 * "No swizzle is allowed when an accumulator is used as an implicit
1202 * source or an explicit source in an instruction. i.e. when
1203 * destination is half float with an implicit accumulator source,
1204 * destination stride needs to be 2."
1205 *
1206 * FIXME: it is not quite clear what the first sentence actually means
1207 * or its link to the implication described after it, so we only
1208 * validate the explicit implication, which is clearly described.
1209 */
1210 if (dst_type == BRW_REGISTER_TYPE_HF &&
1211 inst_uses_src_acc(devinfo, inst)) {
1212 ERROR_IF(dst_stride != 2,
1213 "Mixed float mode with implicit/explicit accumulator "
1214 "source and half-float destination requires a stride "
1215 "of 2 on the destination");
1216 }
1217 }
1218
1219 return error_msg;
1220 }
1221
1222 /**
1223 * Creates an \p access_mask for an \p exec_size, \p element_size, and a region
1224 *
1225 * An \p access_mask is a 32-element array of uint64_t, where each uint64_t is
1226 * a bitmask of bytes accessed by the region.
1227 *
1228 * For instance the access mask of the source gX.1<4,2,2>F in an exec_size = 4
1229 * instruction would be
1230 *
1231 * access_mask[0] = 0x00000000000000F0
1232 * access_mask[1] = 0x000000000000F000
1233 * access_mask[2] = 0x0000000000F00000
1234 * access_mask[3] = 0x00000000F0000000
1235 * access_mask[4-31] = 0
1236 *
1237 * because the first execution channel accesses bytes 7-4 and the second
1238 * execution channel accesses bytes 15-12, etc.
1239 */
1240 static void
1241 align1_access_mask(uint64_t access_mask[static 32],
1242 unsigned exec_size, unsigned element_size, unsigned subreg,
1243 unsigned vstride, unsigned width, unsigned hstride)
1244 {
1245 const uint64_t mask = (1ULL << element_size) - 1;
1246 unsigned rowbase = subreg;
1247 unsigned element = 0;
1248
1249 for (int y = 0; y < exec_size / width; y++) {
1250 unsigned offset = rowbase;
1251
1252 for (int x = 0; x < width; x++) {
1253 access_mask[element++] = mask << offset;
1254 offset += hstride * element_size;
1255 }
1256
1257 rowbase += vstride * element_size;
1258 }
1259
1260 assert(element == 0 || element == exec_size);
1261 }
1262
1263 /**
1264 * Returns the number of registers accessed according to the \p access_mask
1265 */
1266 static int
1267 registers_read(const uint64_t access_mask[static 32])
1268 {
1269 int regs_read = 0;
1270
1271 for (unsigned i = 0; i < 32; i++) {
1272 if (access_mask[i] > 0xFFFFFFFF) {
1273 return 2;
1274 } else if (access_mask[i]) {
1275 regs_read = 1;
1276 }
1277 }
1278
1279 return regs_read;
1280 }
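/* A mask with any bits set at or above bit 32 means the region reached into
 * the second 32-byte GRF of the pair, so the function reports 2 registers;
 * a nonzero mask confined to the low 32 bits reports 1.
 */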
1281
1282 /**
1283 * Checks restrictions listed in "Region Alignment Rules" in the "Register
1284 * Region Restrictions" section.
1285 */
1286 static struct string
1287 region_alignment_rules(const struct gen_device_info *devinfo,
1288 const brw_inst *inst)
1289 {
1290 const struct opcode_desc *desc =
1291 brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
1292 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1293 unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
1294 uint64_t dst_access_mask[32], src0_access_mask[32], src1_access_mask[32];
1295 struct string error_msg = { .str = NULL, .len = 0 };
1296
1297 if (num_sources == 3)
1298 return (struct string){};
1299
1300 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16)
1301 return (struct string){};
1302
1303 if (inst_is_send(devinfo, inst))
1304 return (struct string){};
1305
1306 memset(dst_access_mask, 0, sizeof(dst_access_mask));
1307 memset(src0_access_mask, 0, sizeof(src0_access_mask));
1308 memset(src1_access_mask, 0, sizeof(src1_access_mask));
1309
1310 for (unsigned i = 0; i < num_sources; i++) {
1311 unsigned vstride, width, hstride, element_size, subreg;
1312 enum brw_reg_type type;
1313
1314 /* In Direct Addressing mode, a source cannot span more than 2 adjacent
1315 * GRF registers.
1316 */
1317
1318 #define DO_SRC(n) \
1319 if (brw_inst_src ## n ## _address_mode(devinfo, inst) != \
1320 BRW_ADDRESS_DIRECT) \
1321 continue; \
1322 \
1323 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
1324 BRW_IMMEDIATE_VALUE) \
1325 continue; \
1326 \
1327 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1328 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1329 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1330 type = brw_inst_src ## n ## _type(devinfo, inst); \
1331 element_size = brw_reg_type_to_size(type); \
1332 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1333 align1_access_mask(src ## n ## _access_mask, \
1334 exec_size, element_size, subreg, \
1335 vstride, width, hstride)
1336
1337 if (i == 0) {
1338 DO_SRC(0);
1339 } else {
1340 DO_SRC(1);
1341 }
1342 #undef DO_SRC
1343
1344 unsigned num_vstride = exec_size / width;
1345 unsigned num_hstride = width;
1346 unsigned vstride_elements = (num_vstride - 1) * vstride;
1347 unsigned hstride_elements = (num_hstride - 1) * hstride;
1348 unsigned offset = (vstride_elements + hstride_elements) * element_size +
1349 subreg;
1350 ERROR_IF(offset >= 64,
1351 "A source cannot span more than 2 adjacent GRF registers");
1352 }
1353
1354 if (desc->ndst == 0 || dst_is_null(devinfo, inst))
1355 return error_msg;
1356
1357 unsigned stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1358 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1359 unsigned element_size = brw_reg_type_to_size(dst_type);
1360 unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1361 unsigned offset = ((exec_size - 1) * stride * element_size) + subreg;
1362 ERROR_IF(offset >= 64,
1363 "A destination cannot span more than 2 adjacent GRF registers");
1364
1365 if (error_msg.str)
1366 return error_msg;
1367
1368 /* On IVB/BYT, region parameters and execution size for DF are in terms of
1369 * 32-bit elements, so they are doubled. For evaluating the validity of an
1370 * instruction, we halve them.
1371 */
1372 if (devinfo->gen == 7 && !devinfo->is_haswell &&
1373 element_size == 8)
1374 element_size = 4;
1375
1376 align1_access_mask(dst_access_mask, exec_size, element_size, subreg,
1377 exec_size == 1 ? 0 : exec_size * stride,
1378 exec_size == 1 ? 1 : exec_size,
1379 exec_size == 1 ? 0 : stride);
1380
1381 unsigned dst_regs = registers_read(dst_access_mask);
1382 unsigned src0_regs = registers_read(src0_access_mask);
1383 unsigned src1_regs = registers_read(src1_access_mask);
1384
1385 /* The SNB, IVB, HSW, BDW, and CHV PRMs say:
1386 *
1387 * When an instruction has a source region spanning two registers and a
1388 * destination region contained in one register, the number of elements
1389 * must be the same between two sources and one of the following must be
1390 * true:
1391 *
1392 * 1. The destination region is entirely contained in the lower OWord
1393 * of a register.
1394 * 2. The destination region is entirely contained in the upper OWord
1395 * of a register.
1396 * 3. The destination elements are evenly split between the two OWords
1397 * of a register.
1398 */
1399 if (devinfo->gen <= 8) {
1400 if (dst_regs == 1 && (src0_regs == 2 || src1_regs == 2)) {
1401 unsigned upper_oword_writes = 0, lower_oword_writes = 0;
1402
1403 for (unsigned i = 0; i < exec_size; i++) {
1404 if (dst_access_mask[i] > 0x0000FFFF) {
1405 upper_oword_writes++;
1406 } else {
1407 assert(dst_access_mask[i] != 0);
1408 lower_oword_writes++;
1409 }
1410 }
1411
1412 ERROR_IF(lower_oword_writes != 0 &&
1413 upper_oword_writes != 0 &&
1414 upper_oword_writes != lower_oword_writes,
1415 "Writes must be to only one OWord or "
1416 "evenly split between OWords");
1417 }
1418 }
1419
1420 /* The IVB and HSW PRMs say:
1421 *
1422 * When an instruction has a source region that spans two registers and
1423 * the destination spans two registers, the destination elements must be
1424 * evenly split between the two registers [...]
1425 *
1426 * The SNB PRM contains similar wording (but written in a much more
1427 * confusing manner).
1428 *
1429 * The BDW PRM says:
1430 *
1431 * When destination spans two registers, the source may be one or two
1432 * registers. The destination elements must be evenly split between the
1433 * two registers.
1434 *
1435 * The SKL PRM says:
1436 *
1437 * When destination of MATH instruction spans two registers, the
1438 * destination elements must be evenly split between the two registers.
1439 *
1440 * It is not known whether this restriction applies to KBL or other Gens after
1441 * SKL.
1442 */
1443 if (devinfo->gen <= 8 ||
1444 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {
1445
1446 /* Nothing explicitly states that on Gen < 8 elements must be evenly
1447 * split between two destination registers in the two exceptional
1448 * source-region-spans-one-register cases, but since Broadwell requires
1449 * evenly split writes regardless of source region, we assume that it was
1450 * an oversight and require it.
1451 */
1452 if (dst_regs == 2) {
1453 unsigned upper_reg_writes = 0, lower_reg_writes = 0;
1454
1455 for (unsigned i = 0; i < exec_size; i++) {
1456 if (dst_access_mask[i] > 0xFFFFFFFF) {
1457 upper_reg_writes++;
1458 } else {
1459 assert(dst_access_mask[i] != 0);
1460 lower_reg_writes++;
1461 }
1462 }
1463
1464 ERROR_IF(upper_reg_writes != lower_reg_writes,
1465 "Writes must be evenly split between the two "
1466 "destination registers");
1467 }
1468 }
1469
1470 /* The IVB and HSW PRMs say:
1471 *
1472 * When an instruction has a source region that spans two registers and
1473 * the destination spans two registers, the destination elements must be
1474 * evenly split between the two registers and each destination register
1475 * must be entirely derived from one source register.
1476 *
1477 * Note: In such cases, the regioning parameters must ensure that the
1478 * offset from the two source registers is the same.
1479 *
1480 * The SNB PRM contains similar wording (but written in a much more
1481 * confusing manner).
1482 *
1483 * There are effectively three rules stated here:
1484 *
1485 * For an instruction with a source and a destination spanning two
1486 * registers,
1487 *
1488 * (1) destination elements must be evenly split between the two
1489 * registers
1490 * (2) all destination elements in a register must be derived
1491 * from one source register
1492 * (3) the offset (i.e. the starting location in each of the two
1493 * registers spanned by a region) must be the same in the two
1494 * registers spanned by a region
1495 *
1496 * It is impossible to violate rule (1) without violating (2) or (3), so we
1497 * do not attempt to validate it.
1498 */
1499 if (devinfo->gen <= 7 && dst_regs == 2) {
1500 for (unsigned i = 0; i < num_sources; i++) {
1501 #define DO_SRC(n) \
1502 if (src ## n ## _regs <= 1) \
1503 continue; \
1504 \
1505 for (unsigned i = 0; i < exec_size; i++) { \
1506 if ((dst_access_mask[i] > 0xFFFFFFFF) != \
1507 (src ## n ## _access_mask[i] > 0xFFFFFFFF)) { \
1508 ERROR("Each destination register must be entirely derived " \
1509 "from one source register"); \
1510 break; \
1511 } \
1512 } \
1513 \
1514 unsigned offset_0 = \
1515 brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1516 unsigned offset_1 = offset_0; \
1517 \
1518 for (unsigned i = 0; i < exec_size; i++) { \
1519 if (src ## n ## _access_mask[i] > 0xFFFFFFFF) { \
1520 offset_1 = __builtin_ctzll(src ## n ## _access_mask[i]) - 32; \
1521 break; \
1522 } \
1523 } \
1524 \
1525 ERROR_IF(num_sources == 2 && offset_0 != offset_1, \
1526 "The offset from the two source registers " \
1527 "must be the same")
1528
1529 if (i == 0) {
1530 DO_SRC(0);
1531 } else {
1532 DO_SRC(1);
1533 }
1534 #undef DO_SRC
1535 }
1536 }
1537
1538 /* The IVB and HSW PRMs say:
1539 *
1540 * When destination spans two registers, the source MUST span two
1541 * registers. The exception to the above rule:
1542 * 1. When source is scalar, the source registers are not
1543 * incremented.
1544 * 2. When source is packed integer Word and destination is packed
1545 * integer DWord, the source register is not incremented but the
1546 * source sub register is incremented.
1547 *
1548 * The SNB PRM does not contain this rule, but the internal documentation
1549 * indicates that it applies to SNB as well. We assume that the rule applies
1550 * to Gen <= 5 although their PRMs do not state it.
1551 *
1552 * While the documentation explicitly says in exception (2) that the
1553 * destination must be an integer DWord, the hardware allows at least a
1554 * float destination type as well. We emit such instructions from
1555 *
1556 * fs_visitor::emit_interpolation_setup_gen6
1557 * fs_visitor::emit_fragcoord_interpolation
1558 *
1559 * and have for years with no ill effects.
1560 *
1561 * Additionally the simulator source code indicates that the real condition
1562 * is that the size of the destination type is 4 bytes.
1563 */
1564 if (devinfo->gen <= 7 && dst_regs == 2) {
1565 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1566 bool dst_is_packed_dword =
1567 is_packed(exec_size * stride, exec_size, stride) &&
1568 brw_reg_type_to_size(dst_type) == 4;
1569
1570 for (unsigned i = 0; i < num_sources; i++) {
1571 #define DO_SRC(n) \
1572 unsigned vstride, width, hstride; \
1573 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1574 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1575 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1576 bool src ## n ## _is_packed_word = \
1577 is_packed(vstride, width, hstride) && \
1578 (brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_W || \
1579 brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_UW); \
1580 \
1581 ERROR_IF(src ## n ## _regs == 1 && \
1582 !src ## n ## _has_scalar_region(devinfo, inst) && \
1583 !(dst_is_packed_dword && src ## n ## _is_packed_word), \
1584 "When the destination spans two registers, the source must " \
1585 "span two registers\n" ERROR_INDENT "(exceptions for scalar " \
1586 "source and packed-word to packed-dword expansion)")
1587
1588 if (i == 0) {
1589 DO_SRC(0);
1590 } else {
1591 DO_SRC(1);
1592 }
1593 #undef DO_SRC
1594 }
1595 }
1596
1597 return error_msg;
1598 }
1599
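/* Validates the destination alignment and stride requirements that apply
 * when an instruction's selected source is an immediate vector (V, UV or
 * VF).
 */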
1600 static struct string
1601 vector_immediate_restrictions(const struct gen_device_info *devinfo,
1602 const brw_inst *inst)
1603 {
1604 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1605 struct string error_msg = { .str = NULL, .len = 0 };
1606
1607 if (num_sources == 3 || num_sources == 0)
1608 return (struct string){};
1609
1610 unsigned file = num_sources == 1 ?
1611 brw_inst_src0_reg_file(devinfo, inst) :
1612 brw_inst_src1_reg_file(devinfo, inst);
1613 if (file != BRW_IMMEDIATE_VALUE)
1614 return (struct string){};
1615
1616 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1617 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
1618 unsigned dst_subreg = brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 ?
1619 brw_inst_dst_da1_subreg_nr(devinfo, inst) : 0;
1620 unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1621 enum brw_reg_type type = num_sources == 1 ?
1622 brw_inst_src0_type(devinfo, inst) :
1623 brw_inst_src1_type(devinfo, inst);
1624
1625 /* The PRMs say:
1626 *
1627 * When an immediate vector is used in an instruction, the destination
1628 * must be 128-bit aligned with destination horizontal stride equivalent
1629 * to a word for an immediate integer vector (v) and equivalent to a
1630 * DWord for an immediate float vector (vf).
1631 *
1632 * The text has not been updated for the addition of the immediate unsigned
1633 * integer vector type (uv) on SNB, but presumably the same restriction
1634 * applies.
1635 */
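   /* For illustration (schematic syntax, not checked against the
    * disassembler): with a V immediate source,
    *
    *    mov(8)   g2<1>:w   0x76543210V      is accepted (2-byte stride)
    *    mov(8)   g2<1>:d   0x76543210V      is rejected (4-byte stride)
    *
    * while a VF immediate needs the opposite, a dword-equivalent stride.
    */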
1636 switch (type) {
1637 case BRW_REGISTER_TYPE_V:
1638 case BRW_REGISTER_TYPE_UV:
1639 case BRW_REGISTER_TYPE_VF:
1640 ERROR_IF(dst_subreg % (128 / 8) != 0,
1641 "Destination must be 128-bit aligned in order to use immediate "
1642 "vector types");
1643
1644 if (type == BRW_REGISTER_TYPE_VF) {
1645 ERROR_IF(dst_type_size * dst_stride != 4,
1646 "Destination must have stride equivalent to dword in order "
1647 "to use the VF type");
1648 } else {
1649 ERROR_IF(dst_type_size * dst_stride != 2,
1650 "Destination must have stride equivalent to word in order "
1651 "to use the V or UV type");
1652 }
1653 break;
1654 default:
1655 break;
1656 }
1657
1658 return error_msg;
1659 }
1660
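/* Validates the extra regioning, addressing and register-file restrictions
 * that apply when an instruction operates on 64-bit data types or is an
 * integer DWord multiply, which the PRM restrictions below group together.
 */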
1661 static struct string
1662 special_requirements_for_handling_double_precision_data_types(
1663 const struct gen_device_info *devinfo,
1664 const brw_inst *inst)
1665 {
1666 unsigned num_sources = num_sources_from_inst(devinfo, inst);
1667 struct string error_msg = { .str = NULL, .len = 0 };
1668
1669 if (num_sources == 3 || num_sources == 0)
1670 return (struct string){};
1671
1672 /* Split sends don't have types so there's no doubles there. */
1673 if (inst_is_split_send(devinfo, inst))
1674 return (struct string){};
1675
1676 enum brw_reg_type exec_type = execution_type(devinfo, inst);
1677 unsigned exec_type_size = brw_reg_type_to_size(exec_type);
1678
1679 enum brw_reg_file dst_file = brw_inst_dst_reg_file(devinfo, inst);
1680 enum brw_reg_type dst_type = inst_dst_type(devinfo, inst);
1681 unsigned dst_type_size = brw_reg_type_to_size(dst_type);
1682 unsigned dst_hstride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
1683 unsigned dst_reg = brw_inst_dst_da_reg_nr(devinfo, inst);
1684 unsigned dst_subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
1685 unsigned dst_address_mode = brw_inst_dst_address_mode(devinfo, inst);
1686
1687 bool is_integer_dword_multiply =
1688 devinfo->gen >= 8 &&
1689 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MUL &&
1690 (brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_D ||
1691 brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_UD) &&
1692 (brw_inst_src1_type(devinfo, inst) == BRW_REGISTER_TYPE_D ||
1693 brw_inst_src1_type(devinfo, inst) == BRW_REGISTER_TYPE_UD);
1694
1695 if (dst_type_size != 8 && exec_type_size != 8 && !is_integer_dword_multiply)
1696 return (struct string){};
1697
1698 for (unsigned i = 0; i < num_sources; i++) {
1699 unsigned vstride, width, hstride, type_size, reg, subreg, address_mode;
1700 bool is_scalar_region;
1701 enum brw_reg_file file;
1702 enum brw_reg_type type;
1703
1704 #define DO_SRC(n) \
1705 if (brw_inst_src ## n ## _reg_file(devinfo, inst) == \
1706 BRW_IMMEDIATE_VALUE) \
1707 continue; \
1708 \
1709 is_scalar_region = src ## n ## _has_scalar_region(devinfo, inst); \
1710 vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst)); \
1711 width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst)); \
1712 hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst)); \
1713 file = brw_inst_src ## n ## _reg_file(devinfo, inst); \
1714 type = brw_inst_src ## n ## _type(devinfo, inst); \
1715 type_size = brw_reg_type_to_size(type); \
1716 reg = brw_inst_src ## n ## _da_reg_nr(devinfo, inst); \
1717 subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1718 address_mode = brw_inst_src ## n ## _address_mode(devinfo, inst)
1719
1720 if (i == 0) {
1721 DO_SRC(0);
1722 } else {
1723 DO_SRC(1);
1724 }
1725 #undef DO_SRC
1726
1727 /* The PRMs say that for CHV, BXT:
1728 *
1729 * When source or destination datatype is 64b or operation is integer
1730 * DWord multiply, regioning in Align1 must follow these rules:
1731 *
1732 * 1. Source and Destination horizontal stride must be aligned to the
1733 * same qword.
1734 * 2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
1735 * 3. Source and Destination offset must be the same, except the case
1736 * of scalar source.
1737 *
1738 * We assume that the restriction applies to GLK as well.
1739 */
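      /* A schematic example of a rule (1) violation (illustrative only):
       * with :df operands,
       *
       *    mov(4)   g10<1>:df   g2<8;4,2>:df
       *
       * steps the source by 16 bytes per channel but the destination by 8,
       * so the two strides are not the same qword multiple and the first
       * ERROR_IF below fires.
       */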
1740 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 &&
1741 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
1742 unsigned src_stride = hstride * type_size;
1743 unsigned dst_stride = dst_hstride * dst_type_size;
1744
1745 ERROR_IF(!is_scalar_region &&
1746 (src_stride % 8 != 0 ||
1747 dst_stride % 8 != 0 ||
1748 src_stride != dst_stride),
1749 "Source and destination horizontal stride must equal and a "
1750 "multiple of a qword when the execution type is 64-bit");
1751
1752 ERROR_IF(vstride != width * hstride,
1753 "Vstride must be Width * Hstride when the execution type is "
1754 "64-bit");
1755
1756 ERROR_IF(!is_scalar_region && dst_subreg != subreg,
1757 "Source and destination offset must be the same when the "
1758 "execution type is 64-bit");
1759 }
1760
1761 /* The PRMs say that for CHV, BXT:
1762 *
1763 * When source or destination datatype is 64b or operation is integer
1764 * DWord multiply, indirect addressing must not be used.
1765 *
1766 * We assume that the restriction applies to GLK as well.
1767 */
1768 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1769 ERROR_IF(BRW_ADDRESS_REGISTER_INDIRECT_REGISTER == address_mode ||
1770 BRW_ADDRESS_REGISTER_INDIRECT_REGISTER == dst_address_mode,
1771 "Indirect addressing is not allowed when the execution type "
1772 "is 64-bit");
1773 }
1774
1775 /* The PRMs say that for CHV, BXT:
1776 *
1777 * ARF registers must never be used with 64b datatype or when
1778 * operation is integer DWord multiply.
1779 *
1780 * We assume that the restriction applies to GLK as well.
1781 *
1782 * We assume that the restriction does not apply to the null register.
1783 */
1784 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1785 ERROR_IF(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MAC ||
1786 brw_inst_acc_wr_control(devinfo, inst) ||
1787 (BRW_ARCHITECTURE_REGISTER_FILE == file &&
1788 reg != BRW_ARF_NULL) ||
1789 (BRW_ARCHITECTURE_REGISTER_FILE == dst_file &&
1790 dst_reg != BRW_ARF_NULL),
1791 "Architecture registers cannot be used when the execution "
1792 "type is 64-bit");
1793 }
1794 }
1795
1796 /* The PRMs say that for BDW, SKL:
1797 *
1798 * If Align16 is required for an operation with QW destination and non-QW
1799 * source datatypes, the execution size cannot exceed 2.
1800 *
1801 * We assume that the restriction applies to all Gen8+ parts.
1802 */
1803 if (devinfo->gen >= 8) {
1804 enum brw_reg_type src0_type = brw_inst_src0_type(devinfo, inst);
1805 enum brw_reg_type src1_type =
1806 num_sources > 1 ? brw_inst_src1_type(devinfo, inst) : src0_type;
1807 unsigned src0_type_size = brw_reg_type_to_size(src0_type);
1808 unsigned src1_type_size = brw_reg_type_to_size(src1_type);
1809
1810 ERROR_IF(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16 &&
1811 dst_type_size == 8 &&
1812 (src0_type_size != 8 || src1_type_size != 8) &&
1813 brw_inst_exec_size(devinfo, inst) > BRW_EXECUTE_2,
1814 "In Align16 exec size cannot exceed 2 with a QWord destination "
1815 "and a non-QWord source");
1816 }
1817
1818 /* The PRMs say that for CHV, BXT:
1819 *
1820 * When source or destination datatype is 64b or operation is integer
1821 * DWord multiply, DepCtrl must not be used.
1822 *
1823 * We assume that the restriction applies to GLK as well.
1824 */
1825 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1826 ERROR_IF(brw_inst_no_dd_check(devinfo, inst) ||
1827 brw_inst_no_dd_clear(devinfo, inst),
1828 "DepCtrl is not allowed when the execution type is 64-bit");
1829 }
1830
1831 return error_msg;
1832 }
1833
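/* Validates restrictions that are tied to specific opcodes rather than to
 * whole classes of instructions; at the moment this covers only the Gen12+
 * integer MUL source-modifier rule below.
 */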
1834 static struct string
1835 instruction_restrictions(const struct gen_device_info *devinfo,
1836 const brw_inst *inst)
1837 {
1838 struct string error_msg = { .str = NULL, .len = 0 };
1839
1840 /* From GEN:BUG:1604601757:
1841 *
1842 * "When multiplying a DW and any lower precision integer, source modifier
1843 * is not supported."
1844 */
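   /* Illustrative only: a multiply along the lines of
    *
    *    mul(8)   g10<1>:d   g2<8;8,1>:d   -g4<8;8,1>:w
    *
    * combines a DW source with a negated lower-precision source and should
    * be flagged here, while the same instruction without the negate passes
    * this particular check.
    */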
1845 if (devinfo->gen >= 12 &&
1846 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MUL) {
1847 enum brw_reg_type exec_type = execution_type(devinfo, inst);
1848 const bool src0_valid = type_sz(brw_inst_src0_type(devinfo, inst)) == 4 ||
1849 brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE ||
1850 !(brw_inst_src0_negate(devinfo, inst) ||
1851 brw_inst_src0_abs(devinfo, inst));
1852 const bool src1_valid = type_sz(brw_inst_src1_type(devinfo, inst)) == 4 ||
1853 brw_inst_src1_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE ||
1854 !(brw_inst_src1_negate(devinfo, inst) ||
1855 brw_inst_src1_abs(devinfo, inst));
1856
1857 ERROR_IF(!brw_reg_type_is_floating_point(exec_type) &&
1858 type_sz(exec_type) == 4 && !(src0_valid && src1_valid),
1859 "When multiplying a DW and any lower precision integer, source "
1860 "modifier is not supported.");
1861 }
1862
1863 return error_msg;
1864 }
1865
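/* Entry point of the validator: walks the assembly between start_offset and
 * end_offset, uncompacting compacted instructions, runs every CHECK above on
 * each instruction, attaches any error strings to the disassembly when a
 * disasm_info is provided, and returns false if anything was flagged.
 *
 * A rough usage sketch (hypothetical caller; the brw_codegen field names are
 * assumptions, not taken from this file):
 *
 *    if (!brw_validate_instructions(devinfo, p->store, start_offset,
 *                                   p->next_insn_offset, disasm_info))
 *       abort();
 */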
1866 bool
1867 brw_validate_instructions(const struct gen_device_info *devinfo,
1868 const void *assembly, int start_offset, int end_offset,
1869 struct disasm_info *disasm)
1870 {
1871 bool valid = true;
1872
1873 for (int src_offset = start_offset; src_offset < end_offset;) {
1874 struct string error_msg = { .str = NULL, .len = 0 };
1875 const brw_inst *inst = assembly + src_offset;
1876 bool is_compact = brw_inst_cmpt_control(devinfo, inst);
1877 brw_inst uncompacted;
1878
1879 if (is_compact) {
1880 brw_compact_inst *compacted = (void *)inst;
1881 brw_uncompact_instruction(devinfo, &uncompacted, compacted);
1882 inst = &uncompacted;
1883 }
1884
1885 if (is_unsupported_inst(devinfo, inst)) {
1886 ERROR("Instruction not supported on this Gen");
1887 } else {
1888 CHECK(sources_not_null);
1889 CHECK(send_restrictions);
1890 CHECK(alignment_supported);
1891 CHECK(general_restrictions_based_on_operand_types);
1892 CHECK(general_restrictions_on_region_parameters);
1893 CHECK(special_restrictions_for_mixed_float_mode);
1894 CHECK(region_alignment_rules);
1895 CHECK(vector_immediate_restrictions);
1896 CHECK(special_requirements_for_handling_double_precision_data_types);
1897 CHECK(instruction_restrictions);
1898 }
1899
1900 if (error_msg.str && disasm) {
1901 disasm_insert_error(disasm, src_offset, error_msg.str);
1902 }
1903 valid = valid && error_msg.len == 0;
1904 free(error_msg.str);
1905
1906 if (is_compact) {
1907 src_offset += sizeof(brw_compact_inst);
1908 } else {
1909 src_offset += sizeof(brw_inst);
1910 }
1911 }
1912
1913 return valid;
1914 }