i965: Avoid validation error when src1 is not present
[mesa.git] / src / intel / compiler / brw_eu_validate.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_eu_validate.c
 *
 * This file implements a pass that validates shader assembly.
 */

#include "brw_eu.h"

/* We're going to do lots of string concatenation, so this should help. */
struct string {
   char *str;
   size_t len;
};

static void
cat(struct string *dest, const struct string src)
{
   dest->str = realloc(dest->str, dest->len + src.len + 1);
   memcpy(dest->str + dest->len, src.str, src.len);
   dest->str[dest->len + src.len] = '\0';
   dest->len = dest->len + src.len;
}
#define CAT(dest, src) cat(&dest, (struct string){src, strlen(src)})

#define error(str) "\tERROR: " str "\n"
#define ERROR_INDENT "\t       "

#define ERROR(msg) ERROR_IF(true, msg)
#define ERROR_IF(cond, msg)              \
   do {                                  \
      if (cond) {                        \
         CAT(error_msg, error(msg));     \
      }                                  \
   } while(0)

#define CHECK(func, args...)                              \
   do {                                                   \
      struct string __msg = func(devinfo, inst, ##args);  \
      if (__msg.str) {                                    \
         cat(&error_msg, __msg);                          \
         free(__msg.str);                                 \
      }                                                   \
   } while (0)
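
/* As an illustration (a sketch only; "my_restriction" is a made-up name, the
 * real checks are defined below), a validation rule is written as a function
 * that accumulates messages into a local error_msg:
 *
 *    static struct string
 *    my_restriction(const struct gen_device_info *devinfo,
 *                   const brw_inst *inst)
 *    {
 *       struct string error_msg = { .str = NULL, .len = 0 };
 *       ERROR_IF(src0_is_null(devinfo, inst), "src0 is null");
 *       return error_msg;
 *    }
 *
 * and is then run per instruction from brw_validate_instructions() with
 * CHECK(my_restriction), which appends whatever the function reported to the
 * caller's error_msg and frees the temporary string.
 */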

#define STRIDE(stride) (stride != 0 ? 1 << ((stride) - 1) : 0)
#define WIDTH(width) (1 << (width))
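
/* The hardware encodes region parameters logarithmically; these macros decode
 * them.  For example, vertical/horizontal stride encodings 0, 1, 2, 3, 4
 * decode to strides of 0, 1, 2, 4, 8 elements, and width encodings 0, 1, 2,
 * 3, 4 decode to widths of 1, 2, 4, 8, 16 elements.
 */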

static bool
inst_is_send(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   switch (brw_inst_opcode(devinfo, inst)) {
   case BRW_OPCODE_SEND:
   case BRW_OPCODE_SENDC:
   case BRW_OPCODE_SENDS:
   case BRW_OPCODE_SENDSC:
      return true;
   default:
      return false;
   }
}

static unsigned
signed_type(unsigned type)
{
   switch (type) {
   case BRW_REGISTER_TYPE_UD: return BRW_REGISTER_TYPE_D;
   case BRW_REGISTER_TYPE_UW: return BRW_REGISTER_TYPE_W;
   case BRW_REGISTER_TYPE_UB: return BRW_REGISTER_TYPE_B;
   case BRW_REGISTER_TYPE_UQ: return BRW_REGISTER_TYPE_Q;
   default:                   return type;
   }
}

static bool
inst_is_raw_move(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   unsigned dst_type = signed_type(brw_inst_dst_type(devinfo, inst));
   unsigned src_type = signed_type(brw_inst_src0_type(devinfo, inst));

   if (brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE) {
      /* FIXME: not strictly true */
      if (brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_VF ||
          brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_UV ||
          brw_inst_src0_type(devinfo, inst) == BRW_REGISTER_TYPE_V) {
         return false;
      }
   } else if (brw_inst_src0_negate(devinfo, inst) ||
              brw_inst_src0_abs(devinfo, inst)) {
      return false;
   }

   return brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MOV &&
          brw_inst_saturate(devinfo, inst) == 0 &&
          dst_type == src_type;
}

static bool
dst_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_dst_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
          brw_inst_dst_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
}

static bool
src0_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_src0_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
          brw_inst_src0_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
}

static bool
src1_is_null(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_src1_reg_file(devinfo, inst) == BRW_ARCHITECTURE_REGISTER_FILE &&
          brw_inst_src1_da_reg_nr(devinfo, inst) == BRW_ARF_NULL;
}

static bool
src0_is_grf(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_src0_reg_file(devinfo, inst) == BRW_GENERAL_REGISTER_FILE;
}

static bool
src0_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_src0_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
          brw_inst_src0_width(devinfo, inst) == BRW_WIDTH_1 &&
          brw_inst_src0_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
}

static bool
src1_has_scalar_region(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   return brw_inst_src1_vstride(devinfo, inst) == BRW_VERTICAL_STRIDE_0 &&
          brw_inst_src1_width(devinfo, inst) == BRW_WIDTH_1 &&
          brw_inst_src1_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0;
}
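
/* A "scalar" region in this file means the <0;1,0> region: VertStride and
 * HorzStride of 0 with a Width of 1, i.e. every channel reads the same
 * element (a broadcast).
 */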

static unsigned
num_sources_from_inst(const struct gen_device_info *devinfo,
                      const brw_inst *inst)
{
   const struct opcode_desc *desc =
      brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
   unsigned math_function;

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {
      math_function = brw_inst_math_function(devinfo, inst);
   } else if (devinfo->gen < 6 &&
              brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND) {
      if (brw_inst_sfid(devinfo, inst) == BRW_SFID_MATH) {
         /* src1 must be a descriptor (including the information to determine
          * that the SEND is doing an extended math operation), but src0 can
          * actually be null since it serves as the source of the implicit GRF
          * to MRF move.
          *
          * If we stop using that functionality, we'll have to revisit this.
          */
         return 2;
      } else {
         /* Send instructions are allowed to have null sources since they use
          * the base_mrf field to specify which message register is the source.
          */
         return 0;
      }
   } else {
      assert(desc->nsrc < 4);
      return desc->nsrc;
   }

   switch (math_function) {
   case BRW_MATH_FUNCTION_INV:
   case BRW_MATH_FUNCTION_LOG:
   case BRW_MATH_FUNCTION_EXP:
   case BRW_MATH_FUNCTION_SQRT:
   case BRW_MATH_FUNCTION_RSQ:
   case BRW_MATH_FUNCTION_SIN:
   case BRW_MATH_FUNCTION_COS:
   case BRW_MATH_FUNCTION_SINCOS:
   case GEN8_MATH_FUNCTION_INVM:
   case GEN8_MATH_FUNCTION_RSQRTM:
      return 1;
   case BRW_MATH_FUNCTION_FDIV:
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
      return 2;
   default:
      unreachable("not reached");
   }
}

static struct string
sources_not_null(const struct gen_device_info *devinfo,
                 const brw_inst *inst)
{
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   struct string error_msg = { .str = NULL, .len = 0 };

   /* Nothing to test. 3-src instructions can only have GRF sources, and
    * there's no bit to control the file.
    */
   if (num_sources == 3)
      return (struct string){};

   if (num_sources >= 1)
      ERROR_IF(src0_is_null(devinfo, inst), "src0 is null");

   if (num_sources == 2)
      ERROR_IF(src1_is_null(devinfo, inst), "src1 is null");

   return error_msg;
}

static struct string
send_restrictions(const struct gen_device_info *devinfo,
                  const brw_inst *inst)
{
   struct string error_msg = { .str = NULL, .len = 0 };

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND) {
      ERROR_IF(brw_inst_src0_address_mode(devinfo, inst) != BRW_ADDRESS_DIRECT,
               "send must use direct addressing");

      if (devinfo->gen >= 7) {
         ERROR_IF(!src0_is_grf(devinfo, inst), "send from non-GRF");
         ERROR_IF(brw_inst_eot(devinfo, inst) &&
                  brw_inst_src0_da_reg_nr(devinfo, inst) < 112,
                  "send with EOT must use g112-g127");
      }
   }

   return error_msg;
}

static bool
is_unsupported_inst(const struct gen_device_info *devinfo,
                    const brw_inst *inst)
{
   return brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst)) == NULL;
}

static enum brw_reg_type
execution_type_for_type(enum brw_reg_type type)
{
   switch (type) {
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_F:
   case BRW_REGISTER_TYPE_HF:
      return type;

   case BRW_REGISTER_TYPE_VF:
      return BRW_REGISTER_TYPE_F;

   case BRW_REGISTER_TYPE_Q:
   case BRW_REGISTER_TYPE_UQ:
      return BRW_REGISTER_TYPE_Q;

   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      return BRW_REGISTER_TYPE_D;

   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
      return BRW_REGISTER_TYPE_W;
   }
   unreachable("not reached");
}

/**
 * Returns the execution type of an instruction \p inst
 */
static enum brw_reg_type
execution_type(const struct gen_device_info *devinfo, const brw_inst *inst)
{
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   enum brw_reg_type src0_exec_type, src1_exec_type;

   /* Execution data type is independent of destination data type, except in
    * mixed F/HF instructions on CHV and SKL+.
    */
   enum brw_reg_type dst_exec_type = brw_inst_dst_type(devinfo, inst);

   src0_exec_type = execution_type_for_type(brw_inst_src0_type(devinfo, inst));
   if (num_sources == 1) {
      if ((devinfo->gen >= 9 || devinfo->is_cherryview) &&
          src0_exec_type == BRW_REGISTER_TYPE_HF) {
         return dst_exec_type;
      }
      return src0_exec_type;
   }

   src1_exec_type = execution_type_for_type(brw_inst_src1_type(devinfo, inst));
   if (src0_exec_type == src1_exec_type)
      return src0_exec_type;

   /* On Gen < 6, if one of the mixed operand types is float, the execution
    * type is float (mixing float with non-float types is not allowed on
    * later platforms).
    */
   if (devinfo->gen < 6 &&
       (src0_exec_type == BRW_REGISTER_TYPE_F ||
        src1_exec_type == BRW_REGISTER_TYPE_F))
      return BRW_REGISTER_TYPE_F;

   if (src0_exec_type == BRW_REGISTER_TYPE_Q ||
       src1_exec_type == BRW_REGISTER_TYPE_Q)
      return BRW_REGISTER_TYPE_Q;

   if (src0_exec_type == BRW_REGISTER_TYPE_D ||
       src1_exec_type == BRW_REGISTER_TYPE_D)
      return BRW_REGISTER_TYPE_D;

   if (src0_exec_type == BRW_REGISTER_TYPE_W ||
       src1_exec_type == BRW_REGISTER_TYPE_W)
      return BRW_REGISTER_TYPE_W;

   if (src0_exec_type == BRW_REGISTER_TYPE_DF ||
       src1_exec_type == BRW_REGISTER_TYPE_DF)
      return BRW_REGISTER_TYPE_DF;

   if (devinfo->gen >= 9 || devinfo->is_cherryview) {
      if (dst_exec_type == BRW_REGISTER_TYPE_F ||
          src0_exec_type == BRW_REGISTER_TYPE_F ||
          src1_exec_type == BRW_REGISTER_TYPE_F) {
         return BRW_REGISTER_TYPE_F;
      } else {
         return BRW_REGISTER_TYPE_HF;
      }
   }

   assert(src0_exec_type == BRW_REGISTER_TYPE_F);
   return BRW_REGISTER_TYPE_F;
}
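
/* As an illustration of the rules above: a W source paired with a D source
 * executes as D, B/UB operands always promote to at least a W execution type,
 * and mixing a Q/UQ operand with a narrower integer type yields a Q execution
 * type.
 */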

/**
 * Returns whether a region is packed
 *
 * A region is packed if its elements are adjacent in memory, with no
 * intervening space, no overlap, and no replicated values.
 */
static bool
is_packed(unsigned vstride, unsigned width, unsigned hstride)
{
   if (vstride == width) {
      if (vstride == 1) {
         return hstride == 0;
      } else {
         return hstride == 1;
      }
   }

   return false;
}
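
/* Illustrative cases: is_packed(8, 8, 1) and is_packed(1, 1, 0) are packed
 * regions (<8;8,1> and a single element), while is_packed(0, 1, 0) (a scalar
 * broadcast) and is_packed(16, 8, 2) (a strided region) are not.
 */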

/**
 * Checks restrictions listed in "General Restrictions Based on Operand Types"
 * in the "Register Region Restrictions" section.
 */
static struct string
general_restrictions_based_on_operand_types(const struct gen_device_info *devinfo,
                                            const brw_inst *inst)
{
   const struct opcode_desc *desc =
      brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
   struct string error_msg = { .str = NULL, .len = 0 };

   if (num_sources == 3)
      return (struct string){};

   if (inst_is_send(devinfo, inst))
      return (struct string){};

   if (exec_size == 1)
      return (struct string){};

   if (desc->ndst == 0)
      return (struct string){};

   /* The PRMs say:
    *
    *    Where n is the largest element size in bytes for any source or
    *    destination operand type, ExecSize * n must be <= 64.
    *
    * But we do not attempt to enforce it, because it is implied by other
    * rules:
    *
    *    - that the destination stride must match the execution data type
    *    - sources may not span more than two adjacent GRF registers
    *    - destination may not span more than two adjacent GRF registers
    *
    * In fact, checking it would weaken testing of the other rules.
    */

   unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
   enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
   bool dst_type_is_byte =
      brw_inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_B ||
      brw_inst_dst_type(devinfo, inst) == BRW_REGISTER_TYPE_UB;

   if (dst_type_is_byte) {
      if (is_packed(exec_size * dst_stride, exec_size, dst_stride)) {
         if (!inst_is_raw_move(devinfo, inst)) {
            ERROR("Only raw MOV supports a packed-byte destination");
            return error_msg;
         } else {
            return (struct string){};
         }
      }
   }

   unsigned exec_type = execution_type(devinfo, inst);
   unsigned exec_type_size = brw_reg_type_to_size(exec_type);
   unsigned dst_type_size = brw_reg_type_to_size(dst_type);

   /* On IVB/BYT, region parameters and execution size for DF are in terms of
    * 32-bit elements, so they are doubled. For evaluating the validity of an
    * instruction, we halve them.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       exec_type_size == 8 && dst_type_size == 4)
      dst_type_size = 8;

   if (exec_type_size > dst_type_size) {
      ERROR_IF(dst_stride * dst_type_size != exec_type_size,
               "Destination stride must be equal to the ratio of the sizes of "
               "the execution data type to the destination type");
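
      /* For instance, converting a D source to a W destination executes as D
       * (4 bytes), so the 2-byte W destination must use a stride of 2:
       * dst_stride (2) * dst_type_size (2) == exec_type_size (4).
       */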

      unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 &&
          brw_inst_dst_address_mode(devinfo, inst) == BRW_ADDRESS_DIRECT) {
         /* The i965 PRM says:
          *
          *    Implementation Restriction: The relaxed alignment rule for byte
          *    destination (#10.5) is not supported.
          */
         if ((devinfo->gen > 4 || devinfo->is_g4x) && dst_type_is_byte) {
            ERROR_IF(subreg % exec_type_size != 0 &&
                     subreg % exec_type_size != 1,
                     "Destination subreg must be aligned to the size of the "
                     "execution data type (or to the next lowest byte for byte "
                     "destinations)");
         } else {
            ERROR_IF(subreg % exec_type_size != 0,
                     "Destination subreg must be aligned to the size of the "
                     "execution data type");
         }
      }
   }

   return error_msg;
}

/**
 * Checks restrictions listed in "General Restrictions on Regioning Parameters"
 * in the "Register Region Restrictions" section.
 */
static struct string
general_restrictions_on_region_parameters(const struct gen_device_info *devinfo,
                                          const brw_inst *inst)
{
   const struct opcode_desc *desc =
      brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
   struct string error_msg = { .str = NULL, .len = 0 };

   if (num_sources == 3)
      return (struct string){};

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16) {
      if (desc->ndst != 0 && !dst_is_null(devinfo, inst))
         ERROR_IF(brw_inst_dst_hstride(devinfo, inst) != BRW_HORIZONTAL_STRIDE_1,
                  "Destination Horizontal Stride must be 1");

      if (num_sources >= 1) {
         if (devinfo->is_haswell || devinfo->gen >= 8) {
            ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
                     brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
                     brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
                     brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
                     "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
         } else {
            ERROR_IF(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
                     brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
                     brw_inst_src0_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
                     "In Align16 mode, only VertStride of 0 or 4 is allowed");
         }
      }

      if (num_sources == 2) {
         if (devinfo->is_haswell || devinfo->gen >= 8) {
            ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
                     brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
                     brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_2 &&
                     brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
                     "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
         } else {
            ERROR_IF(brw_inst_src1_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE &&
                     brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_0 &&
                     brw_inst_src1_vstride(devinfo, inst) != BRW_VERTICAL_STRIDE_4,
                     "In Align16 mode, only VertStride of 0 or 4 is allowed");
         }
      }

      return error_msg;
   }

   for (unsigned i = 0; i < num_sources; i++) {
      unsigned vstride, width, hstride, element_size, subreg;
      enum brw_reg_type type;

#define DO_SRC(n)                                                        \
      if (brw_inst_src ## n ## _reg_file(devinfo, inst) ==               \
          BRW_IMMEDIATE_VALUE)                                           \
         continue;                                                       \
                                                                         \
      vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst));    \
      width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst));         \
      hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst));    \
      type = brw_inst_src ## n ## _type(devinfo, inst);                  \
      element_size = brw_reg_type_to_size(type);                         \
      subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst)

      if (i == 0) {
         DO_SRC(0);
      } else {
         DO_SRC(1);
      }
#undef DO_SRC

      /* On IVB/BYT, region parameters and execution size for DF are in terms of
       * 32-bit elements, so they are doubled. For evaluating the validity of an
       * instruction, we halve them.
       */
      if (devinfo->gen == 7 && !devinfo->is_haswell &&
          element_size == 8)
         element_size = 4;

      /* ExecSize must be greater than or equal to Width. */
      ERROR_IF(exec_size < width, "ExecSize must be greater than or equal "
                                  "to Width");

      /* If ExecSize = Width and HorzStride ≠ 0,
       * VertStride must be set to Width * HorzStride.
       */
      if (exec_size == width && hstride != 0) {
         ERROR_IF(vstride != width * hstride,
                  "If ExecSize = Width and HorzStride ≠ 0, "
                  "VertStride must be set to Width * HorzStride");
      }

      /* If Width = 1, HorzStride must be 0 regardless of the values of
       * ExecSize and VertStride.
       */
      if (width == 1) {
         ERROR_IF(hstride != 0,
                  "If Width = 1, HorzStride must be 0 regardless "
                  "of the values of ExecSize and VertStride");
      }

      /* If ExecSize = Width = 1, both VertStride and HorzStride must be 0. */
      if (exec_size == 1 && width == 1) {
         ERROR_IF(vstride != 0 || hstride != 0,
                  "If ExecSize = Width = 1, both VertStride "
                  "and HorzStride must be 0");
      }

      /* If VertStride = HorzStride = 0, Width must be 1 regardless of the
       * value of ExecSize.
       */
      if (vstride == 0 && hstride == 0) {
         ERROR_IF(width != 1,
                  "If VertStride = HorzStride = 0, Width must be "
                  "1 regardless of the value of ExecSize");
      }
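
      /* As an illustration: with an ExecSize of 8, the common <8;8,1> region
       * satisfies the checks above (Width equals ExecSize and VertStride
       * equals Width * HorzStride), whereas <8;8,2> would be rejected because
       * VertStride (8) != Width (8) * HorzStride (2).
       */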

      /* VertStride must be used to cross GRF register boundaries. This rule
       * implies that elements within a 'Width' cannot cross GRF boundaries.
       */
      const uint64_t mask = (1ULL << element_size) - 1;
      unsigned rowbase = subreg;

      for (int y = 0; y < exec_size / width; y++) {
         uint64_t access_mask = 0;
         unsigned offset = rowbase;

         for (int x = 0; x < width; x++) {
            access_mask |= mask << offset;
            offset += hstride * element_size;
         }

         rowbase += vstride * element_size;

         if ((uint32_t)access_mask != 0 && (access_mask >> 32) != 0) {
            ERROR("VertStride must be used to cross GRF register boundaries");
            break;
         }
      }
   }

   /* Dst.HorzStride must not be 0. */
   if (desc->ndst != 0 && !dst_is_null(devinfo, inst)) {
      ERROR_IF(brw_inst_dst_hstride(devinfo, inst) == BRW_HORIZONTAL_STRIDE_0,
               "Destination Horizontal Stride must not be 0");
   }

   return error_msg;
}

/**
 * Creates an \p access_mask for an \p exec_size, \p element_size, and a region
 *
 * An \p access_mask is a 32-element array of uint64_t, where each uint64_t is
 * a bitmask of bytes accessed by the region.
 *
 * For instance the access mask of the source gX.1<4,2,2>F in an exec_size = 4
 * instruction would be
 *
 *    access_mask[0]    = 0x00000000000000F0
 *    access_mask[1]    = 0x000000000000F000
 *    access_mask[2]    = 0x0000000000F00000
 *    access_mask[3]    = 0x00000000F0000000
 *    access_mask[4-31] = 0
 *
 * because the first execution channel accesses bytes 7-4 and the second
 * execution channel accesses bytes 15-12, etc.
 */
static void
align1_access_mask(uint64_t access_mask[static 32],
                   unsigned exec_size, unsigned element_size, unsigned subreg,
                   unsigned vstride, unsigned width, unsigned hstride)
{
   const uint64_t mask = (1ULL << element_size) - 1;
   unsigned rowbase = subreg;
   unsigned element = 0;

   for (int y = 0; y < exec_size / width; y++) {
      unsigned offset = rowbase;

      for (int x = 0; x < width; x++) {
         access_mask[element++] = mask << offset;
         offset += hstride * element_size;
      }

      rowbase += vstride * element_size;
   }

   assert(element == 0 || element == exec_size);
}

/**
 * Returns the number of registers accessed according to the \p access_mask
 */
static int
registers_read(const uint64_t access_mask[static 32])
{
   int regs_read = 0;

   for (unsigned i = 0; i < 32; i++) {
      if (access_mask[i] > 0xFFFFFFFF) {
         return 2;
      } else if (access_mask[i]) {
         regs_read = 1;
      }
   }

   return regs_read;
}

/**
 * Checks restrictions listed in "Region Alignment Rules" in the "Register
 * Region Restrictions" section.
 */
static struct string
region_alignment_rules(const struct gen_device_info *devinfo,
                       const brw_inst *inst)
{
   const struct opcode_desc *desc =
      brw_opcode_desc(devinfo, brw_inst_opcode(devinfo, inst));
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   unsigned exec_size = 1 << brw_inst_exec_size(devinfo, inst);
   uint64_t dst_access_mask[32], src0_access_mask[32], src1_access_mask[32];
   struct string error_msg = { .str = NULL, .len = 0 };

   if (num_sources == 3)
      return (struct string){};

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16)
      return (struct string){};

   if (inst_is_send(devinfo, inst))
      return (struct string){};

   memset(dst_access_mask, 0, sizeof(dst_access_mask));
   memset(src0_access_mask, 0, sizeof(src0_access_mask));
   memset(src1_access_mask, 0, sizeof(src1_access_mask));

   for (unsigned i = 0; i < num_sources; i++) {
      unsigned vstride, width, hstride, element_size, subreg;
      enum brw_reg_type type;

      /* In Direct Addressing mode, a source cannot span more than 2 adjacent
       * GRF registers.
       */

#define DO_SRC(n)                                                        \
      if (brw_inst_src ## n ## _address_mode(devinfo, inst) !=           \
          BRW_ADDRESS_DIRECT)                                            \
         continue;                                                       \
                                                                         \
      if (brw_inst_src ## n ## _reg_file(devinfo, inst) ==               \
          BRW_IMMEDIATE_VALUE)                                           \
         continue;                                                       \
                                                                         \
      vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst));    \
      width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst));         \
      hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst));    \
      type = brw_inst_src ## n ## _type(devinfo, inst);                  \
      element_size = brw_reg_type_to_size(type);                         \
      subreg = brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst);       \
      align1_access_mask(src ## n ## _access_mask,                       \
                         exec_size, element_size, subreg,                \
                         vstride, width, hstride)

      if (i == 0) {
         DO_SRC(0);
      } else {
         DO_SRC(1);
      }
#undef DO_SRC

      unsigned num_vstride = exec_size / width;
      unsigned num_hstride = width;
      unsigned vstride_elements = (num_vstride - 1) * vstride;
      unsigned hstride_elements = (num_hstride - 1) * hstride;
      unsigned offset = (vstride_elements + hstride_elements) * element_size +
                        subreg;
      ERROR_IF(offset >= 64,
               "A source cannot span more than 2 adjacent GRF registers");
   }

   if (desc->ndst == 0 || dst_is_null(devinfo, inst))
      return error_msg;

   unsigned stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
   enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
   unsigned element_size = brw_reg_type_to_size(dst_type);
   unsigned subreg = brw_inst_dst_da1_subreg_nr(devinfo, inst);
   unsigned offset = ((exec_size - 1) * stride * element_size) + subreg;
   ERROR_IF(offset >= 64,
            "A destination cannot span more than 2 adjacent GRF registers");
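
   /* A worked example of the check above: a SIMD16 DF destination with a
    * stride of 1 gives offset = (16 - 1) * 1 * 8 = 120 bytes, which is >= 64
    * (more than two 32-byte GRFs) and is therefore rejected.
    */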

   if (error_msg.str)
      return error_msg;

   /* On IVB/BYT, region parameters and execution size for DF are in terms of
    * 32-bit elements, so they are doubled. For evaluating the validity of an
    * instruction, we halve them.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       element_size == 8)
      element_size = 4;

   align1_access_mask(dst_access_mask, exec_size, element_size, subreg,
                      exec_size == 1 ? 0 : exec_size * stride,
                      exec_size == 1 ? 1 : exec_size,
                      exec_size == 1 ? 0 : stride);

   unsigned dst_regs = registers_read(dst_access_mask);
   unsigned src0_regs = registers_read(src0_access_mask);
   unsigned src1_regs = registers_read(src1_access_mask);

   /* The SNB, IVB, HSW, BDW, and CHV PRMs say:
    *
    *    When an instruction has a source region spanning two registers and a
    *    destination region contained in one register, the number of elements
    *    must be the same between two sources and one of the following must be
    *    true:
    *
    *       1. The destination region is entirely contained in the lower OWord
    *          of a register.
    *       2. The destination region is entirely contained in the upper OWord
    *          of a register.
    *       3. The destination elements are evenly split between the two OWords
    *          of a register.
    */
   if (devinfo->gen <= 8) {
      if (dst_regs == 1 && (src0_regs == 2 || src1_regs == 2)) {
         unsigned upper_oword_writes = 0, lower_oword_writes = 0;

         for (unsigned i = 0; i < exec_size; i++) {
            if (dst_access_mask[i] > 0x0000FFFF) {
               upper_oword_writes++;
            } else {
               assert(dst_access_mask[i] != 0);
               lower_oword_writes++;
            }
         }

         ERROR_IF(lower_oword_writes != 0 &&
                  upper_oword_writes != 0 &&
                  upper_oword_writes != lower_oword_writes,
                  "Writes must be to only one OWord or "
                  "evenly split between OWords");
      }
   }

   /* The IVB and HSW PRMs say:
    *
    *    When an instruction has a source region that spans two registers and
    *    the destination spans two registers, the destination elements must be
    *    evenly split between the two registers [...]
    *
    * The SNB PRM contains similar wording (but written in a much more
    * confusing manner).
    *
    * The BDW PRM says:
    *
    *    When destination spans two registers, the source may be one or two
    *    registers. The destination elements must be evenly split between the
    *    two registers.
    *
    * The SKL PRM says:
    *
    *    When destination of MATH instruction spans two registers, the
    *    destination elements must be evenly split between the two registers.
    *
    * It is not known whether this restriction applies to KBL or other Gens
    * after SKL.
    */
   if (devinfo->gen <= 8 ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MATH) {

      /* Nothing explicitly states that on Gen < 8 elements must be evenly
       * split between two destination registers in the two exceptional
       * source-region-spans-one-register cases, but since Broadwell requires
       * evenly split writes regardless of source region, we assume that it was
       * an oversight and require it.
       */
      if (dst_regs == 2) {
         unsigned upper_reg_writes = 0, lower_reg_writes = 0;

         for (unsigned i = 0; i < exec_size; i++) {
            if (dst_access_mask[i] > 0xFFFFFFFF) {
               upper_reg_writes++;
            } else {
               assert(dst_access_mask[i] != 0);
               lower_reg_writes++;
            }
         }

         ERROR_IF(upper_reg_writes != lower_reg_writes,
                  "Writes must be evenly split between the two "
                  "destination registers");
      }
   }

   /* The IVB and HSW PRMs say:
    *
    *    When an instruction has a source region that spans two registers and
    *    the destination spans two registers, the destination elements must be
    *    evenly split between the two registers and each destination register
    *    must be entirely derived from one source register.
    *
    *    Note: In such cases, the regioning parameters must ensure that the
    *    offset from the two source registers is the same.
    *
    * The SNB PRM contains similar wording (but written in a much more
    * confusing manner).
    *
    * There are effectively three rules stated here:
    *
    *    For an instruction with a source and a destination spanning two
    *    registers,
    *
    *       (1) destination elements must be evenly split between the two
    *           registers
    *       (2) all destination elements in a register must be derived
    *           from one source register
    *       (3) the offset (i.e. the starting location in each of the two
    *           registers spanned by a region) must be the same in the two
    *           registers spanned by a region
    *
    * It is impossible to violate rule (1) without violating (2) or (3), so we
    * do not attempt to validate it.
    */
   if (devinfo->gen <= 7 && dst_regs == 2) {
      for (unsigned i = 0; i < num_sources; i++) {
#define DO_SRC(n)                                                             \
         if (src ## n ## _regs <= 1)                                          \
            continue;                                                         \
                                                                              \
         for (unsigned i = 0; i < exec_size; i++) {                           \
            if ((dst_access_mask[i] > 0xFFFFFFFF) !=                          \
                (src ## n ## _access_mask[i] > 0xFFFFFFFF)) {                 \
               ERROR("Each destination register must be entirely derived "    \
                     "from one source register");                             \
               break;                                                         \
            }                                                                 \
         }                                                                    \
                                                                              \
         unsigned offset_0 =                                                  \
            brw_inst_src ## n ## _da1_subreg_nr(devinfo, inst);               \
         unsigned offset_1 = offset_0;                                        \
                                                                              \
         for (unsigned i = 0; i < exec_size; i++) {                           \
            if (src ## n ## _access_mask[i] > 0xFFFFFFFF) {                   \
               offset_1 = __builtin_ctzll(src ## n ## _access_mask[i]) - 32;  \
               break;                                                         \
            }                                                                 \
         }                                                                    \
                                                                              \
         ERROR_IF(num_sources == 2 && offset_0 != offset_1,                   \
                  "The offset from the two source registers "                 \
                  "must be the same")

         if (i == 0) {
            DO_SRC(0);
         } else {
            DO_SRC(1);
         }
#undef DO_SRC
      }
   }

   /* The IVB and HSW PRMs say:
    *
    *    When destination spans two registers, the source MUST span two
    *    registers. The exception to the above rule:
    *       1. When source is scalar, the source registers are not
    *          incremented.
    *       2. When source is packed integer Word and destination is packed
    *          integer DWord, the source register is not incremented but the
    *          source sub register is incremented.
    *
    * The SNB PRM does not contain this rule, but the internal documentation
    * indicates that it applies to SNB as well. We assume that the rule applies
    * to Gen <= 5 although their PRMs do not state it.
    *
    * While the documentation explicitly says in exception (2) that the
    * destination must be an integer DWord, the hardware allows at least a
    * float destination type as well. We emit such instructions from
    *
    *    fs_visitor::emit_interpolation_setup_gen6
    *    fs_visitor::emit_fragcoord_interpolation
    *
    * and have for years with no ill effects.
    *
    * Additionally the simulator source code indicates that the real condition
    * is that the size of the destination type is 4 bytes.
    */
   if (devinfo->gen <= 7 && dst_regs == 2) {
      enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
      bool dst_is_packed_dword =
         is_packed(exec_size * stride, exec_size, stride) &&
         brw_reg_type_to_size(dst_type) == 4;

      for (unsigned i = 0; i < num_sources; i++) {
#define DO_SRC(n)                                                               \
         unsigned vstride, width, hstride;                                      \
         vstride = STRIDE(brw_inst_src ## n ## _vstride(devinfo, inst));        \
         width = WIDTH(brw_inst_src ## n ## _width(devinfo, inst));             \
         hstride = STRIDE(brw_inst_src ## n ## _hstride(devinfo, inst));        \
         bool src ## n ## _is_packed_word =                                     \
            is_packed(vstride, width, hstride) &&                               \
            (brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_W ||  \
             brw_inst_src ## n ## _type(devinfo, inst) == BRW_REGISTER_TYPE_UW);  \
                                                                                \
         ERROR_IF(src ## n ## _regs == 1 &&                                     \
                  !src ## n ## _has_scalar_region(devinfo, inst) &&             \
                  !(dst_is_packed_dword && src ## n ## _is_packed_word),        \
                  "When the destination spans two registers, the source must "  \
                  "span two registers\n" ERROR_INDENT "(exceptions for scalar " \
                  "source and packed-word to packed-dword expansion)")

         if (i == 0) {
            DO_SRC(0);
         } else {
            DO_SRC(1);
         }
#undef DO_SRC
      }
   }

   return error_msg;
}

static struct string
vector_immediate_restrictions(const struct gen_device_info *devinfo,
                              const brw_inst *inst)
{
   unsigned num_sources = num_sources_from_inst(devinfo, inst);
   struct string error_msg = { .str = NULL, .len = 0 };

   if (num_sources == 3 || num_sources == 0)
      return (struct string){};

   unsigned file = num_sources == 1 ?
                   brw_inst_src0_reg_file(devinfo, inst) :
                   brw_inst_src1_reg_file(devinfo, inst);
   if (file != BRW_IMMEDIATE_VALUE)
      return (struct string){};

   enum brw_reg_type dst_type = brw_inst_dst_type(devinfo, inst);
   unsigned dst_type_size = brw_reg_type_to_size(dst_type);
   unsigned dst_subreg = brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1 ?
                         brw_inst_dst_da1_subreg_nr(devinfo, inst) : 0;
   unsigned dst_stride = STRIDE(brw_inst_dst_hstride(devinfo, inst));
   enum brw_reg_type type = num_sources == 1 ?
                            brw_inst_src0_type(devinfo, inst) :
                            brw_inst_src1_type(devinfo, inst);

   /* The PRMs say:
    *
    *    When an immediate vector is used in an instruction, the destination
    *    must be 128-bit aligned with destination horizontal stride equivalent
    *    to a word for an immediate integer vector (v) and equivalent to a
    *    DWord for an immediate float vector (vf).
    *
    * The text has not been updated for the addition of the immediate unsigned
    * integer vector type (uv) on SNB, but presumably the same restriction
    * applies.
    */
   switch (type) {
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      ERROR_IF(dst_subreg % (128 / 8) != 0,
               "Destination must be 128-bit aligned in order to use immediate "
               "vector types");

      if (type == BRW_REGISTER_TYPE_VF) {
         ERROR_IF(dst_type_size * dst_stride != 4,
                  "Destination must have stride equivalent to dword in order "
                  "to use the VF type");
      } else {
         ERROR_IF(dst_type_size * dst_stride != 2,
                  "Destination must have stride equivalent to word in order "
                  "to use the V or UV type");
      }
      break;
   default:
      break;
   }

   return error_msg;
}
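
/* By the checks above, a V/UV immediate requires a word-sized destination
 * step (e.g. a :w destination with a <1> stride, or a :b destination with a
 * <2> stride), while a VF immediate requires a dword-sized step (e.g. a :f
 * destination with a <1> stride), and the destination must start on a
 * 16-byte boundary.
 */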

bool
brw_validate_instructions(const struct gen_device_info *devinfo,
                          void *assembly, int start_offset, int end_offset,
                          struct annotation_info *annotation)
{
   bool valid = true;

   for (int src_offset = start_offset; src_offset < end_offset;) {
      struct string error_msg = { .str = NULL, .len = 0 };
      const brw_inst *inst = assembly + src_offset;
      bool is_compact = brw_inst_cmpt_control(devinfo, inst);
      brw_inst uncompacted;

      if (is_compact) {
         brw_compact_inst *compacted = (void *)inst;
         brw_uncompact_instruction(devinfo, &uncompacted, compacted);
         inst = &uncompacted;
      }

      if (is_unsupported_inst(devinfo, inst)) {
         ERROR("Instruction not supported on this Gen");
      } else {
         CHECK(sources_not_null);
         CHECK(send_restrictions);
         CHECK(general_restrictions_based_on_operand_types);
         CHECK(general_restrictions_on_region_parameters);
         CHECK(region_alignment_rules);
         CHECK(vector_immediate_restrictions);
      }

      if (error_msg.str && annotation) {
         annotation_insert_error(annotation, src_offset, error_msg.str);
      }
      valid = valid && error_msg.len == 0;
      free(error_msg.str);

      if (is_compact) {
         src_offset += sizeof(brw_compact_inst);
      } else {
         src_offset += sizeof(brw_inst);
      }
   }

   return valid;
}
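
/* Typical usage is to validate the byte range a code generator just emitted.
 * A sketch (assuming the usual brw_codegen state, with p->store as the
 * instruction buffer and p->next_insn_offset as the end of the emitted code):
 *
 *    if (!brw_validate_instructions(devinfo, p->store, start_offset,
 *                                   p->next_insn_offset, &annotation))
 *       abort();
 */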