intel/compiler: Move Gen4/5 rounding to visitor
[mesa.git] / src / intel / compiler / brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case. This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      assert(devinfo->gen < 12);
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
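
/* A hedged usage sketch (illustrative only; the register choices are made
 * up, not taken from any real caller): a generator holding a SEND payload
 * in a GRF can resolve the implied move before assembling the message:
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *
 * after which payload refers to m1 and the explicit MOV has been emitted.
 * On pre-Gen6 platforms the helper returns immediately and leaves the
 * source untouched.
 */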

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
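
/* As a concrete example (assuming the usual GEN7_MRF_HACK_START value of
 * 112): on Gen7, a message register m4 handed to this helper comes back as
 * g116,
 *
 *    struct brw_reg reg = brw_message_reg(4);
 *    gen7_convert_mrf_to_grf(p, &reg);
 *
 * leaving reg.file == BRW_GENERAL_REGISTER_FILE and reg.nr == 112 + 4,
 * which lands in the R112-R127 window the PRM reserves for EOT sends.
 */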

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file == BRW_GENERAL_REGISTER_FILE)
      assert(dest.nr < 128);

   /* The hardware has a restriction where a destination of size Byte with
    * a stride of 1 is only allowed for a packed byte MOV. For any other
    * instruction, the stride must be at least 2, even when the destination
    * is the NULL register.
    */
   if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       dest.nr == BRW_ARF_NULL &&
       type_sz(dest.type) == 1 &&
       dest.hstride == BRW_HORIZONTAL_STRIDE_1) {
      dest.hstride = BRW_HORIZONTAL_STRIDE_2;
   }

   gen7_convert_mrf_to_grf(p, &dest);

   if (devinfo->gen >= 12 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(dest.address_mode == BRW_ADDRESS_DIRECT);
      assert(dest.subnr == 0);
      assert(brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1 ||
             (dest.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              dest.vstride == dest.width + 1));
      assert(!dest.negate && !dest.abs);
      brw_inst_set_dst_reg_file(devinfo, inst, dest.file);
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

   } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
              brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      assert(devinfo->gen < 12);
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(dest.address_mode == BRW_ADDRESS_DIRECT);
      assert(dest.subnr % 16 == 0);
      assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             dest.vstride == dest.width + 1);
      assert(!dest.negate && !dest.abs);
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_send_dst_reg_file(devinfo, inst, dest.file);
   } else {
      brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
      brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

      if (dest.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
            brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
            if (dest.file == BRW_GENERAL_REGISTER_FILE ||
                dest.file == BRW_MESSAGE_REGISTER_FILE) {
               assert(dest.writemask != 0);
            }
            /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
             * Although Dst.HorzStride is a don't care for Align16, HW needs
             * this to be programmed as "01".
             */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      } else {
         brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

         /* These are different sizes in align1 vs align16:
          */
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                          dest.indirect_offset);
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                           dest.indirect_offset);
            /* Even though it is ignored in DA16, the hstride still needs
             * to be set to '01'.
             */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct. However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64, we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
       * those cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted, as we can't rely on
       * this code to fix them up.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
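
/* A small sketch of the automatic shrinking above (the register numbers are
 * arbitrary): with a default exec size of 8,
 *
 *    brw_MOV(p, brw_vec1_grf(10, 0), brw_vec1_grf(20, 0));
 *
 * is encoded as a SIMD1 instruction, because dest.width < BRW_EXECUTE_4
 * triggers brw_inst_set_exec_size(devinfo, inst, dest.width). Generators
 * that really want a wider exec size with a narrow destination must clear
 * p->automatic_exec_sizes or set the exec size explicitly.
 */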

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   if (devinfo->gen >= 12 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      assert(reg.file != BRW_IMMEDIATE_VALUE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr == 0);
      assert(brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1 ||
             (reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              reg.vstride == reg.width + 1));
      assert(!reg.negate && !reg.abs);
      brw_inst_set_send_src0_reg_file(devinfo, inst, reg.file);
      brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);

   } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
              brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      assert(reg.file == BRW_GENERAL_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr % 16 == 0);
      assert(reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             reg.vstride == reg.width + 1);
      assert(!reg.negate && !reg.abs);
      brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
   } else {
      brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src0_abs(devinfo, inst, reg.abs);
      brw_inst_set_src0_negate(devinfo, inst, reg.negate);
      brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         if (reg.type == BRW_REGISTER_TYPE_DF ||
             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
            brw_inst_set_imm_df(devinfo, inst, reg.df);
         else if (reg.type == BRW_REGISTER_TYPE_UQ ||
                  reg.type == BRW_REGISTER_TYPE_Q)
            brw_inst_set_imm_uq(devinfo, inst, reg.u64);
         else
            brw_inst_set_imm_ud(devinfo, inst, reg.ud);

         if (devinfo->gen < 12 && type_sz(reg.type) < 8) {
            brw_inst_set_src1_reg_file(devinfo, inst,
                                       BRW_ARCHITECTURE_REGISTER_FILE);
            brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                          brw_inst_src0_reg_hw_type(devinfo, inst));
         }
      } else {
         if (reg.address_mode == BRW_ADDRESS_DIRECT) {
            brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
            } else {
               brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
            }
         } else {
            brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
            } else {
               brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
            }
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src0_width(devinfo, inst, reg.width);
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of the fact we're using the same
                * descriptions for registers in align_16 as align_1:
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                * "For Align16 access mode, only encodings of 0000 and 0011
                * are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC ||
       (devinfo->gen >= 12 &&
        (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
         brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))) {
      assert(reg.file == BRW_GENERAL_REGISTER_FILE ||
             reg.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr == 0);
      assert(brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1 ||
             (reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              reg.vstride == reg.width + 1));
      assert(!reg.negate && !reg.abs);
      brw_inst_set_send_src1_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_send_src1_reg_file(devinfo, inst, reg.file);
   } else {
      /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
       *
       * "Accumulator registers may be accessed explicitly as src0
       * operands only."
       */
      assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
             reg.nr != BRW_ARF_ACCUMULATOR);

      gen7_convert_mrf_to_grf(p, &reg);
      assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

      brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src1_abs(devinfo, inst, reg.abs);
      brw_inst_set_src1_negate(devinfo, inst, reg.negate);

      /* Only src1 can be immediate in two-argument instructions.
       */
      assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         /* two-argument instructions can only use 32-bit immediates */
         assert(type_sz(reg.type) < 8);
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);
      } else {
         /* This is a hardware restriction, which may or may not be lifted
          * in the future:
          */
         assert(reg.address_mode == BRW_ADDRESS_DIRECT);
         /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

         brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src1_width(devinfo, inst, reg.width);
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of the fact we're using the same
                * descriptions for registers in align_16 as align_1:
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                * "For Align16 access mode, only encodings of 0000 and 0011
                * are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   if (devinfo->gen < 12)
      brw_inst_set_src1_file_type(devinfo, inst,
                                  BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
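
/* Typical usage, as seen in the message helpers below: callers compose the
 * descriptor immediate with brw_message_desc() and install it via
 * brw_set_desc(), which is assumed here to be the convenience wrapper in
 * brw_eu.h that forwards to brw_set_desc_ex() with ex_desc == 0:
 *
 *    brw_set_desc(p, inst, brw_message_desc(devinfo, mlen, rlen, true));
 */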

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
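
/* Worked example: BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER appears
 * in both switches above, since it takes two operands and returns both the
 * quotient and the remainder, so its descriptor is built with
 * msg_length == 2 and response_length == 2.
 */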


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
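
/* The block size field encodes the register count differently per
 * generation; e.g. num_regs == 4 yields block_size == 2 on Gen8+
 * (log2(4)) but block_size == 3 on Gen7 (num_regs - 1).
 */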

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   if (devinfo->gen >= 12)
      brw_inst_set_swsb(devinfo, insn, tgl_swsb_encode(state->swsb));
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}
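
/* Note the growth policy: p->store doubles whenever it fills, so emitting N
 * instructions costs amortized O(N) reallocation work. The reralloc() also
 * means any brw_inst pointer taken before a next_insn() call may be
 * invalidated, which is why the if/loop stacks below store offsets into
 * p->store rather than raw pointers.
 */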

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
    * use 32-bit units (components 0..7). Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
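
/* E.g. a 32-bit component at byte subnr 8 within its register is component
 * 2 here: the division maps byte offsets {0, 4, 8, ..., 28} onto the 3-src
 * component encoding {0, 1, 2, ..., 7}.
 */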

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(const struct gen_device_info *devinfo,
                       enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_1:
      assert(devinfo->gen >= 12);
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1;
   case BRW_VERTICAL_STRIDE_2:
      assert(devinfo->gen < 12);
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);

   if (devinfo->gen >= 10)
      assert(!(src0.file == BRW_IMMEDIATE_VALUE &&
               src2.file == BRW_IMMEDIATE_VALUE));

   assert(src0.file == BRW_IMMEDIATE_VALUE || src0.nr < 128);
   assert(src1.file != BRW_IMMEDIATE_VALUE && src1.nr < 128);
   assert(src2.file == BRW_IMMEDIATE_VALUE || src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (devinfo->gen >= 12) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst, dest.file);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      } else {
         if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                              BRW_ALIGN1_3SRC_ACCUMULATOR);
            brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
         } else {
            brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                              BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
            brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
         }
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      if (src0.file == BRW_IMMEDIATE_VALUE) {
         brw_inst_set_3src_a1_src0_imm(devinfo, inst, src0.ud);
      } else {
         brw_inst_set_3src_a1_src0_vstride(
            devinfo, inst, to_3src_align1_vstride(devinfo, src0.vstride));
         brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                           to_3src_align1_hstride(src0.hstride));
         brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
         if (src0.type == BRW_REGISTER_TYPE_NF) {
            brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
         } else {
            brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
         }
         brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
         brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      }
      brw_inst_set_3src_a1_src1_vstride(
         devinfo, inst, to_3src_align1_vstride(devinfo, src1.vstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      if (src2.file == BRW_IMMEDIATE_VALUE) {
         brw_inst_set_3src_a1_src2_imm(devinfo, inst, src2.ud);
      } else {
         brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                           to_3src_align1_hstride(src2.hstride));
         /* no vstride on src2 */
         brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
         brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
         brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
         brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      }

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      if (devinfo->gen >= 12) {
         if (src0.file == BRW_IMMEDIATE_VALUE) {
            brw_inst_set_3src_a1_src0_is_imm(devinfo, inst, 1);
         } else {
            brw_inst_set_3src_a1_src0_reg_file(devinfo, inst, src0.file);
         }

         brw_inst_set_3src_a1_src1_reg_file(devinfo, inst, src1.file);

         if (src2.file == BRW_IMMEDIATE_VALUE) {
            brw_inst_set_3src_a1_src2_is_imm(devinfo, inst, 1);
         } else {
            brw_inst_set_3src_a1_src2_reg_file(devinfo, inst, src2.file);
         }
      } else {
         brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                            src0.file == BRW_GENERAL_REGISTER_FILE ?
                                            BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                            BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
         brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                            src1.file == BRW_GENERAL_REGISTER_FILE ?
                                            BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                            BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                            src2.file == BRW_GENERAL_REGISTER_FILE ?
                                            BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                            BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      }

   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD ||
             (dest.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 8));
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types. The MAD and LRP emitters ensure
          * that all four types are float. The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);

         /* From the Bspec, 3D Media GPGPU, Instruction fields, srcType:
          *
          * "Three source instructions can use operands with mixed-mode
          * precision. When SrcType field is set to :f or :hf it defines
          * precision for source 0 only, and fields Src1Type and Src2Type
          * define precision for other source operands:
          *
          * 0b = :f. Single precision Float (32-bit).
          * 1b = :hf. Half precision Float (16-bit)."
          */
         if (src1.type == BRW_REGISTER_TYPE_HF)
            brw_inst_set_3src_a16_src1_type(devinfo, inst, 1);

         if (src2.type == BRW_REGISTER_TYPE_HF)
            brw_inst_set_3src_a16_src2_type(devinfo, inst, 1);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   if (p->current->access_mode == BRW_ALIGN_16) {                 \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src0.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src1.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src2.swizzle = BRW_SWIZZLE_XXXX;                         \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
                                                                  \
   if (p->current->access_mode == BRW_ALIGN_16) {                 \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src0.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src1.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src2.swizzle = BRW_SWIZZLE_XXXX;                         \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU2(ROL)
ALU2(ROR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDZ)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use an <X,2,0> source region to
    * read each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == src0.width + src0.hstride);
      src0.vstride = src0.hstride;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
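
/* Concretely, the IVB/BYT workaround above turns a regular <8;8,1>:F source
 * into <1;2,0>:F: vstride becomes the old hstride, width becomes 2 and
 * hstride becomes 0, so each 32-bit element is read twice and every channel
 * the F->DF conversion actually uses sees valid data.
 */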

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      if (devinfo->gen < 12)
         brw_inst_set_no_dd_clear(devinfo, inst, true);
      brw_set_default_swsb(p, tgl_swsb_null());
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      if (devinfo->gen < 12)
         brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       * Because this instruction does not have a 16-bit floating-point
       * type, the source data type must be Word (W). The destination type
       * must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}

void brw_SYNC(struct brw_codegen *p, enum tgl_sync_function func)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_SYNC);
   brw_inst_set_cond_modifier(p->devinfo, insn, func);
}

/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack). Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off. If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      if (devinfo->gen < 12)
         brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier). It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block. If there is no ELSE
    * block, point to where ENDIF would be. Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
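
/* The immediates above are byte offsets: each native instruction is 16
 * bytes, so an ELSE sitting three instructions after its IF would make the
 * IF's replacement ADD advance IP by (3 + 1) * 16 = 64 bytes, i.e. to the
 * first instruction of the ELSE block.
 */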

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
1555
1556 void
1557 brw_ELSE(struct brw_codegen *p)
1558 {
1559 const struct gen_device_info *devinfo = p->devinfo;
1560 brw_inst *insn;
1561
1562 insn = next_insn(p, BRW_OPCODE_ELSE);
1563
1564 if (devinfo->gen < 6) {
1565 brw_set_dest(p, insn, brw_ip_reg());
1566 brw_set_src0(p, insn, brw_ip_reg());
1567 brw_set_src1(p, insn, brw_imm_d(0x0));
1568 } else if (devinfo->gen == 6) {
1569 brw_set_dest(p, insn, brw_imm_w(0));
1570 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1571 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1572 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1573 } else if (devinfo->gen == 7) {
1574 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1575 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1576 brw_set_src1(p, insn, brw_imm_w(0));
1577 brw_inst_set_jip(devinfo, insn, 0);
1578 brw_inst_set_uip(devinfo, insn, 0);
1579 } else {
1580 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1581 if (devinfo->gen < 12)
1582 brw_set_src0(p, insn, brw_imm_d(0));
1583 brw_inst_set_jip(devinfo, insn, 0);
1584 brw_inst_set_uip(devinfo, insn, 0);
1585 }
1586
1587 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1588 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1589 if (!p->single_program_flow && devinfo->gen < 6)
1590 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1591
1592 push_if_stack(p, insn);
1593 }
1594
1595 void
1596 brw_ENDIF(struct brw_codegen *p)
1597 {
1598 const struct gen_device_info *devinfo = p->devinfo;
1599 brw_inst *insn = NULL;
1600 brw_inst *else_inst = NULL;
1601 brw_inst *if_inst = NULL;
1602 brw_inst *tmp;
1603 bool emit_endif = true;
1604
1605 /* In single program flow mode, we can express IF and ELSE instructions
1606 * equivalently as ADD instructions that operate on IP. On platforms prior
1607 * to Gen6, flow control instructions cause an implied thread switch, so
1608 * this is a significant savings.
1609 *
1610 * However, on Gen6, writing to IP doesn't work in single program flow mode
1611 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1612 * not be updated by non-flow control instructions."). And on later
1613 * platforms, there is no significant benefit to converting control flow
1614 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1615 * Gen5.
1616 */
1617 if (devinfo->gen < 6 && p->single_program_flow)
1618 emit_endif = false;
1619
1620 /*
1621 * A single next_insn() may change the base address of the instruction
1622 * store memory (p->store), so call it first, before computing any
1623 * pointers into the store from an index.
1624 */
1625 if (emit_endif)
1626 insn = next_insn(p, BRW_OPCODE_ENDIF);
1627
1628 /* Pop the IF and (optional) ELSE instructions from the stack */
1629 p->if_depth_in_loop[p->loop_stack_depth]--;
1630 tmp = pop_if_stack(p);
1631 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1632 else_inst = tmp;
1633 tmp = pop_if_stack(p);
1634 }
1635 if_inst = tmp;
1636
1637 if (!emit_endif) {
1638 /* ENDIF is useless; don't bother emitting it. */
1639 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1640 return;
1641 }
1642
1643 if (devinfo->gen < 6) {
1644 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1645 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1646 brw_set_src1(p, insn, brw_imm_d(0x0));
1647 } else if (devinfo->gen == 6) {
1648 brw_set_dest(p, insn, brw_imm_w(0));
1649 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1650 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1651 } else if (devinfo->gen == 7) {
1652 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1653 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1654 brw_set_src1(p, insn, brw_imm_w(0));
1655 } else {
1656 brw_set_src0(p, insn, brw_imm_d(0));
1657 }
1658
1659 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1660 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1661 if (devinfo->gen < 6)
1662 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1663
1664 /* Also pop item off the stack in the endif instruction: */
1665 if (devinfo->gen < 6) {
1666 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1667 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1668 } else if (devinfo->gen == 6) {
1669 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1670 } else {
1671 brw_inst_set_jip(devinfo, insn, 2);
1672 }
1673 patch_IF_ELSE(p, if_inst, else_inst, insn);
1674 }
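/* Illustrative usage of the structured control-flow helpers (a sketch;
 * brw_IF() is emitted earlier in this file):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ... then-block instructions ...
 *    brw_ELSE(p);
 *    ... else-block instructions ...
 *    brw_ENDIF(p);   <- pops the if stack and patches the jump targets
 */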
1675
1676 brw_inst *
1677 brw_BREAK(struct brw_codegen *p)
1678 {
1679 const struct gen_device_info *devinfo = p->devinfo;
1680 brw_inst *insn;
1681
1682 insn = next_insn(p, BRW_OPCODE_BREAK);
1683 if (devinfo->gen >= 8) {
1684 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1685 brw_set_src0(p, insn, brw_imm_d(0x0));
1686 } else if (devinfo->gen >= 6) {
1687 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1688 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1689 brw_set_src1(p, insn, brw_imm_d(0x0));
1690 } else {
1691 brw_set_dest(p, insn, brw_ip_reg());
1692 brw_set_src0(p, insn, brw_ip_reg());
1693 brw_set_src1(p, insn, brw_imm_d(0x0));
1694 brw_inst_set_gen4_pop_count(devinfo, insn,
1695 p->if_depth_in_loop[p->loop_stack_depth]);
1696 }
1697 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1698 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1699
1700 return insn;
1701 }
1702
1703 brw_inst *
1704 brw_CONT(struct brw_codegen *p)
1705 {
1706 const struct gen_device_info *devinfo = p->devinfo;
1707 brw_inst *insn;
1708
1709 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1710 brw_set_dest(p, insn, brw_ip_reg());
1711 if (devinfo->gen >= 8) {
1712 brw_set_src0(p, insn, brw_imm_d(0x0));
1713 } else {
1714 brw_set_src0(p, insn, brw_ip_reg());
1715 brw_set_src1(p, insn, brw_imm_d(0x0));
1716 }
1717
1718 if (devinfo->gen < 6) {
1719 brw_inst_set_gen4_pop_count(devinfo, insn,
1720 p->if_depth_in_loop[p->loop_stack_depth]);
1721 }
1722 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1723 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1724 return insn;
1725 }
1726
1727 brw_inst *
1728 gen6_HALT(struct brw_codegen *p)
1729 {
1730 const struct gen_device_info *devinfo = p->devinfo;
1731 brw_inst *insn;
1732
1733 insn = next_insn(p, BRW_OPCODE_HALT);
1734 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1735 if (devinfo->gen < 8) {
1736 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1737 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1738 } else if (devinfo->gen < 12) {
1739 brw_set_src0(p, insn, brw_imm_d(0x0));
1740 }
1741
1742 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1743 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1744 return insn;
1745 }
1746
1747 /* DO/WHILE loop:
1748 *
1749 * The DO/WHILE is just an unterminated loop -- break or continue are
1750 * used for control within the loop. We have a few ways they can be
1751 * done.
1752 *
1753 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1754 * jip and no DO instruction.
1755 *
1756 * For non-uniform control flow pre-gen6, there's a DO instruction to
1757 * push the mask, and a WHILE to jump back, and BREAK to get out and
1758 * pop the mask.
1759 *
1760 * For gen6, there's no more mask stack, so no need for DO. WHILE
1761 * just points back to the first instruction of the loop.
1762 */
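/* Illustrative emission sequence (a sketch):
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ... loop body, possibly containing brw_BREAK(p) / brw_CONT(p) ...
 *    brw_WHILE(p);
 */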
1763 brw_inst *
1764 brw_DO(struct brw_codegen *p, unsigned execute_size)
1765 {
1766 const struct gen_device_info *devinfo = p->devinfo;
1767
1768 if (devinfo->gen >= 6 || p->single_program_flow) {
1769 push_loop_stack(p, &p->store[p->nr_insn]);
1770 return &p->store[p->nr_insn];
1771 } else {
1772 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1773
1774 push_loop_stack(p, insn);
1775
1776 /* Override the defaults for this instruction:
1777 */
1778 brw_set_dest(p, insn, brw_null_reg());
1779 brw_set_src0(p, insn, brw_null_reg());
1780 brw_set_src1(p, insn, brw_null_reg());
1781
1782 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1783 brw_inst_set_exec_size(devinfo, insn, execute_size);
1784 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1785
1786 return insn;
1787 }
1788 }
1789
1790 /**
1791 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1792 * instruction here.
1793 *
1794 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1795 * nesting, since it can always just point to the end of the block/current loop.
1796 */
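/* Example (sketch): with br == 2 (Gen5) and a BREAK emitted three
 * instructions before the WHILE, the patched jump count is
 * 2 * ((while_inst - inst) + 1) == 8 64-bit chunks, which lands execution
 * one instruction past the WHILE.
 */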
1797 static void
1798 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1799 {
1800 const struct gen_device_info *devinfo = p->devinfo;
1801 brw_inst *do_inst = get_inner_do_insn(p);
1802 brw_inst *inst;
1803 unsigned br = brw_jump_scale(devinfo);
1804
1805 assert(devinfo->gen < 6);
1806
1807 for (inst = while_inst - 1; inst != do_inst; inst--) {
1808 /* If the jump count is nonzero, this instruction has already been
1809 * patched, because it belongs to a loop nested inside the one we're
1810 * patching.
1811 */
1812 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1813 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1814 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1815 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1816 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1817 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1818 }
1819 }
1820 }
1821
1822 brw_inst *
1823 brw_WHILE(struct brw_codegen *p)
1824 {
1825 const struct gen_device_info *devinfo = p->devinfo;
1826 brw_inst *insn, *do_insn;
1827 unsigned br = brw_jump_scale(devinfo);
1828
1829 if (devinfo->gen >= 6) {
1830 insn = next_insn(p, BRW_OPCODE_WHILE);
1831 do_insn = get_inner_do_insn(p);
1832
1833 if (devinfo->gen >= 8) {
1834 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1835 if (devinfo->gen < 12)
1836 brw_set_src0(p, insn, brw_imm_d(0));
1837 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1838 } else if (devinfo->gen == 7) {
1839 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1840 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1841 brw_set_src1(p, insn, brw_imm_w(0));
1842 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1843 } else {
1844 brw_set_dest(p, insn, brw_imm_w(0));
1845 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1846 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1847 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1848 }
1849
1850 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1851
1852 } else {
1853 if (p->single_program_flow) {
1854 insn = next_insn(p, BRW_OPCODE_ADD);
1855 do_insn = get_inner_do_insn(p);
1856
1857 brw_set_dest(p, insn, brw_ip_reg());
1858 brw_set_src0(p, insn, brw_ip_reg());
1859 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1860 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1861 } else {
1862 insn = next_insn(p, BRW_OPCODE_WHILE);
1863 do_insn = get_inner_do_insn(p);
1864
1865 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1866
1867 brw_set_dest(p, insn, brw_ip_reg());
1868 brw_set_src0(p, insn, brw_ip_reg());
1869 brw_set_src1(p, insn, brw_imm_d(0));
1870
1871 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1872 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1873 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1874
1875 brw_patch_break_cont(p, insn);
1876 }
1877 }
1878 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1879
1880 p->loop_stack_depth--;
1881
1882 return insn;
1883 }
1884
1885 /* FORWARD JUMPS:
1886 */
1887 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1888 {
1889 const struct gen_device_info *devinfo = p->devinfo;
1890 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1891 unsigned jmpi = 1;
1892
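/* Gen5 and later count JMPI distances in 64-bit chunks rather than whole
 * instructions, so the jump count per instruction doubles.
 */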
1893 if (devinfo->gen >= 5)
1894 jmpi = 2;
1895
1896 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1897 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1898
1899 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1900 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1901 }
1902
1903 /* To integrate with the above, it makes sense for the comparison
1904 * instruction to populate the flag register. It might be simpler
1905 * just to use the flag register for most WM tasks?
1906 */
1907 void brw_CMP(struct brw_codegen *p,
1908 struct brw_reg dest,
1909 unsigned conditional,
1910 struct brw_reg src0,
1911 struct brw_reg src1)
1912 {
1913 const struct gen_device_info *devinfo = p->devinfo;
1914 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1915
1916 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1917 brw_set_dest(p, insn, dest);
1918 brw_set_src0(p, insn, src0);
1919 brw_set_src1(p, insn, src1);
1920
1921 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1922 * page says:
1923 * "Any CMP instruction with a null destination must use a {switch}."
1924 *
1925 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1926 * mentioned on their work-arounds pages.
1927 */
1928 if (devinfo->gen == 7) {
1929 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1930 dest.nr == BRW_ARF_NULL) {
1931 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1932 }
1933 }
1934 }
1935
1936 /***********************************************************************
1937 * Helpers for the various SEND message types:
1938 */
1939
1940 /** Extended math function, float[8].
1941 */
1942 void gen4_math(struct brw_codegen *p,
1943 struct brw_reg dest,
1944 unsigned function,
1945 unsigned msg_reg_nr,
1946 struct brw_reg src,
1947 unsigned precision )
1948 {
1949 const struct gen_device_info *devinfo = p->devinfo;
1950 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1951 unsigned data_type;
1952 if (has_scalar_region(src)) {
1953 data_type = BRW_MATH_DATA_SCALAR;
1954 } else {
1955 data_type = BRW_MATH_DATA_VECTOR;
1956 }
1957
1958 assert(devinfo->gen < 6);
1959
1960 /* Example code doesn't set predicate_control for send
1961 * instructions.
1962 */
1963 brw_inst_set_pred_control(devinfo, insn, 0);
1964 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1965
1966 brw_set_dest(p, insn, dest);
1967 brw_set_src0(p, insn, src);
1968 brw_set_math_message(p,
1969 insn,
1970 function,
1971 src.type == BRW_REGISTER_TYPE_D,
1972 precision,
1973 data_type);
1974 }
1975
1976 void gen6_math(struct brw_codegen *p,
1977 struct brw_reg dest,
1978 unsigned function,
1979 struct brw_reg src0,
1980 struct brw_reg src1)
1981 {
1982 const struct gen_device_info *devinfo = p->devinfo;
1983 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1984
1985 assert(devinfo->gen >= 6);
1986
1987 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1988 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1989
1990 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1991 if (devinfo->gen == 6) {
1992 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1993 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1994 }
1995
1996 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1997 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1998 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1999 assert(src0.type != BRW_REGISTER_TYPE_F);
2000 assert(src1.type != BRW_REGISTER_TYPE_F);
2001 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
2002 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
2003 } else {
2004 assert(src0.type == BRW_REGISTER_TYPE_F ||
2005 (src0.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
2006 assert(src1.type == BRW_REGISTER_TYPE_F ||
2007 (src1.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
2008 }
2009
2010 /* Source modifiers are ignored for extended math instructions on Gen6. */
2011 if (devinfo->gen == 6) {
2012 assert(!src0.negate);
2013 assert(!src0.abs);
2014 assert(!src1.negate);
2015 assert(!src1.abs);
2016 }
2017
2018 brw_inst_set_math_function(devinfo, insn, function);
2019
2020 brw_set_dest(p, insn, dest);
2021 brw_set_src0(p, insn, src0);
2022 brw_set_src1(p, insn, src1);
2023 }
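/* Illustrative gen6_math() call (a sketch; the GRF numbers are arbitrary):
 * SIMD8 reciprocal of g2 written to g4. Single-source math functions take
 * a null src1:
 *
 *    gen6_math(p, brw_vec8_grf(4, 0), BRW_MATH_FUNCTION_INV,
 *              brw_vec8_grf(2, 0), brw_null_reg());
 */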
2024
2025 /**
2026 * Return the right surface index to access the thread scratch space using
2027 * stateless dataport messages.
2028 */
2029 unsigned
2030 brw_scratch_surface_idx(const struct brw_codegen *p)
2031 {
2032 /* The scratch space is thread-local so IA coherency is unnecessary. */
2033 if (p->devinfo->gen >= 8)
2034 return GEN8_BTI_STATELESS_NON_COHERENT;
2035 else
2036 return BRW_BTI_STATELESS;
2037 }
2038
2039 /**
2040 * Write a block of OWORDs (half a GRF each) from the scratch buffer,
2041 * using a constant offset per channel.
2042 *
2043 * The offset must be aligned to oword size (16 bytes). Used for
2044 * register spilling.
2045 */
2046 void brw_oword_block_write_scratch(struct brw_codegen *p,
2047 struct brw_reg mrf,
2048 int num_regs,
2049 unsigned offset)
2050 {
2051 const struct gen_device_info *devinfo = p->devinfo;
2052 const unsigned target_cache =
2053 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2054 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2055 BRW_SFID_DATAPORT_WRITE);
2056 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2057 uint32_t msg_type;
2058
2059 if (devinfo->gen >= 6)
2060 offset /= 16;
2061
2062 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2063
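/* One GRF of message header plus num_regs GRFs of data to be spilled. */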
2064 const unsigned mlen = 1 + num_regs;
2065
2066 /* Set up the message header. This is g0, with g0.2 filled with
2067 * the offset. We don't want to leave our offset around in g0 or
2068 * it'll screw up texture samples, so set it up inside the message
2069 * reg.
2070 */
2071 {
2072 brw_push_insn_state(p);
2073 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2074 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2075 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2076 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2077
2078 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2079
2080 /* set message header global offset field (reg 0, element 2) */
2081 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2082 brw_set_default_swsb(p, tgl_swsb_null());
2083 brw_MOV(p,
2084 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2085 mrf.nr,
2086 2), BRW_REGISTER_TYPE_UD),
2087 brw_imm_ud(offset));
2088
2089 brw_pop_insn_state(p);
2090 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2091 }
2092
2093 {
2094 struct brw_reg dest;
2095 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2096 int send_commit_msg;
2097 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2098 BRW_REGISTER_TYPE_UW);
2099
2100 brw_inst_set_sfid(devinfo, insn, target_cache);
2101 brw_inst_set_compression(devinfo, insn, false);
2102
2103 if (brw_inst_exec_size(devinfo, insn) >= BRW_EXECUTE_16)
2104 src_header = vec16(src_header);
2105
2106 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2107 if (devinfo->gen < 6)
2108 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2109
2110 /* Until gen6, writes followed by reads from the same location
2111 * are not guaranteed to be ordered unless write_commit is set.
2112 * If set, then a no-op write is issued to the destination
2113 * register to set a dependency, and a read from the destination
2114 * can be used to ensure the ordering.
2115 *
2116 * For gen6, only writes between different threads need ordering
2117 * protection. Our use of DP writes is all about register
2118 * spilling within a thread.
2119 */
2120 if (devinfo->gen >= 6) {
2121 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2122 send_commit_msg = 0;
2123 } else {
2124 dest = src_header;
2125 send_commit_msg = 1;
2126 }
2127
2128 brw_set_dest(p, insn, dest);
2129 if (devinfo->gen >= 6) {
2130 brw_set_src0(p, insn, mrf);
2131 } else {
2132 brw_set_src0(p, insn, brw_null_reg());
2133 }
2134
2135 if (devinfo->gen >= 6)
2136 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2137 else
2138 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2139
2140 brw_set_desc(p, insn,
2141 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2142 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2143 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2144 msg_type, 0, /* not a render target */
2145 send_commit_msg));
2146 }
2147 }
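/* Illustrative spill of two GRFs to scratch offset 64 (a sketch; the MRF
 * number is arbitrary):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(14), 2, 64);
 */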
2148
2149
2150 /**
2151 * Read a block of OWORDs (half a GRF each) from the scratch buffer,
2152 * using a constant offset per channel.
2153 *
2154 * Offset must be aligned to oword size (16 bytes). Used for register
2155 * spilling.
2156 */
2157 void
2158 brw_oword_block_read_scratch(struct brw_codegen *p,
2159 struct brw_reg dest,
2160 struct brw_reg mrf,
2161 int num_regs,
2162 unsigned offset)
2163 {
2164 const struct gen_device_info *devinfo = p->devinfo;
2165 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2166
2167 if (devinfo->gen >= 6)
2168 offset /= 16;
2169
2170 if (p->devinfo->gen >= 7) {
2171 /* On gen 7 and above, we no longer have message registers and we can
2172 * send from any register we want. By using the destination register
2173 * for the message, we guarantee that the implied message write won't
2174 * accidentally overwrite anything. This has been a problem because
2175 * the MRF registers and source for the final FB write are both fixed
2176 * and may overlap.
2177 */
2178 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2179 } else {
2180 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2181 }
2182 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2183
2184 const unsigned rlen = num_regs;
2185 const unsigned target_cache =
2186 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2187 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2188 BRW_SFID_DATAPORT_READ);
2189
2190 {
2191 brw_push_insn_state(p);
2192 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2193 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2194 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2195 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2196
2197 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2198
2199 /* set message header global offset field (reg 0, element 2) */
2200 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2201 brw_set_default_swsb(p, tgl_swsb_null());
2202 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2203
2204 brw_pop_insn_state(p);
2205 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2206 }
2207
2208 {
2209 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2210
2211 brw_inst_set_sfid(devinfo, insn, target_cache);
2212 assert(brw_inst_pred_control(devinfo, insn) == 0);
2213 brw_inst_set_compression(devinfo, insn, false);
2214
2215 brw_set_dest(p, insn, dest); /* UW? */
2216 if (devinfo->gen >= 6) {
2217 brw_set_src0(p, insn, mrf);
2218 } else {
2219 brw_set_src0(p, insn, brw_null_reg());
2220 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2221 }
2222
2223 brw_set_desc(p, insn,
2224 brw_message_desc(devinfo, 1, rlen, true) |
2225 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2226 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2227 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2228 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2229 }
2230 }
2231
2232 void
2233 gen7_block_read_scratch(struct brw_codegen *p,
2234 struct brw_reg dest,
2235 int num_regs,
2236 unsigned offset)
2237 {
2238 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2239 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2240
2241 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2242
2243 /* The HW requires that the header is present; this is to get the g0.5
2244 * scratch offset.
2245 */
2246 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2247
2248 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2249 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2250 * is 32 bytes, which happens to be the size of a register.
2251 */
2252 offset /= REG_SIZE;
2253 assert(offset < (1 << 12));
2254
2255 gen7_set_dp_scratch_message(p, insn,
2256 false, /* scratch read */
2257 false, /* OWords */
2258 false, /* invalidate after read */
2259 num_regs,
2260 offset,
2261 1, /* mlen: just g0 */
2262 num_regs, /* rlen */
2263 true); /* header present */
2264 }
2265
2266 /**
2267 * Read float[4] vectors from the data port constant cache.
2268 * Location (in buffer) should be a multiple of 16.
2269 * Used for fetching shader constants.
2270 */
2271 void brw_oword_block_read(struct brw_codegen *p,
2272 struct brw_reg dest,
2273 struct brw_reg mrf,
2274 uint32_t offset,
2275 uint32_t bind_table_index)
2276 {
2277 const struct gen_device_info *devinfo = p->devinfo;
2278 const unsigned target_cache =
2279 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2280 BRW_SFID_DATAPORT_READ);
2281 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2282 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2283
2284 /* On gen6 and later, offset is in units of OWORDs (16 bytes). */
2285 if (devinfo->gen >= 6)
2286 offset /= 16;
2287
2288 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2289
2290 brw_push_insn_state(p);
2291 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2292 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2293 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2294
2295 brw_push_insn_state(p);
2296 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2297 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2298 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2299
2300 /* set message header global offset field (reg 0, element 2) */
2301 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2302 brw_set_default_swsb(p, tgl_swsb_null());
2303 brw_MOV(p,
2304 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2305 mrf.nr,
2306 2), BRW_REGISTER_TYPE_UD),
2307 brw_imm_ud(offset));
2308 brw_pop_insn_state(p);
2309
2310 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2311
2312 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2313
2314 brw_inst_set_sfid(devinfo, insn, target_cache);
2315
2316 /* cast dest to a uword[8] vector */
2317 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2318
2319 brw_set_dest(p, insn, dest);
2320 if (devinfo->gen >= 6) {
2321 brw_set_src0(p, insn, mrf);
2322 } else {
2323 brw_set_src0(p, insn, brw_null_reg());
2324 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2325 }
2326
2327 brw_set_desc(p, insn,
2328 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2329 brw_dp_read_desc(devinfo, bind_table_index,
2330 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2331 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2332 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2333
2334 brw_pop_insn_state(p);
2335 }
2336
2337 brw_inst *
2338 brw_fb_WRITE(struct brw_codegen *p,
2339 struct brw_reg payload,
2340 struct brw_reg implied_header,
2341 unsigned msg_control,
2342 unsigned binding_table_index,
2343 unsigned msg_length,
2344 unsigned response_length,
2345 bool eot,
2346 bool last_render_target,
2347 bool header_present)
2348 {
2349 const struct gen_device_info *devinfo = p->devinfo;
2350 const unsigned target_cache =
2351 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2352 BRW_SFID_DATAPORT_WRITE);
2353 brw_inst *insn;
2354 unsigned msg_type;
2355 struct brw_reg dest, src0;
2356
2357 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2358 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2359 else
2360 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2361
2362 if (devinfo->gen >= 6) {
2363 insn = next_insn(p, BRW_OPCODE_SENDC);
2364 } else {
2365 insn = next_insn(p, BRW_OPCODE_SEND);
2366 }
2367 brw_inst_set_sfid(devinfo, insn, target_cache);
2368 brw_inst_set_compression(devinfo, insn, false);
2369
2370 if (devinfo->gen >= 6) {
2371 /* headerless version, just submit color payload */
2372 src0 = payload;
2373
2374 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2375 } else {
2376 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2377 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2378 src0 = implied_header;
2379
2380 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2381 }
2382
2383 brw_set_dest(p, insn, dest);
2384 brw_set_src0(p, insn, src0);
2385 brw_set_desc(p, insn,
2386 brw_message_desc(devinfo, msg_length, response_length,
2387 header_present) |
2388 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2389 msg_type, last_render_target,
2390 0 /* send_commit_msg */));
2391 brw_inst_set_eot(devinfo, insn, eot);
2392
2393 return insn;
2394 }
2395
2396 brw_inst *
2397 gen9_fb_READ(struct brw_codegen *p,
2398 struct brw_reg dst,
2399 struct brw_reg payload,
2400 unsigned binding_table_index,
2401 unsigned msg_length,
2402 unsigned response_length,
2403 bool per_sample)
2404 {
2405 const struct gen_device_info *devinfo = p->devinfo;
2406 assert(devinfo->gen >= 9);
2407 const unsigned msg_subtype =
2408 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2409 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2410
2411 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2412 brw_set_dest(p, insn, dst);
2413 brw_set_src0(p, insn, payload);
2414 brw_set_desc(
2415 p, insn,
2416 brw_message_desc(devinfo, msg_length, response_length, true) |
2417 brw_dp_read_desc(devinfo, binding_table_index,
2418 per_sample << 5 | msg_subtype,
2419 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2420 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2421 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2422
2423 return insn;
2424 }
2425
2426 /**
2427 * Texture sample instruction.
2428 * Note: the msg_type plus msg_length values determine exactly what kind
2429 * of sampling operation is performed. See volume 4, page 161 of docs.
2430 */
2431 void brw_SAMPLE(struct brw_codegen *p,
2432 struct brw_reg dest,
2433 unsigned msg_reg_nr,
2434 struct brw_reg src0,
2435 unsigned binding_table_index,
2436 unsigned sampler,
2437 unsigned msg_type,
2438 unsigned response_length,
2439 unsigned msg_length,
2440 unsigned header_present,
2441 unsigned simd_mode,
2442 unsigned return_format)
2443 {
2444 const struct gen_device_info *devinfo = p->devinfo;
2445 brw_inst *insn;
2446
2447 if (msg_reg_nr != -1)
2448 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2449
2450 insn = next_insn(p, BRW_OPCODE_SEND);
2451 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2452 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2453
2454 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2455 *
2456 * "Instruction compression is not allowed for this instruction (that
2457 * is, send). The hardware behavior is undefined if this instruction is
2458 * set as compressed. However, compress control can be set to "SecHalf"
2459 * to affect the EMask generation."
2460 *
2461 * No similar wording is found in later PRMs, but there are examples
2462 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2463 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2464 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2465 */
2466 brw_inst_set_compression(devinfo, insn, false);
2467
2468 if (devinfo->gen < 6)
2469 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2470
2471 brw_set_dest(p, insn, dest);
2472 brw_set_src0(p, insn, src0);
2473 brw_set_desc(p, insn,
2474 brw_message_desc(devinfo, msg_length, response_length,
2475 header_present) |
2476 brw_sampler_desc(devinfo, binding_table_index, sampler,
2477 msg_type, simd_mode, return_format));
2478 }
2479
2480 /* Adjust the message header's sampler state pointer to
2481 * select the correct group of 16 samplers.
2482 */
2483 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2484 struct brw_reg header,
2485 struct brw_reg sampler_index)
2486 {
2487 /* The "Sampler Index" field can only store values between 0 and 15.
2488 * However, we can add an offset to the "Sampler State Pointer"
2489 * field, effectively selecting a different set of 16 samplers.
2490 *
2491 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2492 * offset, and each sampler state is only 16-bytes, so we can't
2493 * exclusively use the offset - we have to use both.
2494 */
2495
2496 const struct gen_device_info *devinfo = p->devinfo;
2497
2498 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2499 const int sampler_state_size = 16; /* 16 bytes */
2500 uint32_t sampler = sampler_index.ud;
2501
2502 if (sampler >= 16) {
2503 assert(devinfo->is_haswell || devinfo->gen >= 8);
2504 brw_ADD(p,
2505 get_element_ud(header, 3),
2506 get_element_ud(brw_vec8_grf(0, 0), 3),
2507 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2508 }
2509 } else {
2510 /* Non-const sampler array indexing case */
2511 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2512 return;
2513 }
2514
2515 struct brw_reg temp = get_element_ud(header, 3);
2516
2517 brw_push_insn_state(p);
2518 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2519 brw_set_default_swsb(p, tgl_swsb_regdist(1));
2520 brw_SHL(p, temp, temp, brw_imm_ud(4));
2521 brw_ADD(p,
2522 get_element_ud(header, 3),
2523 get_element_ud(brw_vec8_grf(0, 0), 3),
2524 temp);
2525 brw_pop_insn_state(p);
2526 }
2527 }
2528
2529 /* All these variables are pretty confusing - we might be better off
2530 * using bitmasks and macros for this, in the old style. Or perhaps
2531 * just having the caller instantiate the fields in dword3 itself.
2532 */
2533 void brw_urb_WRITE(struct brw_codegen *p,
2534 struct brw_reg dest,
2535 unsigned msg_reg_nr,
2536 struct brw_reg src0,
2537 enum brw_urb_write_flags flags,
2538 unsigned msg_length,
2539 unsigned response_length,
2540 unsigned offset,
2541 unsigned swizzle)
2542 {
2543 const struct gen_device_info *devinfo = p->devinfo;
2544 brw_inst *insn;
2545
2546 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2547
2548 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2549 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2550 brw_push_insn_state(p);
2551 brw_set_default_access_mode(p, BRW_ALIGN_1);
2552 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2553 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2554 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2555 BRW_REGISTER_TYPE_UD),
2556 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2557 brw_imm_ud(0xff00));
2558 brw_pop_insn_state(p);
2559 }
2560
2561 insn = next_insn(p, BRW_OPCODE_SEND);
2562
2563 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2564
2565 brw_set_dest(p, insn, dest);
2566 brw_set_src0(p, insn, src0);
2567 brw_set_src1(p, insn, brw_imm_d(0));
2568
2569 if (devinfo->gen < 6)
2570 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2571
2572 brw_set_urb_message(p,
2573 insn,
2574 flags,
2575 msg_length,
2576 response_length,
2577 offset,
2578 swizzle);
2579 }
2580
2581 void
2582 brw_send_indirect_message(struct brw_codegen *p,
2583 unsigned sfid,
2584 struct brw_reg dst,
2585 struct brw_reg payload,
2586 struct brw_reg desc,
2587 unsigned desc_imm,
2588 bool eot)
2589 {
2590 const struct gen_device_info *devinfo = p->devinfo;
2591 struct brw_inst *send;
2592
2593 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2594
2595 assert(desc.type == BRW_REGISTER_TYPE_UD);
2596
2597 if (desc.file == BRW_IMMEDIATE_VALUE) {
2598 send = next_insn(p, BRW_OPCODE_SEND);
2599 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2600 brw_set_desc(p, send, desc.ud | desc_imm);
2601 } else {
2602 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2603 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2604
2605 brw_push_insn_state(p);
2606 brw_set_default_access_mode(p, BRW_ALIGN_1);
2607 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2608 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2609 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2610 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2611
2612 /* Load the indirect descriptor to an address register using OR so the
2613 * caller can specify additional descriptor bits with the desc_imm
2614 * immediate.
2615 */
2616 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2617
2618 brw_pop_insn_state(p);
2619
2620 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2621 send = next_insn(p, BRW_OPCODE_SEND);
2622 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2623
2624 if (devinfo->gen >= 12)
2625 brw_inst_set_send_sel_reg32_desc(devinfo, send, true);
2626 else
2627 brw_set_src1(p, send, addr);
2628 }
2629
2630 brw_set_dest(p, send, dst);
2631 brw_inst_set_sfid(devinfo, send, sfid);
2632 brw_inst_set_eot(devinfo, send, eot);
2633 }
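/* With an immediate descriptor this helper degenerates to a plain SEND, so
 * callers can use it unconditionally (a sketch; dst, payload and desc_imm
 * stand for the caller's registers and descriptor bits):
 *
 *    brw_send_indirect_message(p, BRW_SFID_SAMPLER, dst, payload,
 *                              brw_imm_ud(0), desc_imm, false);
 */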
2634
2635 void
2636 brw_send_indirect_split_message(struct brw_codegen *p,
2637 unsigned sfid,
2638 struct brw_reg dst,
2639 struct brw_reg payload0,
2640 struct brw_reg payload1,
2641 struct brw_reg desc,
2642 unsigned desc_imm,
2643 struct brw_reg ex_desc,
2644 unsigned ex_desc_imm,
2645 bool eot)
2646 {
2647 const struct gen_device_info *devinfo = p->devinfo;
2648 struct brw_inst *send;
2649
2650 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2651
2652 assert(desc.type == BRW_REGISTER_TYPE_UD);
2653
2654 if (desc.file == BRW_IMMEDIATE_VALUE) {
2655 desc.ud |= desc_imm;
2656 } else {
2657 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2658 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2659
2660 brw_push_insn_state(p);
2661 brw_set_default_access_mode(p, BRW_ALIGN_1);
2662 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2663 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2664 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2665 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2666
2667 /* Load the indirect descriptor to an address register using OR so the
2668 * caller can specify additional descriptor bits with the desc_imm
2669 * immediate.
2670 */
2671 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2672
2673 brw_pop_insn_state(p);
2674 desc = addr;
2675
2676 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2677 }
2678
2679 if (ex_desc.file == BRW_IMMEDIATE_VALUE &&
2680 (ex_desc.ud & INTEL_MASK(15, 12)) == 0) {
2681 ex_desc.ud |= ex_desc_imm;
2682 } else {
2683 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2684 struct brw_reg addr = retype(brw_address_reg(2), BRW_REGISTER_TYPE_UD);
2685
2686 brw_push_insn_state(p);
2687 brw_set_default_access_mode(p, BRW_ALIGN_1);
2688 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2689 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2690 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2691 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2692
2693 /* Load the indirect extended descriptor to an address register using OR
2694 * so the caller can specify additional descriptor bits with the
2695 * ex_desc_imm immediate.
2696 *
2697 * Even though the instruction dispatcher always pulls the SFID and EOT
2698 * fields from the instruction itself, the actual external unit that
2699 * processes the message gets the SFID and EOT from the extended
2700 * descriptor, which comes from the address register. If we don't OR
2701 * those two fields in, the external unit may get confused and hang.
2702 */
2703 unsigned imm_part = ex_desc_imm | sfid | eot << 5;
2704
2705 if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
2706 /* ex_desc bits 15:12 don't exist in the instruction encoding, so
2707 * we may have fallen back to an indirect extended descriptor.
2708 */
2709 brw_MOV(p, addr, brw_imm_ud(ex_desc.ud | imm_part));
2710 } else {
2711 brw_OR(p, addr, ex_desc, brw_imm_ud(imm_part));
2712 }
2713
2714 brw_pop_insn_state(p);
2715 ex_desc = addr;
2716
2717 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2718 }
2719
2720 send = next_insn(p, devinfo->gen >= 12 ? BRW_OPCODE_SEND : BRW_OPCODE_SENDS);
2721 brw_set_dest(p, send, dst);
2722 brw_set_src0(p, send, retype(payload0, BRW_REGISTER_TYPE_UD));
2723 brw_set_src1(p, send, retype(payload1, BRW_REGISTER_TYPE_UD));
2724
2725 if (desc.file == BRW_IMMEDIATE_VALUE) {
2726 brw_inst_set_send_sel_reg32_desc(devinfo, send, 0);
2727 brw_inst_set_send_desc(devinfo, send, desc.ud);
2728 } else {
2729 assert(desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
2730 assert(desc.nr == BRW_ARF_ADDRESS);
2731 assert(desc.subnr == 0);
2732 brw_inst_set_send_sel_reg32_desc(devinfo, send, 1);
2733 }
2734
2735 if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
2736 brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 0);
2737 brw_inst_set_sends_ex_desc(devinfo, send, ex_desc.ud);
2738 } else {
2739 assert(ex_desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
2740 assert(ex_desc.nr == BRW_ARF_ADDRESS);
2741 assert((ex_desc.subnr & 0x3) == 0);
2742 brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 1);
2743 brw_inst_set_send_ex_desc_ia_subreg_nr(devinfo, send, ex_desc.subnr >> 2);
2744 }
2745
2746 brw_inst_set_sfid(devinfo, send, sfid);
2747 brw_inst_set_eot(devinfo, send, eot);
2748 }
2749
2750 static void
2751 brw_send_indirect_surface_message(struct brw_codegen *p,
2752 unsigned sfid,
2753 struct brw_reg dst,
2754 struct brw_reg payload,
2755 struct brw_reg surface,
2756 unsigned desc_imm)
2757 {
2758 if (surface.file != BRW_IMMEDIATE_VALUE) {
2759 const struct tgl_swsb swsb = brw_get_default_swsb(p);
2760 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2761
2762 brw_push_insn_state(p);
2763 brw_set_default_access_mode(p, BRW_ALIGN_1);
2764 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2765 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2766 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2767 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
2768
2769 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2770 * some surface array is accessed out of bounds.
2771 */
2772 brw_AND(p, addr,
2773 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2774 BRW_GET_SWZ(surface.swizzle, 0)),
2775 brw_imm_ud(0xff));
2776
2777 brw_pop_insn_state(p);
2778
2779 surface = addr;
2780 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
2781 }
2782
2783 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm, false);
2784 }
2785
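/* Whether the backwards jump of the given WHILE reaches at or before
 * start_offset. Example: on Gen7 (br == 2, so scale == 8 bytes) a JIP of
 * -4 jumps 32 bytes back and encloses any start_offset >= while_offset - 32.
 */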
2786 static bool
2787 while_jumps_before_offset(const struct gen_device_info *devinfo,
2788 brw_inst *insn, int while_offset, int start_offset)
2789 {
2790 int scale = 16 / brw_jump_scale(devinfo);
2791 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2792 : brw_inst_jip(devinfo, insn);
2793 assert(jip < 0);
2794 return while_offset + jip * scale <= start_offset;
2795 }
2796
2797
2798 static int
2799 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2800 {
2801 int offset;
2802 void *store = p->store;
2803 const struct gen_device_info *devinfo = p->devinfo;
2804
2805 int depth = 0;
2806
2807 for (offset = next_offset(devinfo, store, start_offset);
2808 offset < p->next_insn_offset;
2809 offset = next_offset(devinfo, store, offset)) {
2810 brw_inst *insn = store + offset;
2811
2812 switch (brw_inst_opcode(devinfo, insn)) {
2813 case BRW_OPCODE_IF:
2814 depth++;
2815 break;
2816 case BRW_OPCODE_ENDIF:
2817 if (depth == 0)
2818 return offset;
2819 depth--;
2820 break;
2821 case BRW_OPCODE_WHILE:
2822 /* If the while doesn't jump before our instruction, it's the end
2823 * of a sibling do...while loop. Ignore it.
2824 */
2825 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2826 continue;
2827 /* fallthrough */
2828 case BRW_OPCODE_ELSE:
2829 case BRW_OPCODE_HALT:
2830 if (depth == 0)
2831 return offset;
2832 default:
2833 break;
2834 }
2835 }
2836
2837 return 0;
2838 }
2839
2840 /* There is no DO instruction on gen6 and later, so to find the end of
2841 * the loop we have to check whether the WHILE jumps back before our
2842 * start instruction.
2843 */
2844 static int
2845 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2846 {
2847 const struct gen_device_info *devinfo = p->devinfo;
2848 int offset;
2849 void *store = p->store;
2850
2851 assert(devinfo->gen >= 6);
2852
2853 /* Always start after the instruction (such as a WHILE) we're trying to fix
2854 * up.
2855 */
2856 for (offset = next_offset(devinfo, store, start_offset);
2857 offset < p->next_insn_offset;
2858 offset = next_offset(devinfo, store, offset)) {
2859 brw_inst *insn = store + offset;
2860
2861 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2862 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2863 return offset;
2864 }
2865 }
2866 assert(!"not reached");
2867 return start_offset;
2868 }
2869
2870 /* After program generation, go back and update the UIP and JIP of
2871 * BREAK, CONT, and HALT (and the JIP of ENDIF) to their correct locations.
2872 */
2873 void
2874 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2875 {
2876 const struct gen_device_info *devinfo = p->devinfo;
2877 int offset;
2878 int br = brw_jump_scale(devinfo);
2879 int scale = 16 / br;
2880 void *store = p->store;
2881
2882 if (devinfo->gen < 6)
2883 return;
2884
2885 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2886 brw_inst *insn = store + offset;
2887 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2888
2889 int block_end_offset = brw_find_next_block_end(p, offset);
2890 switch (brw_inst_opcode(devinfo, insn)) {
2891 case BRW_OPCODE_BREAK:
2892 assert(block_end_offset != 0);
2893 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2894 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2895 brw_inst_set_uip(devinfo, insn,
2896 (brw_find_loop_end(p, offset) - offset +
2897 (devinfo->gen == 6 ? 16 : 0)) / scale);
2898 break;
2899 case BRW_OPCODE_CONTINUE:
2900 assert(block_end_offset != 0);
2901 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2902 brw_inst_set_uip(devinfo, insn,
2903 (brw_find_loop_end(p, offset) - offset) / scale);
2904
2905 assert(brw_inst_uip(devinfo, insn) != 0);
2906 assert(brw_inst_jip(devinfo, insn) != 0);
2907 break;
2908
2909 case BRW_OPCODE_ENDIF: {
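/* An ENDIF with no enclosing block end jumps to the next instruction,
 * a distance of one instruction, i.e. br jump units.
 */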
2910 int32_t jump = (block_end_offset == 0) ?
2911 1 * br : (block_end_offset - offset) / scale;
2912 if (devinfo->gen >= 7)
2913 brw_inst_set_jip(devinfo, insn, jump);
2914 else
2915 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2916 break;
2917 }
2918
2919 case BRW_OPCODE_HALT:
2920 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2921 *
2922 * "In case of the halt instruction not inside any conditional
2923 * code block, the value of <JIP> and <UIP> should be the
2924 * same. In case of the halt instruction inside conditional code
2925 * block, the <UIP> should be the end of the program, and the
2926 * <JIP> should be end of the most inner conditional code block."
2927 *
2928 * The uip will have already been set by whoever set up the
2929 * instruction.
2930 */
2931 if (block_end_offset == 0) {
2932 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2933 } else {
2934 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2935 }
2936 assert(brw_inst_uip(devinfo, insn) != 0);
2937 assert(brw_inst_jip(devinfo, insn) != 0);
2938 break;
2939
2940 default:
2941 break;
2942 }
2943 }
2944 }
2945
2946 void brw_ff_sync(struct brw_codegen *p,
2947 struct brw_reg dest,
2948 unsigned msg_reg_nr,
2949 struct brw_reg src0,
2950 bool allocate,
2951 unsigned response_length,
2952 bool eot)
2953 {
2954 const struct gen_device_info *devinfo = p->devinfo;
2955 brw_inst *insn;
2956
2957 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2958
2959 insn = next_insn(p, BRW_OPCODE_SEND);
2960 brw_set_dest(p, insn, dest);
2961 brw_set_src0(p, insn, src0);
2962 brw_set_src1(p, insn, brw_imm_d(0));
2963
2964 if (devinfo->gen < 6)
2965 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2966
2967 brw_set_ff_sync_message(p,
2968 insn,
2969 allocate,
2970 response_length,
2971 eot);
2972 }
2973
2974 /**
2975 * Emit the SEND instruction necessary to generate stream output data on Gen6
2976 * (for transform feedback).
2977 *
2978 * If send_commit_msg is true, this is the last piece of stream output data
2979 * from this thread, so send the data as a committed write. According to the
2980 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2981 *
2982 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2983 * writes are complete by sending the final write as a committed write."
2984 */
2985 void
2986 brw_svb_write(struct brw_codegen *p,
2987 struct brw_reg dest,
2988 unsigned msg_reg_nr,
2989 struct brw_reg src0,
2990 unsigned binding_table_index,
2991 bool send_commit_msg)
2992 {
2993 const struct gen_device_info *devinfo = p->devinfo;
2994 const unsigned target_cache =
2995 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2996 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2997 BRW_SFID_DATAPORT_WRITE);
2998 brw_inst *insn;
2999
3000 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
3001
3002 insn = next_insn(p, BRW_OPCODE_SEND);
3003 brw_inst_set_sfid(devinfo, insn, target_cache);
3004 brw_set_dest(p, insn, dest);
3005 brw_set_src0(p, insn, src0);
3006 brw_set_desc(p, insn,
3007 brw_message_desc(devinfo, 1, send_commit_msg, true) |
3008 brw_dp_write_desc(devinfo, binding_table_index,
3009 0, /* msg_control: ignored */
3010 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
3011 0, /* last_render_target: ignored */
3012 send_commit_msg)); /* send_commit_msg */
3013 }
3014
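/* Number of payload or response GRFs for a surface message: one GRF for
 * SIMD4x2, one per channel up to SIMD8 and two per channel for SIMD16
 * (e.g. a SIMD16 read of four channels occupies eight GRFs).
 */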
3015 static unsigned
3016 brw_surface_payload_size(struct brw_codegen *p,
3017 unsigned num_channels,
3018 unsigned exec_size /**< 0 for SIMD4x2 */)
3019 {
3020 if (exec_size == 0)
3021 return 1; /* SIMD4x2 */
3022 else if (exec_size <= 8)
3023 return num_channels;
3024 else
3025 return 2 * num_channels;
3026 }
3027
3028 void
3029 brw_untyped_atomic(struct brw_codegen *p,
3030 struct brw_reg dst,
3031 struct brw_reg payload,
3032 struct brw_reg surface,
3033 unsigned atomic_op,
3034 unsigned msg_length,
3035 bool response_expected,
3036 bool header_present)
3037 {
3038 const struct gen_device_info *devinfo = p->devinfo;
3039 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3040 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3041 GEN7_SFID_DATAPORT_DATA_CACHE);
3042 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3043 /* SIMD4x2 untyped atomic instructions only exist on HSW+ */
3044 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3045 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3046 has_simd4x2 ? 0 : 8;
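/* An atomic can return at most a single channel (the old value), so
 * response_expected doubles as the channel count here.
 */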
3047 const unsigned response_length =
3048 brw_surface_payload_size(p, response_expected, exec_size);
3049 const unsigned desc =
3050 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3051 brw_dp_untyped_atomic_desc(devinfo, exec_size, atomic_op,
3052 response_expected);
3053 /* Mask out unused components -- This is especially important in Align16
3054 * mode on generations that don't have native support for SIMD4x2 atomics,
3055 * because unused but enabled components will cause the dataport to perform
3056 * additional atomic operations on the addresses that happen to be in the
3057 * uninitialized Y, Z and W coordinates of the payload.
3058 */
3059 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3060
3061 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3062 payload, surface, desc);
3063 }
3064
3065 void
3066 brw_untyped_surface_read(struct brw_codegen *p,
3067 struct brw_reg dst,
3068 struct brw_reg payload,
3069 struct brw_reg surface,
3070 unsigned msg_length,
3071 unsigned num_channels)
3072 {
3073 const struct gen_device_info *devinfo = p->devinfo;
3074 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3075 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3076 GEN7_SFID_DATAPORT_DATA_CACHE);
3077 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3078 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) : 0;
3079 const unsigned response_length =
3080 brw_surface_payload_size(p, num_channels, exec_size);
3081 const unsigned desc =
3082 brw_message_desc(devinfo, msg_length, response_length, false) |
3083 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, false);
3084
3085 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3086 }
3087
3088 void
3089 brw_untyped_surface_write(struct brw_codegen *p,
3090 struct brw_reg payload,
3091 struct brw_reg surface,
3092 unsigned msg_length,
3093 unsigned num_channels,
3094 bool header_present)
3095 {
3096 const struct gen_device_info *devinfo = p->devinfo;
3097 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3098 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3099 GEN7_SFID_DATAPORT_DATA_CACHE);
3100 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3101 /* SIMD4x2 untyped surface write instructions only exist on HSW+ */
3102 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3103 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3104 has_simd4x2 ? 0 : 8;
3105 const unsigned desc =
3106 brw_message_desc(devinfo, msg_length, 0, header_present) |
3107 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, true);
3108 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3109 const unsigned mask = !has_simd4x2 && !align1 ? WRITEMASK_X : WRITEMASK_XYZW;
3110
3111 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3112 payload, surface, desc);
3113 }
3114
3115 static void
3116 brw_set_memory_fence_message(struct brw_codegen *p,
3117 struct brw_inst *insn,
3118 enum brw_message_target sfid,
3119 bool commit_enable,
3120 unsigned bti)
3121 {
3122 const struct gen_device_info *devinfo = p->devinfo;
3123
3124 brw_set_desc(p, insn, brw_message_desc(
3125 devinfo, 1, (commit_enable ? 1 : 0), true));
3126
3127 brw_inst_set_sfid(devinfo, insn, sfid);
3128
3129 switch (sfid) {
3130 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3131 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3132 break;
3133 case GEN7_SFID_DATAPORT_DATA_CACHE:
3134 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3135 break;
3136 default:
3137 unreachable("Not reached");
3138 }
3139
3140 if (commit_enable)
3141 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3142
3143 assert(devinfo->gen >= 11 || bti == 0);
3144 brw_inst_set_binding_table_index(devinfo, insn, bti);
3145 }
3146
3147 void
3148 brw_memory_fence(struct brw_codegen *p,
3149 struct brw_reg dst,
3150 struct brw_reg src,
3151 enum opcode send_op,
3152 bool stall,
3153 unsigned bti)
3154 {
3155 const struct gen_device_info *devinfo = p->devinfo;
3156 const bool commit_enable = stall ||
3157 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3158 (devinfo->gen == 7 && !devinfo->is_haswell);
3159 struct brw_inst *insn;
3160
3161 brw_push_insn_state(p);
3162 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3163 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3164 dst = retype(vec1(dst), BRW_REGISTER_TYPE_UW);
3165 src = retype(vec1(src), BRW_REGISTER_TYPE_UD);
3166
3167 /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3168 * message doesn't write anything back.
3169 */
3170 insn = next_insn(p, send_op);
3171 brw_set_dest(p, insn, dst);
3172 brw_set_src0(p, insn, src);
3173 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3174 commit_enable, bti);
3175
3176 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3177 /* IVB does typed surface access through the render cache, so we need to
3178 * flush it too. Use a different register so both flushes can be
3179 * pipelined by the hardware.
3180 */
3181 insn = next_insn(p, send_op);
3182 brw_set_dest(p, insn, offset(dst, 1));
3183 brw_set_src0(p, insn, src);
3184 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3185 commit_enable, bti);
3186
3187 /* Now write the response of the second message into the response of the
3188 * first to trigger a pipeline stall -- This way future render and data
3189 * cache messages will be properly ordered with respect to past data and
3190 * render cache messages.
3191 */
3192 brw_MOV(p, dst, offset(dst, 1));
3193 }
3194
3195 if (stall) {
3196 brw_set_default_swsb(p, tgl_swsb_sbid(TGL_SBID_DST,
3197 brw_get_default_swsb(p).sbid));
3198
3199 brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW), dst);
3200 }
3201
3202 brw_pop_insn_state(p);
3203 }
3204
3205 void
3206 brw_pixel_interpolator_query(struct brw_codegen *p,
3207 struct brw_reg dest,
3208 struct brw_reg mrf,
3209 bool noperspective,
3210 unsigned mode,
3211 struct brw_reg data,
3212 unsigned msg_length,
3213 unsigned response_length)
3214 {
3215 const struct gen_device_info *devinfo = p->devinfo;
3216 const uint16_t exec_size = brw_get_default_exec_size(p);
3217 const unsigned slot_group = brw_get_default_group(p) / 16;
3218 const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
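/* Assumed reading of the descriptor helpers: simd_mode selects between
* the SIMD8 and SIMD16 forms of the pixel interpolator message, and
* slot_group picks which group of 16 channels the query applies to.
*/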
3219 const unsigned desc =
3220 brw_message_desc(devinfo, msg_length, response_length, false) |
3221 brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
3222 slot_group);
3223
3224 /* brw_send_indirect_message will automatically use a direct send message
3225 * if data is actually an immediate.
3226 */
3227 brw_send_indirect_message(p,
3228 GEN7_SFID_PIXEL_INTERPOLATOR,
3229 dest,
3230 mrf,
3231 vec1(data),
3232 desc,
3233 false);
3234 }
3235
3236 void
3237 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3238 struct brw_reg mask)
3239 {
3240 const struct gen_device_info *devinfo = p->devinfo;
3241 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
3242 const unsigned qtr_control = brw_get_default_group(p) / 8;
3243 brw_inst *inst;
3244
3245 assert(devinfo->gen >= 7);
3246 assert(mask.type == BRW_REGISTER_TYPE_UD);
3247
3248 brw_push_insn_state(p);
3249
3250 /* The flag register is only used on Gen7 in align1 mode, so to avoid
3251 * setting unnecessary bits in the instruction words, get the information
3252 * we need and reset the default flag register. This allows more
3253 * instructions to be compacted.
3254 */
3255 const unsigned flag_subreg = p->current->flag_subreg;
3256 brw_set_default_flag_reg(p, 0, 0);
3257
3258 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3259 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3260
3261 if (devinfo->gen >= 8) {
3262 /* Getting the first active channel index is easy on Gen8: Just find
3263 * the first bit set in the execution mask. The register exists on
3264 * HSW already but it reads back as all ones when the current
3265 * instruction has execution masking disabled, so it's kind of
3266 * useless.
3267 */
3268 struct brw_reg exec_mask =
3269 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3270
3271 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3272 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3273 /* Unfortunately, ce0 does not take into account the thread
3274 * dispatch mask, which may be a problem in cases where it's not
3275 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3276 * some n). Combine ce0 with the given dispatch (or vector) mask
3277 * to mask off those channels which were never dispatched by the
3278 * hardware.
3279 */
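/* Illustrative example: a dispatch mask of 0xffff (2^16 - 1) is tightly
* packed, but a mask like 0x00f3 is not -- channels 2 and 3 were never
* dispatched, yet ce0 may still report them as enabled, so the AND below
* clears them out.
*/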
3280 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3281 brw_set_default_swsb(p, tgl_swsb_regdist(1));
3282 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3283 exec_mask = vec1(dst);
3284 }
3285
3286 /* Quarter control has the effect of magically shifting the value of
3287 * ce0 so you'll get the first active channel relative to the
3288 * specified quarter control as a result.
3289 */
3290 inst = brw_FBL(p, vec1(dst), exec_mask);
3291 } else {
3292 const struct brw_reg flag = brw_flag_subreg(flag_subreg);
3293
3294 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3295 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3296
3297 /* Run enough instructions returning zero with execution masking and
3298 * a conditional modifier enabled in order to get the full execution
3299 * mask in the flag register. We could use a single 32-wide move here
3300 * if it weren't for the hardware bug that causes channel enables to
3301 * be applied incorrectly to the second half of 32-wide instructions
3302 * on Gen7.
3303 */
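/* For example, with exec_size == 32 the loop below emits two 16-wide
* MOVs, one per half, which together update the full execution mask in
* the flag register.
*/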
3304 const unsigned lower_size = MIN2(16, exec_size);
3305 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3306 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3307 brw_imm_uw(0));
3308 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3309 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3310 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3311 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3312 brw_inst_set_flag_reg_nr(devinfo, inst, flag_subreg / 2);
3313 brw_inst_set_flag_subreg_nr(devinfo, inst, flag_subreg % 2);
3314 }
3315
3316 /* Find the first bit set in the exec_size-wide portion of the flag
3317 * register that was updated by the last sequence of MOV
3318 * instructions.
3319 */
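/* brw_int_type(exec_size / 8, false) yields UB, UW or UD for 8-, 16- or
* 32-wide execution respectively, so the FBL below scans exactly
* exec_size bits of the flag register.
*/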
3320 const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
3321 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3322 brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
3323 }
3324 } else {
3325 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3326
3327 if (devinfo->gen >= 8 &&
3328 mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
3329 /* In SIMD4x2 mode the first active channel index is just the
3330 * negation of the first bit of the mask register. Note that ce0
3331 * doesn't take into account the dispatch mask, so the Gen7 path
3332 * should be used instead unless you have the guarantee that the
3333 * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
3334 * for some n).
3335 */
3336 inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
3337 negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
3338 brw_imm_ud(1));
3339
3340 } else {
3341 /* Overwrite the destination first without and then with execution
3342 * masking in order to find out which of the channels is active.
3343 */
3344 brw_push_insn_state(p);
3345 brw_set_default_exec_size(p, BRW_EXECUTE_4);
3346 brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3347 brw_imm_ud(1));
3348
3349 inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3350 brw_imm_ud(0));
3351 brw_pop_insn_state(p);
3352 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3353 }
3354 }
3355
3356 brw_pop_insn_state(p);
3357 }
3358
3359 void
3360 brw_broadcast(struct brw_codegen *p,
3361 struct brw_reg dst,
3362 struct brw_reg src,
3363 struct brw_reg idx)
3364 {
3365 const struct gen_device_info *devinfo = p->devinfo;
3366 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3367 brw_inst *inst;
3368
3369 brw_push_insn_state(p);
3370 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3371 brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);
3372
3373 assert(src.file == BRW_GENERAL_REGISTER_FILE &&
3374 src.address_mode == BRW_ADDRESS_DIRECT);
3375 assert(!src.abs && !src.negate);
3376 assert(src.type == dst.type);
3377
3378 if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
3379 idx.file == BRW_IMMEDIATE_VALUE) {
3380 /* Trivial, the source is already uniform or the index is a constant.
3381 * We will typically not get here if the optimizer is doing its job, but
3382 * asserting would be mean.
3383 */
3384 const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
3385 brw_MOV(p, dst,
3386 (align1 ? stride(suboffset(src, i), 0, 1, 0) :
3387 stride(suboffset(src, 4 * i), 0, 4, 1)));
3388 } else {
3389 /* From the Haswell PRM section "Register Region Restrictions":
3390 *
3391 * "The lower bits of the AddressImmediate must not overflow to
3392 * change the register address. The lower 5 bits of Address
3393 * Immediate when added to lower 5 bits of address register gives
3394 * the sub-register offset. The upper bits of Address Immediate
3395 * when added to upper bits of address register gives the register
3396 * address. Any overflow from sub-register offset is dropped."
3397 *
3398 * Fortunately, for broadcast, we never have a sub-register offset so
3399 * this isn't an issue.
3400 */
3401 assert(src.subnr == 0);
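/* Registers are REG_SIZE (32) bytes, so the low 5 bits of the computed
* address select a byte within a register and the remaining bits select
* the register itself, matching the split described in the quote above.
*/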
3402
3403 if (align1) {
3404 const struct brw_reg addr =
3405 retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
3406 unsigned offset = src.nr * REG_SIZE + src.subnr;
3407 /* Limit in bytes of the signed indirect addressing immediate. */
3408 const unsigned limit = 512;
3409
3410 brw_push_insn_state(p);
3411 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3412 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
3413
3414 /* Take into account the component size and horizontal stride. */
3415 assert(src.vstride == src.hstride + src.width);
3416 brw_SHL(p, addr, vec1(idx),
3417 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
3418 src.hstride - 1));
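/* Worked example: for a 4-byte component with an encoded hstride of 1
* (elements packed contiguously), the shift is log2(4) + 1 - 1 = 2, so
* channel index i becomes byte offset i * 4.
*/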
3419
3420 /* We can only address up to limit bytes using the indirect
3421 * addressing immediate; account for the difference if the source
3422 * register is above this limit.
3423 */
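/* For example, a source at byte offset 600 becomes addr += 512 plus a
* residual immediate offset of 600 % 512 = 88.
*/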
3424 if (offset >= limit) {
3425 brw_set_default_swsb(p, tgl_swsb_regdist(1));
3426 brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
3427 offset = offset % limit;
3428 }
3429
3430 brw_pop_insn_state(p);
3431
3432 brw_set_default_swsb(p, tgl_swsb_regdist(1));
3433
3434 /* Use indirect addressing to fetch the specified component. */
3435 if (type_sz(src.type) > 4 &&
3436 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
3437 /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
3438 *
3439 * "When source or destination datatype is 64b or operation is
3440 * integer DWord multiply, indirect addressing must not be
3441 * used."
3442 *
3443 * To work around this issue, we do two integer MOVs
3444 * instead of one 64-bit MOV. Because no double value should ever
3445 * cross a register boundary, it's safe to use the immediate
3446 * offset in the indirect here to handle adding 4 bytes to the
3447 * offset and avoid the extra ADD to the register file.
3448 */
3449 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
3450 retype(brw_vec1_indirect(addr.subnr, offset),
3451 BRW_REGISTER_TYPE_D));
3452 brw_set_default_swsb(p, tgl_swsb_null());
3453 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
3454 retype(brw_vec1_indirect(addr.subnr, offset + 4),
3455 BRW_REGISTER_TYPE_D));
3456 } else {
3457 brw_MOV(p, dst,
3458 retype(brw_vec1_indirect(addr.subnr, offset), src.type));
3459 }
3460 } else {
3461 /* In SIMD4x2 mode the index can be either zero or one; replicate it
3462 * to all bits of a flag register,
3463 */
3464 inst = brw_MOV(p,
3465 brw_null_reg(),
3466 stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
3467 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
3468 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
3469 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3470
3471 /* and use predicated SEL to pick the right channel. */
3472 inst = brw_SEL(p, dst,
3473 stride(suboffset(src, 4), 4, 4, 1),
3474 stride(src, 4, 4, 1));
3475 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
3476 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
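/* With idx == 1 every flag bit ends up set and the SEL picks
* suboffset(src, 4), i.e. the second SIMD4x2 channel; with idx == 0 the
* predicate is false everywhere and src itself is chosen.
*/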
3477 }
3478 }
3479
3480 brw_pop_insn_state(p);
3481 }
3482
3483 /**
3484 * This instruction is generated as a single-channel align1 instruction by
3485 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
3486 *
3487 * We can't use the typed atomic op in the FS because that has the execution
3488 * mask ANDed with the pixel mask, but we just want to write the one dword for
3489 * all the pixels.
3490 *
3491 * We don't use the SIMD4x2 atomic ops in the VS because we want to just write
3492 * one u32. So we use the same untyped atomic write message as the pixel
3493 * shader.
3494 *
3495 * The untyped atomic operation requires a BUFFER surface type with RAW
3496 * format, and is only accessible through the legacy DATA_CACHE dataport
3497 * messages.
3498 */
3499 void brw_shader_time_add(struct brw_codegen *p,
3500 struct brw_reg payload,
3501 uint32_t surf_index)
3502 {
3503 const struct gen_device_info *devinfo = p->devinfo;
3504 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3505 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3506 GEN7_SFID_DATAPORT_DATA_CACHE);
3507 assert(devinfo->gen >= 7);
3508
3509 brw_push_insn_state(p);
3510 brw_set_default_access_mode(p, BRW_ALIGN_1);
3511 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3512 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
3513 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
3514
3515 /* We use brw_vec1_reg and unmasked because we want to increment the given
3516 * offset only once.
3517 */
3518 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
3519 BRW_ARF_NULL, 0));
3520 brw_set_src0(p, send, brw_vec1_reg(payload.file,
3521 payload.nr, 0));
3522 brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
3523 brw_dp_untyped_atomic_desc(devinfo, 1, BRW_AOP_ADD,
3524 false)));
3525
3526 brw_inst_set_sfid(devinfo, send, sfid);
3527 brw_inst_set_binding_table_index(devinfo, send, surf_index);
3528
3529 brw_pop_insn_state(p);
3530 }
3531
3532
3533 /**
3534 * Emit the SEND message for a barrier
3535 */
3536 void
3537 brw_barrier(struct brw_codegen *p, struct brw_reg src)
3538 {
3539 const struct gen_device_info *devinfo = p->devinfo;
3540 struct brw_inst *inst;
3541
3542 assert(devinfo->gen >= 7);
3543
3544 brw_push_insn_state(p);
3545 brw_set_default_access_mode(p, BRW_ALIGN_1);
3546 inst = next_insn(p, BRW_OPCODE_SEND);
3547 brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
3548 brw_set_src0(p, inst, src);
3549 brw_set_src1(p, inst, brw_null_reg());
3550 brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));
3551
3552 brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
3553 brw_inst_set_gateway_subfuncid(devinfo, inst,
3554 BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);
3555
3556 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
3557 brw_pop_insn_state(p);
3558 }
3559
3560
3561 /**
3562 * Emit the wait instruction for a barrier
3563 */
3564 void
3565 brw_WAIT(struct brw_codegen *p)
3566 {
3567 const struct gen_device_info *devinfo = p->devinfo;
3568 struct brw_inst *insn;
3569
3570 struct brw_reg src = brw_notification_reg();
3571
3572 insn = next_insn(p, BRW_OPCODE_WAIT);
3573 brw_set_dest(p, insn, src);
3574 brw_set_src0(p, insn, src);
3575 brw_set_src1(p, insn, brw_null_reg());
3576
3577 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3578 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3579 }
3580
3581 void
3582 brw_float_controls_mode(struct brw_codegen *p,
3583 unsigned mode, unsigned mask)
3584 {
3585 /* From the Skylake PRM, Volume 7, page 760:
3586 * "Implementation Restriction on Register Access: When the control
3587 * register is used as an explicit source and/or destination, hardware
3588 * does not ensure execution pipeline coherency. Software must set the
3589 * thread control field to ‘switch’ for an instruction that uses
3590 * control register as an explicit operand."
3591 *
3592 * On Gen12+ this is implemented in terms of SWSB annotations instead.
3593 */
3594 brw_set_default_swsb(p, tgl_swsb_regdist(1));
3595
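/* Clear all the bits covered by the mask first, then OR in the new mode
* bits, so any cr0 control bits outside the mask keep their values.
*/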
3596 brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
3597 brw_imm_ud(~mask));
3598 brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
3599 if (p->devinfo->gen < 12)
3600 brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
3601
3602 if (mode) {
3603 brw_inst *inst_or = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
3604 brw_imm_ud(mode));
3605 brw_inst_set_exec_size(p->devinfo, inst_or, BRW_EXECUTE_1);
3606 if (p->devinfo->gen < 12)
3607 brw_inst_set_thread_control(p->devinfo, inst_or, BRW_THREAD_SWITCH);
3608 }
3609
3610 if (p->devinfo->gen >= 12)
3611 brw_SYNC(p, TGL_SYNC_NOP);
3612 }