/* mesa.git: src/intel/compiler/brw_eu_emit.c
 * (page header from import; associated commit: "Added a few more stubs so
 * that control reaches DestroyDevice().")
 */
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "brw_eu_defines.h"
34 #include "brw_eu.h"
35
36 #include "util/ralloc.h"
37
38 /**
39 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
40 * registers, implicitly moving the operand to a message register.
41 *
42 * On Sandybridge, this is no longer the case. This function performs the
43 * explicit move; it should be called before emitting a SEND instruction.
44 */
45 void
46 gen6_resolve_implied_move(struct brw_codegen *p,
47 struct brw_reg *src,
48 unsigned msg_reg_nr)
49 {
50 const struct gen_device_info *devinfo = p->devinfo;
51 if (devinfo->gen < 6)
52 return;
53
54 if (src->file == BRW_MESSAGE_REGISTER_FILE)
55 return;
56
57 if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
58 assert(devinfo->gen < 12);
59 brw_push_insn_state(p);
60 brw_set_default_exec_size(p, BRW_EXECUTE_8);
61 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
62 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
63 brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
64 retype(*src, BRW_REGISTER_TYPE_UD));
65 brw_pop_insn_state(p);
66 }
67 *src = brw_message_reg(msg_reg_nr);
68 }
69
70 static void
71 gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
72 {
73 /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
74 * "The send with EOT should use register space R112-R127 for <src>. This is
75 * to enable loading of a new thread into the same slot while the message
76 * with EOT for current thread is pending dispatch."
77 *
78 * Since we're pretending to have 16 MRFs anyway, we may as well use the
79 * registers required for messages with EOT.
80 */
81 const struct gen_device_info *devinfo = p->devinfo;
82 if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
83 reg->file = BRW_GENERAL_REGISTER_FILE;
84 reg->nr += GEN7_MRF_HACK_START;
85 }
86 }
87
/**
 * Encode the destination operand of \p inst.
 *
 * Three encoding layouts are handled: Gen12+ SEND/SENDC, pre-Gen12 split
 * sends (SENDS/SENDSC), and the common layout used by everything else.
 * When p->automatic_exec_sizes is set, this may also shrink the
 * instruction's execution size to match a small destination region.
 */
void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* Sanity-check the register number against the file's limits. */
   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file == BRW_GENERAL_REGISTER_FILE)
      assert(dest.nr < 128);

   /* The hardware has a restriction where a destination of size Byte with
    * a stride of 1 is only allowed for a packed byte MOV. For any other
    * instruction, the stride must be at least 2, even when the destination
    * is the NULL register.
    */
   if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       dest.nr == BRW_ARF_NULL &&
       type_sz(dest.type) == 1 &&
       dest.hstride == BRW_HORIZONTAL_STRIDE_1) {
      dest.hstride = BRW_HORIZONTAL_STRIDE_2;
   }

   gen7_convert_mrf_to_grf(p, &dest);

   if (devinfo->gen >= 12 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Gen12+ SEND(C) encodes only a direct GRF/ARF register number for
       * the destination -- no subregister, modifier or region fields --
       * so assert that the caller didn't ask for any of those.
       */
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(dest.address_mode == BRW_ADDRESS_DIRECT);
      assert(dest.subnr == 0);
      /* Either a scalar (exec size 1) or a contiguous region; the encoded
       * vstride of a contiguous region equals width + 1 (presumably the
       * log2-encoded width * hstride relation -- see the region encodings
       * in brw_eu_defines.h).
       */
      assert(brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1 ||
             (dest.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              dest.vstride == dest.width + 1));
      assert(!dest.negate && !dest.abs);
      brw_inst_set_dst_reg_file(devinfo, inst, dest.file);
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

   } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
              brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      /* Pre-Gen12 split sends: direct addressing only, with the
       * subregister encoded in 16-byte units.
       */
      assert(devinfo->gen < 12);
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(dest.address_mode == BRW_ADDRESS_DIRECT);
      assert(dest.subnr % 16 == 0);
      assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             dest.vstride == dest.width + 1);
      assert(!dest.negate && !dest.abs);
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_send_dst_reg_file(devinfo, inst, dest.file);
   } else {
      /* Ordinary instructions: full file/type/address-mode encoding. */
      brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
      brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

      if (dest.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
            /* A broadcast (zero) stride is not a valid destination
             * encoding; quietly promote it to a unit stride.
             */
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
            brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
            if (dest.file == BRW_GENERAL_REGISTER_FILE ||
                dest.file == BRW_MESSAGE_REGISTER_FILE) {
               assert(dest.writemask != 0);
            }
            /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
             * Although Dst.HorzStride is a don't care for Align16, HW needs
             * this to be programmed as "01".
             */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      } else {
         brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

         /* These are different sizes in align1 vs align16:
          */
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                          dest.indirect_offset);
            /* Same zero-stride fixup as the direct-addressing case. */
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                           dest.indirect_offset);
            /* even ignored in da16, still need to set as '01' */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct. However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
207
/**
 * Encode source operand 0 of \p inst.
 *
 * Like brw_set_dest(), this handles the Gen12+ SEND/SENDC layout, the
 * pre-Gen12 split-send (SENDS/SENDSC) layout, and the common layout
 * (including immediates and indirect addressing) used by everything else.
 * Note: brw_set_dest() may have shrunk the exec size; the scalar-region
 * special case below reads it, so call brw_set_dest() first.
 */
void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* Sanity-check the register number against the file's limits. */
   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   if (devinfo->gen >= 12 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Gen12+ SEND(C): only a register file and number are encoded. */
      assert(reg.file != BRW_IMMEDIATE_VALUE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr == 0);
      assert(has_scalar_region(reg) ||
             (reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              reg.vstride == reg.width + 1));
      assert(!reg.negate && !reg.abs);
      brw_inst_set_send_src0_reg_file(devinfo, inst, reg.file);
      brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);

   } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
              brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      /* Pre-Gen12 split sends: GRF only, subregister in 16-byte units. */
      assert(reg.file == BRW_GENERAL_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr % 16 == 0);
      assert(has_scalar_region(reg) ||
             (reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              reg.vstride == reg.width + 1));
      assert(!reg.negate && !reg.abs);
      brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
   } else {
      brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src0_abs(devinfo, inst, reg.abs);
      brw_inst_set_src0_negate(devinfo, inst, reg.negate);
      brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         /* Pick the immediate field matching the value's size/type;
          * DIM takes a double-float immediate.
          */
         if (reg.type == BRW_REGISTER_TYPE_DF ||
             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
            brw_inst_set_imm_df(devinfo, inst, reg.df);
         else if (reg.type == BRW_REGISTER_TYPE_UQ ||
                  reg.type == BRW_REGISTER_TYPE_Q)
            brw_inst_set_imm_uq(devinfo, inst, reg.u64);
         else
            brw_inst_set_imm_ud(devinfo, inst, reg.ud);

         /* For sub-8-byte immediates, pre-Gen12 also mirrors src0's HW
          * type into the src1 slots (presumably a hardware encoding
          * requirement -- see the PRM's immediate operand description).
          */
         if (devinfo->gen < 12 && type_sz(reg.type) < 8) {
            brw_inst_set_src1_reg_file(devinfo, inst,
                                       BRW_ARCHITECTURE_REGISTER_FILE);
            brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                          brw_inst_src0_reg_hw_type(devinfo, inst));
         }
      } else {
         if (reg.address_mode == BRW_ADDRESS_DIRECT) {
            brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
            } else {
               /* Align16 subregisters are in 16-byte units. */
               brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
            }
         } else {
            brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

            /* The address immediate fields differ between align1/align16. */
            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
            } else {
               brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
            }
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            /* A width-1 source of a scalar instruction gets the canonical
             * <0;1,0> scalar region regardless of the requested strides.
             */
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src0_width(devinfo, inst, reg.width);
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of the fact we're using the same
                * descriptions for registers in align_16 as align_1:
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                * "For Align16 access mode, only encodings of 0000 and 0011
                *  are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}
342
343
/**
 * Encode source operand 1 of \p inst.
 *
 * Split sends (and Gen12+ SEND/SENDC) use a dedicated register-only
 * layout; all other instructions use the common layout, which for src1
 * additionally allows a 32-bit immediate.
 */
void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC ||
       (devinfo->gen >= 12 &&
        (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
         brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))) {
      /* Split-send src1: a direct GRF/ARF register only -- no modifiers,
       * subregister, or region.
       */
      assert(reg.file == BRW_GENERAL_REGISTER_FILE ||
             reg.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr == 0);
      assert(has_scalar_region(reg) ||
             (reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
              reg.vstride == reg.width + 1));
      assert(!reg.negate && !reg.abs);
      brw_inst_set_send_src1_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_send_src1_reg_file(devinfo, inst, reg.file);
   } else {
      /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
       *
       *    "Accumulator registers may be accessed explicitly as src0
       *     operands only."
       */
      assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
             reg.nr != BRW_ARF_ACCUMULATOR);

      gen7_convert_mrf_to_grf(p, &reg);
      assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

      brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src1_abs(devinfo, inst, reg.abs);
      brw_inst_set_src1_negate(devinfo, inst, reg.negate);

      /* Only src1 can be immediate in two-argument instructions.
       */
      assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         /* two-argument instructions can only use 32-bit immediates */
         assert(type_sz(reg.type) < 8);
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);
      } else {
         /* This is a hardware restriction, which may or may not be lifted
          * in the future:
          */
         assert (reg.address_mode == BRW_ADDRESS_DIRECT);
         /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

         brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            /* Align16 subregisters are in 16-byte units. */
            brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            /* A width-1 source of a scalar instruction gets the canonical
             * <0;1,0> scalar region regardless of the requested strides.
             */
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src1_width(devinfo, inst, reg.width);
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of the fact we're using the same
                * descriptions for registers in align_16 as align_1:
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                * "For Align16 access mode, only encodings of 0000 and 0011
                *  are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}
449
450 /**
451 * Specify the descriptor and extended descriptor immediate for a SEND(C)
452 * message instruction.
453 */
454 void
455 brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
456 unsigned desc, unsigned ex_desc)
457 {
458 const struct gen_device_info *devinfo = p->devinfo;
459 assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
460 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
461 if (devinfo->gen < 12)
462 brw_inst_set_src1_file_type(devinfo, inst,
463 BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
464 brw_inst_set_send_desc(devinfo, inst, desc);
465 if (devinfo->gen >= 9)
466 brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
467 }
468
469 static void brw_set_math_message( struct brw_codegen *p,
470 brw_inst *inst,
471 unsigned function,
472 unsigned integer_type,
473 bool low_precision,
474 unsigned dataType )
475 {
476 const struct gen_device_info *devinfo = p->devinfo;
477 unsigned msg_length;
478 unsigned response_length;
479
480 /* Infer message length from the function */
481 switch (function) {
482 case BRW_MATH_FUNCTION_POW:
483 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
484 case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
485 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
486 msg_length = 2;
487 break;
488 default:
489 msg_length = 1;
490 break;
491 }
492
493 /* Infer response length from the function */
494 switch (function) {
495 case BRW_MATH_FUNCTION_SINCOS:
496 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
497 response_length = 2;
498 break;
499 default:
500 response_length = 1;
501 break;
502 }
503
504 brw_set_desc(p, inst, brw_message_desc(
505 devinfo, msg_length, response_length, false));
506
507 brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
508 brw_inst_set_math_msg_function(devinfo, inst, function);
509 brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
510 brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
511 brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
512 brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
513 brw_inst_set_saturate(devinfo, inst, 0);
514 }
515
516
/* Fill out the descriptor and URB fields for an FF_SYNC message (a
 * single-register URB message with a header).
 */
static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}
538
539 static void brw_set_urb_message( struct brw_codegen *p,
540 brw_inst *insn,
541 enum brw_urb_write_flags flags,
542 unsigned msg_length,
543 unsigned response_length,
544 unsigned offset,
545 unsigned swizzle_control )
546 {
547 const struct gen_device_info *devinfo = p->devinfo;
548
549 assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
550 assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
551 assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
552
553 brw_set_desc(p, insn, brw_message_desc(
554 devinfo, msg_length, response_length, true));
555
556 brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
557 brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));
558
559 if (flags & BRW_URB_WRITE_OWORD) {
560 assert(msg_length == 2); /* header + one OWORD of data */
561 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
562 } else {
563 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
564 }
565
566 brw_inst_set_urb_global_offset(devinfo, insn, offset);
567 brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);
568
569 if (devinfo->gen < 8) {
570 brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
571 }
572
573 if (devinfo->gen < 7) {
574 brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
575 brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
576 } else {
577 brw_inst_set_urb_per_slot_offset(devinfo, insn,
578 !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
579 }
580 }
581
582 static void
583 gen7_set_dp_scratch_message(struct brw_codegen *p,
584 brw_inst *inst,
585 bool write,
586 bool dword,
587 bool invalidate_after_read,
588 unsigned num_regs,
589 unsigned addr_offset,
590 unsigned mlen,
591 unsigned rlen,
592 bool header_present)
593 {
594 const struct gen_device_info *devinfo = p->devinfo;
595 assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
596 (devinfo->gen >= 8 && num_regs == 8));
597 const unsigned block_size = (devinfo->gen >= 8 ? util_logbase2(num_regs) :
598 num_regs - 1);
599
600 brw_set_desc(p, inst, brw_message_desc(
601 devinfo, mlen, rlen, header_present));
602
603 brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
604 brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
605 brw_inst_set_scratch_read_write(devinfo, inst, write);
606 brw_inst_set_scratch_type(devinfo, inst, dword);
607 brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
608 brw_inst_set_scratch_block_size(devinfo, inst, block_size);
609 brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
610 }
611
/* Copy the generator's default instruction state (exec size, masking,
 * predication, flag register, etc.) into a freshly emitted instruction.
 */
static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   /* Software scoreboard (SWSB) dependency info only exists on Gen12+. */
   if (devinfo->gen >= 12)
      brw_inst_set_swsb(devinfo, insn, tgl_swsb_encode(state->swsb));
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   /* flag_subreg packs reg and subreg together (reg = subreg / 2); 3-src
    * align16 instructions keep these in different bit positions than
    * everything else.
    */
   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}
642
643 static brw_inst *
644 brw_append_insns(struct brw_codegen *p, unsigned nr_insn, unsigned align)
645 {
646 assert(util_is_power_of_two_or_zero(sizeof(brw_inst)));
647 assert(util_is_power_of_two_or_zero(align));
648 const unsigned align_insn = MAX2(align / sizeof(brw_inst), 1);
649 const unsigned start_insn = ALIGN(p->nr_insn, align_insn);
650 const unsigned new_nr_insn = start_insn + nr_insn;
651
652 if (p->store_size < new_nr_insn) {
653 p->store_size = util_next_power_of_two(new_nr_insn * sizeof(brw_inst));
654 p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
655 }
656
657 /* Memset any padding due to alignment to 0. We don't want to be hashing
658 * or caching a bunch of random bits we got from a memory allocation.
659 */
660 if (p->nr_insn < start_insn) {
661 memset(&p->store[p->nr_insn], 0,
662 (start_insn - p->nr_insn) * sizeof(brw_inst));
663 }
664
665 assert(p->next_insn_offset == p->nr_insn * sizeof(brw_inst));
666 p->nr_insn = new_nr_insn;
667 p->next_insn_offset = new_nr_insn * sizeof(brw_inst);
668
669 return &p->store[start_insn];
670 }
671
/* Pad the program so that the next emitted instruction starts at the
 * given byte alignment (the padding slots are zero-filled by
 * brw_append_insns()).
 */
void
brw_realign(struct brw_codegen *p, unsigned align)
{
   brw_append_insns(p, 0, align);
}
677
678 int
679 brw_append_data(struct brw_codegen *p, void *data,
680 unsigned size, unsigned align)
681 {
682 unsigned nr_insn = DIV_ROUND_UP(size, sizeof(brw_inst));
683 void *dst = brw_append_insns(p, nr_insn, align);
684 memcpy(dst, data, size);
685
686 /* If it's not a whole number of instructions, memset the end */
687 if (size < nr_insn * sizeof(brw_inst))
688 memset(dst + size, 0, nr_insn * sizeof(brw_inst) - size);
689
690 return dst - (void *)p->store;
691 }
692
#define next_insn brw_next_insn
/**
 * Append one zero-initialized instruction to the program, set its opcode,
 * and stamp it with the generator's current default state (exec size,
 * predication, flag register, etc.).
 */
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = brw_append_insns(p, 1, sizeof(brw_inst));

   /* Clear before setting any fields: the store contents are undefined. */
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}
708
/* Emit a one-source ALU instruction.  brw_set_dest() must run before
 * brw_set_src0(): it may shrink the instruction's exec size (see
 * automatic_exec_sizes in brw_set_dest), which the source region setup
 * reads for its scalar-region special case.
 */
static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}
718
/* Emit a two-source ALU instruction.  As in brw_alu1(), brw_set_dest()
 * must run before the source setup because it may shrink the exec size.
 */
static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}
733
734 static int
735 get_3src_subreg_nr(struct brw_reg reg)
736 {
737 /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
738 * use 32-bit units (components 0..7). Since they only support F/D/UD
739 * types, this doesn't lose any flexibility, but uses fewer bits.
740 */
741 return reg.subnr / 4;
742 }
743
744 static enum gen10_align1_3src_vertical_stride
745 to_3src_align1_vstride(const struct gen_device_info *devinfo,
746 enum brw_vertical_stride vstride)
747 {
748 switch (vstride) {
749 case BRW_VERTICAL_STRIDE_0:
750 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
751 case BRW_VERTICAL_STRIDE_1:
752 assert(devinfo->gen >= 12);
753 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1;
754 case BRW_VERTICAL_STRIDE_2:
755 assert(devinfo->gen < 12);
756 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
757 case BRW_VERTICAL_STRIDE_4:
758 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
759 case BRW_VERTICAL_STRIDE_8:
760 case BRW_VERTICAL_STRIDE_16:
761 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
762 default:
763 unreachable("invalid vstride");
764 }
765 }
766
767
768 static enum gen10_align1_3src_src_horizontal_stride
769 to_3src_align1_hstride(enum brw_horizontal_stride hstride)
770 {
771 switch (hstride) {
772 case BRW_HORIZONTAL_STRIDE_0:
773 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
774 case BRW_HORIZONTAL_STRIDE_1:
775 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
776 case BRW_HORIZONTAL_STRIDE_2:
777 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
778 case BRW_HORIZONTAL_STRIDE_4:
779 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
780 default:
781 unreachable("invalid hstride");
782 }
783 }
784
785 static brw_inst *
786 brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
787 struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
788 {
789 const struct gen_device_info *devinfo = p->devinfo;
790 brw_inst *inst = next_insn(p, opcode);
791
792 gen7_convert_mrf_to_grf(p, &dest);
793
794 assert(dest.nr < 128);
795
796 if (devinfo->gen >= 10)
797 assert(!(src0.file == BRW_IMMEDIATE_VALUE &&
798 src2.file == BRW_IMMEDIATE_VALUE));
799
800 assert(src0.file == BRW_IMMEDIATE_VALUE || src0.nr < 128);
801 assert(src1.file != BRW_IMMEDIATE_VALUE && src1.nr < 128);
802 assert(src2.file == BRW_IMMEDIATE_VALUE || src2.nr < 128);
803 assert(dest.address_mode == BRW_ADDRESS_DIRECT);
804 assert(src0.address_mode == BRW_ADDRESS_DIRECT);
805 assert(src1.address_mode == BRW_ADDRESS_DIRECT);
806 assert(src2.address_mode == BRW_ADDRESS_DIRECT);
807
808 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
809 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
810 dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
811
812 if (devinfo->gen >= 12) {
813 brw_inst_set_3src_a1_dst_reg_file(devinfo, inst, dest.file);
814 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
815 } else {
816 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
817 brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
818 BRW_ALIGN1_3SRC_ACCUMULATOR);
819 brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
820 } else {
821 brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
822 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
823 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
824 }
825 }
826 brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);
827
828 brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);
829
830 if (brw_reg_type_is_floating_point(dest.type)) {
831 brw_inst_set_3src_a1_exec_type(devinfo, inst,
832 BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
833 } else {
834 brw_inst_set_3src_a1_exec_type(devinfo, inst,
835 BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
836 }
837
838 brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
839 brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
840 brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
841 brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);
842
843 if (src0.file == BRW_IMMEDIATE_VALUE) {
844 brw_inst_set_3src_a1_src0_imm(devinfo, inst, src0.ud);
845 } else {
846 brw_inst_set_3src_a1_src0_vstride(
847 devinfo, inst, to_3src_align1_vstride(devinfo, src0.vstride));
848 brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
849 to_3src_align1_hstride(src0.hstride));
850 brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
851 if (src0.type == BRW_REGISTER_TYPE_NF) {
852 brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
853 } else {
854 brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
855 }
856 brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
857 brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
858 }
859 brw_inst_set_3src_a1_src1_vstride(
860 devinfo, inst, to_3src_align1_vstride(devinfo, src1.vstride));
861 brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
862 to_3src_align1_hstride(src1.hstride));
863
864 brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
865 if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
866 brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
867 } else {
868 brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
869 }
870 brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
871 brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
872
873 if (src2.file == BRW_IMMEDIATE_VALUE) {
874 brw_inst_set_3src_a1_src2_imm(devinfo, inst, src2.ud);
875 } else {
876 brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
877 to_3src_align1_hstride(src2.hstride));
878 /* no vstride on src2 */
879 brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
880 brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
881 brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
882 brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
883 }
884
885 assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
886 src0.file == BRW_IMMEDIATE_VALUE ||
887 (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
888 src0.type == BRW_REGISTER_TYPE_NF));
889 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
890 src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
891 assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
892 src2.file == BRW_IMMEDIATE_VALUE);
893
894 if (devinfo->gen >= 12) {
895 if (src0.file == BRW_IMMEDIATE_VALUE) {
896 brw_inst_set_3src_a1_src0_is_imm(devinfo, inst, 1);
897 } else {
898 brw_inst_set_3src_a1_src0_reg_file(devinfo, inst, src0.file);
899 }
900
901 brw_inst_set_3src_a1_src1_reg_file(devinfo, inst, src1.file);
902
903 if (src2.file == BRW_IMMEDIATE_VALUE) {
904 brw_inst_set_3src_a1_src2_is_imm(devinfo, inst, 1);
905 } else {
906 brw_inst_set_3src_a1_src2_reg_file(devinfo, inst, src2.file);
907 }
908 } else {
909 brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
910 src0.file == BRW_GENERAL_REGISTER_FILE ?
911 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
912 BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
913 brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
914 src1.file == BRW_GENERAL_REGISTER_FILE ?
915 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
916 BRW_ALIGN1_3SRC_ACCUMULATOR);
917 brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
918 src2.file == BRW_GENERAL_REGISTER_FILE ?
919 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
920 BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
921 }
922
923 } else {
924 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
925 dest.file == BRW_MESSAGE_REGISTER_FILE);
926 assert(dest.type == BRW_REGISTER_TYPE_F ||
927 dest.type == BRW_REGISTER_TYPE_DF ||
928 dest.type == BRW_REGISTER_TYPE_D ||
929 dest.type == BRW_REGISTER_TYPE_UD ||
930 (dest.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 8));
931 if (devinfo->gen == 6) {
932 brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
933 dest.file == BRW_MESSAGE_REGISTER_FILE);
934 }
935 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
936 brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 4);
937 brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);
938
939 assert(src0.file == BRW_GENERAL_REGISTER_FILE);
940 brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
941 brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
942 brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
943 brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
944 brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
945 brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
946 src0.vstride == BRW_VERTICAL_STRIDE_0);
947
948 assert(src1.file == BRW_GENERAL_REGISTER_FILE);
949 brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
950 brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
951 brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
952 brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
953 brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
954 brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
955 src1.vstride == BRW_VERTICAL_STRIDE_0);
956
957 assert(src2.file == BRW_GENERAL_REGISTER_FILE);
958 brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
959 brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
960 brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
961 brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
962 brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
963 brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
964 src2.vstride == BRW_VERTICAL_STRIDE_0);
965
966 if (devinfo->gen >= 7) {
967 /* Set both the source and destination types based on dest.type,
968 * ignoring the source register types. The MAD and LRP emitters ensure
969 * that all four types are float. The BFE and BFI2 emitters, however,
970 * may send us mixed D and UD types and want us to ignore that and use
971 * the destination type.
972 */
973 brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
974 brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
975
976 /* From the Bspec, 3D Media GPGPU, Instruction fields, srcType:
977 *
978 * "Three source instructions can use operands with mixed-mode
979 * precision. When SrcType field is set to :f or :hf it defines
980 * precision for source 0 only, and fields Src1Type and Src2Type
981 * define precision for other source operands:
982 *
983 * 0b = :f. Single precision Float (32-bit).
984 * 1b = :hf. Half precision Float (16-bit)."
985 */
986 if (src1.type == BRW_REGISTER_TYPE_HF)
987 brw_inst_set_3src_a16_src1_type(devinfo, inst, 1);
988
989 if (src2.type == BRW_REGISTER_TYPE_HF)
990 brw_inst_set_3src_a16_src2_type(devinfo, inst, 1);
991 }
992 }
993
994 return inst;
995 }
996
997
998 /***********************************************************************
999 * Convenience routines.
1000 */
/* Template for one-source ALU emitters: expands to
 * brw_inst *brw_<OP>(p, dest, src0), forwarding to brw_alu1() with
 * the matching BRW_OPCODE_<OP>.
 */
#define ALU1(OP)					\
brw_inst *brw_##OP(struct brw_codegen *p,		\
	      struct brw_reg dest,			\
	      struct brw_reg src0)			\
{							\
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);	\
}
1008
/* Template for two-source ALU emitters: expands to
 * brw_inst *brw_<OP>(p, dest, src0, src1), forwarding to brw_alu2().
 */
#define ALU2(OP)					\
brw_inst *brw_##OP(struct brw_codegen *p,		\
	      struct brw_reg dest,			\
	      struct brw_reg src0,			\
	      struct brw_reg src1)			\
{							\
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);	\
}
1017
/* Template for three-source ALU emitters: expands to
 * brw_inst *brw_<OP>(p, dest, src0, src1, src2), forwarding to brw_alu3().
 * In Align16 mode a scalar source (vertical stride 0) must also replicate
 * its first channel, so force an XXXX swizzle on such sources.
 */
#define ALU3(OP)					\
brw_inst *brw_##OP(struct brw_codegen *p,		\
	      struct brw_reg dest,			\
	      struct brw_reg src0,			\
	      struct brw_reg src1,			\
	      struct brw_reg src2)			\
{							\
   if (p->current->access_mode == BRW_ALIGN_16) {	\
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)	\
         src0.swizzle = BRW_SWIZZLE_XXXX;		\
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)	\
         src1.swizzle = BRW_SWIZZLE_XXXX;		\
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)	\
         src2.swizzle = BRW_SWIZZLE_XXXX;		\
   }							\
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);	\
}
1035
/* Like ALU3, but additionally asserts that all four operands share a
 * single floating-point type (all F or all DF).  Used for opcodes such
 * as LRP that are only defined on floats.
 */
#define ALU3F(OP)				\
brw_inst *brw_##OP(struct brw_codegen *p,	\
	 struct brw_reg dest,			\
	 struct brw_reg src0,			\
	 struct brw_reg src1,			\
	 struct brw_reg src2)			\
{						\
   assert(dest.type == BRW_REGISTER_TYPE_F ||	\
          dest.type == BRW_REGISTER_TYPE_DF);	\
   if (dest.type == BRW_REGISTER_TYPE_F) {	\
      assert(src0.type == BRW_REGISTER_TYPE_F);	\
      assert(src1.type == BRW_REGISTER_TYPE_F);	\
      assert(src2.type == BRW_REGISTER_TYPE_F);	\
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {	\
      assert(src0.type == BRW_REGISTER_TYPE_DF);	\
      assert(src1.type == BRW_REGISTER_TYPE_DF);	\
      assert(src2.type == BRW_REGISTER_TYPE_DF);	\
   }						\
						\
   if (p->current->access_mode == BRW_ALIGN_16) {	\
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)	\
         src0.swizzle = BRW_SWIZZLE_XXXX;		\
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)	\
         src1.swizzle = BRW_SWIZZLE_XXXX;		\
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)	\
         src2.swizzle = BRW_SWIZZLE_XXXX;		\
   }						\
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);	\
}
1065
/* Instantiate the public emitters from the ALU1/ALU2/ALU3/ALU3F
 * templates above, one per opcode.  Opcodes with extra operand
 * restrictions (MOV, ADD, MUL, AVG, LINE, PLN, ...) get hand-written
 * wrappers below instead.
 */
ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU2(ROL)
ALU2(ROR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDU)
ALU1(RNDZ)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)
1101
1102 brw_inst *
1103 brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
1104 {
1105 const struct gen_device_info *devinfo = p->devinfo;
1106
1107 /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
1108 * To avoid the problems that causes, we use an <X,2,0> source region to
1109 * read each element twice.
1110 */
1111 if (devinfo->gen == 7 && !devinfo->is_haswell &&
1112 brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
1113 dest.type == BRW_REGISTER_TYPE_DF &&
1114 (src0.type == BRW_REGISTER_TYPE_F ||
1115 src0.type == BRW_REGISTER_TYPE_D ||
1116 src0.type == BRW_REGISTER_TYPE_UD) &&
1117 !has_scalar_region(src0)) {
1118 assert(src0.vstride == src0.width + src0.hstride);
1119 src0.vstride = src0.hstride;
1120 src0.width = BRW_WIDTH_2;
1121 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1122 }
1123
1124 return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
1125 }
1126
1127 brw_inst *
1128 brw_ADD(struct brw_codegen *p, struct brw_reg dest,
1129 struct brw_reg src0, struct brw_reg src1)
1130 {
1131 /* 6.2.2: add */
1132 if (src0.type == BRW_REGISTER_TYPE_F ||
1133 (src0.file == BRW_IMMEDIATE_VALUE &&
1134 src0.type == BRW_REGISTER_TYPE_VF)) {
1135 assert(src1.type != BRW_REGISTER_TYPE_UD);
1136 assert(src1.type != BRW_REGISTER_TYPE_D);
1137 }
1138
1139 if (src1.type == BRW_REGISTER_TYPE_F ||
1140 (src1.file == BRW_IMMEDIATE_VALUE &&
1141 src1.type == BRW_REGISTER_TYPE_VF)) {
1142 assert(src0.type != BRW_REGISTER_TYPE_UD);
1143 assert(src0.type != BRW_REGISTER_TYPE_D);
1144 }
1145
1146 return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
1147 }
1148
1149 brw_inst *
1150 brw_AVG(struct brw_codegen *p, struct brw_reg dest,
1151 struct brw_reg src0, struct brw_reg src1)
1152 {
1153 assert(dest.type == src0.type);
1154 assert(src0.type == src1.type);
1155 switch (src0.type) {
1156 case BRW_REGISTER_TYPE_B:
1157 case BRW_REGISTER_TYPE_UB:
1158 case BRW_REGISTER_TYPE_W:
1159 case BRW_REGISTER_TYPE_UW:
1160 case BRW_REGISTER_TYPE_D:
1161 case BRW_REGISTER_TYPE_UD:
1162 break;
1163 default:
1164 unreachable("Bad type for brw_AVG");
1165 }
1166
1167 return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
1168 }
1169
1170 brw_inst *
1171 brw_MUL(struct brw_codegen *p, struct brw_reg dest,
1172 struct brw_reg src0, struct brw_reg src1)
1173 {
1174 /* 6.32.38: mul */
1175 if (src0.type == BRW_REGISTER_TYPE_D ||
1176 src0.type == BRW_REGISTER_TYPE_UD ||
1177 src1.type == BRW_REGISTER_TYPE_D ||
1178 src1.type == BRW_REGISTER_TYPE_UD) {
1179 assert(dest.type != BRW_REGISTER_TYPE_F);
1180 }
1181
1182 if (src0.type == BRW_REGISTER_TYPE_F ||
1183 (src0.file == BRW_IMMEDIATE_VALUE &&
1184 src0.type == BRW_REGISTER_TYPE_VF)) {
1185 assert(src1.type != BRW_REGISTER_TYPE_UD);
1186 assert(src1.type != BRW_REGISTER_TYPE_D);
1187 }
1188
1189 if (src1.type == BRW_REGISTER_TYPE_F ||
1190 (src1.file == BRW_IMMEDIATE_VALUE &&
1191 src1.type == BRW_REGISTER_TYPE_VF)) {
1192 assert(src0.type != BRW_REGISTER_TYPE_UD);
1193 assert(src0.type != BRW_REGISTER_TYPE_D);
1194 }
1195
1196 assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1197 src0.nr != BRW_ARF_ACCUMULATOR);
1198 assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1199 src1.nr != BRW_ARF_ACCUMULATOR);
1200
1201 return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
1202 }
1203
1204 brw_inst *
1205 brw_LINE(struct brw_codegen *p, struct brw_reg dest,
1206 struct brw_reg src0, struct brw_reg src1)
1207 {
1208 src0.vstride = BRW_VERTICAL_STRIDE_0;
1209 src0.width = BRW_WIDTH_1;
1210 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1211 return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
1212 }
1213
1214 brw_inst *
1215 brw_PLN(struct brw_codegen *p, struct brw_reg dest,
1216 struct brw_reg src0, struct brw_reg src1)
1217 {
1218 src0.vstride = BRW_VERTICAL_STRIDE_0;
1219 src0.width = BRW_WIDTH_1;
1220 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1221 src1.vstride = BRW_VERTICAL_STRIDE_8;
1222 src1.width = BRW_WIDTH_8;
1223 src1.hstride = BRW_HORIZONTAL_STRIDE_1;
1224 return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
1225 }
1226
/* Emit a float-to-half conversion from src into dst.  On Gen8+ this is a
 * converting MOV to an HF destination; on Gen7 it is the dedicated
 * F32TO16 instruction.  When dst is UD and the hardware does not zero
 * the high 16 bits of each dword, a second MOV explicitly writes zeros
 * to the upper words.
 */
brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      /* Rewrite the UD destination as a strided W region so the
       * conversion lands in the low word of each dword.
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      /* The two MOVs write disjoint words of the same dwords, so disable
       * the destination-dependency check between them (pre-Gen12 only;
       * Gen12+ uses software scoreboarding instead).
       */
      if (devinfo->gen < 12)
         brw_inst_set_no_dd_clear(devinfo, inst, true);
      brw_set_default_swsb(p, tgl_swsb_null());
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      if (devinfo->gen < 12)
         brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   /* Returns the zero-fill MOV if one was emitted, else the conversion. */
   return inst;
}
1276
/* Emit a half-to-float conversion from src into dst.  On Gen8+ this is a
 * converting MOV from an HF source; on Gen7 it is the dedicated F16TO32
 * instruction.
 */
brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      /* Align16 mode requires the packed-half source to be typed UD. */
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}
1307
1308
1309 void brw_NOP(struct brw_codegen *p)
1310 {
1311 brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
1312 memset(insn, 0, sizeof(*insn));
1313 brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
1314 }
1315
1316 void brw_SYNC(struct brw_codegen *p, enum tgl_sync_function func)
1317 {
1318 brw_inst *insn = next_insn(p, BRW_OPCODE_SYNC);
1319 brw_inst_set_cond_modifier(p->devinfo, insn, func);
1320 }
1321
1322 /***********************************************************************
1323 * Comparisons, if/else/endif
1324 */
1325
1326 brw_inst *
1327 brw_JMPI(struct brw_codegen *p, struct brw_reg index,
1328 unsigned predicate_control)
1329 {
1330 const struct gen_device_info *devinfo = p->devinfo;
1331 struct brw_reg ip = brw_ip_reg();
1332 brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);
1333
1334 brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
1335 brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
1336 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
1337 brw_inst_set_pred_control(devinfo, inst, predicate_control);
1338
1339 return inst;
1340 }
1341
1342 static void
1343 push_if_stack(struct brw_codegen *p, brw_inst *inst)
1344 {
1345 p->if_stack[p->if_stack_depth] = inst - p->store;
1346
1347 p->if_stack_depth++;
1348 if (p->if_stack_array_size <= p->if_stack_depth) {
1349 p->if_stack_array_size *= 2;
1350 p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
1351 p->if_stack_array_size);
1352 }
1353 }
1354
1355 static brw_inst *
1356 pop_if_stack(struct brw_codegen *p)
1357 {
1358 p->if_stack_depth--;
1359 return &p->store[p->if_stack[p->if_stack_depth]];
1360 }
1361
/* Push a DO instruction onto the loop stack.  Stores an index into
 * p->store (not a pointer) because the store may be reallocated while
 * the loop is open.
 */
static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   /* The "+ 1" covers the write to if_depth_in_loop[loop_stack_depth]
    * below, which happens *after* loop_stack_depth is incremented.
    */
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   /* The new loop starts with no enclosing IFs inside it. */
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}
1377
1378 static brw_inst *
1379 get_inner_do_insn(struct brw_codegen *p)
1380 {
1381 return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
1382 }
1383
1384 /* EU takes the value from the flag register and pushes it onto some
1385 * sort of a stack (presumably merging with any flag value already on
1386 * the stack). Within an if block, the flags at the top of the stack
1387 * control execution on each channel of the unit, eg. on each of the
1388 * 16 pixel values in our wm programs.
1389 *
1390 * When the matching 'else' instruction is reached (presumably by
1391 * countdown of the instruction count patched in by our ELSE/ENDIF
1392 * functions), the relevant flags are inverted.
1393 *
1394 * When the matching 'endif' instruction is reached, the flags are
1395 * popped off. If the stack is now empty, normal execution resumes.
1396 */
/* Emit an IF instruction with the given execution size.  The jump
 * targets (gen4 jump count, gen6 jump count, or gen7+ JIP/UIP) are left
 * as zero here and patched when the matching brw_ENDIF() is emitted.
 * The IF is pushed onto the if-stack so ELSE/ENDIF can find it.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      /* Gen4-5: IF operates on IP directly; jump count goes in src1. */
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      /* Gen6: jump count lives in the dest immediate field. */
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      /* Gen7: null operands; JIP/UIP fields carry the jump targets. */
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      if (devinfo->gen < 12)
         brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   /* Pre-gen6 flow control requires an explicit thread switch unless we
    * are in single-program-flow mode.
    */
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
1441
1442 /* This function is only used for gen6-style IF instructions with an
1443 * embedded comparison (conditional modifier). It is not used on gen7.
1444 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* The jump count (dest immediate) is patched later by brw_ENDIF(). */
   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   /* The IF performs the comparison of src0 vs src1 itself. */
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   /* Defaults must not have set compression or predication: the embedded
    * comparison supplies the predicate.
    */
   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}
1467
1468 /**
1469 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
1470 */
/* Rewrite an IF (and optional ELSE) in place as predicated ADDs on IP,
 * for single-program-flow mode on pre-Gen6.  The immediate jump
 * distances are in bytes; the "* 16" scales instruction counts to bytes
 * (presumably 16 bytes per instruction on these gens -- matches the
 * uncompacted instruction size; confirm against brw_jump_scale).
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block. If there is no ELSE
    * block, point to where ENDIF would be. Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      /* IF jumps past the ELSE; ELSE jumps to the end of the block. */
      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
1508
1509 /**
1510 * Patch IF and ELSE instructions with appropriate jump targets.
1511 */
/* Back-patch the jump targets of a completed IF/[ELSE]/ENDIF block.
 * if_inst and endif_inst are required; else_inst may be NULL.  Jump
 * distances are instruction counts scaled by brw_jump_scale(devinfo).
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   /* Per-generation scale factor from instruction counts to jump units. */
   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
1599
/* Emit an ELSE instruction.  Like brw_IF(), the jump targets are left
 * zero and patched later by brw_ENDIF() via patch_IF_ELSE().  The ELSE
 * is pushed onto the if-stack on top of its IF.
 */
void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      /* Gen4-5: ELSE operates on IP directly; jump count goes in src1. */
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      /* Gen6: jump count lives in the dest immediate field. */
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      /* Gen7: null operands; JIP/UIP fields carry the jump targets. */
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      if (devinfo->gen < 12)
         brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   /* Pre-gen6 flow control requires an explicit thread switch unless we
    * are in single-program-flow mode.
    */
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}
1638
/* Close the innermost IF/[ELSE] block: pop the IF and optional ELSE off
 * the if-stack, emit an ENDIF (or, in pre-Gen6 SPF mode, rewrite the
 * IF/ELSE as predicated ADDs instead), and back-patch all jump targets.
 */
void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP. On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of instruction store
    * memory(p->store), so call it first before referencing the instruction
    * store pointer from an index
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   /* Per-generation operand layout, mirroring brw_IF()/brw_ELSE(). */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}
1719
/* Emit a BREAK out of the innermost loop.  The jump fields are left
 * zero here (they are filled in when the loop is closed -- patching
 * code is outside this chunk); pre-Gen6 also records how many nested
 * IF mask-stack entries must be popped.
 */
brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      /* Gen4-5: BREAK operates on IP and must pop the IF stack entries
       * of all IFs nested inside the loop.
       */
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   return insn;
}
1746
1747 brw_inst *
1748 brw_CONT(struct brw_codegen *p)
1749 {
1750 const struct gen_device_info *devinfo = p->devinfo;
1751 brw_inst *insn;
1752
1753 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1754 brw_set_dest(p, insn, brw_ip_reg());
1755 if (devinfo->gen >= 8) {
1756 brw_set_src0(p, insn, brw_imm_d(0x0));
1757 } else {
1758 brw_set_src0(p, insn, brw_ip_reg());
1759 brw_set_src1(p, insn, brw_imm_d(0x0));
1760 }
1761
1762 if (devinfo->gen < 6) {
1763 brw_inst_set_gen4_pop_count(devinfo, insn,
1764 p->if_depth_in_loop[p->loop_stack_depth]);
1765 }
1766 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1767 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1768 return insn;
1769 }
1770
1771 brw_inst *
1772 brw_HALT(struct brw_codegen *p)
1773 {
1774 const struct gen_device_info *devinfo = p->devinfo;
1775 brw_inst *insn;
1776
1777 insn = next_insn(p, BRW_OPCODE_HALT);
1778 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1779 if (devinfo->gen < 6) {
1780 /* From the Gen4 PRM:
1781 *
1782 * "IP register must be put (for example, by the assembler) at <dst>
1783 * and <src0> locations.
1784 */
1785 brw_set_dest(p, insn, brw_ip_reg());
1786 brw_set_src0(p, insn, brw_ip_reg());
1787 brw_set_src1(p, insn, brw_imm_d(0x0)); /* exitcode updated later. */
1788 } else if (devinfo->gen < 8) {
1789 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1790 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1791 } else if (devinfo->gen < 12) {
1792 brw_set_src0(p, insn, brw_imm_d(0x0));
1793 }
1794
1795 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1796 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1797 return insn;
1798 }
1799
1800 /* DO/WHILE loop:
1801 *
1802 * The DO/WHILE is just an unterminated loop -- break or continue are
1803 * used for control within the loop. We have a few ways they can be
1804 * done.
1805 *
1806 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1807 * jip and no DO instruction.
1808 *
1809 * For non-uniform control flow pre-gen6, there's a DO instruction to
1810 * push the mask, and a WHILE to jump back, and BREAK to get out and
1811 * pop the mask.
1812 *
1813 * For gen6, there's no more mask stack, so no need for DO. WHILE
1814 * just points back to the first instruction of the loop.
1815 */
1816 brw_inst *
1817 brw_DO(struct brw_codegen *p, unsigned execute_size)
1818 {
1819 const struct gen_device_info *devinfo = p->devinfo;
1820
1821 if (devinfo->gen >= 6 || p->single_program_flow) {
1822 push_loop_stack(p, &p->store[p->nr_insn]);
1823 return &p->store[p->nr_insn];
1824 } else {
1825 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1826
1827 push_loop_stack(p, insn);
1828
1829 /* Override the defaults for this instruction:
1830 */
1831 brw_set_dest(p, insn, brw_null_reg());
1832 brw_set_src0(p, insn, brw_null_reg());
1833 brw_set_src1(p, insn, brw_null_reg());
1834
1835 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1836 brw_inst_set_exec_size(devinfo, insn, execute_size);
1837 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1838
1839 return insn;
1840 }
1841 }
1842
1843 /**
1844 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1845 * instruction here.
1846 *
1847 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1848 * nesting, since it can always just point to the end of the block/current loop.
1849 */
1850 static void
1851 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1852 {
1853 const struct gen_device_info *devinfo = p->devinfo;
1854 brw_inst *do_inst = get_inner_do_insn(p);
1855 brw_inst *inst;
1856 unsigned br = brw_jump_scale(devinfo);
1857
1858 assert(devinfo->gen < 6);
1859
1860 for (inst = while_inst - 1; inst != do_inst; inst--) {
1861 /* If the jump count is != 0, that means that this instruction has already
1862 * been patched because it's part of a loop inside of the one we're
1863 * patching.
1864 */
1865 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1866 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1867 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1868 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1869 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1870 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1871 }
1872 }
1873 }
1874
/**
 * Close the innermost DO/WHILE loop by emitting the WHILE (or, for
 * uniform control flow pre-Gen6, a plain IP ADD jumping back to the top).
 *
 * The backward jump distance is computed from the instruction recorded by
 * the matching brw_DO(); on Gen4/5 any BREAK/CONT inside the loop are also
 * patched here via brw_patch_break_cont().
 */
brw_inst *
brw_WHILE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn, *do_insn;
   unsigned br = brw_jump_scale(devinfo); /* jump-count units per instruction */

   if (devinfo->gen >= 6) {
      insn = next_insn(p, BRW_OPCODE_WHILE);
      do_insn = get_inner_do_insn(p);

      if (devinfo->gen >= 8) {
         brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         /* Gen12+ takes no immediate src0 here. */
         if (devinfo->gen < 12)
            brw_set_src0(p, insn, brw_imm_d(0));
         brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
      } else if (devinfo->gen == 7) {
         brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src1(p, insn, brw_imm_w(0));
         brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
      } else {
         brw_set_dest(p, insn, brw_imm_w(0));
         brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
         brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      }

      brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   } else {
      if (p->single_program_flow) {
         /* Uniform control flow: the loop back-edge is just an IP add
          * (16 bytes per instruction on Gen4/5).
          */
         insn = next_insn(p, BRW_OPCODE_ADD);
         do_insn = get_inner_do_insn(p);

         brw_set_dest(p, insn, brw_ip_reg());
         brw_set_src0(p, insn, brw_ip_reg());
         brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
         brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
      } else {
         insn = next_insn(p, BRW_OPCODE_WHILE);
         do_insn = get_inner_do_insn(p);

         assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);

         brw_set_dest(p, insn, brw_ip_reg());
         brw_set_src0(p, insn, brw_ip_reg());
         brw_set_src1(p, insn, brw_imm_d(0));

         /* Inherit the execution size from the matching DO. */
         brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
         brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
         brw_inst_set_gen4_pop_count(devinfo, insn, 0);

         brw_patch_break_cont(p, insn);
      }
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);

   p->loop_stack_depth--;

   return insn;
}
1937
1938 /* FORWARD JUMPS:
1939 */
1940 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1941 {
1942 const struct gen_device_info *devinfo = p->devinfo;
1943 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1944 unsigned jmpi = 1;
1945
1946 if (devinfo->gen >= 5)
1947 jmpi = 2;
1948
1949 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1950 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1951
1952 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1953 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1954 }
1955
1956 /* To integrate with the above, it makes sense that the comparison
1957 * instruction should populate the flag register. It might be simpler
1958 * just to use the flag reg for most WM tasks?
1959 */
1960 void brw_CMP(struct brw_codegen *p,
1961 struct brw_reg dest,
1962 unsigned conditional,
1963 struct brw_reg src0,
1964 struct brw_reg src1)
1965 {
1966 const struct gen_device_info *devinfo = p->devinfo;
1967 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1968
1969 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1970 brw_set_dest(p, insn, dest);
1971 brw_set_src0(p, insn, src0);
1972 brw_set_src1(p, insn, src1);
1973
1974 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1975 * page says:
1976 * "Any CMP instruction with a null destination must use a {switch}."
1977 *
1978 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1979 * mentioned on their work-arounds pages.
1980 */
1981 if (devinfo->gen == 7) {
1982 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1983 dest.nr == BRW_ARF_NULL) {
1984 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1985 }
1986 }
1987 }
1988
1989 /***********************************************************************
1990 * Helpers for the various SEND message types:
1991 */
1992
1993 /** Extended math function, float[8].
1994 */
1995 void gen4_math(struct brw_codegen *p,
1996 struct brw_reg dest,
1997 unsigned function,
1998 unsigned msg_reg_nr,
1999 struct brw_reg src,
2000 unsigned precision )
2001 {
2002 const struct gen_device_info *devinfo = p->devinfo;
2003 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2004 unsigned data_type;
2005 if (has_scalar_region(src)) {
2006 data_type = BRW_MATH_DATA_SCALAR;
2007 } else {
2008 data_type = BRW_MATH_DATA_VECTOR;
2009 }
2010
2011 assert(devinfo->gen < 6);
2012
2013 /* Example code doesn't set predicate_control for send
2014 * instructions.
2015 */
2016 brw_inst_set_pred_control(devinfo, insn, 0);
2017 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2018
2019 brw_set_dest(p, insn, dest);
2020 brw_set_src0(p, insn, src);
2021 brw_set_math_message(p,
2022 insn,
2023 function,
2024 src.type == BRW_REGISTER_TYPE_D,
2025 precision,
2026 data_type);
2027 }
2028
/**
 * Extended math function on Gen6+, using the dedicated MATH instruction
 * instead of a SEND to the shared math unit.
 *
 * The assertions encode per-generation restrictions on register files,
 * strides, operand types and source modifiers for MATH.
 */
void gen6_math(struct brw_codegen *p,
               struct brw_reg dest,
               unsigned function,
               struct brw_reg src0,
               struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);

   assert(devinfo->gen >= 6);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));

   assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
   if (devinfo->gen == 6) {
      assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
   }

   if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
       function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
       function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
      /* Integer division takes integer sources; src1 may be an immediate
       * only on Gen8+.
       */
      assert(src0.type != BRW_REGISTER_TYPE_F);
      assert(src1.type != BRW_REGISTER_TYPE_F);
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
   } else {
      /* Every other math function operates on floats (half-float allowed
       * on Gen9+).
       */
      assert(src0.type == BRW_REGISTER_TYPE_F ||
             (src0.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
      assert(src1.type == BRW_REGISTER_TYPE_F ||
             (src1.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
   }

   /* Source modifiers are ignored for extended math instructions on Gen6. */
   if (devinfo->gen == 6) {
      assert(!src0.negate);
      assert(!src0.abs);
      assert(!src1.negate);
      assert(!src1.abs);
   }

   brw_inst_set_math_function(devinfo, insn, function);

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
}
2077
2078 /**
2079 * Return the right surface index to access the thread scratch space using
2080 * stateless dataport messages.
2081 */
2082 unsigned
2083 brw_scratch_surface_idx(const struct brw_codegen *p)
2084 {
2085 /* The scratch space is thread-local so IA coherency is unnecessary. */
2086 if (p->devinfo->gen >= 8)
2087 return GEN8_BTI_STATELESS_NON_COHERENT;
2088 else
2089 return BRW_BTI_STATELESS;
2090 }
2091
/**
 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
 * using a constant offset per channel.
 *
 * The offset must be aligned to oword size (16 bytes).  Used for
 * register spilling.
 *
 * \param mrf      message register holding the header; the data payload
 *                 occupies the following \p num_regs registers.
 * \param offset   byte offset into the scratch space.
 */
void brw_oword_block_write_scratch(struct brw_codegen *p,
                                   struct brw_reg mrf,
                                   int num_regs,
                                   unsigned offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
       devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_SFID_DATAPORT_WRITE);
   const struct tgl_swsb swsb = brw_get_default_swsb(p);
   uint32_t msg_type;

   /* Gen6+ expects the message offset in owords rather than bytes. */
   if (devinfo->gen >= 6)
      offset /= 16;

   mrf = retype(mrf, BRW_REGISTER_TYPE_UD);

   /* One header register plus the data payload. */
   const unsigned mlen = 1 + num_regs;

   /* Set up the message header.  This is g0, with g0.2 filled with
    * the offset.  We don't want to leave our offset around in g0 or
    * it'll screw up texture samples, so set it up inside the message
    * reg.
    */
   {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));

      brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* set message header global offset field (reg 0, element 2) */
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_swsb(p, tgl_swsb_null());
      brw_MOV(p,
              retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
                                  mrf.nr,
                                  2), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(offset));

      brw_pop_insn_state(p);
      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
   }

   {
      struct brw_reg dest;
      brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
      int send_commit_msg;
      struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW);

      brw_inst_set_sfid(devinfo, insn, target_cache);
      brw_inst_set_compression(devinfo, insn, false);

      if (brw_inst_exec_size(devinfo, insn) >= 16)
         src_header = vec16(src_header);

      assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
      if (devinfo->gen < 6)
         brw_inst_set_base_mrf(devinfo, insn, mrf.nr);

      /* Until gen6, writes followed by reads from the same location
       * are not guaranteed to be ordered unless write_commit is set.
       * If set, then a no-op write is issued to the destination
       * register to set a dependency, and a read from the destination
       * can be used to ensure the ordering.
       *
       * For gen6, only writes between different threads need ordering
       * protection.  Our use of DP writes is all about register
       * spilling within a thread.
       */
      if (devinfo->gen >= 6) {
         dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
         send_commit_msg = 0;
      } else {
         dest = src_header;
         send_commit_msg = 1;
      }

      brw_set_dest(p, insn, dest);
      if (devinfo->gen >= 6) {
         brw_set_src0(p, insn, mrf);
      } else {
         brw_set_src0(p, insn, brw_null_reg());
      }

      if (devinfo->gen >= 6)
         msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
      else
         msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;

      brw_set_desc(p, insn,
                   brw_message_desc(devinfo, mlen, send_commit_msg, true) |
                   brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
                                     BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
                                     msg_type, 0, /* not a render target */
                                     send_commit_msg));
   }
}
2201
2202
/**
 * Read a block of owords (half a GRF each) from the scratch buffer
 * using a constant index per channel.
 *
 * Offset must be aligned to oword size (16 bytes).  Used for register
 * spilling.
 *
 * \param mrf  pre-Gen7 only: the message register used for the header.
 *             Ignored on Gen7+, where \p dest itself carries the header.
 */
void
brw_oword_block_read_scratch(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             int num_regs,
                             unsigned offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const struct tgl_swsb swsb = brw_get_default_swsb(p);

   /* Gen6+ expects the offset in owords, not bytes. */
   if (devinfo->gen >= 6)
      offset /= 16;

   if (p->devinfo->gen >= 7) {
      /* On gen 7 and above, we no longer have message registers and we can
       * send from any register we want.  By using the destination register
       * for the message, we guarantee that the implied message write won't
       * accidentally overwrite anything.  This has been a problem because
       * the MRF registers and source for the final FB write are both fixed
       * and may overlap.
       */
      mrf = retype(dest, BRW_REGISTER_TYPE_UD);
   } else {
      mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
   }
   dest = retype(dest, BRW_REGISTER_TYPE_UW);

   const unsigned rlen = num_regs;
   const unsigned target_cache =
      (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
       devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_SFID_DATAPORT_READ);

   /* Build the message header: g0 with the scratch offset in element 2. */
   {
      brw_push_insn_state(p);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* set message header global offset field (reg 0, element 2) */
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_swsb(p, tgl_swsb_null());
      brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));

      brw_pop_insn_state(p);
      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
   }

   /* Emit the actual scratch-read SEND. */
   {
      brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);

      brw_inst_set_sfid(devinfo, insn, target_cache);
      assert(brw_inst_pred_control(devinfo, insn) == 0);
      brw_inst_set_compression(devinfo, insn, false);

      brw_set_dest(p, insn, dest); /* UW? */
      if (devinfo->gen >= 6) {
         brw_set_src0(p, insn, mrf);
      } else {
         brw_set_src0(p, insn, brw_null_reg());
         brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
      }

      brw_set_desc(p, insn,
                   brw_message_desc(devinfo, 1, rlen, true) |
                   brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
                                    BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
                                    BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
                                    BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
   }
}
2284
2285 void
2286 gen7_block_read_scratch(struct brw_codegen *p,
2287 struct brw_reg dest,
2288 int num_regs,
2289 unsigned offset)
2290 {
2291 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2292 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2293
2294 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2295
2296 /* The HW requires that the header is present; this is to get the g0.5
2297 * scratch offset.
2298 */
2299 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2300
2301 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2302 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2303 * is 32 bytes, which happens to be the size of a register.
2304 */
2305 offset /= REG_SIZE;
2306 assert(offset < (1 << 12));
2307
2308 gen7_set_dp_scratch_message(p, insn,
2309 false, /* scratch read */
2310 false, /* OWords */
2311 false, /* invalidate after read */
2312 num_regs,
2313 offset,
2314 1, /* mlen: just g0 */
2315 num_regs, /* rlen */
2316 true); /* header present */
2317 }
2318
/**
 * Read float[4] vectors from the data port constant cache.
 * Location (in buffer) should be a multiple of 16.
 * Used for fetching shader constants.
 *
 * \param mrf               message register for the header (pre-Gen7 MRF
 *                          semantics; reg 0 element 2 carries the offset).
 * \param offset            byte offset into the buffer (converted to owords
 *                          on Gen6+).
 * \param bind_table_index  binding table entry of the constant buffer.
 */
void brw_oword_block_read(struct brw_codegen *p,
                          struct brw_reg dest,
                          struct brw_reg mrf,
                          uint32_t offset,
                          uint32_t bind_table_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
       BRW_SFID_DATAPORT_READ);
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const struct tgl_swsb swsb = brw_get_default_swsb(p);

   /* On newer hardware, offset is in units of owords. */
   if (devinfo->gen >= 6)
      offset /= 16;

   mrf = retype(mrf, BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Inner state push: header setup uses different exec sizes. */
   brw_push_insn_state(p);
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
   brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   /* set message header global offset field (reg 0, element 2) */
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   brw_set_default_swsb(p, tgl_swsb_null());
   brw_MOV(p,
           retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
                               mrf.nr,
                               2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(offset));
   brw_pop_insn_state(p);

   brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));

   brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);

   brw_inst_set_sfid(devinfo, insn, target_cache);

   /* cast dest to a uword[8] vector */
   dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);

   brw_set_dest(p, insn, dest);
   if (devinfo->gen >= 6) {
      brw_set_src0(p, insn, mrf);
   } else {
      brw_set_src0(p, insn, brw_null_reg());
      brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
   }

   brw_set_desc(p, insn,
                brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
                brw_dp_read_desc(devinfo, bind_table_index,
                                 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
                                 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
                                 BRW_DATAPORT_READ_TARGET_DATA_CACHE));

   brw_pop_insn_state(p);
}
2389
/**
 * Emit a render-target (framebuffer) write message.
 *
 * Uses SENDC on Gen6+ (render-cache dependency check) and plain SEND with
 * an MRF payload before that.  The returned instruction carries the EOT
 * bit when \p eot is set.
 */
brw_inst *
brw_fb_WRITE(struct brw_codegen *p,
             struct brw_reg payload,
             struct brw_reg implied_header,
             unsigned msg_control,
             unsigned binding_table_index,
             unsigned msg_length,
             unsigned response_length,
             bool eot,
             bool last_render_target,
             bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_SFID_DATAPORT_WRITE);
   brw_inst *insn;
   unsigned msg_type;
   struct brw_reg dest, src0;

   /* Null destination sized to match the current execution width. */
   if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
      dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
   else
      dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);

   if (devinfo->gen >= 6) {
      insn = next_insn(p, BRW_OPCODE_SENDC);
   } else {
      insn = next_insn(p, BRW_OPCODE_SEND);
   }
   brw_inst_set_sfid(devinfo, insn, target_cache);
   brw_inst_set_compression(devinfo, insn, false);

   if (devinfo->gen >= 6) {
      /* headerless version, just submit color payload */
      src0 = payload;

      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
   } else {
      assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
      brw_inst_set_base_mrf(devinfo, insn, payload.nr);
      src0 = implied_header;

      msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
   }

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_desc(p, insn,
                brw_message_desc(devinfo, msg_length, response_length,
                                 header_present) |
                brw_dp_write_desc(devinfo, binding_table_index, msg_control,
                                  msg_type, last_render_target,
                                  0 /* send_commit_msg */));
   brw_inst_set_eot(devinfo, insn, eot);

   return insn;
}
2448
2449 brw_inst *
2450 gen9_fb_READ(struct brw_codegen *p,
2451 struct brw_reg dst,
2452 struct brw_reg payload,
2453 unsigned binding_table_index,
2454 unsigned msg_length,
2455 unsigned response_length,
2456 bool per_sample)
2457 {
2458 const struct gen_device_info *devinfo = p->devinfo;
2459 assert(devinfo->gen >= 9);
2460 const unsigned msg_subtype =
2461 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2462 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2463
2464 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2465 brw_set_dest(p, insn, dst);
2466 brw_set_src0(p, insn, payload);
2467 brw_set_desc(
2468 p, insn,
2469 brw_message_desc(devinfo, msg_length, response_length, true) |
2470 brw_dp_read_desc(devinfo, binding_table_index,
2471 per_sample << 5 | msg_subtype,
2472 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2473 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2474 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2475
2476 return insn;
2477 }
2478
/**
 * Texture sample instruction.
 * Note: the msg_type plus msg_length values determine exactly what kind
 * of sampling operation is performed.  See volume 4, page 161 of docs.
 *
 * \param msg_reg_nr  message register for the pre-Gen6 implied move;
 *                    pass (unsigned)-1 to skip it.
 */
void brw_SAMPLE(struct brw_codegen *p,
                struct brw_reg dest,
                unsigned msg_reg_nr,
                struct brw_reg src0,
                unsigned binding_table_index,
                unsigned sampler,
                unsigned msg_type,
                unsigned response_length,
                unsigned msg_length,
                unsigned header_present,
                unsigned simd_mode,
                unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   /* msg_reg_nr is unsigned; -1 wraps and compares equal here, meaning
    * "no implied move".
    */
   if (msg_reg_nr != -1)
      gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   insn = next_insn(p, BRW_OPCODE_SEND);
   brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */

   /* From the 965 PRM (volume 4, part 1, section 14.2.41):
    *
    *    "Instruction compression is not allowed for this instruction (that
    *     is, send). The hardware behavior is undefined if this instruction is
    *     set as compressed. However, compress control can be set to "SecHalf"
    *     to affect the EMask generation."
    *
    * No similar wording is found in later PRMs, but there are examples
    * utilizing send with SecHalf.  More importantly, SIMD8 sampler messages
    * are allowed in SIMD16 mode and they could not work without SecHalf.  For
    * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
    */
   brw_inst_set_compression(devinfo, insn, false);

   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_desc(p, insn,
                brw_message_desc(devinfo, msg_length, response_length,
                                 header_present) |
                brw_sampler_desc(devinfo, binding_table_index, sampler,
                                 msg_type, simd_mode, return_format));
}
2532
/* Adjust the message header's sampler state pointer to
 * select the correct group of 16 samplers.
 *
 * \param header         message header register; element 3 holds the
 *                       sampler state pointer being adjusted.
 * \param sampler_index  sampler index, either an immediate or a register.
 */
void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
                                      struct brw_reg header,
                                      struct brw_reg sampler_index)
{
   /* The "Sampler Index" field can only store values between 0 and 15.
    * However, we can add an offset to the "Sampler State Pointer"
    * field, effectively selecting a different set of 16 samplers.
    *
    * The "Sampler State Pointer" needs to be aligned to a 32-byte
    * offset, and each sampler state is only 16-bytes, so we can't
    * exclusively use the offset - we have to use both.
    */

   const struct gen_device_info *devinfo = p->devinfo;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      const int sampler_state_size = 16; /* 16 bytes */
      uint32_t sampler = sampler_index.ud;

      if (sampler >= 16) {
         /* Only Haswell and Gen8+ support more than 16 samplers. */
         assert(devinfo->is_haswell || devinfo->gen >= 8);
         brw_ADD(p,
                 get_element_ud(header, 3),
                 get_element_ud(brw_vec8_grf(0, 0), 3),
                 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
      }
   } else {
      /* Non-const sampler array indexing case */
      if (devinfo->gen < 8 && !devinfo->is_haswell) {
         return;
      }

      struct brw_reg temp = get_element_ud(header, 3);

      brw_push_insn_state(p);
      /* temp = (index & 0xf0) << 4, i.e. (index / 16) * 256 — the byte
       * offset of the selected group of 16 sampler states.
       */
      brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
      brw_set_default_swsb(p, tgl_swsb_regdist(1));
      brw_SHL(p, temp, temp, brw_imm_ud(4));
      brw_ADD(p,
              get_element_ud(header, 3),
              get_element_ud(brw_vec8_grf(0, 0), 3),
              temp);
      brw_pop_insn_state(p);
   }
}
2581
/* All these variables are pretty confusing - we might be better off
 * using bitmasks and macros for this, in the old style.  Or perhaps
 * just having the caller instantiate the fields in dword3 itself.
 *
 * Emit a URB write SEND.  The payload starts at message register
 * msg_reg_nr (moved there by gen6_resolve_implied_move() when needed).
 */
void brw_urb_WRITE(struct brw_codegen *p,
                   struct brw_reg dest,
                   unsigned msg_reg_nr,
                   struct brw_reg src0,
                   enum brw_urb_write_flags flags,
                   unsigned msg_length,
                   unsigned response_length,
                   unsigned offset,
                   unsigned swizzle)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
      /* Enable Channel Masks in the URB_WRITE_HWORD message header */
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
                       BRW_REGISTER_TYPE_UD),
             retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
             brw_imm_ud(0xff00));
      brw_pop_insn_state(p);
   }

   insn = next_insn(p, BRW_OPCODE_SEND);

   assert(msg_length < BRW_MAX_MRF(devinfo->gen));

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, brw_imm_d(0));

   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_urb_message(p,
                       insn,
                       flags,
                       msg_length,
                       response_length,
                       offset,
                       swizzle);
}
2633
/**
 * Emit a SEND whose message descriptor is either an immediate (desc.ud |
 * desc_imm) or computed at runtime into address register a0.0 from the
 * register \p desc OR'd with \p desc_imm.
 */
void
brw_send_indirect_message(struct brw_codegen *p,
                          unsigned sfid,
                          struct brw_reg dst,
                          struct brw_reg payload,
                          struct brw_reg desc,
                          unsigned desc_imm,
                          bool eot)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *send;

   dst = retype(dst, BRW_REGISTER_TYPE_UW);

   assert(desc.type == BRW_REGISTER_TYPE_UD);

   if (desc.file == BRW_IMMEDIATE_VALUE) {
      send = next_insn(p, BRW_OPCODE_SEND);
      brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
      brw_set_desc(p, send, desc.ud | desc_imm);
   } else {
      const struct tgl_swsb swsb = brw_get_default_swsb(p);
      struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);

      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));

      /* Load the indirect descriptor to an address register using OR so the
       * caller can specify additional descriptor bits with the desc_imm
       * immediate.
       */
      brw_OR(p, addr, desc, brw_imm_ud(desc_imm));

      brw_pop_insn_state(p);

      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
      send = next_insn(p, BRW_OPCODE_SEND);
      brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));

      /* Gen12+ selects the register descriptor via a dedicated bit rather
       * than via src1.
       */
      if (devinfo->gen >= 12)
         brw_inst_set_send_sel_reg32_desc(devinfo, send, true);
      else
         brw_set_src1(p, send, addr);
   }

   brw_set_dest(p, send, dst);
   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_eot(devinfo, send, eot);
}
2687
/**
 * Emit a split SEND (SENDS, or Gen12+ SEND) with two payload sources and
 * separately specified descriptor and extended descriptor.
 *
 * Both \p desc and \p ex_desc may be immediates or registers; register
 * descriptors are combined with the \p desc_imm / \p ex_desc_imm immediate
 * bits into the address registers a0.0 and a0.2 respectively before the
 * SEND is emitted.
 */
void
brw_send_indirect_split_message(struct brw_codegen *p,
                                unsigned sfid,
                                struct brw_reg dst,
                                struct brw_reg payload0,
                                struct brw_reg payload1,
                                struct brw_reg desc,
                                unsigned desc_imm,
                                struct brw_reg ex_desc,
                                unsigned ex_desc_imm,
                                bool eot)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *send;

   dst = retype(dst, BRW_REGISTER_TYPE_UW);

   assert(desc.type == BRW_REGISTER_TYPE_UD);

   if (desc.file == BRW_IMMEDIATE_VALUE) {
      /* Immediate descriptor: just fold the extra bits in. */
      desc.ud |= desc_imm;
   } else {
      /* Remember the default SWSB annotation so the dependency can be split
       * between the descriptor setup below and the SEND itself.
       */
      const struct tgl_swsb swsb = brw_get_default_swsb(p);
      struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);

      /* Build a0.0 with a scalar, unmasked, unpredicated ALU op. */
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));

      /* Load the indirect descriptor to an address register using OR so the
       * caller can specify additional descriptor bits with the desc_imm
       * immediate.
       */
      brw_OR(p, addr, desc, brw_imm_ud(desc_imm));

      brw_pop_insn_state(p);
      desc = addr;

      /* The SEND reads a0.0 one instruction later. */
      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
   }

   if (ex_desc.file == BRW_IMMEDIATE_VALUE &&
       (devinfo->gen >= 12 || (ex_desc.ud & INTEL_MASK(15, 12)) == 0)) {
      /* The immediate extended descriptor can be encoded directly, either
       * because this is Gen12+ or because bits 15:12 (not representable in
       * the pre-Gen12 encoding) are clear.
       */
      ex_desc.ud |= ex_desc_imm;
   } else {
      const struct tgl_swsb swsb = brw_get_default_swsb(p);
      struct brw_reg addr = retype(brw_address_reg(2), BRW_REGISTER_TYPE_UD);

      /* Build a0.2 with a scalar, unmasked, unpredicated ALU op. */
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));

      /* Load the indirect extended descriptor to an address register using OR
       * so the caller can specify additional descriptor bits with the
       * desc_imm immediate.
       *
       * Even though the instruction dispatcher always pulls the SFID and EOT
       * fields from the instruction itself, actual external unit which
       * processes the message gets the SFID and EOT from the extended
       * descriptor which comes from the address register. If we don't OR
       * those two bits in, the external unit may get confused and hang.
       */
      unsigned imm_part = ex_desc_imm | sfid | eot << 5;

      if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
         /* ex_desc bits 15:12 don't exist in the instruction encoding prior
          * to Gen12, so we may have fallen back to an indirect extended
          * descriptor.
          */
         brw_MOV(p, addr, brw_imm_ud(ex_desc.ud | imm_part));
      } else {
         brw_OR(p, addr, ex_desc, brw_imm_ud(imm_part));
      }

      brw_pop_insn_state(p);
      ex_desc = addr;

      /* The SEND reads a0.2 one instruction later. */
      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
   }

   /* Gen12 folds SENDS into SEND. */
   send = next_insn(p, devinfo->gen >= 12 ? BRW_OPCODE_SEND : BRW_OPCODE_SENDS);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, retype(payload0, BRW_REGISTER_TYPE_UD));
   brw_set_src1(p, send, retype(payload1, BRW_REGISTER_TYPE_UD));

   if (desc.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_send_sel_reg32_desc(devinfo, send, 0);
      brw_inst_set_send_desc(devinfo, send, desc.ud);
   } else {
      /* Register descriptors must come from a0.0. */
      assert(desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(desc.nr == BRW_ARF_ADDRESS);
      assert(desc.subnr == 0);
      brw_inst_set_send_sel_reg32_desc(devinfo, send, 1);
   }

   if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 0);
      brw_inst_set_sends_ex_desc(devinfo, send, ex_desc.ud);
   } else {
      /* Register extended descriptors must come from a DWord-aligned
       * address subregister.
       */
      assert(ex_desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(ex_desc.nr == BRW_ARF_ADDRESS);
      assert((ex_desc.subnr & 0x3) == 0);
      brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 1);
      brw_inst_set_send_ex_desc_ia_subreg_nr(devinfo, send, ex_desc.subnr >> 2);
   }

   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_eot(devinfo, send, eot);
}
2803
/**
 * Emit a surface dataport message whose surface (binding table index) may be
 * a register rather than an immediate.  A register surface index is masked
 * to 8 bits and combined with \p desc_imm in an address register by
 * brw_send_indirect_message().
 */
static void
brw_send_indirect_surface_message(struct brw_codegen *p,
                                  unsigned sfid,
                                  struct brw_reg dst,
                                  struct brw_reg payload,
                                  struct brw_reg surface,
                                  unsigned desc_imm)
{
   if (surface.file != BRW_IMMEDIATE_VALUE) {
      /* Remember the SWSB annotation so the dependency can be split between
       * the AND below and the SEND emitted afterwards.
       */
      const struct tgl_swsb swsb = brw_get_default_swsb(p);
      struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);

      /* Build the descriptor with a scalar, unmasked, unpredicated op. */
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));

      /* Mask out invalid bits from the surface index to avoid hangs e.g. when
       * some surface array is accessed out of bounds.
       */
      brw_AND(p, addr,
              suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
                        BRW_GET_SWZ(surface.swizzle, 0)),
              brw_imm_ud(0xff));

      brw_pop_insn_state(p);

      surface = addr;
      /* The SEND consumes a0.0 one instruction later. */
      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
   }

   brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm, false);
}
2839
2840 static bool
2841 while_jumps_before_offset(const struct gen_device_info *devinfo,
2842 brw_inst *insn, int while_offset, int start_offset)
2843 {
2844 int scale = 16 / brw_jump_scale(devinfo);
2845 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2846 : brw_inst_jip(devinfo, insn);
2847 assert(jip < 0);
2848 return while_offset + jip * scale <= start_offset;
2849 }
2850
2851
/**
 * Scan forward from \p start_offset for the instruction that terminates the
 * current control-flow block: an ENDIF, ELSE or HALT at the same nesting
 * depth, or the WHILE that closes the innermost enclosing loop.  Nested
 * IF/ENDIF pairs and sibling do...while loops are skipped.
 *
 * Returns the byte offset of the terminating instruction, or 0 if none is
 * found before the end of the program.
 */
static int
brw_find_next_block_end(struct brw_codegen *p, int start_offset)
{
   int offset;
   void *store = p->store;
   const struct gen_device_info *devinfo = p->devinfo;

   /* Number of nested IF blocks currently entered during the scan. */
   int depth = 0;

   for (offset = next_offset(devinfo, store, start_offset);
        offset < p->next_insn_offset;
        offset = next_offset(devinfo, store, offset)) {
      brw_inst *insn = store + offset;

      switch (brw_inst_opcode(devinfo, insn)) {
      case BRW_OPCODE_IF:
         depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if (depth == 0)
            return offset;
         depth--;
         break;
      case BRW_OPCODE_WHILE:
         /* If the while doesn't jump before our instruction, it's the end
          * of a sibling do...while loop. Ignore it.
          */
         if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
            continue;
         /* fallthrough */
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_HALT:
         if (depth == 0)
            return offset;
      default:
         break;
      }
   }

   /* No block end found before the end of the program. */
   return 0;
}
2893
2894 /* There is no DO instruction on gen6, so to find the end of the loop
2895 * we have to see if the loop is jumping back before our start
2896 * instruction.
2897 */
2898 static int
2899 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2900 {
2901 const struct gen_device_info *devinfo = p->devinfo;
2902 int offset;
2903 void *store = p->store;
2904
2905 assert(devinfo->gen >= 6);
2906
2907 /* Always start after the instruction (such as a WHILE) we're trying to fix
2908 * up.
2909 */
2910 for (offset = next_offset(devinfo, store, start_offset);
2911 offset < p->next_insn_offset;
2912 offset = next_offset(devinfo, store, offset)) {
2913 brw_inst *insn = store + offset;
2914
2915 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2916 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2917 return offset;
2918 }
2919 }
2920 assert(!"not reached");
2921 return start_offset;
2922 }
2923
/* After program generation, go back and update the UIP and JIP of
 * BREAK, CONT, and HALT instructions to their correct locations.
 *
 * Jump distances are encoded in units of 16/brw_jump_scale() bytes, so all
 * byte deltas computed below are divided by "scale".
 */
void
brw_set_uip_jip(struct brw_codegen *p, int start_offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   int offset;
   int br = brw_jump_scale(devinfo);
   int scale = 16 / br;
   void *store = p->store;

   /* Pre-Gen6 control flow is patched elsewhere; nothing to do here. */
   if (devinfo->gen < 6)
      return;

   for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
      brw_inst *insn = store + offset;
      /* The fixed 16-byte stride above assumes no compacted instructions. */
      assert(brw_inst_cmpt_control(devinfo, insn) == 0);

      int block_end_offset = brw_find_next_block_end(p, offset);
      switch (brw_inst_opcode(devinfo, insn)) {
      case BRW_OPCODE_BREAK:
         assert(block_end_offset != 0);
         brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         /* Gen7 UIP points to WHILE; Gen6 points just after it */
         brw_inst_set_uip(devinfo, insn,
                          (brw_find_loop_end(p, offset) - offset +
                           (devinfo->gen == 6 ? 16 : 0)) / scale);
         break;
      case BRW_OPCODE_CONTINUE:
         assert(block_end_offset != 0);
         brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         brw_inst_set_uip(devinfo, insn,
                          (brw_find_loop_end(p, offset) - offset) / scale);

         assert(brw_inst_uip(devinfo, insn) != 0);
         assert(brw_inst_jip(devinfo, insn) != 0);
         break;

      case BRW_OPCODE_ENDIF: {
         /* An ENDIF with no following block end just advances to the next
          * instruction (a distance of one jump unit).
          */
         int32_t jump = (block_end_offset == 0) ?
                        1 * br : (block_end_offset - offset) / scale;
         if (devinfo->gen >= 7)
            brw_inst_set_jip(devinfo, insn, jump);
         else
            brw_inst_set_gen6_jump_count(devinfo, insn, jump);
         break;
      }

      case BRW_OPCODE_HALT:
         /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
          *
          *    "In case of the halt instruction not inside any conditional
          *     code block, the value of <JIP> and <UIP> should be the
          *     same. In case of the halt instruction inside conditional code
          *     block, the <UIP> should be the end of the program, and the
          *     <JIP> should be end of the most inner conditional code block."
          *
          * The uip will have already been set by whoever set up the
          * instruction.
          */
         if (block_end_offset == 0) {
            brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
         } else {
            brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         }
         assert(brw_inst_uip(devinfo, insn) != 0);
         assert(brw_inst_jip(devinfo, insn) != 0);
         break;

      default:
         break;
      }
   }
}
2999
3000 void brw_ff_sync(struct brw_codegen *p,
3001 struct brw_reg dest,
3002 unsigned msg_reg_nr,
3003 struct brw_reg src0,
3004 bool allocate,
3005 unsigned response_length,
3006 bool eot)
3007 {
3008 const struct gen_device_info *devinfo = p->devinfo;
3009 brw_inst *insn;
3010
3011 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
3012
3013 insn = next_insn(p, BRW_OPCODE_SEND);
3014 brw_set_dest(p, insn, dest);
3015 brw_set_src0(p, insn, src0);
3016 brw_set_src1(p, insn, brw_imm_d(0));
3017
3018 if (devinfo->gen < 6)
3019 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
3020
3021 brw_set_ff_sync_message(p,
3022 insn,
3023 allocate,
3024 response_length,
3025 eot);
3026 }
3027
3028 /**
3029 * Emit the SEND instruction necessary to generate stream output data on Gen6
3030 * (for transform feedback).
3031 *
3032 * If send_commit_msg is true, this is the last piece of stream output data
3033 * from this thread, so send the data as a committed write. According to the
3034 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
3035 *
3036 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
3037 * writes are complete by sending the final write as a committed write."
3038 */
3039 void
3040 brw_svb_write(struct brw_codegen *p,
3041 struct brw_reg dest,
3042 unsigned msg_reg_nr,
3043 struct brw_reg src0,
3044 unsigned binding_table_index,
3045 bool send_commit_msg)
3046 {
3047 const struct gen_device_info *devinfo = p->devinfo;
3048 const unsigned target_cache =
3049 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
3050 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
3051 BRW_SFID_DATAPORT_WRITE);
3052 brw_inst *insn;
3053
3054 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
3055
3056 insn = next_insn(p, BRW_OPCODE_SEND);
3057 brw_inst_set_sfid(devinfo, insn, target_cache);
3058 brw_set_dest(p, insn, dest);
3059 brw_set_src0(p, insn, src0);
3060 brw_set_desc(p, insn,
3061 brw_message_desc(devinfo, 1, send_commit_msg, true) |
3062 brw_dp_write_desc(devinfo, binding_table_index,
3063 0, /* msg_control: ignored */
3064 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
3065 0, /* last_render_target: ignored */
3066 send_commit_msg)); /* send_commit_msg */
3067 }
3068
/**
 * Number of response registers occupied by a surface message returning
 * \p num_channels components at the given execution size (0 == SIMD4x2).
 */
static unsigned
brw_surface_payload_size(struct brw_codegen *p,
                         unsigned num_channels,
                         unsigned exec_size /**< 0 for SIMD4x2 */)
{
   /* SIMD4x2 packs the whole result into a single register.  Otherwise each
    * channel occupies one GRF up to SIMD8 and two GRFs beyond that.
    */
   if (exec_size == 0)
      return 1;

   const unsigned regs_per_channel = (exec_size <= 8) ? 1 : 2;
   return regs_per_channel * num_channels;
}
3081
/**
 * Emit an untyped atomic dataport message.
 *
 * \p atomic_op selects the operation (e.g. BRW_AOP_ADD); \p response_expected
 * controls whether the pre-operation value is returned (and sizes the
 * response accordingly).  In Align16 mode on parts without SIMD4x2 atomics a
 * SIMD8 message is used instead.
 */
void
brw_untyped_atomic(struct brw_codegen *p,
                   struct brw_reg dst,
                   struct brw_reg payload,
                   struct brw_reg surface,
                   unsigned atomic_op,
                   unsigned msg_length,
                   bool response_expected,
                   bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Untyped atomics live on data cache 1 on HSW+, legacy data cache
    * otherwise.
    */
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   /* SIMD4x2 untyped atomic instructions only exist on HSW+ */
   const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
   /* exec_size == 0 encodes a SIMD4x2 message. */
   const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
                              has_simd4x2 ? 0 : 8;
   const unsigned response_length =
      brw_surface_payload_size(p, response_expected, exec_size);
   const unsigned desc =
      brw_message_desc(devinfo, msg_length, response_length, header_present) |
      brw_dp_untyped_atomic_desc(devinfo, exec_size, atomic_op,
                                 response_expected);
   /* Mask out unused components -- This is especially important in Align16
    * mode on generations that don't have native support for SIMD4x2 atomics,
    * because unused but enabled components will cause the dataport to perform
    * additional atomic operations on the addresses that happen to be in the
    * uninitialized Y, Z and W coordinates of the payload.
    */
   const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;

   brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
                                     payload, surface, desc);
}
3118
3119 void
3120 brw_untyped_surface_read(struct brw_codegen *p,
3121 struct brw_reg dst,
3122 struct brw_reg payload,
3123 struct brw_reg surface,
3124 unsigned msg_length,
3125 unsigned num_channels)
3126 {
3127 const struct gen_device_info *devinfo = p->devinfo;
3128 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3129 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3130 GEN7_SFID_DATAPORT_DATA_CACHE);
3131 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3132 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) : 0;
3133 const unsigned response_length =
3134 brw_surface_payload_size(p, num_channels, exec_size);
3135 const unsigned desc =
3136 brw_message_desc(devinfo, msg_length, response_length, false) |
3137 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, false);
3138
3139 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3140 }
3141
3142 void
3143 brw_untyped_surface_write(struct brw_codegen *p,
3144 struct brw_reg payload,
3145 struct brw_reg surface,
3146 unsigned msg_length,
3147 unsigned num_channels,
3148 bool header_present)
3149 {
3150 const struct gen_device_info *devinfo = p->devinfo;
3151 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3152 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3153 GEN7_SFID_DATAPORT_DATA_CACHE);
3154 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3155 /* SIMD4x2 untyped surface write instructions only exist on HSW+ */
3156 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3157 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3158 has_simd4x2 ? 0 : 8;
3159 const unsigned desc =
3160 brw_message_desc(devinfo, msg_length, 0, header_present) |
3161 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, true);
3162 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3163 const unsigned mask = !has_simd4x2 && !align1 ? WRITEMASK_X : WRITEMASK_XYZW;
3164
3165 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3166 payload, surface, desc);
3167 }
3168
/**
 * Fill out the descriptor and message-specific fields of \p insn so it
 * performs a memory fence on the render or data cache dataport.
 *
 * \p commit_enable requests a commit write-back (one response register and
 * bit 5 of the message control field).  \p bti is forwarded as the binding
 * table index; only Gen11+ uses a non-zero value here.
 */
static void
brw_set_memory_fence_message(struct brw_codegen *p,
                             struct brw_inst *insn,
                             enum brw_message_target sfid,
                             bool commit_enable,
                             unsigned bti)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* One payload register; one response register iff a commit is wanted. */
   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, (commit_enable ? 1 : 0), true));

   brw_inst_set_sfid(devinfo, insn, sfid);

   switch (sfid) {
   case GEN6_SFID_DATAPORT_RENDER_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
      break;
   case GEN7_SFID_DATAPORT_DATA_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
      break;
   default:
      unreachable("Not reached");
   }

   /* The commit-enable flag lives in bit 5 of the message control field. */
   if (commit_enable)
      brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);

   assert(devinfo->gen >= 11 || bti == 0);
   brw_inst_set_binding_table_index(devinfo, insn, bti);
}
3200
3201 void
3202 brw_memory_fence(struct brw_codegen *p,
3203 struct brw_reg dst,
3204 struct brw_reg src,
3205 enum opcode send_op,
3206 enum brw_message_target sfid,
3207 bool commit_enable,
3208 unsigned bti)
3209 {
3210 const struct gen_device_info *devinfo = p->devinfo;
3211
3212 dst = retype(vec1(dst), BRW_REGISTER_TYPE_UW);
3213 src = retype(vec1(src), BRW_REGISTER_TYPE_UD);
3214
3215 /* Set dst as destination for dependency tracking, the MEMORY_FENCE
3216 * message doesn't write anything back.
3217 */
3218 struct brw_inst *insn = next_insn(p, send_op);
3219 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3220 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3221 brw_set_dest(p, insn, dst);
3222 brw_set_src0(p, insn, src);
3223 brw_set_memory_fence_message(p, insn, sfid, commit_enable, bti);
3224 }
3225
3226 void
3227 brw_pixel_interpolator_query(struct brw_codegen *p,
3228 struct brw_reg dest,
3229 struct brw_reg mrf,
3230 bool noperspective,
3231 unsigned mode,
3232 struct brw_reg data,
3233 unsigned msg_length,
3234 unsigned response_length)
3235 {
3236 const struct gen_device_info *devinfo = p->devinfo;
3237 const uint16_t exec_size = brw_get_default_exec_size(p);
3238 const unsigned slot_group = brw_get_default_group(p) / 16;
3239 const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
3240 const unsigned desc =
3241 brw_message_desc(devinfo, msg_length, response_length, false) |
3242 brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
3243 slot_group);
3244
3245 /* brw_send_indirect_message will automatically use a direct send message
3246 * if data is actually immediate.
3247 */
3248 brw_send_indirect_message(p,
3249 GEN7_SFID_PIXEL_INTERPOLATOR,
3250 dest,
3251 mrf,
3252 vec1(data),
3253 desc,
3254 false);
3255 }
3256
/**
 * Emit code writing to \p dst the index of the first enabled channel,
 * relative to the current quarter control.  Three strategies are used
 * depending on access mode and generation: ce0 + FBL (Gen8+ Align1),
 * flag-register reconstruction (Gen7 Align1), or mask-register/predication
 * tricks (Align16 / SIMD4x2).
 *
 * \p mask is the dispatch (or vector) mask used to discard channels that
 * were never dispatched; pass an all-ones immediate to skip that step.
 */
void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   /* The flag register is only used on Gen7 in align1 mode, so avoid setting
    * unnecessary bits in the instruction words, get the information we need
    * and reset the default flag register. This allows more instructions to be
    * compacted.
    */
   const unsigned flag_subreg = p->current->flag_subreg;
   brw_set_default_flag_reg(p, 0, 0);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just find
          * the first bit set in the execution mask. The register exists on
          * HSW already but it reads back as all ones when the current
          * instruction has execution masking disabled, so it's kind of
          * useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n). Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_subreg(flag_subreg);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0. We could use a single 32-wide move here if it
          * weren't because of the hardware bug that causes channel enables to
          * be applied incorrectly to the second half of 32-wide instructions
          * on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
            brw_inst_set_flag_reg_nr(devinfo, inst, flag_subreg / 2);
            brw_inst_set_flag_subreg_nr(devinfo, inst, flag_subreg % 2);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register. Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
3379
/**
 * Emit code copying the single channel of \p src selected by the (possibly
 * dynamically non-uniform) index \p idx into all enabled channels of \p dst.
 *
 * Uses a plain MOV when the source is already uniform or the index is a
 * compile-time constant, register-indirect addressing through a0 in Align1
 * mode, and flag-predicated SEL in Align16/SIMD4x2 mode.
 */
void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job, but
       * asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      src = align1 ? stride(suboffset(src, i), 0, 1, 0) :
            stride(suboffset(src, 4 * i), 0, 4, 1);

      if (type_sz(src.type) > 4 && !devinfo->has_64bit_float) {
         /* No 64-bit float MOV available: move the two 32-bit halves. */
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    subscript(src, BRW_REGISTER_TYPE_D, 0));
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    subscript(src, BRW_REGISTER_TYPE_D, 1));
      } else {
         brw_MOV(p, dst, src);
      }
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset. The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address. Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(util_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         brw_set_default_swsb(p, tgl_swsb_regdist(1));

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo) ||
              !devinfo->has_64bit_float)) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around both of this issue, we do two integer MOVs
             * insead of one 64-bit MOV. Because no double value should ever
             * cross a register boundary, it's safe to use the immediate
             * offset in the indirect here to handle adding 4 bytes to the
             * offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                       retype(brw_vec1_indirect(addr.subnr, offset),
                              BRW_REGISTER_TYPE_D));
            brw_set_default_swsb(p, tgl_swsb_null());
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                       retype(brw_vec1_indirect(addr.subnr, offset + 4),
                              BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one, replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
3513
3514 /**
3515 * This instruction is generated as a single-channel align1 instruction by
3516 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
3517 *
3518 * We can't use the typed atomic op in the FS because that has the execution
3519 * mask ANDed with the pixel mask, but we just want to write the one dword for
3520 * all the pixels.
3521 *
3522 * We don't use the SIMD4x2 atomic ops in the VS because want to just write
3523 * one u32. So we use the same untyped atomic write message as the pixel
3524 * shader.
3525 *
3526 * The untyped atomic operation requires a BUFFER surface type with RAW
3527 * format, and is only accessible through the legacy DATA_CACHE dataport
3528 * messages.
3529 */
3530 void brw_shader_time_add(struct brw_codegen *p,
3531 struct brw_reg payload,
3532 uint32_t surf_index)
3533 {
3534 const struct gen_device_info *devinfo = p->devinfo;
3535 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3536 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3537 GEN7_SFID_DATAPORT_DATA_CACHE);
3538 assert(devinfo->gen >= 7);
3539
3540 brw_push_insn_state(p);
3541 brw_set_default_access_mode(p, BRW_ALIGN_1);
3542 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3543 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
3544 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
3545
3546 /* We use brw_vec1_reg and unmasked because we want to increment the given
3547 * offset only once.
3548 */
3549 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
3550 BRW_ARF_NULL, 0));
3551 brw_set_src0(p, send, brw_vec1_reg(payload.file,
3552 payload.nr, 0));
3553 brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
3554 brw_dp_untyped_atomic_desc(devinfo, 1, BRW_AOP_ADD,
3555 false)));
3556
3557 brw_inst_set_sfid(devinfo, send, sfid);
3558 brw_inst_set_binding_table_index(devinfo, send, surf_index);
3559
3560 brw_pop_insn_state(p);
3561 }
3562
3563
3564 /**
3565 * Emit the SEND message for a barrier
3566 */
3567 void
3568 brw_barrier(struct brw_codegen *p, struct brw_reg src)
3569 {
3570 const struct gen_device_info *devinfo = p->devinfo;
3571 struct brw_inst *inst;
3572
3573 assert(devinfo->gen >= 7);
3574
3575 brw_push_insn_state(p);
3576 brw_set_default_access_mode(p, BRW_ALIGN_1);
3577 inst = next_insn(p, BRW_OPCODE_SEND);
3578 brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
3579 brw_set_src0(p, inst, src);
3580 brw_set_src1(p, inst, brw_null_reg());
3581 brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));
3582
3583 brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
3584 brw_inst_set_gateway_subfuncid(devinfo, inst,
3585 BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);
3586
3587 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
3588 brw_pop_insn_state(p);
3589 }
3590
3591
3592 /**
3593 * Emit the wait instruction for a barrier
3594 */
3595 void
3596 brw_WAIT(struct brw_codegen *p)
3597 {
3598 const struct gen_device_info *devinfo = p->devinfo;
3599 struct brw_inst *insn;
3600
3601 struct brw_reg src = brw_notification_reg();
3602
3603 insn = next_insn(p, BRW_OPCODE_WAIT);
3604 brw_set_dest(p, insn, src);
3605 brw_set_src0(p, insn, src);
3606 brw_set_src1(p, insn, brw_null_reg());
3607
3608 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3609 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3610 }
3611
/**
 * Program the floating-point control bits in cr0: clear the bits selected
 * by \p mask, then OR in \p mode.
 */
void
brw_float_controls_mode(struct brw_codegen *p,
                        unsigned mode, unsigned mask)
{
   /* From the Skylake PRM, Volume 7, page 760:
    *  "Implementation Restriction on Register Access: When the control
    *   register is used as an explicit source and/or destination, hardware
    *   does not ensure execution pipeline coherency. Software must set the
    *   thread control field to 'switch' for an instruction that uses
    *   control register as an explicit operand."
    *
    * On Gen12+ this is implemented in terms of SWSB annotations instead.
    */
   brw_set_default_swsb(p, tgl_swsb_regdist(1));

   /* Clear the bits covered by mask. */
   brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                            brw_imm_ud(~mask));
   brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
   if (p->devinfo->gen < 12)
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);

   /* Set the requested mode bits; OR with 0 would be a no-op, so the
    * instruction is skipped entirely when mode == 0.
    */
   if (mode) {
      brw_inst *inst_or = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                                 brw_imm_ud(mode));
      brw_inst_set_exec_size(p->devinfo, inst_or, BRW_EXECUTE_1);
      if (p->devinfo->gen < 12)
         brw_inst_set_thread_control(p->devinfo, inst_or, BRW_THREAD_SWITCH);
   }

   /* NOTE(review): on Gen12+ a SYNC NOP is emitted after the cr0 writes —
    * presumably to order them via SWSB before subsequent instructions;
    * confirm against the Gen12 EU documentation.
    */
   if (p->devinfo->gen >= 12)
      brw_SYNC(p, TGL_SYNC_NOP);
}
3644
3645 void
3646 brw_update_reloc_imm(const struct gen_device_info *devinfo,
3647 brw_inst *inst,
3648 uint32_t value)
3649 {
3650 /* Sanity check that the instruction is a MOV of an immediate */
3651 assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_MOV);
3652 assert(brw_inst_src0_reg_file(devinfo, inst) == BRW_IMMEDIATE_VALUE);
3653
3654 /* If it was compacted, we can't safely rewrite */
3655 assert(brw_inst_cmpt_control(devinfo, inst) == 0);
3656
3657 brw_inst_set_imm_ud(devinfo, inst, value);
3658 }
3659
/* A default value for constants that will be patched at run-time.
 * We pick an arbitrary value that prevents instruction compaction, since
 * a compacted MOV could not be rewritten later (see brw_update_reloc_imm).
 */
#define DEFAULT_PATCH_IMM 0x4a7cc037
3664
3665 void
3666 brw_MOV_reloc_imm(struct brw_codegen *p,
3667 struct brw_reg dst,
3668 enum brw_reg_type src_type,
3669 uint32_t id)
3670 {
3671 assert(type_sz(src_type) == 4);
3672 assert(type_sz(dst.type) == 4);
3673
3674 if (p->num_relocs + 1 > p->reloc_array_size) {
3675 p->reloc_array_size = MAX2(16, p->reloc_array_size * 2);
3676 p->relocs = reralloc(p->mem_ctx, p->relocs,
3677 struct brw_shader_reloc, p->reloc_array_size);
3678 }
3679
3680 p->relocs[p->num_relocs++] = (struct brw_shader_reloc) {
3681 .id = id,
3682 .offset = p->next_insn_offset,
3683 };
3684
3685 brw_MOV(p, dst, retype(brw_imm_ud(DEFAULT_PATCH_IMM), src_type));
3686 }