intel/eu: Set flag [sub]register number differently for 3src
[mesa.git] src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case. This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}

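/**
 * Encode the destination register, type, and region fields of \p inst.
 * When p->automatic_exec_sizes is set, this may also shrink the execution
 * size to match a very small destination region.
 */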
void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* even ignored in da16, still need to set as '01' */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct. However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}

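/**
 * Encode src0's file, type, modifiers, and region (or immediate value).
 * For SEND/SENDC on Gen6+, src0 only names the register where the message
 * payload starts, so source modifiers and indirect addressing are rejected
 * up front.
 */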
void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


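/**
 * Encode src1. Unlike src0, src1 may be a 32-bit immediate, but it may
 * not name the accumulator, an MRF, or use register-indirect addressing.
 */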
void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data. Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}

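/**
 * Set up the descriptor for a message to the shared math function unit.
 * The message and response lengths are inferred from the math function
 * being requested.
 */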
static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }


   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

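/**
 * Fill out the descriptor for a data port write. Prior to Gen6 the data
 * port had a dedicated write SFID; on Gen6+ the caller's target_cache is
 * used as the SFID instead.
 */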
void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }

   if (devinfo->gen >= 11)
      brw_inst_set_null_rt(devinfo, insn, false);
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

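/**
 * Fill out the descriptor for a Gen7+ scratch block read or write.
 * num_regs must be a power of two; it is encoded as log2(num_regs) on
 * Gen8+ and as num_regs - 1 on Gen7.
 */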
static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}

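/**
 * A snapshot of the default instruction state tracked by brw_codegen.
 * Note the packed flag_subreg field: for example, flag register f1.0 is
 * stored as flag_subreg = 2 (register 1 in the top bit, subregister 0 in
 * the bottom bit).
 */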
struct brw_insn_state {
   /* One of BRW_EXECUTE_* */
   unsigned exec_size:3;

   /* Group in units of channels */
   unsigned group:5;

   /* Compression control on gen4-5 */
   bool compressed:1;

   /* One of BRW_MASK_* */
   unsigned mask_control:1;

   bool saturate:1;

   /* One of BRW_ALIGN_* */
   unsigned access_mode:1;

   /* One of BRW_PREDICATE_* */
   enum brw_predicate predicate:4;

   bool pred_inv:1;

   /* Flag subreg. Bottom bit is subreg, top bit is reg */
   unsigned flag_subreg:2;

   bool acc_wr_control:1;
};

static struct brw_insn_state
brw_inst_get_state(const struct gen_device_info *devinfo,
                   const brw_inst *insn)
{
   struct brw_insn_state state = { };

   state.exec_size = brw_inst_exec_size(devinfo, insn);
   if (devinfo->gen >= 6) {
      state.group = brw_inst_qtr_control(devinfo, insn) * 8;
      if (devinfo->gen >= 7)
         state.group += brw_inst_nib_control(devinfo, insn) * 4;
   } else {
      unsigned qtr_control = brw_inst_qtr_control(devinfo, insn);
      if (qtr_control == BRW_COMPRESSION_COMPRESSED) {
         state.group = 0;
         state.compressed = true;
      } else {
         state.group = qtr_control * 8;
         state.compressed = false;
      }
   }
   state.access_mode = brw_inst_access_mode(devinfo, insn);
   state.mask_control = brw_inst_mask_control(devinfo, insn);
   state.saturate = brw_inst_saturate(devinfo, insn);
   state.predicate = brw_inst_pred_control(devinfo, insn);
   state.pred_inv = brw_inst_pred_inv(devinfo, insn);

   state.flag_subreg = brw_inst_flag_subreg_nr(devinfo, insn);
   if (devinfo->gen >= 7)
      state.flag_subreg += brw_inst_flag_reg_nr(devinfo, insn) * 2;

   if (devinfo->gen >= 6)
      state.acc_wr_control = brw_inst_acc_wr_control(devinfo, insn);

   return state;
}

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

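   /* Align16 3-src instructions keep the flag register and subregister
    * numbers in different encoding fields than the 1- and 2-src formats,
    * so they need the dedicated 3src setters.
    */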
   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

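/**
 * Append a new instruction to the program (aliased as next_insn below),
 * doubling the instruction store if it is full, and initialize the
 * instruction with the given opcode and the current default state.
 */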
#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   struct brw_insn_state current = brw_inst_get_state(devinfo, p->current);
   brw_inst_set_state(devinfo, insn, &current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
    * use 32-bit units (components 0..7). Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

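/**
 * Emit a three-source ALU instruction, using the Align1 encoding when the
 * default access mode is Align1 (the gen10_align1_3src enums suggest this
 * form is for newer hardware) and the older Align16 encoding otherwise.
 */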
static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types. The MAD and LRP emitters ensure
          * that all four types are float. The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
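/* Each ALUn(OP) macro below expands into a brw_OP() wrapper around the
 * matching brw_alun() emitter; e.g. ALU2(ADD) defines brw_ADD().
 */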
#define ALU1(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                        \
                   struct brw_reg dest,                          \
                   struct brw_reg src0)                          \
{                                                                \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);              \
}

#define ALU2(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                        \
                   struct brw_reg dest,                          \
                   struct brw_reg src0,                          \
                   struct brw_reg src1)                          \
{                                                                \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);        \
}

#define ALU3(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                        \
                   struct brw_reg dest,                          \
                   struct brw_reg src0,                          \
                   struct brw_reg src1,                          \
                   struct brw_reg src2)                          \
{                                                                \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);  \
}

#define ALU3F(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                        \
                   struct brw_reg dest,                          \
                   struct brw_reg src0,                          \
                   struct brw_reg src1,                          \
                   struct brw_reg src2)                          \
{                                                                \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                    \
          dest.type == BRW_REGISTER_TYPE_DF);                    \
   if (dest.type == BRW_REGISTER_TYPE_F) {                       \
      assert(src0.type == BRW_REGISTER_TYPE_F);                  \
      assert(src1.type == BRW_REGISTER_TYPE_F);                  \
      assert(src2.type == BRW_REGISTER_TYPE_F);                  \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {               \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                 \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                 \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                 \
   }                                                             \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);  \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register. A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

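   /* For the zero-fill case, treat the UD destination as words with a
    * stride of 2: the converted result lands in the low word of each
    * dword, and the MOV of 0 below fills the high word.
    */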
   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}





/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

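/* Remember where an IF/ELSE instruction lives so that the matching ENDIF
 * can patch its jump targets later. An index into p->store is kept rather
 * than a pointer, because the store may be reallocated as it grows.
 */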
1328 static void
1329 push_if_stack(struct brw_codegen *p, brw_inst *inst)
1330 {
1331 p->if_stack[p->if_stack_depth] = inst - p->store;
1332
1333 p->if_stack_depth++;
1334 if (p->if_stack_array_size <= p->if_stack_depth) {
1335 p->if_stack_array_size *= 2;
1336 p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
1337 p->if_stack_array_size);
1338 }
1339 }
1340
1341 static brw_inst *
1342 pop_if_stack(struct brw_codegen *p)
1343 {
1344 p->if_stack_depth--;
1345 return &p->store[p->if_stack[p->if_stack_depth]];
1346 }
1347
1348 static void
1349 push_loop_stack(struct brw_codegen *p, brw_inst *inst)
1350 {
1351 if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
1352 p->loop_stack_array_size *= 2;
1353 p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
1354 p->loop_stack_array_size);
1355 p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
1356 p->loop_stack_array_size);
1357 }
1358
1359 p->loop_stack[p->loop_stack_depth] = inst - p->store;
1360 p->loop_stack_depth++;
1361 p->if_depth_in_loop[p->loop_stack_depth] = 0;
1362 }
1363
1364 static brw_inst *
1365 get_inner_do_insn(struct brw_codegen *p)
1366 {
1367 return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
1368 }
1369
1370 /* EU takes the value from the flag register and pushes it onto some
1371 * sort of a stack (presumably merging with any flag value already on
1372 * the stack). Within an if block, the flags at the top of the stack
1373 * control execution on each channel of the unit, eg. on each of the
1374 * 16 pixel values in our wm programs.
1375 *
1376 * When the matching 'else' instruction is reached (presumably by
1377 * countdown of the instruction count patched in by our ELSE/ENDIF
1378 * functions), the relevant flags are inverted.
1379 *
1380 * When the matching 'endif' instruction is reached, the flags are
1381 * popped off. If the stack is now empty, normal execution resumes.
1382 */
1383 brw_inst *
1384 brw_IF(struct brw_codegen *p, unsigned execute_size)
1385 {
1386 const struct gen_device_info *devinfo = p->devinfo;
1387 brw_inst *insn;
1388
1389 insn = next_insn(p, BRW_OPCODE_IF);
1390
1391 /* Override the defaults for this instruction:
1392 */
1393 if (devinfo->gen < 6) {
1394 brw_set_dest(p, insn, brw_ip_reg());
1395 brw_set_src0(p, insn, brw_ip_reg());
1396 brw_set_src1(p, insn, brw_imm_d(0x0));
1397 } else if (devinfo->gen == 6) {
1398 brw_set_dest(p, insn, brw_imm_w(0));
1399 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1400 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1401 brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1402 } else if (devinfo->gen == 7) {
1403 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1404 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1405 brw_set_src1(p, insn, brw_imm_w(0));
1406 brw_inst_set_jip(devinfo, insn, 0);
1407 brw_inst_set_uip(devinfo, insn, 0);
1408 } else {
1409 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1410 brw_set_src0(p, insn, brw_imm_d(0));
1411 brw_inst_set_jip(devinfo, insn, 0);
1412 brw_inst_set_uip(devinfo, insn, 0);
1413 }
1414
1415 brw_inst_set_exec_size(devinfo, insn, execute_size);
1416 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1417 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
1418 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1419 if (!p->single_program_flow && devinfo->gen < 6)
1420 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1421
1422 push_if_stack(p, insn);
1423 p->if_depth_in_loop[p->loop_stack_depth]++;
1424 return insn;
1425 }
1426
1427 /* This function is only used for gen6-style IF instructions with an
1428 * embedded comparison (conditional modifier). It is not used on gen7.
1429 */
1430 brw_inst *
1431 gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
1432 struct brw_reg src0, struct brw_reg src1)
1433 {
1434 const struct gen_device_info *devinfo = p->devinfo;
1435 brw_inst *insn;
1436
1437 insn = next_insn(p, BRW_OPCODE_IF);
1438
1439 brw_set_dest(p, insn, brw_imm_w(0));
1440 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1441 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1442 brw_set_src0(p, insn, src0);
1443 brw_set_src1(p, insn, src1);
1444
1445 assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
1446 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1447 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1448
1449 push_if_stack(p, insn);
1450 return insn;
1451 }
1452
1453 /**
1454 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
1455 */
1456 static void
1457 convert_IF_ELSE_to_ADD(struct brw_codegen *p,
1458 brw_inst *if_inst, brw_inst *else_inst)
1459 {
1460 const struct gen_device_info *devinfo = p->devinfo;
1461
1462 /* The next instruction (where the ENDIF would be, if it existed) */
1463 brw_inst *next_inst = &p->store[p->nr_insn];
1464
1465 assert(p->single_program_flow);
1466 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1467 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1468 assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);
1469
1470 /* Convert IF to an ADD instruction that moves the instruction pointer
1471 * to the first instruction of the ELSE block. If there is no ELSE
1472 * block, point to where ENDIF would be. Reverse the predicate.
1473 *
1474 * There's no need to execute an ENDIF since we don't need to do any
1475 * stack operations, and if we're currently executing, we just want to
1476 * continue normally.
1477 */
1478 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
1479 brw_inst_set_pred_inv(devinfo, if_inst, true);
1480
1481 if (else_inst != NULL) {
1482 /* Convert ELSE to an ADD instruction that points where the ENDIF
1483 * would be.
1484 */
1485 brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);
1486
1487 brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
1488 brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
1489 } else {
1490 brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
1491 }
1492 }
1493
1494 /**
1495 * Patch IF and ELSE instructions with appropriate jump targets.
1496 */
1497 static void
1498 patch_IF_ELSE(struct brw_codegen *p,
1499 brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
1500 {
1501 const struct gen_device_info *devinfo = p->devinfo;
1502
1503 /* We shouldn't be patching IF and ELSE instructions in single program flow
1504 * mode when gen < 6, because in single program flow mode on those
1505 * platforms, we convert flow control instructions to conditional ADDs that
1506 * operate on IP (see brw_ENDIF).
1507 *
1508 * However, on Gen6, writing to IP doesn't work in single program flow mode
1509 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1510 * not be updated by non-flow control instructions."). And on later
1511 * platforms, there is no significant benefit to converting control flow
1512 * instructions to conditional ADDs. So we do patch IF and ELSE
1513 * instructions in single program flow mode on those platforms.
1514 */
1515 if (devinfo->gen < 6)
1516 assert(!p->single_program_flow);
1517
1518 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1519 assert(endif_inst != NULL);
1520 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1521
1522 unsigned br = brw_jump_scale(devinfo);
1523
1524 assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
1525 brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));
1526
1527 if (else_inst == NULL) {
1528 /* Patch IF -> ENDIF */
1529 if (devinfo->gen < 6) {
1530 /* Turn it into an IFF, which means no mask stack operations for
1531 * all-false and jumping past the ENDIF.
1532 */
1533 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
1534 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1535 br * (endif_inst - if_inst + 1));
1536 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1537 } else if (devinfo->gen == 6) {
1538 /* As of gen6, there is no IFF and IF must point to the ENDIF. */
1539 brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
1540 } else {
1541 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1542 brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
1543 }
1544 } else {
1545 brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));
1546
1547 /* Patch IF -> ELSE */
1548 if (devinfo->gen < 6) {
1549 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1550 br * (else_inst - if_inst));
1551 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1552 } else if (devinfo->gen == 6) {
1553 brw_inst_set_gen6_jump_count(devinfo, if_inst,
1554 br * (else_inst - if_inst + 1));
1555 }
1556
1557 /* Patch ELSE -> ENDIF */
1558 if (devinfo->gen < 6) {
1559 /* BRW_OPCODE_ELSE pre-gen6 should point just past the
1560 * matching ENDIF.
1561 */
1562 brw_inst_set_gen4_jump_count(devinfo, else_inst,
1563 br * (endif_inst - else_inst + 1));
1564 brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
1565 } else if (devinfo->gen == 6) {
1566 /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
1567 brw_inst_set_gen6_jump_count(devinfo, else_inst,
1568 br * (endif_inst - else_inst));
1569 } else {
1570 /* The IF instruction's JIP should point just past the ELSE */
1571 brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
1572 /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
1573 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1574 brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
1575 if (devinfo->gen >= 8) {
1576 /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
1577 * should point to ENDIF.
1578 */
1579 brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
1580 }
1581 }
1582 }
1583 }
1584
1585 void
1586 brw_ELSE(struct brw_codegen *p)
1587 {
1588 const struct gen_device_info *devinfo = p->devinfo;
1589 brw_inst *insn;
1590
1591 insn = next_insn(p, BRW_OPCODE_ELSE);
1592
1593 if (devinfo->gen < 6) {
1594 brw_set_dest(p, insn, brw_ip_reg());
1595 brw_set_src0(p, insn, brw_ip_reg());
1596 brw_set_src1(p, insn, brw_imm_d(0x0));
1597 } else if (devinfo->gen == 6) {
1598 brw_set_dest(p, insn, brw_imm_w(0));
1599 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1600 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1601 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1602 } else if (devinfo->gen == 7) {
1603 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1604 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1605 brw_set_src1(p, insn, brw_imm_w(0));
1606 brw_inst_set_jip(devinfo, insn, 0);
1607 brw_inst_set_uip(devinfo, insn, 0);
1608 } else {
1609 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1610 brw_set_src0(p, insn, brw_imm_d(0));
1611 brw_inst_set_jip(devinfo, insn, 0);
1612 brw_inst_set_uip(devinfo, insn, 0);
1613 }
1614
1615 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1616 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1617 if (!p->single_program_flow && devinfo->gen < 6)
1618 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1619
1620 push_if_stack(p, insn);
1621 }
1622
1623 void
1624 brw_ENDIF(struct brw_codegen *p)
1625 {
1626 const struct gen_device_info *devinfo = p->devinfo;
1627 brw_inst *insn = NULL;
1628 brw_inst *else_inst = NULL;
1629 brw_inst *if_inst = NULL;
1630 brw_inst *tmp;
1631 bool emit_endif = true;
1632
1633 /* In single program flow mode, we can express IF and ELSE instructions
1634 * equivalently as ADD instructions that operate on IP. On platforms prior
1635 * to Gen6, flow control instructions cause an implied thread switch, so
1636 * this is a significant savings.
1637 *
1638 * However, on Gen6, writing to IP doesn't work in single program flow mode
1639 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1640 * not be updated by non-flow control instructions."). And on later
1641 * platforms, there is no significant benefit to converting control flow
1642 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1643 * Gen5.
1644 */
1645 if (devinfo->gen < 6 && p->single_program_flow)
1646 emit_endif = false;
1647
1648 /*
1649     * A single next_insn() may reallocate the instruction store and
1650     * change its base address (p->store), so call it first, before taking
1651     * any instruction pointer out of the store by index.
1652 */
1653 if (emit_endif)
1654 insn = next_insn(p, BRW_OPCODE_ENDIF);
1655
1656 /* Pop the IF and (optional) ELSE instructions from the stack */
1657 p->if_depth_in_loop[p->loop_stack_depth]--;
1658 tmp = pop_if_stack(p);
1659 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1660 else_inst = tmp;
1661 tmp = pop_if_stack(p);
1662 }
1663 if_inst = tmp;
1664
1665 if (!emit_endif) {
1666 /* ENDIF is useless; don't bother emitting it. */
1667 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1668 return;
1669 }
1670
1671 if (devinfo->gen < 6) {
1672 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1673 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1674 brw_set_src1(p, insn, brw_imm_d(0x0));
1675 } else if (devinfo->gen == 6) {
1676 brw_set_dest(p, insn, brw_imm_w(0));
1677 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1678 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1679 } else if (devinfo->gen == 7) {
1680 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1681 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1682 brw_set_src1(p, insn, brw_imm_w(0));
1683 } else {
1684 brw_set_src0(p, insn, brw_imm_d(0));
1685 }
1686
1687 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1688 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1689 if (devinfo->gen < 6)
1690 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1691
1692 /* Also pop item off the stack in the endif instruction: */
1693 if (devinfo->gen < 6) {
1694 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1695 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1696 } else if (devinfo->gen == 6) {
1697 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1698 } else {
1699 brw_inst_set_jip(devinfo, insn, 2);
1700 }
1701 patch_IF_ELSE(p, if_inst, else_inst, insn);
1702 }
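
/* Illustrative sketch (not part of the original file): a typical emission
 * sequence for a conditional, assuming the brw_IF() helper declared in
 * brw_eu.h. It is the ENDIF that triggers back-patching of the IF/ELSE
 * jump targets via patch_IF_ELSE():
 *
 *    brw_IF(p, BRW_EXECUTE_8);      // pushed on the if stack
 *       brw_MOV(p, dst, src_then);  // "then" block
 *    brw_ELSE(p);                   // pushed on the if stack
 *       brw_MOV(p, dst, src_else);  // "else" block
 *    brw_ENDIF(p);                  // pops IF/ELSE and patches them
 */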
1703
1704 brw_inst *
1705 brw_BREAK(struct brw_codegen *p)
1706 {
1707 const struct gen_device_info *devinfo = p->devinfo;
1708 brw_inst *insn;
1709
1710 insn = next_insn(p, BRW_OPCODE_BREAK);
1711 if (devinfo->gen >= 8) {
1712 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1713 brw_set_src0(p, insn, brw_imm_d(0x0));
1714 } else if (devinfo->gen >= 6) {
1715 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1716 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1717 brw_set_src1(p, insn, brw_imm_d(0x0));
1718 } else {
1719 brw_set_dest(p, insn, brw_ip_reg());
1720 brw_set_src0(p, insn, brw_ip_reg());
1721 brw_set_src1(p, insn, brw_imm_d(0x0));
1722 brw_inst_set_gen4_pop_count(devinfo, insn,
1723 p->if_depth_in_loop[p->loop_stack_depth]);
1724 }
1725 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1726 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1727
1728 return insn;
1729 }
1730
1731 brw_inst *
1732 brw_CONT(struct brw_codegen *p)
1733 {
1734 const struct gen_device_info *devinfo = p->devinfo;
1735 brw_inst *insn;
1736
1737 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1738 brw_set_dest(p, insn, brw_ip_reg());
1739 if (devinfo->gen >= 8) {
1740 brw_set_src0(p, insn, brw_imm_d(0x0));
1741 } else {
1742 brw_set_src0(p, insn, brw_ip_reg());
1743 brw_set_src1(p, insn, brw_imm_d(0x0));
1744 }
1745
1746 if (devinfo->gen < 6) {
1747 brw_inst_set_gen4_pop_count(devinfo, insn,
1748 p->if_depth_in_loop[p->loop_stack_depth]);
1749 }
1750 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1751 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1752 return insn;
1753 }
1754
1755 brw_inst *
1756 gen6_HALT(struct brw_codegen *p)
1757 {
1758 const struct gen_device_info *devinfo = p->devinfo;
1759 brw_inst *insn;
1760
1761 insn = next_insn(p, BRW_OPCODE_HALT);
1762 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1763 if (devinfo->gen >= 8) {
1764 brw_set_src0(p, insn, brw_imm_d(0x0));
1765 } else {
1766 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1767 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1768 }
1769
1770 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1771 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1772 return insn;
1773 }
1774
1775 /* DO/WHILE loop:
1776 *
1777 * The DO/WHILE is just an unterminated loop -- break or continue are
1778 * used for control within the loop. We have a few ways they can be
1779 * done.
1780 *
1781 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1782 * jip and no DO instruction.
1783 *
1784 * For non-uniform control flow pre-gen6, there's a DO instruction to
1785 * push the mask, and a WHILE to jump back, and BREAK to get out and
1786 * pop the mask.
1787 *
1788 * For gen6, there's no more mask stack, so no need for DO. WHILE
1789 * just points back to the first instruction of the loop.
1790 */
1791 brw_inst *
1792 brw_DO(struct brw_codegen *p, unsigned execute_size)
1793 {
1794 const struct gen_device_info *devinfo = p->devinfo;
1795
1796 if (devinfo->gen >= 6 || p->single_program_flow) {
1797 push_loop_stack(p, &p->store[p->nr_insn]);
1798 return &p->store[p->nr_insn];
1799 } else {
1800 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1801
1802 push_loop_stack(p, insn);
1803
1804 /* Override the defaults for this instruction:
1805 */
1806 brw_set_dest(p, insn, brw_null_reg());
1807 brw_set_src0(p, insn, brw_null_reg());
1808 brw_set_src1(p, insn, brw_null_reg());
1809
1810 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1811 brw_inst_set_exec_size(devinfo, insn, execute_size);
1812 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1813
1814 return insn;
1815 }
1816 }
1817
1818 /**
1819 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1820 * instruction here.
1821 *
1822 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1823 * nesting, since it can always just point to the end of the block/current loop.
1824 */
1825 static void
1826 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1827 {
1828 const struct gen_device_info *devinfo = p->devinfo;
1829 brw_inst *do_inst = get_inner_do_insn(p);
1830 brw_inst *inst;
1831 unsigned br = brw_jump_scale(devinfo);
1832
1833 assert(devinfo->gen < 6);
1834
1835 for (inst = while_inst - 1; inst != do_inst; inst--) {
1836       /* A non-zero jump count means this instruction has already been
1837        * patched, because it belongs to a loop nested inside the one we
1838        * are patching.
1839 */
1840 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1841 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1842          brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst + 1));
1843 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1844 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1845 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1846 }
1847 }
1848 }
1849
1850 brw_inst *
1851 brw_WHILE(struct brw_codegen *p)
1852 {
1853 const struct gen_device_info *devinfo = p->devinfo;
1854 brw_inst *insn, *do_insn;
1855 unsigned br = brw_jump_scale(devinfo);
1856
1857 if (devinfo->gen >= 6) {
1858 insn = next_insn(p, BRW_OPCODE_WHILE);
1859 do_insn = get_inner_do_insn(p);
1860
1861 if (devinfo->gen >= 8) {
1862 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1863 brw_set_src0(p, insn, brw_imm_d(0));
1864 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1865 } else if (devinfo->gen == 7) {
1866 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1867 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1868 brw_set_src1(p, insn, brw_imm_w(0));
1869 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1870 } else {
1871 brw_set_dest(p, insn, brw_imm_w(0));
1872 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1873 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1874 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1875 }
1876
1877 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1878
1879 } else {
1880 if (p->single_program_flow) {
1881 insn = next_insn(p, BRW_OPCODE_ADD);
1882 do_insn = get_inner_do_insn(p);
1883
1884 brw_set_dest(p, insn, brw_ip_reg());
1885 brw_set_src0(p, insn, brw_ip_reg());
1886 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1887 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1888 } else {
1889 insn = next_insn(p, BRW_OPCODE_WHILE);
1890 do_insn = get_inner_do_insn(p);
1891
1892 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1893
1894 brw_set_dest(p, insn, brw_ip_reg());
1895 brw_set_src0(p, insn, brw_ip_reg());
1896 brw_set_src1(p, insn, brw_imm_d(0));
1897
1898 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1899 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1900 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1901
1902 brw_patch_break_cont(p, insn);
1903 }
1904 }
1905 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1906
1907 p->loop_stack_depth--;
1908
1909 return insn;
1910 }
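
/* Illustrative sketch (not part of the original file): a minimal
 * non-uniform loop. On gen6+ brw_DO() emits nothing and only records the
 * loop start, and brw_set_uip_jip() later fixes up the BREAK; pre-gen6
 * the WHILE itself patches BREAK/CONT via brw_patch_break_cont():
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *       ...loop body...
 *       brw_inst_set_pred_control(p->devinfo, brw_BREAK(p),
 *                                 BRW_PREDICATE_NORMAL);
 *    brw_WHILE(p);                  // jumps back to the DO point
 */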
1911
1912 /* FORWARD JUMPS:
1913 */
1914 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1915 {
1916 const struct gen_device_info *devinfo = p->devinfo;
1917 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1918 unsigned jmpi = 1;
1919
1920 if (devinfo->gen >= 5)
1921 jmpi = 2;
1922
1923 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1924 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1925
1926 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1927 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1928 }
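
/* Worked example (assumed values, for illustration): with a JMPI at index
 * 10 and p->nr_insn == 15, four instructions separate the JMPI from the
 * landing point, so the jump count is jmpi * (15 - 10 - 1) == 4 on gen4
 * (jmpi == 1) or 8 on gen5+ (jmpi == 2, counting in 64-bit chunks).
 */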
1929
1930 /* To integrate with the above, it makes sense that the comparison
1931 * instruction should populate the flag register. It might be simpler
1932 * just to use the flag reg for most WM tasks?
1933 */
1934 void brw_CMP(struct brw_codegen *p,
1935 struct brw_reg dest,
1936 unsigned conditional,
1937 struct brw_reg src0,
1938 struct brw_reg src1)
1939 {
1940 const struct gen_device_info *devinfo = p->devinfo;
1941 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1942
1943 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1944 brw_set_dest(p, insn, dest);
1945 brw_set_src0(p, insn, src0);
1946 brw_set_src1(p, insn, src1);
1947
1948 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1949 * page says:
1950 * "Any CMP instruction with a null destination must use a {switch}."
1951 *
1952 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1953     * mentioned on their workarounds pages.
1954 */
1955 if (devinfo->gen == 7) {
1956 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1957 dest.nr == BRW_ARF_NULL) {
1958 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1959 }
1960 }
1961 }
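
/* Illustrative usage (not part of the original file): compare into the
 * flag register with a null destination, then predicate a following
 * instruction on the result:
 *
 *    brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_D),
 *            BRW_CONDITIONAL_GE, src0, src1);
 *    brw_inst_set_pred_control(p->devinfo, brw_MOV(p, dst, src0),
 *                              BRW_PREDICATE_NORMAL);
 */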
1962
1963 /***********************************************************************
1964 * Helpers for the various SEND message types:
1965 */
1966
1967 /** Extended math function, float[8].
1968 */
1969 void gen4_math(struct brw_codegen *p,
1970 struct brw_reg dest,
1971 unsigned function,
1972 unsigned msg_reg_nr,
1973 struct brw_reg src,
1974 unsigned precision )
1975 {
1976 const struct gen_device_info *devinfo = p->devinfo;
1977 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1978 unsigned data_type;
1979 if (has_scalar_region(src)) {
1980 data_type = BRW_MATH_DATA_SCALAR;
1981 } else {
1982 data_type = BRW_MATH_DATA_VECTOR;
1983 }
1984
1985 assert(devinfo->gen < 6);
1986
1987 /* Example code doesn't set predicate_control for send
1988 * instructions.
1989 */
1990 brw_inst_set_pred_control(devinfo, insn, 0);
1991 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1992
1993 brw_set_dest(p, insn, dest);
1994 brw_set_src0(p, insn, src);
1995 brw_set_math_message(p,
1996 insn,
1997 function,
1998 src.type == BRW_REGISTER_TYPE_D,
1999 precision,
2000 data_type);
2001 }
2002
2003 void gen6_math(struct brw_codegen *p,
2004 struct brw_reg dest,
2005 unsigned function,
2006 struct brw_reg src0,
2007 struct brw_reg src1)
2008 {
2009 const struct gen_device_info *devinfo = p->devinfo;
2010 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
2011
2012 assert(devinfo->gen >= 6);
2013
2014 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
2015 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
2016
2017 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
2018 if (devinfo->gen == 6) {
2019 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
2020 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
2021 }
2022
2023 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
2024 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
2025 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
2026 assert(src0.type != BRW_REGISTER_TYPE_F);
2027 assert(src1.type != BRW_REGISTER_TYPE_F);
2028 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
2029 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
2030 } else {
2031 assert(src0.type == BRW_REGISTER_TYPE_F);
2032 assert(src1.type == BRW_REGISTER_TYPE_F);
2033 }
2034
2035 /* Source modifiers are ignored for extended math instructions on Gen6. */
2036 if (devinfo->gen == 6) {
2037 assert(!src0.negate);
2038 assert(!src0.abs);
2039 assert(!src1.negate);
2040 assert(!src1.abs);
2041 }
2042
2043 brw_inst_set_math_function(devinfo, insn, function);
2044
2045 brw_set_dest(p, insn, dest);
2046 brw_set_src0(p, insn, src0);
2047 brw_set_src1(p, insn, src1);
2048 }
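
/* Illustrative usage (not part of the original file): a reciprocal square
 * root on gen6+. For the non-INT_DIV functions both sources must be
 * float, so the unused second source is the null register retyped to F:
 *
 *    gen6_math(p, retype(dst, BRW_REGISTER_TYPE_F),
 *              BRW_MATH_FUNCTION_RSQ,
 *              retype(src, BRW_REGISTER_TYPE_F),
 *              retype(brw_null_reg(), BRW_REGISTER_TYPE_F));
 */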
2049
2050 /**
2051 * Return the right surface index to access the thread scratch space using
2052 * stateless dataport messages.
2053 */
2054 unsigned
2055 brw_scratch_surface_idx(const struct brw_codegen *p)
2056 {
2057 /* The scratch space is thread-local so IA coherency is unnecessary. */
2058 if (p->devinfo->gen >= 8)
2059 return GEN8_BTI_STATELESS_NON_COHERENT;
2060 else
2061 return BRW_BTI_STATELESS;
2062 }
2063
2064 /**
2065  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
2066 * using a constant offset per channel.
2067 *
2068 * The offset must be aligned to oword size (16 bytes). Used for
2069 * register spilling.
2070 */
2071 void brw_oword_block_write_scratch(struct brw_codegen *p,
2072 struct brw_reg mrf,
2073 int num_regs,
2074 unsigned offset)
2075 {
2076 const struct gen_device_info *devinfo = p->devinfo;
2077 const unsigned target_cache =
2078 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2079 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2080 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2081 uint32_t msg_type;
2082
2083 if (devinfo->gen >= 6)
2084 offset /= 16;
2085
2086 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2087
2088 const unsigned mlen = 1 + num_regs;
2089
2090 /* Set up the message header. This is g0, with g0.2 filled with
2091 * the offset. We don't want to leave our offset around in g0 or
2092 * it'll screw up texture samples, so set it up inside the message
2093 * reg.
2094 */
2095 {
2096 brw_push_insn_state(p);
2097 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2098 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2099 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2100
2101 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2102
2103 /* set message header global offset field (reg 0, element 2) */
2104 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2105 brw_MOV(p,
2106 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2107 mrf.nr,
2108 2), BRW_REGISTER_TYPE_UD),
2109 brw_imm_ud(offset));
2110
2111 brw_pop_insn_state(p);
2112 }
2113
2114 {
2115 struct brw_reg dest;
2116 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2117 int send_commit_msg;
2118 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2119 BRW_REGISTER_TYPE_UW);
2120
2121 brw_inst_set_compression(devinfo, insn, false);
2122
2123 if (brw_inst_exec_size(devinfo, insn) >= 16)
2124 src_header = vec16(src_header);
2125
2126 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2127 if (devinfo->gen < 6)
2128 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2129
2130 /* Until gen6, writes followed by reads from the same location
2131 * are not guaranteed to be ordered unless write_commit is set.
2132 * If set, then a no-op write is issued to the destination
2133 * register to set a dependency, and a read from the destination
2134 * can be used to ensure the ordering.
2135 *
2136 * For gen6, only writes between different threads need ordering
2137 * protection. Our use of DP writes is all about register
2138 * spilling within a thread.
2139 */
2140 if (devinfo->gen >= 6) {
2141 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2142 send_commit_msg = 0;
2143 } else {
2144 dest = src_header;
2145 send_commit_msg = 1;
2146 }
2147
2148 brw_set_dest(p, insn, dest);
2149 if (devinfo->gen >= 6) {
2150 brw_set_src0(p, insn, mrf);
2151 } else {
2152 brw_set_src0(p, insn, brw_null_reg());
2153 }
2154
2155 if (devinfo->gen >= 6)
2156 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2157 else
2158 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2159
2160 brw_set_dp_write_message(p,
2161 insn,
2162 brw_scratch_surface_idx(p),
2163 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2164 msg_type,
2165 target_cache,
2166 mlen,
2167 true, /* header_present */
2168 0, /* not a render target */
2169 send_commit_msg, /* response_length */
2170 0, /* eot */
2171 send_commit_msg);
2172 }
2173 }
2174
2175
2176 /**
2177 * Read a block of owords (half a GRF each) from the scratch buffer
2178 * using a constant index per channel.
2179 *
2180 * Offset must be aligned to oword size (16 bytes). Used for register
2181 * spilling.
2182 */
2183 void
2184 brw_oword_block_read_scratch(struct brw_codegen *p,
2185 struct brw_reg dest,
2186 struct brw_reg mrf,
2187 int num_regs,
2188 unsigned offset)
2189 {
2190 const struct gen_device_info *devinfo = p->devinfo;
2191
2192 if (devinfo->gen >= 6)
2193 offset /= 16;
2194
2195 if (p->devinfo->gen >= 7) {
2196 /* On gen 7 and above, we no longer have message registers and we can
2197 * send from any register we want. By using the destination register
2198 * for the message, we guarantee that the implied message write won't
2199 * accidentally overwrite anything. This has been a problem because
2200 * the MRF registers and source for the final FB write are both fixed
2201 * and may overlap.
2202 */
2203 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2204 } else {
2205 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2206 }
2207 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2208
2209 const unsigned rlen = num_regs;
2210 const unsigned target_cache =
2211 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2212 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2213 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2214
2215 {
2216 brw_push_insn_state(p);
2217 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2218 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2219 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2220
2221 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2222
2223 /* set message header global offset field (reg 0, element 2) */
2224 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2225 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2226
2227 brw_pop_insn_state(p);
2228 }
2229
2230 {
2231 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2232
2233 assert(brw_inst_pred_control(devinfo, insn) == 0);
2234 brw_inst_set_compression(devinfo, insn, false);
2235
2236 brw_set_dest(p, insn, dest); /* UW? */
2237 if (devinfo->gen >= 6) {
2238 brw_set_src0(p, insn, mrf);
2239 } else {
2240 brw_set_src0(p, insn, brw_null_reg());
2241 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2242 }
2243
2244 brw_set_dp_read_message(p,
2245 insn,
2246 brw_scratch_surface_idx(p),
2247 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2248 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2249 target_cache,
2250 1, /* msg_length */
2251 true, /* header_present */
2252 rlen);
2253 }
2254 }
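
/* Illustrative sketch (not part of the original file): a spill/fill pair
 * built from the two helpers above. Offsets are in bytes and must be
 * OWORD (16-byte) aligned; here one GRF (num_regs == 1) is spilled to
 * scratch offset 64 and read back, with m1 assumed free for the header:
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 1, 64);
 *    ...
 *    brw_oword_block_read_scratch(p, dst, brw_message_reg(1), 1, 64);
 */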
2255
2256 void
2257 gen7_block_read_scratch(struct brw_codegen *p,
2258 struct brw_reg dest,
2259 int num_regs,
2260 unsigned offset)
2261 {
2262 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2263 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2264
2265 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2266
2267 /* The HW requires that the header is present; this is to get the g0.5
2268 * scratch offset.
2269 */
2270 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2271
2272 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2273 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2274 * is 32 bytes, which happens to be the size of a register.
2275 */
2276 offset /= REG_SIZE;
2277 assert(offset < (1 << 12));
2278
2279 gen7_set_dp_scratch_message(p, insn,
2280 false, /* scratch read */
2281 false, /* OWords */
2282 false, /* invalidate after read */
2283 num_regs,
2284 offset,
2285 1, /* mlen: just g0 */
2286 num_regs, /* rlen */
2287 true); /* header present */
2288 }
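
/* Worked example (for illustration): a byte offset of 128 becomes
 * 128 / REG_SIZE == 4 HWORDs, so the 12-bit field covers offsets up to
 * (1 << 12) * 32 bytes == 128KB of scratch per message.
 */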
2289
2290 /**
2291 * Read float[4] vectors from the data port constant cache.
2292 * Location (in buffer) should be a multiple of 16.
2293 * Used for fetching shader constants.
2294 */
2295 void brw_oword_block_read(struct brw_codegen *p,
2296 struct brw_reg dest,
2297 struct brw_reg mrf,
2298 uint32_t offset,
2299 uint32_t bind_table_index)
2300 {
2301 const struct gen_device_info *devinfo = p->devinfo;
2302 const unsigned target_cache =
2303 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2304 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2305 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2306
2307 /* On newer hardware, offset is in units of owords. */
2308 if (devinfo->gen >= 6)
2309 offset /= 16;
2310
2311 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2312
2313 brw_push_insn_state(p);
2314 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2315 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2316 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2317
2318 brw_push_insn_state(p);
2319 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2320 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2321
2322 /* set message header global offset field (reg 0, element 2) */
2323 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2324 brw_MOV(p,
2325 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2326 mrf.nr,
2327 2), BRW_REGISTER_TYPE_UD),
2328 brw_imm_ud(offset));
2329 brw_pop_insn_state(p);
2330
2331 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2332
2333 /* cast dest to a uword[8] vector */
2334 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2335
2336 brw_set_dest(p, insn, dest);
2337 if (devinfo->gen >= 6) {
2338 brw_set_src0(p, insn, mrf);
2339 } else {
2340 brw_set_src0(p, insn, brw_null_reg());
2341 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2342 }
2343
2344 brw_set_dp_read_message(p, insn, bind_table_index,
2345 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2346 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2347 target_cache,
2348 1, /* msg_length */
2349 true, /* header_present */
2350 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2351
2352 brw_pop_insn_state(p);
2353 }
2354
2355
2356 void brw_fb_WRITE(struct brw_codegen *p,
2357 struct brw_reg payload,
2358 struct brw_reg implied_header,
2359 unsigned msg_control,
2360 unsigned binding_table_index,
2361 unsigned msg_length,
2362 unsigned response_length,
2363 bool eot,
2364 bool last_render_target,
2365 bool header_present)
2366 {
2367 const struct gen_device_info *devinfo = p->devinfo;
2368 const unsigned target_cache =
2369 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2370 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2371 brw_inst *insn;
2372 unsigned msg_type;
2373 struct brw_reg dest, src0;
2374
2375 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2376 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2377 else
2378 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2379
2380 if (devinfo->gen >= 6) {
2381 insn = next_insn(p, BRW_OPCODE_SENDC);
2382 } else {
2383 insn = next_insn(p, BRW_OPCODE_SEND);
2384 }
2385 brw_inst_set_compression(devinfo, insn, false);
2386
2387 if (devinfo->gen >= 6) {
2388 /* headerless version, just submit color payload */
2389 src0 = payload;
2390
2391 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2392 } else {
2393 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2394 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2395 src0 = implied_header;
2396
2397 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2398 }
2399
2400 brw_set_dest(p, insn, dest);
2401 brw_set_src0(p, insn, src0);
2402 brw_set_dp_write_message(p,
2403 insn,
2404 binding_table_index,
2405 msg_control,
2406 msg_type,
2407 target_cache,
2408 msg_length,
2409 header_present,
2410 last_render_target,
2411 response_length,
2412 eot,
2413 0 /* send_commit_msg */);
2414 }
2415
2416 brw_inst *
2417 gen9_fb_READ(struct brw_codegen *p,
2418 struct brw_reg dst,
2419 struct brw_reg payload,
2420 unsigned binding_table_index,
2421 unsigned msg_length,
2422 unsigned response_length,
2423 bool per_sample)
2424 {
2425 const struct gen_device_info *devinfo = p->devinfo;
2426 assert(devinfo->gen >= 9);
2427 const unsigned msg_subtype =
2428 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2429 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2430
2431 brw_set_dest(p, insn, dst);
2432 brw_set_src0(p, insn, payload);
2433 brw_set_dp_read_message(p, insn, binding_table_index,
2434 per_sample << 5 | msg_subtype,
2435 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2436 GEN6_SFID_DATAPORT_RENDER_CACHE,
2437 msg_length, true /* header_present */,
2438 response_length);
2439 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2440
2441 return insn;
2442 }
2443
2444 /**
2445 * Texture sample instruction.
2446 * Note: the msg_type plus msg_length values determine exactly what kind
2447 * of sampling operation is performed. See volume 4, page 161 of docs.
2448 */
2449 void brw_SAMPLE(struct brw_codegen *p,
2450 struct brw_reg dest,
2451 unsigned msg_reg_nr,
2452 struct brw_reg src0,
2453 unsigned binding_table_index,
2454 unsigned sampler,
2455 unsigned msg_type,
2456 unsigned response_length,
2457 unsigned msg_length,
2458 unsigned header_present,
2459 unsigned simd_mode,
2460 unsigned return_format)
2461 {
2462 const struct gen_device_info *devinfo = p->devinfo;
2463 brw_inst *insn;
2464
2465 if (msg_reg_nr != -1)
2466 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2467
2468 insn = next_insn(p, BRW_OPCODE_SEND);
2469 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2470
2471 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2472 *
2473 * "Instruction compression is not allowed for this instruction (that
2474 * is, send). The hardware behavior is undefined if this instruction is
2475 * set as compressed. However, compress control can be set to "SecHalf"
2476 * to affect the EMask generation."
2477 *
2478 * No similar wording is found in later PRMs, but there are examples
2479 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2480 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2481 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2482 */
2483 brw_inst_set_compression(devinfo, insn, false);
2484
2485 if (devinfo->gen < 6)
2486 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2487
2488 brw_set_dest(p, insn, dest);
2489 brw_set_src0(p, insn, src0);
2490 brw_set_sampler_message(p, insn,
2491 binding_table_index,
2492 sampler,
2493 msg_type,
2494 response_length,
2495 msg_length,
2496 header_present,
2497 simd_mode,
2498 return_format);
2499 }
2500
2501 /* Adjust the message header's sampler state pointer to
2502 * select the correct group of 16 samplers.
2503 */
2504 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2505 struct brw_reg header,
2506 struct brw_reg sampler_index)
2507 {
2508 /* The "Sampler Index" field can only store values between 0 and 15.
2509 * However, we can add an offset to the "Sampler State Pointer"
2510 * field, effectively selecting a different set of 16 samplers.
2511 *
2512 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2513  * offset, and each sampler state is only 16 bytes, so we can't
2514 * exclusively use the offset - we have to use both.
2515 */
2516
2517 const struct gen_device_info *devinfo = p->devinfo;
2518
2519 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2520 const int sampler_state_size = 16; /* 16 bytes */
2521 uint32_t sampler = sampler_index.ud;
2522
2523 if (sampler >= 16) {
2524 assert(devinfo->is_haswell || devinfo->gen >= 8);
2525 brw_ADD(p,
2526 get_element_ud(header, 3),
2527 get_element_ud(brw_vec8_grf(0, 0), 3),
2528 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2529 }
2530 } else {
2531 /* Non-const sampler array indexing case */
2532 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2533 return;
2534 }
2535
2536 struct brw_reg temp = get_element_ud(header, 3);
2537
2538 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2539 brw_SHL(p, temp, temp, brw_imm_ud(4));
2540 brw_ADD(p,
2541 get_element_ud(header, 3),
2542 get_element_ud(brw_vec8_grf(0, 0), 3),
2543 temp);
2544 }
2545 }
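
/* Worked example (for illustration): for an immediate sampler index of
 * 20, the pointer in g0.3 is advanced by 16 * (20 / 16) * 16 == 256 bytes
 * (one group of 16 sampler states), and the message's 4-bit sampler field
 * is then expected to carry 20 % 16 == 4. The non-immediate path computes
 * the same (index & 0xf0) << 4 byte offset using AND and SHL.
 */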
2546
2547 /* All these variables are pretty confusing - we might be better off
2548 * using bitmasks and macros for this, in the old style. Or perhaps
2549 * just having the caller instantiate the fields in dword3 itself.
2550 */
2551 void brw_urb_WRITE(struct brw_codegen *p,
2552 struct brw_reg dest,
2553 unsigned msg_reg_nr,
2554 struct brw_reg src0,
2555 enum brw_urb_write_flags flags,
2556 unsigned msg_length,
2557 unsigned response_length,
2558 unsigned offset,
2559 unsigned swizzle)
2560 {
2561 const struct gen_device_info *devinfo = p->devinfo;
2562 brw_inst *insn;
2563
2564 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2565
2566 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2567 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2568 brw_push_insn_state(p);
2569 brw_set_default_access_mode(p, BRW_ALIGN_1);
2570 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2571 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2572 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2573 BRW_REGISTER_TYPE_UD),
2574 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2575 brw_imm_ud(0xff00));
2576 brw_pop_insn_state(p);
2577 }
2578
2579 insn = next_insn(p, BRW_OPCODE_SEND);
2580
2581 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2582
2583 brw_set_dest(p, insn, dest);
2584 brw_set_src0(p, insn, src0);
2585 brw_set_src1(p, insn, brw_imm_d(0));
2586
2587 if (devinfo->gen < 6)
2588 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2589
2590 brw_set_urb_message(p,
2591 insn,
2592 flags,
2593 msg_length,
2594 response_length,
2595 offset,
2596 swizzle);
2597 }
2598
2599 struct brw_inst *
2600 brw_send_indirect_message(struct brw_codegen *p,
2601 unsigned sfid,
2602 struct brw_reg dst,
2603 struct brw_reg payload,
2604 struct brw_reg desc)
2605 {
2606 const struct gen_device_info *devinfo = p->devinfo;
2607 struct brw_inst *send;
2608 int setup;
2609
2610 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2611
2612 assert(desc.type == BRW_REGISTER_TYPE_UD);
2613
2614 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2615 * in the indirect case) by its index in the instruction store. The
2616 * pointer returned by next_insn() may become invalid if emitting the SEND
2617 * in the indirect case reallocs the store.
2618 */
2619
2620 if (desc.file == BRW_IMMEDIATE_VALUE) {
2621 setup = p->nr_insn;
2622 send = next_insn(p, BRW_OPCODE_SEND);
2623 brw_set_src1(p, send, desc);
2624
2625 } else {
2626 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2627
2628 brw_push_insn_state(p);
2629 brw_set_default_access_mode(p, BRW_ALIGN_1);
2630 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2631 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2632 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2633
2634 /* Load the indirect descriptor to an address register using OR so the
2635 * caller can specify additional descriptor bits with the usual
2636 * brw_set_*_message() helper functions.
2637 */
2638 setup = p->nr_insn;
2639 brw_OR(p, addr, desc, brw_imm_ud(0));
2640
2641 brw_pop_insn_state(p);
2642
2643 send = next_insn(p, BRW_OPCODE_SEND);
2644 brw_set_src1(p, send, addr);
2645 }
2646
2647 if (dst.width < BRW_EXECUTE_8)
2648 brw_inst_set_exec_size(devinfo, send, dst.width);
2649
2650 brw_set_dest(p, send, dst);
2651 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2652 brw_inst_set_sfid(devinfo, send, sfid);
2653
2654 return &p->store[setup];
2655 }
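
/* Illustrative sketch (not part of the original file): because the
 * indirect descriptor is loaded into a0.0 by ORing with brw_imm_ud(0),
 * callers can keep layering immediate descriptor bits onto the returned
 * setup instruction, exactly as brw_send_indirect_surface_message() does
 * below:
 *
 *    struct brw_inst *setup =
 *       brw_send_indirect_message(p, sfid, dst, payload, desc);
 *    brw_inst_set_mlen(p->devinfo, setup, mlen);
 *    brw_inst_set_rlen(p->devinfo, setup, rlen);
 */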
2656
2657 static struct brw_inst *
2658 brw_send_indirect_surface_message(struct brw_codegen *p,
2659 unsigned sfid,
2660 struct brw_reg dst,
2661 struct brw_reg payload,
2662 struct brw_reg surface,
2663 unsigned message_len,
2664 unsigned response_len,
2665 bool header_present)
2666 {
2667 const struct gen_device_info *devinfo = p->devinfo;
2668 struct brw_inst *insn;
2669
2670 if (surface.file != BRW_IMMEDIATE_VALUE) {
2671 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2672
2673 brw_push_insn_state(p);
2674 brw_set_default_access_mode(p, BRW_ALIGN_1);
2675 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2676 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2677 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2678
2679       /* Mask out invalid bits from the surface index to avoid hangs, e.g. when
2680 * some surface array is accessed out of bounds.
2681 */
2682 insn = brw_AND(p, addr,
2683 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2684 BRW_GET_SWZ(surface.swizzle, 0)),
2685 brw_imm_ud(0xff));
2686
2687 brw_pop_insn_state(p);
2688
2689 surface = addr;
2690 }
2691
2692 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2693 brw_inst_set_mlen(devinfo, insn, message_len);
2694 brw_inst_set_rlen(devinfo, insn, response_len);
2695 brw_inst_set_header_present(devinfo, insn, header_present);
2696
2697 return insn;
2698 }
2699
2700 static bool
2701 while_jumps_before_offset(const struct gen_device_info *devinfo,
2702 brw_inst *insn, int while_offset, int start_offset)
2703 {
2704 int scale = 16 / brw_jump_scale(devinfo);
2705 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2706 : brw_inst_jip(devinfo, insn);
2707 assert(jip < 0);
2708 return while_offset + jip * scale <= start_offset;
2709 }
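
/* Worked example (for illustration): brw_jump_scale() returns 16 on gen8+
 * (JIP in bytes), 2 on gen5-7 (64-bit chunks) and 1 on gen4 (whole
 * instructions), so scale == 16 / br converts JIP back to bytes. A gen7
 * WHILE with jip == -10 therefore jumps back 10 * 8 == 80 bytes, i.e.
 * five instructions.
 */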
2710
2711
2712 static int
2713 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2714 {
2715 int offset;
2716 void *store = p->store;
2717 const struct gen_device_info *devinfo = p->devinfo;
2718
2719 int depth = 0;
2720
2721 for (offset = next_offset(devinfo, store, start_offset);
2722 offset < p->next_insn_offset;
2723 offset = next_offset(devinfo, store, offset)) {
2724 brw_inst *insn = store + offset;
2725
2726 switch (brw_inst_opcode(devinfo, insn)) {
2727 case BRW_OPCODE_IF:
2728 depth++;
2729 break;
2730 case BRW_OPCODE_ENDIF:
2731 if (depth == 0)
2732 return offset;
2733 depth--;
2734 break;
2735 case BRW_OPCODE_WHILE:
2736 /* If the while doesn't jump before our instruction, it's the end
2737 * of a sibling do...while loop. Ignore it.
2738 */
2739 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2740 continue;
2741 /* fallthrough */
2742 case BRW_OPCODE_ELSE:
2743 case BRW_OPCODE_HALT:
2744 if (depth == 0)
2745 return offset;
2746 }
2747 }
2748
2749 return 0;
2750 }
2751
2752 /* There is no DO instruction on gen6, so to find the end of the loop
2753 * we have to see if the loop is jumping back before our start
2754 * instruction.
2755 */
2756 static int
2757 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2758 {
2759 const struct gen_device_info *devinfo = p->devinfo;
2760 int offset;
2761 void *store = p->store;
2762
2763 assert(devinfo->gen >= 6);
2764
2765 /* Always start after the instruction (such as a WHILE) we're trying to fix
2766 * up.
2767 */
2768 for (offset = next_offset(devinfo, store, start_offset);
2769 offset < p->next_insn_offset;
2770 offset = next_offset(devinfo, store, offset)) {
2771 brw_inst *insn = store + offset;
2772
2773 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2774 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2775 return offset;
2776 }
2777 }
2778 assert(!"not reached");
2779 return start_offset;
2780 }
2781
2782 /* After program generation, go back and update the UIP and JIP of
2783 * BREAK, CONT, and HALT instructions to their correct locations.
2784 */
2785 void
2786 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2787 {
2788 const struct gen_device_info *devinfo = p->devinfo;
2789 int offset;
2790 int br = brw_jump_scale(devinfo);
2791 int scale = 16 / br;
2792 void *store = p->store;
2793
2794 if (devinfo->gen < 6)
2795 return;
2796
2797 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2798 brw_inst *insn = store + offset;
2799 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2800
2801 int block_end_offset = brw_find_next_block_end(p, offset);
2802 switch (brw_inst_opcode(devinfo, insn)) {
2803 case BRW_OPCODE_BREAK:
2804 assert(block_end_offset != 0);
2805 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2806 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2807 brw_inst_set_uip(devinfo, insn,
2808 (brw_find_loop_end(p, offset) - offset +
2809 (devinfo->gen == 6 ? 16 : 0)) / scale);
2810 break;
2811 case BRW_OPCODE_CONTINUE:
2812 assert(block_end_offset != 0);
2813 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2814 brw_inst_set_uip(devinfo, insn,
2815 (brw_find_loop_end(p, offset) - offset) / scale);
2816
2817 assert(brw_inst_uip(devinfo, insn) != 0);
2818 assert(brw_inst_jip(devinfo, insn) != 0);
2819 break;
2820
2821 case BRW_OPCODE_ENDIF: {
2822 int32_t jump = (block_end_offset == 0) ?
2823 1 * br : (block_end_offset - offset) / scale;
2824 if (devinfo->gen >= 7)
2825 brw_inst_set_jip(devinfo, insn, jump);
2826 else
2827 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2828 break;
2829 }
2830
2831 case BRW_OPCODE_HALT:
2832 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2833 *
2834 * "In case of the halt instruction not inside any conditional
2835 * code block, the value of <JIP> and <UIP> should be the
2836 * same. In case of the halt instruction inside conditional code
2837 * block, the <UIP> should be the end of the program, and the
2838 * <JIP> should be end of the most inner conditional code block."
2839 *
2840 * The uip will have already been set by whoever set up the
2841 * instruction.
2842 */
2843 if (block_end_offset == 0) {
2844 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2845 } else {
2846 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2847 }
2848 assert(brw_inst_uip(devinfo, insn) != 0);
2849 assert(brw_inst_jip(devinfo, insn) != 0);
2850 break;
2851 }
2852 }
2853 }
2854
2855 void brw_ff_sync(struct brw_codegen *p,
2856 struct brw_reg dest,
2857 unsigned msg_reg_nr,
2858 struct brw_reg src0,
2859 bool allocate,
2860 unsigned response_length,
2861 bool eot)
2862 {
2863 const struct gen_device_info *devinfo = p->devinfo;
2864 brw_inst *insn;
2865
2866 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2867
2868 insn = next_insn(p, BRW_OPCODE_SEND);
2869 brw_set_dest(p, insn, dest);
2870 brw_set_src0(p, insn, src0);
2871 brw_set_src1(p, insn, brw_imm_d(0));
2872
2873 if (devinfo->gen < 6)
2874 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2875
2876 brw_set_ff_sync_message(p,
2877 insn,
2878 allocate,
2879 response_length,
2880 eot);
2881 }
2882
2883 /**
2884 * Emit the SEND instruction necessary to generate stream output data on Gen6
2885 * (for transform feedback).
2886 *
2887 * If send_commit_msg is true, this is the last piece of stream output data
2888 * from this thread, so send the data as a committed write. According to the
2889 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2890 *
2891 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2892 * writes are complete by sending the final write as a committed write."
2893 */
2894 void
2895 brw_svb_write(struct brw_codegen *p,
2896 struct brw_reg dest,
2897 unsigned msg_reg_nr,
2898 struct brw_reg src0,
2899 unsigned binding_table_index,
2900 bool send_commit_msg)
2901 {
2902 const struct gen_device_info *devinfo = p->devinfo;
2903 const unsigned target_cache =
2904 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2905 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2906 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2907 brw_inst *insn;
2908
2909 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2910
2911 insn = next_insn(p, BRW_OPCODE_SEND);
2912 brw_set_dest(p, insn, dest);
2913 brw_set_src0(p, insn, src0);
2914 brw_set_src1(p, insn, brw_imm_d(0));
2915 brw_set_dp_write_message(p, insn,
2916 binding_table_index,
2917 0, /* msg_control: ignored */
2918 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2919 target_cache,
2920 1, /* msg_length */
2921 true, /* header_present */
2922 0, /* last_render_target: ignored */
2923 send_commit_msg, /* response_length */
2924 0, /* end_of_thread */
2925 send_commit_msg); /* send_commit_msg */
2926 }
2927
2928 static unsigned
2929 brw_surface_payload_size(struct brw_codegen *p,
2930 unsigned num_channels,
2931 bool has_simd4x2,
2932 bool has_simd16)
2933 {
2934 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2935 return 1;
2936 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2937 return 2 * num_channels;
2938 else
2939 return num_channels;
2940 }
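
/* Worked example (for illustration): an Align1 SIMD16 untyped read of
 * four channels returns 2 * 4 == 8 GRFs and SIMD8 returns 4, while an
 * Align16 SIMD4x2 access packs the whole vec4 payload into one register.
 */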
2941
2942 static void
2943 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2944 brw_inst *insn,
2945 unsigned atomic_op,
2946 bool response_expected)
2947 {
2948 const struct gen_device_info *devinfo = p->devinfo;
2949 unsigned msg_control =
2950 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2951 (response_expected ? 1 << 5 : 0); /* Return data expected */
2952
2953 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2954 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2955 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2956 msg_control |= 1 << 4; /* SIMD8 mode */
2957
2958 brw_inst_set_dp_msg_type(devinfo, insn,
2959 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2960 } else {
2961 brw_inst_set_dp_msg_type(devinfo, insn,
2962 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2963 }
2964 } else {
2965 brw_inst_set_dp_msg_type(devinfo, insn,
2966 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2967
2968 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2969 msg_control |= 1 << 4; /* SIMD8 mode */
2970 }
2971
2972 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2973 }
2974
2975 void
2976 brw_untyped_atomic(struct brw_codegen *p,
2977 struct brw_reg dst,
2978 struct brw_reg payload,
2979 struct brw_reg surface,
2980 unsigned atomic_op,
2981 unsigned msg_length,
2982 bool response_expected,
2983 bool header_present)
2984 {
2985 const struct gen_device_info *devinfo = p->devinfo;
2986 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2987 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2988 GEN7_SFID_DATAPORT_DATA_CACHE);
2989 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2990 /* Mask out unused components -- This is especially important in Align16
2991 * mode on generations that don't have native support for SIMD4x2 atomics,
2992 * because unused but enabled components will cause the dataport to perform
2993 * additional atomic operations on the addresses that happen to be in the
2994 * uninitialized Y, Z and W coordinates of the payload.
2995 */
2996 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2997 struct brw_inst *insn = brw_send_indirect_surface_message(
2998 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2999 brw_surface_payload_size(p, response_expected,
3000 devinfo->gen >= 8 || devinfo->is_haswell, true),
3001 header_present);
3002
3003 brw_set_dp_untyped_atomic_message(
3004 p, insn, atomic_op, response_expected);
3005 }
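
/* Illustrative usage (not part of the original file): an SSBO-style
 * atomic add that wants the old value back, assuming the payload already
 * holds the address and source operand:
 *
 *    brw_untyped_atomic(p, dst, payload, surface, BRW_AOP_ADD,
 *                       msg_length, true, true);
 *
 * where the trailing booleans are response_expected and header_present.
 */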
3006
3007 static void
3008 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
3009 struct brw_inst *insn,
3010 unsigned num_channels)
3011 {
3012 const struct gen_device_info *devinfo = p->devinfo;
3013 /* Set mask of 32-bit channels to drop. */
3014 unsigned msg_control = 0xf & (0xf << num_channels);
3015
3016 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3017 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3018 msg_control |= 1 << 4; /* SIMD16 mode */
3019 else
3020 msg_control |= 2 << 4; /* SIMD8 mode */
3021 }
3022
3023 brw_inst_set_dp_msg_type(devinfo, insn,
3024 (devinfo->gen >= 8 || devinfo->is_haswell ?
3025 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
3026 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
3027 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3028 }
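
/* Worked example (for illustration): with num_channels == 2 the mask is
 * 0xf & (0xf << 2) == 0xc, i.e. the Z and W channels are flagged as
 * dropped and only X and Y are returned.
 */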
3029
3030 void
3031 brw_untyped_surface_read(struct brw_codegen *p,
3032 struct brw_reg dst,
3033 struct brw_reg payload,
3034 struct brw_reg surface,
3035 unsigned msg_length,
3036 unsigned num_channels)
3037 {
3038 const struct gen_device_info *devinfo = p->devinfo;
3039 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3040 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3041 GEN7_SFID_DATAPORT_DATA_CACHE);
3042 struct brw_inst *insn = brw_send_indirect_surface_message(
3043 p, sfid, dst, payload, surface, msg_length,
3044 brw_surface_payload_size(p, num_channels, true, true),
3045 false);
3046
3047 brw_set_dp_untyped_surface_read_message(
3048 p, insn, num_channels);
3049 }
3050
3051 static void
3052 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
3053 struct brw_inst *insn,
3054 unsigned num_channels)
3055 {
3056 const struct gen_device_info *devinfo = p->devinfo;
3057 /* Set mask of 32-bit channels to drop. */
3058 unsigned msg_control = 0xf & (0xf << num_channels);
3059
3060 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3061 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3062 msg_control |= 1 << 4; /* SIMD16 mode */
3063 else
3064 msg_control |= 2 << 4; /* SIMD8 mode */
3065 } else {
3066 if (devinfo->gen >= 8 || devinfo->is_haswell)
3067 msg_control |= 0 << 4; /* SIMD4x2 mode */
3068 else
3069 msg_control |= 2 << 4; /* SIMD8 mode */
3070 }
3071
3072 brw_inst_set_dp_msg_type(devinfo, insn,
3073 devinfo->gen >= 8 || devinfo->is_haswell ?
3074 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
3075 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
3076 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3077 }
3078
3079 void
3080 brw_untyped_surface_write(struct brw_codegen *p,
3081 struct brw_reg payload,
3082 struct brw_reg surface,
3083 unsigned msg_length,
3084 unsigned num_channels,
3085 bool header_present)
3086 {
3087 const struct gen_device_info *devinfo = p->devinfo;
3088 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3089 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3090 GEN7_SFID_DATAPORT_DATA_CACHE);
3091 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3092 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3093 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3094 WRITEMASK_X : WRITEMASK_XYZW;
3095 struct brw_inst *insn = brw_send_indirect_surface_message(
3096 p, sfid, brw_writemask(brw_null_reg(), mask),
3097 payload, surface, msg_length, 0, header_present);
3098
3099 brw_set_dp_untyped_surface_write_message(
3100 p, insn, num_channels);
3101 }
3102
3103 static unsigned
3104 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
3105 {
3106 switch (bit_size) {
3107 case 8:
3108 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
3109 case 16:
3110 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
3111 case 32:
3112 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
3113 default:
3114 unreachable("Unsupported bit_size for byte scattered messages");
3115 }
3116 }
3117
3118
3119 void
3120 brw_byte_scattered_read(struct brw_codegen *p,
3121 struct brw_reg dst,
3122 struct brw_reg payload,
3123 struct brw_reg surface,
3124 unsigned msg_length,
3125 unsigned bit_size)
3126 {
3127 const struct gen_device_info *devinfo = p->devinfo;
3128 assert(devinfo->gen > 7 || devinfo->is_haswell);
3129 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3130 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3131
3132 struct brw_inst *insn = brw_send_indirect_surface_message(
3133 p, sfid, dst, payload, surface, msg_length,
3134 brw_surface_payload_size(p, 1, true, true),
3135 false);
3136
3137 unsigned msg_control =
3138 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3139
3140 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3141 msg_control |= 1; /* SIMD16 mode */
3142 else
3143 msg_control |= 0; /* SIMD8 mode */
3144
3145 brw_inst_set_dp_msg_type(devinfo, insn,
3146 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3147 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3148 }
3149
3150 void
3151 brw_byte_scattered_write(struct brw_codegen *p,
3152 struct brw_reg payload,
3153 struct brw_reg surface,
3154 unsigned msg_length,
3155 unsigned bit_size,
3156 bool header_present)
3157 {
3158 const struct gen_device_info *devinfo = p->devinfo;
3159 assert(devinfo->gen > 7 || devinfo->is_haswell);
3160 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3161 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3162
3163 struct brw_inst *insn = brw_send_indirect_surface_message(
3164 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3165 payload, surface, msg_length, 0, header_present);
3166
3167 unsigned msg_control =
3168 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3169
3170    if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3171       msg_control |= 1; /* SIMD16 mode */
3172    else
3173       msg_control |= 0; /* SIMD8 mode */
3174
3175 brw_inst_set_dp_msg_type(devinfo, insn,
3176 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3177 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3178 }
3179
3180 static void
3181 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3182 struct brw_inst *insn,
3183 unsigned atomic_op,
3184 bool response_expected)
3185 {
3186 const struct gen_device_info *devinfo = p->devinfo;
3187 unsigned msg_control =
3188 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3189 (response_expected ? 1 << 5 : 0); /* Return data expected */
3190
3191 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3192 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3193 if ((brw_get_default_group(p) / 8) % 2 == 1)
3194 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3195
3196 brw_inst_set_dp_msg_type(devinfo, insn,
3197 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3198 } else {
3199 brw_inst_set_dp_msg_type(devinfo, insn,
3200 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3201 }
3202
3203 } else {
3204 brw_inst_set_dp_msg_type(devinfo, insn,
3205 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3206
3207 if ((brw_get_default_group(p) / 8) % 2 == 1)
3208 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3209 }
3210
3211 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3212 }
3213
3214 void
3215 brw_typed_atomic(struct brw_codegen *p,
3216 struct brw_reg dst,
3217 struct brw_reg payload,
3218 struct brw_reg surface,
3219 unsigned atomic_op,
3220 unsigned msg_length,
3221 bool response_expected,
3222 bool header_present) {
3223 const struct gen_device_info *devinfo = p->devinfo;
3224 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3225 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3226 GEN6_SFID_DATAPORT_RENDER_CACHE);
3227 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3228 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3229 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3230 struct brw_inst *insn = brw_send_indirect_surface_message(
3231 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3232 brw_surface_payload_size(p, response_expected,
3233 devinfo->gen >= 8 || devinfo->is_haswell, false),
3234 header_present);
3235
3236 brw_set_dp_typed_atomic_message(
3237 p, insn, atomic_op, response_expected);
3238 }
3239
3240 static void
3241 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3242 struct brw_inst *insn,
3243 unsigned num_channels)
3244 {
3245 const struct gen_device_info *devinfo = p->devinfo;
3246 /* Set mask of unused channels. */
3247 unsigned msg_control = 0xf & (0xf << num_channels);
3248
3249 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3250 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3251 if ((brw_get_default_group(p) / 8) % 2 == 1)
3252 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3253 else
3254 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3255 }
3256
3257 brw_inst_set_dp_msg_type(devinfo, insn,
3258 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3259 } else {
3260 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3261 if ((brw_get_default_group(p) / 8) % 2 == 1)
3262 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3263 }
3264
3265 brw_inst_set_dp_msg_type(devinfo, insn,
3266 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3267 }
3268
3269 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3270 }
3271
3272 void
3273 brw_typed_surface_read(struct brw_codegen *p,
3274 struct brw_reg dst,
3275 struct brw_reg payload,
3276 struct brw_reg surface,
3277 unsigned msg_length,
3278 unsigned num_channels,
3279 bool header_present)
3280 {
3281 const struct gen_device_info *devinfo = p->devinfo;
3282 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3283 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3284 GEN6_SFID_DATAPORT_RENDER_CACHE);
3285 struct brw_inst *insn = brw_send_indirect_surface_message(
3286 p, sfid, dst, payload, surface, msg_length,
3287 brw_surface_payload_size(p, num_channels,
3288 devinfo->gen >= 8 || devinfo->is_haswell, false),
3289 header_present);
3290
3291 brw_set_dp_typed_surface_read_message(
3292 p, insn, num_channels);
3293 }
3294
static void
brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
                                       struct brw_inst *insn,
                                       unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Set mask of unused channels. */
   unsigned msg_control = 0xf & (0xf << num_channels);

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
         if ((brw_get_default_group(p) / 8) % 2 == 1)
            msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
         else
            msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
   } else {
      if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
         if ((brw_get_default_group(p) / 8) % 2 == 1)
            msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
   }

   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

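/**
 * Emit a typed surface write message, storing num_channels components per
 * slot from payload to the given surface.  The message needs no destination
 * register, so the send is emitted with the null register as destination.
 */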
void
brw_typed_surface_write(struct brw_codegen *p,
                        struct brw_reg payload,
                        struct brw_reg surface,
                        unsigned msg_length,
                        unsigned num_channels,
                        bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN6_SFID_DATAPORT_RENDER_CACHE);
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   /* Mask out unused components -- See comment in brw_untyped_atomic(). */
   const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
                          WRITEMASK_X : WRITEMASK_XYZW);
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, brw_writemask(brw_null_reg(), mask),
      payload, surface, msg_length, 0, header_present);

   brw_set_dp_typed_surface_write_message(
      p, insn, num_channels);
}

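/**
 * Set up the message descriptor for a MEMORY_FENCE message on the given
 * data port SFID.  With commit_enable set, the descriptor is programmed to
 * return a single register of writeback data (note the response length
 * above), which callers can use to stall on completion of the fence.
 */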
static void
brw_set_memory_fence_message(struct brw_codegen *p,
                             struct brw_inst *insn,
                             enum brw_message_target sfid,
                             bool commit_enable)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, sfid,
                              1 /* message length */,
                              (commit_enable ? 1 : 0) /* response length */,
                              true /* header present */,
                              false);

   switch (sfid) {
   case GEN6_SFID_DATAPORT_RENDER_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
      break;
   case GEN7_SFID_DATAPORT_DATA_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
      break;
   default:
      unreachable("Not reached");
   }

   if (commit_enable)
      brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
}

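/**
 * Emit a memory fence through the data cache data port.  On IVB a second
 * fence is emitted through the render cache and the two are stalled on
 * jointly, since typed surface access goes through the render cache there.
 */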
void
brw_memory_fence(struct brw_codegen *p,
                 struct brw_reg dst,
                 enum opcode send_op)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool commit_enable =
      devinfo->gen >= 10 || /* HSD ES # 1404612949 */
      (devinfo->gen == 7 && !devinfo->is_haswell);
   struct brw_inst *insn;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   dst = vec1(dst);

   /* Set dst as destination for dependency tracking; the MEMORY_FENCE
    * message doesn't write anything back.
    */
   insn = next_insn(p, send_op);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need
       * to flush it too.  Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, send_op);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of
       * the first to trigger a pipeline stall -- this way future render
       * and data cache messages will be properly ordered with respect to
       * past data and render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}

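/**
 * Emit a pixel interpolator (PI) SEND message with the given message type
 * (mode) and payload, returning the requested per-slot interpolation data
 * in dest.
 */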
void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_get_default_exec_size(p);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}

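/**
 * Emit code writing the index of the first enabled channel of the current
 * execution mask into dst.  The mask argument provides the dispatch (or
 * vector) mask, used on Gen8+ to filter out channels that were never
 * dispatched by the hardware; see the ce0 comments below.
 */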
void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless there.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector)
             * mask to mask off those channels which were never dispatched
             * by the hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value
          * of ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(
            brw_inst_flag_reg_nr(devinfo, p->current),
            brw_inst_flag_subreg_nr(devinfo, p->current));

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move here
          * if it weren't for the hardware bug that causes channel enables
          * to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking enabled in order to find out which of the channels is
          * active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

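/**
 * Emit code broadcasting the value of component idx of src into all
 * channels of dst.  Uses register-indirect addressing in ALIGN1 mode and a
 * flag-predicated SEL in ALIGN16 (SIMD4x2) mode, unless the source is
 * already uniform or the index is an immediate, in which case a simple MOV
 * suffices.
 */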
void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
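         /* The shift amount below is log2 of the byte stride between
          * consecutive components: e.g. for a 4-byte type with an hstride
          * encoding of 1 (one element) it is 2 + 1 - 1 == 2, so the address
          * register ends up holding idx * 4 bytes.
          */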
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation
             *    is integer DWord multiply, indirect addressing must not
             *    be used."
             *
             * To work around this issue, we do two integer MOVs instead of
             * one 64-bit MOV.  Because no double value should ever cross a
             * register boundary, it's safe to use the immediate offset in
             * the indirect here to handle adding 4 bytes to the offset and
             * avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating-point rounding mode by updating the control register
 * field defined at bits cr0.0[5:6].  This function supports changing among
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations.  Only RTNE and RTZ rounding are enabled in NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

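   /* The AND below can be skipped when programming RTZ, since both mode
    * bits are set and the following OR sets them regardless of their
    * previous value; likewise the OR is skipped for RTNE, where both bits
    * are clear.
    */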
   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to ‘switch’ for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}